code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="9yqK1X26qvmA"
import imutils
import numpy as np
import cv2
from google.colab.patches import cv2_imshow
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
# + id="6MS6thN2q0O4"
def take_photo(filename='photo.jpg', quality=0.8):
    """Capture one webcam frame in the browser (Google Colab) and save it.

    Injects JavaScript that opens the user's camera, grabs a single frame
    as a JPEG data URL, then decodes the base64 payload and writes the
    bytes to *filename*.  Returns the saved filename.

    NOTE(review): the "wait for Capture click" promise inside the JS is
    commented out, so the photo is taken immediately, without any user
    interaction.
    """
    photo_js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
//await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
    display(photo_js)
    # eval_js returns a 'data:image/jpeg;base64,...' URL; keep only the payload
    data_url = eval_js('takePhoto({})'.format(quality))
    jpeg_bytes = b64decode(data_url.split(',')[1])
    with open(filename, 'wb') as out:
        out.write(jpeg_bytes)
    return filename
# + [markdown] id="lTr1-26Dwx2e"
# ADD YOUR DESIRED URL
# + id="HNgWVzDfq5TW"
from IPython.display import Javascript
def open_web():
    """Open a browser window/tab at the configured URL (Colab).

    Make sure the browser allows pop-ups for the notebook origin,
    otherwise the window.open() call is silently blocked.
    """
    target = 'your favourite URL'
    display(Javascript('window.open("{url}");'.format(url=target)))
# + [markdown] id="SIaRr3DWw2O4"
# FILL THE INFORMATION
#
# To send e-mail from Gmail you must enable "less secure apps" at the link below, but be aware of the security implications: "https://www.google.com/settings/security/lesssecureapps"
# + id="YKmC3TLsveQe"
def send_mail():
    """Send a notification e-mail through Gmail's SMTP-over-SSL endpoint.

    Uses the placeholder credentials below; fill in gmail_user,
    gmail_password and mail_to before use.  The SMTP connection is always
    closed, even if login or sendmail raises (the original leaked the
    connection on failure).
    """
    import smtplib
    gmail_user = 'your_email'
    gmail_password = '<PASSWORD>'
    mail_from = gmail_user
    mail_to = 'destination_mailadress'
    mail_subject = ''
    mail_message_body = ''
    # NOTE(review): RFC 5322 expects a blank line between the headers and
    # the body; this message runs them together -- confirm the recipient's
    # server renders it as intended.
    mail_message = '''\
From: %s
To: %s
Subject: %s
%s
''' % (mail_from, mail_to, mail_subject, mail_message_body)
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        server.login(gmail_user, gmail_password)
        server.sendmail(mail_from, mail_to, mail_message)
    finally:
        # guarantee the socket is released even when login/sendmail fails
        server.close()
# + [markdown] id="h08WYm45w5fv"
# Uncomment the send_mail() or open_web() call inside the detection loop below, depending on your choice, and have fun
#
#
# + id="PGu3jaNWq1e8"
import webbrowser
import time

# Download the face-detector model files once (Colab shell commands).
# !wget -N https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
# !wget -N https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel

# FIX: load the Caffe SSD face detector ONCE, outside the capture loop --
# the original re-read both model files on every iteration.
print("[INFO] loading model...")
prototxt = 'deploy.prototxt'
model = 'res10_300x300_ssd_iter_140000.caffemodel'
net = cv2.dnn.readNetFromCaffe(prototxt, model)

face_found = False
while not face_found:
    image_file = take_photo()
    time.sleep(5)  # sample rate: one capture every 5 seconds; tune as you wish
    image = cv2.imread(image_file)
    # resize to a maximum width of 400 pixels (original resized twice)
    image = imutils.resize(image, width=400)
    (h, w) = image.shape[:2]
    print(w, h)
    cv2_imshow(image)
    # SSD expects a 300x300 BGR image with the training-set mean subtracted
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    print("[INFO] computing object detections...")
    net.setInput(blob)
    detections = net.forward()
    for i in range(0, detections.shape[2]):
        # confidence (probability) associated with this detection
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            # Uncomment the action you want to trigger on detection:
            #send_mail()
            #open_web()
            face_found = True
|
realtime_face_recognition.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import pandas as pd

# Files to load
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"

# Read the school and student data files into DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)

# Combine into a single dataset, one row per student with the school's
# columns attached.  FIX: the original passed on=["school_name",
# "school_name"], duplicating the join key; a single key is correct.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
# -
# Look at the merged DataFrame
school_data_complete.head()
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Calculate the percentage of students who passed math **and** reading (% Overall Passing)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# I wasn't sure whether I should look at a summary of type 'District' schools or of all schools in the district. After rereading the instructions, I realized it should be (here, at least) all schools in the district. However, you'll see a similar analysis of type 'District' schools below. It can be ignored. I do use one value later on in the homework.
# make list of all schools in the district
schools_list = school_data_complete["school_name"].unique()
schools_list
# count items in list of schools
schools_total_num = len(schools_list)
schools_total_num
# count total number of students
student_total_num = len(school_data_complete["Student ID"])
student_total_num
# total budget: take each school's budget once (first row per school), then sum.
# FIX: the original summed .unique() budget values, which would under-count if
# two schools happened to have identical budgets (a hazard its own comment noted).
budget_tot = school_data_complete[["school_name", "budget"]].groupby(["school_name"]).first()
total_budget_all_schools = budget_tot["budget"].sum()
'{:20,.2f}'.format(total_budget_all_schools)
# average math score for ALL schools
average_math_score_all = school_data_complete["math_score"].mean()
average_math_score_all
# percentage of passing math scores.
# FIX: "passing" is defined as 70 or greater (see instructions above), so use
# >= 70; the original used > 70, dropping students who scored exactly 70.
passing_math_pct_all = (school_data_complete["math_score"] >= 70).sum() / student_total_num
'{:.0%}'.format(passing_math_pct_all)
# average reading score for ALL schools
average_reading_score_all = school_data_complete["reading_score"].mean()
average_reading_score_all
# percentage of passing reading scores
passing_reading_pct_all = (school_data_complete["reading_score"] >= 70).sum() / student_total_num
'{:.0%}'.format(passing_reading_pct_all)
# percentage of students passing BOTH math and reading
passing_both_pct_all = ((school_data_complete["reading_score"] >= 70)
                        & (school_data_complete["math_score"] >= 70)).sum() / student_total_num
'{:.0%}'.format(passing_both_pct_all)
# +
# SUPPLEMENTARY: the same metrics restricted to schools of type 'District'
# (kept because some of these values are reused later in the notebook)
district_schools = school_data.loc[school_data["type"] == "District"]
district_schools
# -
# count of type = District schools
number_district_schools = len(district_schools)
number_district_schools
# merged rows for 'District'-type schools only
district_schools_students_merge = school_data_complete.loc[school_data_complete["type"] == "District"]
district_schools_students_merge
# total number of students in District-type schools
total_district_students = len(district_schools_students_merge["Student ID"])
total_district_students
# total District budget: district_schools has one row per school, so a plain
# sum over 'budget' is correct (no need for the fragile .unique() approach)
district_schools_total_budget = district_schools["budget"].sum()
district_schools_total_budget
# average math score, District schools only
average_math_score_district = district_schools_students_merge["math_score"].mean()
average_math_score_district
# percentage of passing math scores (passing = 70 or greater).
# FIX: the original divided District pass counts by student_total_num (the
# count of ALL students); the denominator must be the District student count.
passing_math_pct_district = (district_schools_students_merge["math_score"] >= 70).sum() / total_district_students
# average reading score, District schools only
average_reading_score_district = district_schools_students_merge["reading_score"].mean()
average_reading_score_district
# percentage of passing reading scores
passing_reading_pct_district = (district_schools_students_merge["reading_score"] >= 70).sum() / total_district_students
# percentage of students passing BOTH math and reading
passing_both_pct_district = ((district_schools_students_merge["reading_score"] >= 70)
                             & (district_schools_students_merge["math_score"] >= 70)).sum() / total_district_students
'{:.0%}'.format(passing_both_pct_district)
# Summary DataFrame pulling together the all-schools and District-only metrics.
# Formatter helpers keep the column definitions readable.
money = '{:20,.2f}'.format
pct = '{:.0%}'.format
summary_df = pd.DataFrame({
    "# Total Schools": [schools_total_num],
    "# District Schools": number_district_schools,
    "Total # Students": student_total_num,
    "# Stu.s in District Schools": total_district_students,
    "Total Budget All Schools": money(total_budget_all_schools),
    "Total Budget District Schools": money(district_schools_total_budget),
    "Avg. Math ALL": average_math_score_all,
    "Passing % ALL": pct(passing_math_pct_all),
    "Avg. Math DISTRICT": average_math_score_district,
    "Passing % District": pct(passing_math_pct_district),
    "Avg. Reading ALL": average_reading_score_all,
    "Passing Reading ALL": pct(passing_reading_pct_all),
    "Avg. Reading DISTRICT": average_reading_score_district,
    "Passing % DISTRICT": pct(passing_reading_pct_district),
    "ALL: pass R&M %": pct(passing_both_pct_all),
    "DISTRICT: pass R&M %": pct(passing_both_pct_district),
})
summary_df
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * % Overall Passing (The percentage of students that passed math **and** reading.)
#
# * Create a dataframe to hold the above results
# Per-school metrics for the School Summary table.  Each bare expression is a
# jupytext cell output for eyeballing intermediate results.
school_data_complete
#create new groupby object grouped by each school for all schools and pull total students
all_school_group = school_data_complete.groupby(["school_name"])
all_byschool_total = all_school_group["Student ID"].count()
all_byschool_total
#experiment with groupby: equivalent one-liner for the count above
school_data_complete[["school_name", "Student ID"]].groupby(["school_name"]).count()
# +
#create new groupby object grouped by each school of type 'district' and pull total students
district_school_group = district_schools_students_merge.groupby(['school_name'])
district_school_group
district_byschool_total = district_school_group["Student ID"].count()
district_byschool_total
# -
#create new series with budget for all schools
# .first() works because every row of a school carries the same budget value
all_byschool_budget = all_school_group["budget"].first()
all_byschool_budget
#calculate by school average math score
all_byschool_math = all_school_group["math_score"].mean()
all_byschool_math
#better way? -- same numbers, returned as a DataFrame instead of a Series
math_avg_df = school_data_complete[["school_name", "math_score"]].groupby(["school_name"]).mean()
math_avg_df
#calculate by school average reading score
all_byschool_reading = all_school_group["reading_score"].mean()
all_byschool_reading
#better way?
reading_avg_df = school_data_complete[["school_name", "reading_score"]].groupby(["school_name"]).mean()
reading_avg_df
#calculate by school % passing math
# NOTE: >69.9 on integer scores is equivalent to >= 70, i.e. the "70 or
# greater" passing rule (the district summary above used > 70 instead).
all_byschool_passmath_count = school_data_complete.loc[school_data_complete["math_score"]>69.9]
all_byschool_passmath_count = all_byschool_passmath_count[["school_name", "math_score"]].groupby("school_name").count()
all_byschool_passmath_count["math_score"]
all_byschool_passmath_pct = all_byschool_passmath_count['math_score']/all_byschool_total
all_byschool_passmath_pct
# all_byschool_count_total =school_data_complete[["Student ID"]].groupby(["school_name"]).count()
# all_byschool_math_pct = all_byschool_passmath_count/all_byschool_count_total
# all_byschool_math_pct
# same pattern for reading
all_byschool_passread_count = school_data_complete.loc[school_data_complete["reading_score"]>69.9]
all_byschool_passread_count = all_byschool_passread_count[["school_name", "reading_score"]].groupby("school_name").count()
all_byschool_passread_count["reading_score"]
all_byschool_passread_pct = all_byschool_passread_count['reading_score']/all_byschool_total
all_byschool_passread_pct
#calculate by school % passing math and reading (both conditions at once)
all_byschool_passboth_count = school_data_complete.loc[(school_data_complete["math_score"]>69.9)&(school_data_complete["reading_score"]>69.9)]
all_byschool_passboth_counts = all_byschool_passboth_count.groupby("school_name").count()
all_byschool_passboth_pct = all_byschool_passboth_counts["Student ID"]/ all_byschool_total
all_byschool_passboth_pct
# ## Top Performing Schools (By % Overall Passing)
# * Sort and display the top five performing schools by % overall passing.
overall_ranked_desc = all_byschool_passboth_pct.sort_values(ascending=False)
top_passing = overall_ranked_desc.head(5)
top_passing
# ## Bottom Performing Schools (By % Overall Passing)
# * Sort and display the five worst-performing schools by % overall passing.
overall_ranked_asc = all_byschool_passboth_pct.sort_values(ascending=True)
bottom_passing = overall_ranked_asc.head(5)
bottom_passing
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# look again at data
school_data_complete.head()
# Average math score for each grade level within each school.
# (No per-grade conditionals needed: a two-level groupby covers every
# (school, grade) pair in one pass.)
school_data_complete.groupby(["school_name", "grade"])[["math_score"]].mean()
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
school_data_complete.groupby(["school_name", "grade"])[["reading_score"]].mean()
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# +
# Build a per-school summary frame prior to binning
sum_bybudget_df = pd.DataFrame()
sum_bybudget_df["Avg. Math"] = all_byschool_math
sum_bybudget_df["Avg. Read"] = all_byschool_reading
sum_bybudget_df["%math"] = all_byschool_passmath_pct
sum_bybudget_df["%read"] = all_byschool_passread_pct
sum_bybudget_df["%both"] = (all_byschool_passmath_pct + all_byschool_passread_pct)/2
# note: this differs from all_byschool_passboth_pct above. It is *not* the %
# who pass both tests -- rather it approximates (or, when counts are equal,
# equals) the likelihood of a single test (math or reading) being passed.
sum_bybudget_df["budget"] = all_byschool_budget
sum_bybudget_df
# -
# Bin schools into four spending ranges
bins = [0, 750000, 1500000, 2250000, 5000000]
group_labels = ["0 to 750k", "751k to 1.5m", "1.51m to 2.25m", "above 2.25m"]
sum_bybudget_df["budget_group"] = pd.cut(sum_bybudget_df["budget"], bins, labels=group_labels)
# FIX: the original grouped by (budget_group, school_name) and summed, which
# merely re-listed each school under its bin.  Group by the spending range
# alone and average each metric to actually summarize per range.
groupedbybudget = sum_bybudget_df.groupby("budget_group")[["Avg. Math", "Avg. Read", "%math", "%read", "%both"]].mean()
groupedbybudget
#still very confused about groupby objects. how to optimally display them?
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# NOTE(review): this is an alias, not a copy -- both names refer to the same
# DataFrame, so columns added below also appear on sum_bybudget_df.
sum_bysize_df = sum_bybudget_df
# NOTE(review): the result of this drop is discarded, so the budget columns
# remain in place -- the later 'by type' cell relies on dropping them itself.
sum_bysize_df.drop(columns=["budget", "budget_group"])
sum_bysize_df["Size"] = all_byschool_total
sum_bysize_df
# bins and labels for school size
sizebins = [0, 1499, 2499, 3499, 10000]
size_labels = ["0 to 1499", "1500 to 2499", "2500 to 3499", "3500+"]
sum_bysize_df["size_group"] = pd.cut(sum_bysize_df["Size"], sizebins, labels=size_labels)
groupedbysize = sum_bysize_df.groupby(["size_group", "school_name"])
groupedbysize.sum().dropna()
# ## Scores by School Type
# * Perform the same operations as above, based on school type
sum_bytype_df = sum_bysize_df
# one row per school carrying its type ('District' / 'Charter')
schooltypes = school_data_complete[["school_name", "type"]].groupby(["school_name"]).first()
sum_bytype_df["type"] = schooltypes["type"]
sum_bytype_df = sum_bytype_df.drop(["Size", "size_group", "budget", "budget_group"], axis=1)
# FIX: the original named this variable `display`, shadowing IPython's
# display() function for the rest of the notebook.
grouped_by_type = sum_bytype_df.groupby(["type", "school_name"])
grouped_by_type.mean().dropna()
|
PyCitySchools/PyCitySchools_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Two Variables
#
# Use a geom to represent data points. Use the geom’s aesthetic
# properties to represent variables. Each function returns a layer.
#
# +
from datetime import datetime
import pandas as pd
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
# +
# Demo datasets: mpg (car fuel economy), midwest (US counties), and
# economics (US monthly time series; 'date' parsed as datetime).
mpg_df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
mw_df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/midwest.csv')
ec_df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/economics.csv', \
parse_dates=['date'])
# keep only observations after 2000-01-01
ec_df = ec_df[ec_df.date > datetime(2000, 1, 1)]
# -
# Continuous X, Continuous Y
# --------------------------
#
#
# Base plot: city vs highway fuel economy; each `p + geom_*()` cell renders
# the same data with a different geom.
p = ggplot(mpg_df, aes('cty', 'hwy'))
p + geom_point()
p + geom_jitter()
p + geom_smooth()
p + geom_text(aes(label='fl'))
# Discrete X, Continuous Y
# ------------------------
#
#
# NOTE(review): the aggregated column is hwy.sum() but is labelled 'count' --
# presumably intended as a per-class total; verify against the docs page.
hwy_df = mpg_df.groupby('class').hwy.sum().to_frame('count').reset_index()
hwy_df.head(2)
ggplot(hwy_df, aes('class', 'count')) + geom_bar(stat='identity')
ggplot(mpg_df, aes('class', 'hwy')) + geom_boxplot()
# Discrete X, Discrete Y
# ----------------------
#
#
# jitter spreads overlapping categorical points so density is visible
ggplot(mpg_df, aes('fl', 'drv')) + geom_jitter(width=.3, height=.3)
# Continuous Bivariate Distribution
# ---------------------------------
#
#
p = ggplot(mpg_df, aes('cty', 'hwy'))
p + geom_bin2d(binwidth=(2, 2))
p + geom_density2d(aes(color='..group..'))
p + geom_density2df(aes(fill='..group..'), color='white', size=.5)
# Continuous Function
# -------------------
#
#
# time series of unemployment since 2000
p = ggplot(ec_df, aes('date', 'unemploy')) + scale_x_datetime()
p + geom_area()
p + geom_line()
p + geom_step()
# Visualizing Error
# -----------------
#
#
# min / median / max highway mpg per vehicle class
class_df = mpg_df.groupby('class').hwy.agg(['min', 'median', 'max']).reset_index()
class_df.head(2)
p = ggplot(class_df, aes(x='class'))
p + geom_crossbar(aes(ymin='min', middle='median', ymax='max'))
p + geom_errorbar(aes(ymin='min', ymax='max'))
p + geom_linerange(aes(ymin='min', ymax='max'))
p + geom_pointrange(aes(ymin='min', y='median', ymax='max'))
# Maps
# ----
#
#
# geocode() performs a network lookup for US state boundaries
states = geocode('state', mw_df.state.unique(), scope='US').get_boundaries(9)
states.head(2)
ggplot() + geom_map(data=states, tooltips=layer_tooltips().line('@{found name}'))
|
docs/_downloads/fd00915bcdcadd81eb8737d79e25aec1/plot__2_variables.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

np.set_printoptions(precision=2)

# Binary classification task: "is this digit a 3?"
digits = load_digits()
X, y = digits.data, digits.target == 3
X_train, X_test, y_train, y_test = train_test_split(X, y)
# -
from sklearn.svm import SVC
# FIX: cross_val_score moved from the removed sklearn.cross_validation
# module to sklearn.model_selection (scikit-learn >= 0.20).
from sklearn.model_selection import cross_val_score

cross_val_score(SVC(), X_train, y_train)

from sklearn.dummy import DummyClassifier
# Majority-class baseline to compare against.
# FIX: strategy must be passed as a keyword argument in modern scikit-learn.
cross_val_score(DummyClassifier(strategy="most_frequent"), X_train, y_train)
# +
from sklearn.metrics import roc_curve, roc_auc_score

# Plot one ROC curve per gamma; legend shows accuracy and AUC.
for gamma in [.01, .1, 1]:
    plt.xlabel("FPR")
    plt.ylabel("TPR")
    svm = SVC(gamma=gamma).fit(X_train, y_train)
    decision_function = svm.decision_function(X_test)
    fpr, tpr, _ = roc_curve(y_test, decision_function)
    acc = svm.score(X_test, y_test)
    # reuse the already-computed decision_function instead of recomputing it
    auc = roc_auc_score(y_test, decision_function)
    plt.plot(fpr, tpr, label="acc:%.2f auc:%.2f" % (acc, auc))

plt.legend(loc="best")
# -
# FIX: the private sklearn.metrics.scorer module (and the SCORERS dict) was
# removed; the public helper lists all built-in scorer names.
from sklearn.metrics import get_scorer_names
get_scorer_names()
# # Defining your own scoring function
def my_accuracy(est, X, y):
    """Custom scorer: the fraction of samples where est.predict(X) equals y."""
    predictions = est.predict(X)
    return np.mean(predictions == y)
from sklearn.svm import LinearSVC
# Compare the estimator's default scorer with the custom one; the two rows
# should match, since LinearSVC.score is also mean accuracy.
print(cross_val_score(LinearSVC(random_state=0), X, y, cv=5))
print(cross_val_score(LinearSVC(random_state=0), X, y, cv=5, scoring=my_accuracy))
|
Advanced Scoring.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nov05/DS-Unit-2-Applied-Modeling/blob/master/module4-model-interpretation/permutation_importances_partial_dependence_plots.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="akTDTClM5CD_" colab_type="code" colab={}
# modified by nov05 on 2019-07-25
# DS-Unit-2-Applied-Modeling
# module4-model-interpretation/
# + [markdown] colab_type="text" id="-hTictxWYih7"
# _Lambda School Data Science_
#
# This sprint, your project is Caterpillar Tube Pricing: Predict the prices suppliers will quote for industrial tube assemblies.
#
# # Permutation Importances, Partial Dependence Plots
#
#
# #### Objectives
# - Get and interpret permutation importances
# - Visualize and interpret partial dependence plots
# + [markdown] colab_type="text" id="LoxNYFBXYih9"
# ### Links
# - [Kaggle / <NAME>: Machine Learning Explainability](https://www.kaggle.com/learn/machine-learning-explainability)
# - [Permutation Importance](https://www.kaggle.com/dansbecker/permutation-importance)
# - [Partial Dependence Plots](https://www.kaggle.com/dansbecker/partial-plots)
# - [<NAME>: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/)
# - [(Permutation) Importance](https://christophm.github.io/interpretable-ml-book/feature-importance.html)
# - [Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) + [animated explanation](https://twitter.com/ChristophMolnar/status/1066398522608635904)
# - Random Forest Feature Importances
# - [Ando Saabas: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/)
# - [<NAME>, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html)
#
# ### Libraries
# - [eli5](https://github.com/TeamHG-Memex/eli5): `conda install -c conda-forge eli5` / `pip install eli5`
# - [PDPbox](https://github.com/SauceCat/PDPbox): `pip install pdpbox`
# - [category_encoders](https://github.com/scikit-learn-contrib/categorical-encoding): `conda install -c conda-forge category_encoders` / `pip install category_encoders`
# + colab_type="code" id="BFQMky3CYih-" colab={}
# # !pip install eli5 pdpbox category_encoders
# + [markdown] colab_type="text" id="mDthquUBYiiB"
# ### Library quirks to work around
#
# 1. Some of these libraries don't work with pipelines.
#
# 2. eli5 PermutationImportance + xgboost + pandas didn't work. The bug seems to be fixed now, but if you have problems, [there's a work-around:](https://www.kaggle.com/dansbecker/permutation-importance#392299)
#
# > Important note here for anyone trying to use eli5's PermutationImportance on XGBoost estimators, currently you need to train your models using ".values or .as_matrix()" with you input data (X and Y), otherwise PermutationImportance won't work, [source](https://github.com/TeamHG-Memex/eli5/issues/256).
#
# 3. PDPbox _only_ works with pandas.
#
# 4. With PDPBox version <= 0.20, using the `pdp_interact_plot` function, `plot_type='contour'` gets an error, but `plot_type='grid'` works. [This issue](https://github.com/SauceCat/PDPbox/issues/40) will be fixed in the next release of PDPbox.
#
# **[(Data science is often about putting square pegs in round holes!)](https://www.youtube.com/watch?v=ry55--J4_VQ)**
#
# ### Two types of model explanations today:
#
# #### 1. Global model explanation: all features in relation to each other
# - Feature Importances: _Default, fastest, good for first estimates_
# - Drop-Column Importances: _The best in theory, but much too slow in practice_
# - Permutaton Importances: _A good compromise!_
#
# #### 2. Global model explanation: individual feature(s) in relation to target
# - Partial Dependence plots
#
# ### Third type of model explanation next week:
#
# #### 3. Individual prediction explanation
# - Shapley Values
#
# _Note that the coefficients from a linear model give you all three types of explanations!_
# + [markdown] colab_type="text" id="0lF0mw_yYiiC"
# ### Get data
#
#
# #### Option 1. Kaggle web UI
#
# Sign in to Kaggle and go to the [Caterpillar Tube Pricing](https://www.kaggle.com/c/caterpillar-tube-pricing) competition. Go to the Data page. After you have accepted the rules of the competition, use the download buttons to download the data.
#
#
# #### Option 2. Kaggle API
#
# Follow these [instructions](https://github.com/Kaggle/kaggle-api).
#
# #### Option 3. GitHub Repo — LOCAL
#
# If you are working locally:
#
# 1. Clone the [GitHub repo](https://github.com/LambdaSchool/DS-Unit-2-Applied-Modeling/tree/master/data/caterpillar) locally. The data is in the repo, so you don't need to download it separately.
#
# 2. Unzip the file `caterpillar-tube-pricing.zip` which is in the data folder of your local repo.
#
# 3. Unzip the file `data.zip`.
#
# 4. Run the cell below to assign a constant named `SOURCE`, a string that points to the location of the data on your local machine. The rest of the code in the notebook will use this constant.
# + colab_type="code" id="_xw6zxTlYiiC" colab={}
# SOURCE = '../data/caterpillar/caterpillar-tube-pricing/competition_data/'
# + [markdown] id="nB47ymOZ41Kj" colab_type="text"
# #### Option 4. GitHub Repo — COLAB
#
# If you are working on Google Colab, uncomment and run these cells, to download the data, unzip it, and assign a constant that points to the location of the data.
# + id="KX85jx8p41Kk" colab_type="code" colab={}
# # !wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/caterpillar/caterpillar-tube-pricing.zip
# + id="Tv27u7nu41Km" colab_type="code" colab={}
# # !unzip caterpillar-tube-pricing.zip
# + id="Ea1RFXZm41Kp" colab_type="code" colab={}
# # !unzip data.zip
# + id="HSHzYqS141Kr" colab_type="code" colab={}
SOURCE = 'competition_data/'
# + [markdown] colab_type="text" id="zuzgmaBfYiiI"
# # Example 🚜
# + [markdown] colab_type="text" id="9kMjLbFtYiiI"
# ***We considered some questions about this relational data...***
#
# ### `bill_of_materials`
#
# is formatted like this:
# + colab_type="code" id="aKwLXivaYiiJ" outputId="904d8311-cb84-4712-bdf6-1f4c46ba466d" colab={"base_uri": "https://localhost:8080/", "height": 223}
import pandas as pd
materials = pd.read_csv(SOURCE + 'bill_of_materials.csv')
materials.head()
# + [markdown] colab_type="text" id="zXcuBcU8YiiK"
# #### Would this be a better representation?
#
# Could pandas melt, crosstab, and other functions help reshape the data like this?
# + [markdown] colab_type="text" id="aJYsGhWHYiiL"
# | Crosstab | C-1622 | C-1629 | C-1312 | C-1624 | C-1631 | C-1641 | Distinct | Total |
# |:--------:|:------:|--------|--------|--------|--------|--------|----------|-------|
# | TA-00001 | 2 | 2 | 0 | 0 | 0 | 0 | 2 | 4 |
# | TA-00002 | 0 | 0 | 2 | 0 | 0 | 0 | 1 | 2 |
# | TA-00003 | 0 | 0 | 2 | 0 | 0 | 0 | 1 | 2 |
# | TA-00004 | 0 | 0 | 2 | 0 | 0 | 0 | 1 | 2 |
# | TA-00005 | 0 | 0 | 0 | 1 | 1 | 1 | 3 | 3 |
# + [markdown] colab_type="text" id="mj9DvANeYiiL"
# ### `components`
#
# Contains three representations of each component, in order of decreasing cardinality / granularity:
#
# - `component_id`
# - `name`
# - `component_type_id`
#
# What are the pros & cons of these different representations?
# + colab_type="code" id="b64SKvg2YiiM" outputId="cd628c08-d74f-4ead-8b6c-cc7ee3f836a4" colab={"base_uri": "https://localhost:8080/", "height": 172}
components = pd.read_csv(SOURCE + 'components.csv')
components.describe()
# + [markdown] colab_type="text" id="pSmjDeecYiiO"
# ***Here's how we could do some of this data wrangling...***
# + [markdown] colab_type="text" id="OgVDM0OeYiiP"
# ### 1a. Get a tidy list of the component id's in each tube assembly
# + colab_type="code" id="ZF8YqBtBYiiQ" outputId="9de34070-2e84-489d-ad51-403e8ab2a396" colab={"base_uri": "https://localhost:8080/", "height": 357}
import pandas as pd
materials = pd.read_csv(SOURCE + 'bill_of_materials.csv')
assembly_components = materials.melt(id_vars='tube_assembly_id',
value_vars=[f'component_id_{n}' for n in range(1,9)])
assembly_components = (assembly_components
.sort_values(by='tube_assembly_id')
.dropna()
.rename(columns={'value': 'component_id'}))
assembly_components.head(10)
# + [markdown] colab_type="text" id="Sl0PLnTsYiiS"
# ### 1b. Merge with component types
# + colab_type="code" id="RlPI75QtYiiT" outputId="d5ad0265-aa26-446e-c551-a52e105b6d9d" colab={"base_uri": "https://localhost:8080/", "height": 357}
components = pd.read_csv(SOURCE + 'components.csv')
assembly_component_types = assembly_components.merge(components, how='left')
assembly_component_types.head(10)
# + [markdown] colab_type="text" id="NJRtPHkeYiiV"
# ### 1c. Make a crosstab of the component types for each assembly (one-hot encoding)
# + colab_type="code" id="FreT2NuUYiiV" outputId="bdc91486-a6bd-4439-c1ba-55f581f7997a" colab={"base_uri": "https://localhost:8080/", "height": 237}
table = pd.crosstab(assembly_component_types['tube_assembly_id'],
assembly_component_types['component_type_id'])
table = table.reset_index()
table.columns.name = ''
print(table.shape)
table.head()
# + [markdown] colab_type="text" id="uwoM0NnpYiiW"
# ### 2a. Most of the component files have a "weight" feature:
# + colab_type="code" id="90lIJ4HoYiiX" outputId="1b806e25-064f-401b-f8a2-bf9ce77b4d87" colab={"base_uri": "https://localhost:8080/", "height": 618}
from glob import glob
import pandas as pd
def search_column(name):
for path in glob(SOURCE + '*.csv'):
df = pd.read_csv(path)
if name in df.columns:
print(path, df.shape)
print(df.columns.tolist(), '\n')
# Report which raw CSVs carry a 'weight' column.
search_column('weight')
# + [markdown] colab_type="text" id="YxHns29uYiiY"
# ### 2b. Most of the component files have "orientation" & "unique_feature" binary features
# + colab_type="code" id="mhzYhf-qYiiZ" outputId="272146dc-7024-46b1-c72d-820021ab5a00" colab={"base_uri": "https://localhost:8080/", "height": 70}
comp_threaded = pd.read_csv(SOURCE + 'comp_threaded.csv')
comp_threaded['orientation'].value_counts()
# + colab_type="code" id="vHKqnEhuYiia" outputId="618458c8-181d-48f9-fd4a-e2e281ab8577" colab={"base_uri": "https://localhost:8080/", "height": 70}
comp_threaded['unique_feature'].value_counts()
# + [markdown] colab_type="text" id="eeO9v37rYiib"
# ### 2c. Read all the component files and concatenate them together
# + colab_type="code" id="nbxr2-7qYiic" colab={}
comp = pd.concat((pd.read_csv(path) for path in glob(SOURCE + 'comp_*.csv')), sort=False)
columns = ['component_id', 'component_type_id', 'orientation', 'unique_feature', 'weight']
comp = comp[columns]
comp['orientation'] = (comp['orientation']=='Yes').astype(int)
comp['unique_feature'] = (comp['unique_feature']=='Yes').astype(int)
comp['weight'] = comp['weight'].fillna(comp['weight'].median())
# + colab_type="code" id="ZZP-2fXIhB5X" outputId="7b6b1a28-b3e8-46a8-bfbf-e820f8b8d7de" colab={"base_uri": "https://localhost:8080/", "height": 203}
comp.head()
# + [markdown] colab_type="text" id="UKvxK-39Yiid"
# ### 2d. Engineer features, aggregated for all components in a tube assembly
# - Components total
# - Components distinct
# - Orientation
# - Unique Feature
# - Weight
# + colab_type="code" id="OM6mkggPYiie" outputId="918fa992-488c-4d27-ba3d-f541319496c6" colab={"base_uri": "https://localhost:8080/", "height": 223}
materials['components_total'] = sum(materials[f'quantity_{n}'].fillna(0) for n in range(1,9))
materials['components_distinct'] = sum(materials[f'component_id_{n}'].notnull().astype(int) for n in range(1,9))
materials['orientation'] = 0
materials['unique_feature'] = 0
materials['weight'] = 0
for n in range(1,9):
materials = materials.merge(comp, left_on=f'component_id_{n}', right_on='component_id',
how='left', suffixes=('', f'_{n}'))
for col in materials:
if 'orientation' in col or 'unique_feature' in col or 'weight' in col:
materials[col] = materials[col].fillna(0)
materials['orientation'] = sum(materials[f'orientation_{n}'] for n in range(1,9))
materials['unique_feature'] = sum(materials[f'unique_feature_{n}'] for n in range(1,9))
materials['weight'] = sum(materials[f'weight_{n}'] for n in range(1,9))
materials.head()
# + colab_type="code" id="aRG6ALKLhhYz" outputId="eb67b808-d80e-4fb7-bf53-5b2fec789a75" colab={"base_uri": "https://localhost:8080/", "height": 220}
features = ['tube_assembly_id', 'orientation', 'unique_feature', 'weight',
'components_total', 'components_distinct', 'component_id_1']
materials = materials[features]
print(materials.shape)
materials.head()
# + [markdown] colab_type="text" id="Xo4IsiIUYiif"
# ### 3. Read tube data
# + colab_type="code" id="iY8ixDQJYiig" outputId="6bcfc827-4f55-4c46-81dd-d79dc9d2b34c" colab={"base_uri": "https://localhost:8080/", "height": 203}
tube = pd.read_csv(SOURCE + 'tube.csv')
tube.head()
# + [markdown] colab_type="text" id="OepDqvsRYiii"
# ### 4. Merge all this data with train, validation, and test sets
# + colab_type="code" id="FCPcx32kYiii" colab={}
from sklearn.model_selection import train_test_split
# Read data
trainval = pd.read_csv(SOURCE + 'train_set.csv')
test = pd.read_csv(SOURCE + 'test_set.csv')
# Split into train & validation sets
# All rows for a given tube_assembly_id should go in either train or validation
trainval_tube_assemblies = trainval['tube_assembly_id'].unique()
train_tube_assemblies, val_tube_assemblies = train_test_split(
trainval_tube_assemblies, random_state=42)
train = trainval[trainval.tube_assembly_id.isin(train_tube_assemblies)]
val = trainval[trainval.tube_assembly_id.isin(val_tube_assemblies)]
# Wrangle train, validation, and test sets
def wrangle(X):
    """Feature-engineer one split (train/val/test) of the pricing data.

    Adds quote-date year/month, merges in the component-type crosstab
    (`table`), the aggregated material features (`materials`) and the tube
    specs (`tube`) — all keyed on tube_assembly_id — fills remaining NaNs
    with 0, and drops tube_assembly_id (the goal is to predict unseen
    assemblies).  Returns a new DataFrame; the input is not modified.
    """
    X = X.copy()
    # Engineer date features, then drop the raw timestamp.
    quote_date = pd.to_datetime(X['quote_date'], infer_datetime_format=True)
    X['quote_date_year'] = quote_date.dt.year
    X['quote_date_month'] = quote_date.dt.month
    X = X.drop(columns='quote_date')
    # Merge in the engineered tables one at a time.
    X = X.merge(table, how='left')
    X = X.merge(materials, how='left')
    X = X.merge(tube, how='left')
    X = X.fillna(0)
    # Drop tube_assembly_id because our goal is to predict unknown assemblies
    return X.drop(columns='tube_assembly_id')
# Apply identical wrangling to all three splits.
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# + [markdown] colab_type="text" id="FUjdGlFcYiik"
# ### 5. Arrange X matrix and y vector (log-transformed)
# + colab_type="code" id="3uHk6IOzYiil" colab={}
import numpy as np
target = 'cost'
X_train = train.drop(columns=target)
X_val = val.drop(columns=target)
X_test = test.drop(columns='id')
y_train = train[target]
y_val = val[target]
y_train_log = np.log1p(y_train)
y_val_log = np.log1p(y_val)
# + [markdown] colab_type="text" id="d2qhAjycYiin"
# ### 6. Use xgboost to fit and evaluate model
# + colab_type="code" id="syxuHpu2Yiin" outputId="ea1a1220-8e5c-44bf-c87f-e059d1fcb3e4" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import category_encoders as ce
from xgboost import XGBRegressor
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
encoder = ce.OrdinalEncoder()
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
eval_set = [(X_train_encoded, y_train_log),
(X_val_encoded, y_val_log)]
model = XGBRegressor(n_estimators=2000, n_jobs=-1)
model.fit(X_train_encoded, y_train_log,
eval_set=eval_set, eval_metric='rmse', early_stopping_rounds=50)
# + colab_type="code" id="Yi0j5IgvYiip" outputId="88bacf2c-038c-4816-a802-6555e6d23062" colab={"base_uri": "https://localhost:8080/", "height": 270}
# %matplotlib inline
import matplotlib.pyplot as plt
results = model.evals_result()
train_rmse = results['validation_0']['rmse']
val_rmse = results['validation_1']['rmse']
epoch = range(len(train_rmse))
plt.plot(epoch, train_rmse, label='Train')
plt.plot(epoch, val_rmse, label='Validation')
plt.legend();
# + [markdown] colab_type="text" id="Kops860zYiis"
# ### 7. Generate submission for Kaggle
#
# Scores for this submission:
#
# - Public: 0.26083
# - Private: 0.28639
# + colab_type="code" id="Sn77EPtAYiis" colab={}
def generate_submission(estimator, X_test, filename):
y_pred_log = estimator.predict(X_test)
y_pred = np.expm1(y_pred_log) # Convert from log-dollars to dollars
submission = pd.read_csv(SOURCE + '../sample_submission.csv')
submission['cost'] = y_pred
submission.to_csv(filename, index=False)
# Encode the test features with the already-fitted encoder, then write
# the submission file.
X_test_encoded = encoder.transform(X_test)
generate_submission(model, X_test_encoded, 'submission.csv')
# + [markdown] colab_type="text" id="7HOayKBOYiit"
# # MODEL INTERPRETATION
# + [markdown] colab_type="text" id="4bRhsxENYiiu"
# ## 1a. Feature Importances
# - Global explanation: all features in relation to each other
# - Default, fastest, good for first estimates
#
# [Here's some food for thought](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/) about feature importances:
#
# >**When the dataset has two (or more) correlated features, then from the point of view of the model, any of these correlated features can be used as the predictor, with no concrete preference of one over the others.** But once one of them is used, the importance of others is significantly reduced since effectively the impurity they can remove is already removed by the first feature. As a consequence, they will have a lower reported importance. This is not an issue when we want to use feature selection to reduce overfitting, since it makes sense to remove features that are mostly duplicated by other features. But when interpreting the data, it can lead to the incorrect conclusion that one of the variables is a strong predictor while the others in the same group are unimportant, while actually they are very close in terms of their relationship with the response variable.
#
# For more information, see [Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html).
# + colab_type="code" id="BNVm6f7mYiiu" outputId="e62aa6cc-456e-491e-c3a6-d1e75e75d560" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Get feature importances
importances = pd.Series(model.feature_importances_, X_train_encoded.columns)
# Plot feature importances
n = len(X_train_encoded.columns)
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
# + [markdown] colab_type="text" id="y8HzLcCBYiiv"
# ## 1b. Drop-Column Importance
# - Global explanation: all features in relation to each other
# - The best in theory, but much too slow in practice
# + colab_type="code" id="DQAOlERnYiiw" colab={"base_uri": "https://localhost:8080/", "height": 140} outputId="a2d0ad8e-fa52-4c0e-a32e-5b395fa9be75"
# %%time
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
def rmse(y_true, y_pred):
    """Root-mean-squared error (equals RMSLE when inputs are log1p-scaled)."""
    return mean_squared_error(y_true, y_pred) ** 0.5
# Drop-column importance for a single column: retrain without the column
# and compare validation scores.  Accurate but far too slow to repeat for
# every feature.
column = 'annual_usage'
# Fit without column
model = XGBRegressor(n_estimators=1000, n_jobs=-1)
model.fit(X_train_encoded.drop(columns=column), y_train_log)
y_pred_log = model.predict(X_val_encoded.drop(columns=column))
score_without = rmse(y_val_log, y_pred_log)
print(f'Validation RMSLE without {column}:', score_without)
# Fit with column
model = XGBRegressor(n_estimators=1000, n_jobs=-1)
model.fit(X_train_encoded, y_train_log)
y_pred_log = model.predict(X_val_encoded)
score_with = rmse(y_val_log, y_pred_log)
print(f'Validation RMSLE with {column}:', score_with)
# Compare the error with & without column
# Positive difference => removing the column hurt, i.e. the column helps.
print(f'Drop-Column Importance for {column}:', score_without - score_with)
# + [markdown] colab_type="text" id="6Vu39wGkYiix"
# ## 1c. Permutation Importance
# - Global explanation: all features in relation to each other
# - A good compromise!
#
# Permutation Importance is a compromise between Feature Importance based on impurity reduction (which is the fastest) and Drop Column Importance (which is the "best.")
#
# [The ELI5 library documentation explains,](https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html)
#
# > Importance can be measured by looking at how much the score (accuracy, F1, R^2, etc. - any score we’re interested in) decreases when a feature is not available.
# >
# > To do that one can remove feature from the dataset, re-train the estimator and check the score. But it requires re-training an estimator for each feature, which can be computationally intensive. ...
# >
# >To avoid re-training the estimator we can remove a feature only from the test part of the dataset, and compute score without using this feature. It doesn’t work as-is, because estimators expect feature to be present. So instead of removing a feature we can replace it with random noise - feature column is still there, but it no longer contains useful information. This method works if noise is drawn from the same distribution as original feature values (as otherwise estimator may fail). The simplest way to get such noise is to shuffle values for a feature, i.e. use other examples’ feature values - this is how permutation importance is computed.
# >
# >The method is most suitable for computing feature importances when a number of columns (features) is not huge; it can be resource-intensive otherwise.
# + [markdown] colab_type="text" id="GYCiEx7zYiiy"
# ### Do-It-Yourself way, for intuition
# + colab_type="code" id="TksOf_n2Yiiy" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="7ba0f925-3fe5-4cea-9b23-43e23ede620e"
feature = 'quantity'
X_val_encoded[feature].head()
# + colab_type="code" id="-plycNueYii0" colab={"base_uri": "https://localhost:8080/", "height": 175} outputId="153b1089-3bf5-4e7a-93a8-cf2ed2fd9867"
X_val_encoded[feature].describe()
# + colab_type="code" id="OF32BQb1Yii1" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="1bbee730-3862-4b83-9a19-3f2f69e57405"
X_val_permuted = X_val_encoded.copy()
X_val_permuted[feature] = np.random.permutation(X_val_encoded[feature])
X_val_permuted[feature].head()
# + colab_type="code" id="HuyRGVpGYii2" colab={"base_uri": "https://localhost:8080/", "height": 175} outputId="256ca710-8554-44ed-c525-bcd08aa3198e"
X_val_permuted['quantity'].describe()
# + colab_type="code" id="Zj8rAwMhYii4" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="42feb8d3-c53c-47a8-92a0-2d9aa1c86154"
y_pred_log = model.predict(X_val_permuted)
score_permuted = rmse(y_val_log, y_pred_log)
print(f'Validation RMSLE with {feature}:', score_with)
print(f'Validation RMSLE with {feature} permuted:', score_permuted)
print(f'Permutation Importance:', score_permuted - score_with)
# + colab_type="code" id="m5ID-5jtYii5" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c3aaeb9e-e050-4551-8b9c-165f88e108d8"
feature = 'annual_usage'
X_val_permuted = X_val_encoded.copy()
X_val_permuted[feature] = np.random.permutation(X_val_encoded[feature])
y_pred_log = model.predict(X_val_permuted)
score_permuted = rmse(y_val_log, y_pred_log)
print(f'Validation RMSLE with {feature}:', score_with)
print(f'Validation RMSLE with {feature} permuted:', score_permuted)
print(f'Permutation Importance:', score_permuted - score_with)
# + [markdown] colab_type="text" id="0LYk19SNYii7"
# ### With eli5 library
#
# For more documentation on using this library, see:
# - [eli5.sklearn.PermutationImportance](https://eli5.readthedocs.io/en/latest/autodocs/sklearn.html#eli5.sklearn.permutation_importance.PermutationImportance)
# - [eli5.show_weights](https://eli5.readthedocs.io/en/latest/autodocs/eli5.html#eli5.show_weights)
# - [scikit-learn user guide, `scoring` parameter](https://scikit-learn.org/stable/modules/model_evaluation.html#the-scoring-parameter-defining-model-evaluation-rules)
# + colab_type="code" id="zdoMW4sCYii7" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4195f0e9-b43f-4708-e794-b9bd0a9bcd78"
import eli5
from eli5.sklearn import PermutationImportance
permuter = PermutationImportance(model, scoring='neg_mean_squared_error',
cv='prefit', n_iter=2, random_state=42)
permuter.fit(X_val_encoded, y_val_log)
feature_names = X_val_encoded.columns.tolist()
eli5.show_weights(permuter, top=None, feature_names=feature_names)
# + [markdown] colab_type="text" id="q07yW9k-Yii8"
# ### We can use importances for feature selection
#
# For example, we can remove features with zero importance. The model trains faster and the score does not decrease.
# + colab_type="code" id="tZrPFyEMYii9" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0e961cbe-1393-4fa1-822a-f2f42e5a1016"
print('Shape before removing features:', X_train.shape)
# + colab_type="code" id="A_NbDgh3Yii-" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2edd2611-b04e-4323-b9b0-60ec8b6b8b14"
mask = permuter.feature_importances_ > 0
features = X_train.columns[mask]
X_train = X_train[features]
print('Shape after removing features:', X_train.shape)
X_val = X_val[features]
# + colab_type="code" id="Erg_PdpoYii_" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="320e2521-6873-424a-84ed-400f80641246"
encoder = ce.OrdinalEncoder()
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
model = XGBRegressor(n_estimators=1000, n_jobs=-1)
model.fit(X_train_encoded, y_train_log)
y_pred_log = model.predict(X_val_encoded)
print(f'Validation RMSLE', rmse(y_val_log, y_pred_log))
# + [markdown] colab_type="text" id="QOUzbLKpYijB"
# ## 2. Partial Dependence Plots
#
# PDPbox
# - [Gallery](https://github.com/SauceCat/PDPbox#gallery)
# - [API Reference: pdpbox.pdp.pdp_isolate](https://pdpbox.readthedocs.io/en/latest/pdp_isolate.html)
# - [API Reference: pdpbox.pdp.pdp_plot](https://pdpbox.readthedocs.io/en/latest/pdp_plot.html)
# + colab_type="code" id="DeH3lw0CYijB" colab={"base_uri": "https://localhost:8080/", "height": 582} outputId="76083ed4-d98e-4adf-d5e1-5e084996bbfc"
from pdpbox.pdp import pdp_isolate, pdp_plot
feature = 'quantity'
isolated = pdp_isolate(
model=model,
dataset=X_val_encoded,
model_features=X_val_encoded.columns,
feature=feature
)
pdp_plot(isolated, feature_name=feature);
# + colab_type="code" id="FWVwA6viYijD" colab={"base_uri": "https://localhost:8080/", "height": 582} outputId="2177841e-1449-4402-f525-e3bbb803f419"
feature = 'weight'
isolated = pdp_isolate(
model=model,
dataset=X_val_encoded,
model_features=X_val_encoded.columns,
feature=feature
)
pdp_plot(isolated, feature_name=feature);
# + [markdown] colab_type="text" id="oMYkx9fAYijF"
# ### Explaining Partial Dependence Plots
# + [markdown] colab_type="text" id="5O6s9jisYijI"
# From [PDPbox documentation](https://pdpbox.readthedocs.io/en/latest/):
#
#
# >**The common headache**: When using black box machine learning algorithms like random forest and boosting, it is hard to understand the relations between predictors and model outcome. For example, in terms of random forest, all we get is the feature importance. Although we can know which feature is significantly influencing the outcome based on the importance calculation, it really sucks that we don’t know in which direction it is influencing. And in most of the real cases, the effect is non-monotonic. We need some powerful tools to help understanding the complex relations between predictors and model prediction.
# + [markdown] colab_type="text" id="zN2C8QTMYijI"
# [Animation by Christoph Molnar](https://twitter.com/ChristophMolnar/status/1066398522608635904), author of [_Interpretable Machine Learning_](https://christophm.github.io/interpretable-ml-book/)
#
# > Partial dependence plots show how a feature affects predictions of a Machine Learning model on average.
# > 1. Define grid along feature
# > 2. Model predictions at grid points
# > 3. Line per data instance -> ICE (Individual Conditional Expectation) curve
# > 4. Average curves to get a PDP (Partial Dependence Plot)
# + [markdown] colab_type="text" id="LOu_hUU6YijJ"
# ### Partial Dependence Plots with 2 features, to see interactions
#
# PDPbox
# - [Gallery](https://github.com/SauceCat/PDPbox#gallery)
# - [API Reference: pdpbox.pdp.pdp_interact](https://pdpbox.readthedocs.io/en/latest/pdp_interact.html)
# - [API Reference: pdpbox.pdp.pdp_interact_plot](https://pdpbox.readthedocs.io/en/latest/pdp_interact_plot.html)
#
# Be aware of a bug in PDPBox version <= 0.20:
# - With the `pdp_interact_plot` function, `plot_type='contour'` gets an error, but `plot_type='grid'` works
# - This will be fixed in the next release of PDPbox: https://github.com/SauceCat/PDPbox/issues/40
# + colab_type="code" id="edL2X3QtYijJ" colab={"base_uri": "https://localhost:8080/", "height": 585} outputId="55205767-f22a-42ce-dd06-cd7002335bfa"
from pdpbox.pdp import pdp_interact, pdp_interact_plot
features = ['quantity', 'weight']
interaction = pdp_interact(
model=model,
dataset=X_val_encoded,
model_features=X_val_encoded.columns,
features=features
)
pdp_interact_plot(interaction, plot_type='grid', feature_names=features);
# + colab_type="code" id="3yRW-0wIYijL" colab={"base_uri": "https://localhost:8080/", "height": 585} outputId="d2c55910-1610-4a41-912d-9054b3dabe32"
features = ['annual_usage', 'quote_date_year']
interaction = pdp_interact(
model=model,
dataset=X_val_encoded,
model_features=X_val_encoded.columns,
features=features
)
pdp_interact_plot(interaction, plot_type='grid', feature_names=features);
# + [markdown] colab_type="text" id="zP1RIqnfYijM"
# # ASSIGNMENT
# - Use the Caterpillar dataset (or _any_ dataset of your choice). **Make these 3 types of visualizations** for model interpretation:
# - Feature Importances
# - Permutation Importances
# - Partial Dependence Plot
# - **Share at least 1 of your visualizations on Slack.**
# - Commit your notebook to your fork of the GitHub repo.
#
#
# ## Stretch Goals
# - Improve your scores on Kaggle! Look at [Kaggle Kernels](https://www.kaggle.com/c/caterpillar-tube-pricing/kernels) for ideas. **Share your best features and techniques on Slack.**
# - Try the [Skater library](https://oracle.github.io/Skater/index.html), which is an another option to get permutation importances and partial dependence plots.
# - Can you figure out partial dependence plots with categorical features?
# - Check out the links at the top of this notebook to learn more about how to interpret "black box" machine learning models.
|
module4-model-interpretation/permutation_importances_partial_dependence_plots.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# # Introduction to DataFrames
# **[Bogumił Kamiński](http://bogumilkaminski.pl/about/), July 16, 2019**
using DataFrames
using BenchmarkTools
# ## Performance tips
# ### Access by column number is faster than by name
# NB: `$x` interpolates the variable into the @btime benchmark so setup
# cost is not measured.
x = DataFrame(rand(5, 1000))
@btime $x[!, 500];
@btime $x.x500;
# ### When working with data `DataFrame` use barrier functions or type annotation
# +
using Random
# Columns of a DataFrame are type-unstable when accessed directly, so the
# hot loop below cannot be specialized by the compiler.
function f_bad() # this function will be slow
    Random.seed!(1); x = DataFrame(rand(1000000,2))
    y, z = x[!, 1], x[!, 2]
    p = 0.0
    for i in 1:nrow(x)
        p += y[i]*z[i]
    end
    p
end
@btime f_bad();
# if you run @code_warntype f_bad() then you notice
# that Julia does not know column types of `DataFrame`
# +
# solution 1 is to use barrier function (it should be possible to use it in almost any code)
# f_inner receives concretely-typed vectors, so the loop specializes.
function f_inner(y,z)
    p = 0.0
    for i in 1:length(y)
        p += y[i]*z[i]
    end
    p
end
function f_barrier() # extract the work to an inner function
    Random.seed!(1); x = DataFrame(rand(1000000,2))
    f_inner(x[!, 1], x[!, 2])
end
using LinearAlgebra
function f_inbuilt() # or use inbuilt function if possible
    Random.seed!(1); x = DataFrame(rand(1000000,2))
    dot(x[!, 1], x[!, 2])
end
@btime f_barrier();
@btime f_inbuilt();
# +
# solution 2 is to provide the types of extracted columns
# it is simpler but there are cases in which you will not know these types
# This example assumes that you have DataFrames master at least from August 31, 2018
function f_typed()
    Random.seed!(1); x = DataFrame(rand(1000000,2))
    y::Vector{Float64}, z::Vector{Float64} = x[!, 1], x[!, 2]
    p = 0.0
    for i in 1:nrow(x)
        p += y[i]*z[i]
    end
    p
end
@btime f_typed();
# -
# ### Consider using delayed `DataFrame` creation technique
# also notice the difference in performance between `DataFrame` and `DataFrame!` (copying vs non-copying data frame creation)
# +
# f1/f1a: fill columns while they already live inside a DataFrame;
# f2/f2a: fill plain vectors first, build the DataFrame at the end.
# The `!` variants wrap the vectors without copying.
function f1()
    x = DataFrame!([Vector{Float64}(undef, 10^4) for i in 1:100]) # we work with a DataFrame directly
    for c in 1:ncol(x)
        d = x[!, c]
        for r in 1:nrow(x)
            d[r] = rand()
        end
    end
    x
end

function f1a()
    x = DataFrame([Vector{Float64}(undef, 10^4) for i in 1:100]) # we work with a DataFrame directly
    for c in 1:ncol(x)
        d = x[!, c]
        for r in 1:nrow(x)
            d[r] = rand()
        end
    end
    x
end

function f2()
    x = Vector{Any}(undef, 100)
    for c in 1:length(x)
        d = Vector{Float64}(undef, 10^4)
        for r in 1:length(d)
            d[r] = rand()
        end
        x[c] = d
    end
    DataFrame!(x) # we delay creation of DataFrame after we have our job done
end

function f2a()
    x = Vector{Any}(undef, 100)
    for c in 1:length(x)
        d = Vector{Float64}(undef, 10^4)
        for r in 1:length(d)
            d[r] = rand()
        end
        x[c] = d
    end
    DataFrame(x) # we delay creation of DataFrame after we have our job done
end

@btime f1();
@btime f1a();
@btime f2();
@btime f2a();
# -
# ### You can add rows to a `DataFrame` in place and it is fast
# +
x = DataFrame(rand(10^6, 5))
y = DataFrame(transpose(1.0:5.0))
z = [1.0:5.0;]
@btime vcat($x, $y); # creates a new DataFrame - slow
@btime append!($x, $y); # in place - fast

x = DataFrame(rand(10^6, 5)) # reset to the same starting point
@btime push!($x, $z); # add a single row in place - fast
# -
# ### Allowing `missing` as well as `categorical` slows down computations
# +
using StatsBase

# Benchmark countmap on raw vs categorical data, with and without
# `missing` in the element type.
function test(data) # uses countmap function to test performance
    println(eltype(data))
    x = rand(data, 10^6)
    y = categorical(x)
    println(" raw:")
    @btime countmap($x)
    println(" categorical:")
    @btime countmap($y)
    nothing
end

test(1:10)
test([randstring() for i in 1:10])
test(allowmissing(1:10))
test(allowmissing([randstring() for i in 1:10]))
# -
# ### When aggregating use column selector and prefer categorical or pooled array grouping variable
df = DataFrame(x=rand('a':'d', 10^7), y=1);
@btime by($df, :x, v -> sum(v.y)) # traditional syntax, slow
@btime by($df, :x, :y=>sum) # use column selector
categorical!(df, :x);
@btime by($df, :x, :y=>sum)
using PooledArrays
df.x = PooledArray{Char}(df.x)
@btime by($df, :x, :y=>sum)
# ### Use views instead of materializing a new DataFrame
x = DataFrame(rand(100, 1000))
@btime $x[1:1, :]
@btime $x[1, :]
@btime view($x, 1:1, :)
@btime $x[1:1, 1:20]
@btime $x[1, 1:20]
@btime view($x, 1:1, 1:20)
|
11_performance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import forge
from puzzle.puzzlepedia import puzzlepedia
puzzle = puzzlepedia.parse("""
import itertools
name in {Beth, Charles, David, Frank, Jessica, Karen, Taylor}
novel in {Chair, Folly, Pigeons, Forget, Moon, Nile, Comedy}
rank in range(1, 7+1)
#1:
ages = {
Beth: variable(30, 60, 'Beth age'),
Charles: variable(30, 60, 'Charles age'),
David: variable(30, 60, 'David age'),
Frank: variable(30, 60, 'Frank age'),
Jessica: variable(30, 60, 'Jessica age'),
Karen: variable(30, 60, 'Karen age'),
Taylor: variable(30, 60, 'Taylor age'),
}
# No longer needed:
# Numberjack.AllDiff([v.value for v in ages.values()])
Beth.rank == sum(ages[Beth] >= a for a in ages.values())
Charles.rank == sum(ages[Charles] >= a for a in ages.values())
David.rank == sum(ages[David] >= a for a in ages.values())
Frank.rank == sum(ages[Frank] >= a for a in ages.values())
Jessica.rank == sum(ages[Jessica] >= a for a in ages.values())
Karen.rank == sum(ages[Karen] >= a for a in ages.values())
Taylor.rank == sum(ages[Taylor] >= a for a in ages.values())
#2: 4th oldest is 48. Satisfied below (see #7, #8).
# any(a == 48 for a in ages.values())
# sum(a > 48 for a in ages.values()) == 3
#3a: Original:
#abs(ages[Jessica] - ages[Taylor]) == 1
#def skip(a, b, x, y):
# return ((x is ages[a]) and (y is ages[b])) or ((x is ages[b]) and (y is ages[a]))
#
#for a, b in filter(lambda i: skip(Jessica, Taylor, *i) == False, itertools.combinations(ages.values(), 2)):
# abs(a - b) > 1
#ages[Jessica] <= 50
#ages[Taylor] <= 50
#3b: Simplified given #7, #8:
{Jessica, Taylor} == {2, 3}
#4a: (Original) Frank-Taylor are the widest apart, when sorted by age.
# all(Taylor[i - 1] or Taylor[i + 1] for i in range(2, 6+1) if Frank[i])
# if Frank == 1: Taylor == 2
# if Frank == 7: Taylor == 6
# This implies that any pair of names will be >= that distance.
# ft_diff = abs(ages[Frank] - ages[Taylor])
# Frank <-> Taylor <-> Jessica.
# any(abs(ages[Beth] - ages[a]) < ft_diff for a in [Charles, David, Frank, Jessica, Karen])
# any(abs(ages[Charles] - ages[a]) < ft_diff for a in [Beth, David, Frank, Jessica, Karen])
# any(abs(ages[David] - ages[a]) < ft_diff for a in [Beth, Charles, Frank, Jessica, Karen])
# any(abs(ages[Frank] - ages[a]) < ft_diff for a in [Beth, Charles, David, Karen])
# any(abs(ages[Jessica] - ages[a]) < ft_diff for a in [Beth, Charles, David, Karen])
# any(abs(ages[Karen] - ages[a]) < ft_diff for a in [Beth, Charles, David, Frank, Jessica])
# NB: Skip taylor.
#4b: Simplified given #7, #8:
{<NAME>} == {1, 2}
#5:
{Forget, Comedy} == {1, 2}
#6:
{Chair, Nile} == {6, 7}
#7: There exists a guest who 2/3rds the age of another, also: 3/4, 4/5.
#8: There exists a guest whos age has reversed digits of another.
#all_pairs = list(itertools.combinations(range(30, 60+1), 2))
#targets23 = list(filter(lambda x: x[0] * 3 == x[1] * 2, all_pairs))
#targets34 = list(filter(lambda x: x[0] * 4 == x[1] * 3, all_pairs))
#targets45 = list(filter(lambda x: x[0] * 5 == x[1] * 4, all_pairs))
# All pairs:
# [(30, 45), (32, 48), (34, 51), (36, 54), (38, 57), (40, 60)]
# [(30, 40), (33, 44), (36, 48), (39, 52), (42, 56), (45, 60)]
# [(32, 40), (36, 45), (40, 50), (44, 55), (48, 60)]
# [ (34, 43), (45, 54) ] (Rule #8)
# Observations:
# 1. The middle number is 48. Finding 3 pairs with 3 numbers over 48 is not
# possible unless one of the pairs contains 48.
# (This is also implied as 48 does not satisfy rule #8).
# 2. If 48 was the larger of the pair then there would not be enough remaining
# pairs with numbers larger than 48. 48 is the smaller of the pairs.
# > (48, 60) is one of the pairs.
# 3. With 48 and 60 consumed, these are remaining:
# [(30, 45), (34, 51), (36, 54), (38, 57) ]
# [(30, 40), (33, 44), (39, 52), (42, 56) ]
# [ (48, 60)]
# [ (34, 43), (45, 54) ] (Rule #8)
# 4. There needs to be a pair of numbers <50 per #3. Eliminates:
# 36/54 (no 35, 37)
# 30/40 (no 31, 39, 41)
# [(30, 45), (34, 51), (38, 57)]
# [(33, 44), (39, 52), (42, 56)]
# [ (48, 60)]
# [(xx, 43), (xx, 54) ] (Rule #8)
# 5. If the "reversed digits" was 54 then (30, 45) must be in play. Per #3 that
# gives: 54 + (30, 45) + (33, 44) and (48, 60).
# 30, 33, 44, 45, 48, 54, 60.
# > ...and 48 is not the 4th oldest.
# > Reversed digits are "43" which pairs with (34, 51).
# [ (34, 51) ]
# [(33, 44), (39, 52), (42, 56)]
# [ (48, 60)]
# [(xx, 43) ] (Rule #8)
# 6. Per #3 that requires (33, 44) or (42, 56).
# 43 + (34, 51) + (33, 44) + (48, 60): Violates #3.
# 43 + (34, 51) + (42, 56) + (48, 60): Okay.
# Sorted: 34, 42, 43, 48, 51, 56, 60.
gcc(ages.values(), {43, 34, 51, 42, 56, 48, 60})
Pigeons != 3
#9:
Beth.rank > Karen.rank
Charles.rank > Karen.rank
Karen.rank > Pigeons.rank
#10:
Taylor != Comedy
Karen != Folly
#11:
Beth.rank < Chair.rank
for k, v in ages.items():
print(k, v)
""")
# -
|
src/puzzle/examples/mim/p7_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
# Directory of per-seed result .txt files produced by the benchmark runs.
f_dir = "../../out/ZINC_sparse_LapPE_LN/results/"
# +
# Accumulators — one entry per result file, aligned by index.
file = []
dataset = []
model = []
layer = []
params = []
acc_test = []
acc_train = []
convergence = []
total_time = []
epoch_time = []

# Scrape each results .txt by matching fixed line prefixes and slicing off
# the prefix/suffix.  NOTE(review): `layer` is read as a single character
# (line[17:18]), so L >= 10 would be truncated — confirm L is one digit.
for filename in os.listdir(f_dir):
    if filename[-4:] == ".txt":
        file.append( filename )
        print(filename)
        with open(os.path.join(f_dir, filename), "r") as f:
            lines = f.readlines()
            for line in lines:
                #print('h1c',line)
                if line[:9] == "Dataset: ":
                    dataset.append( line[9:-2] )
                if line[:7] == "Model: ":
                    model.append( line[7:-1] )
                if line[:17] == "net_params={'L': ":
                    layer.append( line[17:18] )
                if line[:18] == "Total Parameters: ":
                    params.append( line[18:-1] )
                if line[:10] == "TEST MAE: ":
                    acc_test.append( float(line[10:-1]) )
                if line[:11] == "TRAIN MAE: ":
                    acc_train.append( float(line[11:-1]) )
                if line[4:31] == "Convergence Time (Epochs): ":
                    convergence.append( float(line[31:-1]) )
                if line[:18] == "Total Time Taken: ":
                    total_time.append( float(line[18:-4]) )
                if line[:24] == 'Average Time Per Epoch: ':
                    epoch_time.append( float(line[24:-2]) )

# print('file',file)
# print('dataset',dataset)
# print('model',model)
# print('layer',layer)
# print('params',params)
# print('acc_test',acc_test)
# print('acc_train',acc_train)
# print('convergence',convergence)
# print('total_time',total_time)
# print('epoch_time',epoch_time)
list_datasets = ['ZINC', 'SBM_PATTERN','SBM_CLUSTER']
#print('list_datasets',list_datasets)
list_gnns = ['GraphTransformer']
#print('list_gnns',list_gnns)

# For every (dataset, model) pair, average the scraped metrics over all
# seeds and print one LaTeX table row.
#list_datasets = ['SBM_CLUSTER']
for data in list_datasets:
    #print(data)
    for gnn in list_gnns:
        #print('gnn:',gnn)
        acc_test_one_gnn = []
        acc_train_one_gnn = []
        convergence_one_gnn = []
        total_time_one_gnn = []
        epoch_time_one_gnn = []
        nb_seeds = 0
        for i in range(len(file)):
            if data==dataset[i] and gnn==model[i]:
                params_one_gnn = params[i]
                acc_test_one_gnn.append(acc_test[i])
                acc_train_one_gnn.append(acc_train[i])
                convergence_one_gnn.append(convergence[i])
                total_time_one_gnn.append(total_time[i])
                epoch_time_one_gnn.append(epoch_time[i])
                L = layer[i]
                nb_seeds = nb_seeds + 1
        if len(acc_test_one_gnn)>0:
            # Raw f-string: the LaTeX '\pm' would otherwise be an invalid
            # '\p' escape sequence (SyntaxWarning on newer Pythons).
            latex_str = rf"{data} & {nb_seeds} & {gnn} & {L} & {params_one_gnn} & {np.mean(acc_test_one_gnn):.3f}$\pm${np.std(acc_test_one_gnn):.3f} & {np.mean(acc_train_one_gnn):.3f}$\pm${np.std(acc_train_one_gnn):.3f} & {np.mean(convergence_one_gnn):.2f} & {np.mean(epoch_time_one_gnn):.2f}s/{np.mean(total_time_one_gnn):.2f}hr"
            print("\nDataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n{}".format(latex_str))
print("\n")
|
scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8UMFqsCD0xyF" colab_type="code" colab={}
# Importing necessary packages
import pandas as pd
import numpy as np
import altair as alt
# + id="HSXgY0ze09cY" colab_type="code" colab={}
# Download the "bank-full" marketing dataset (semicolon-separated CSV).
file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter03/bank-full.csv'
bankData = pd.read_csv(file_url, sep=";")
# + id="ecnqNxm0TZay" colab_type="code" outputId="515916d0-69e2-4b67-8da1-b11ebc7bb5d0" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1573003026166, "user_tz": -660, "elapsed": 8374, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Min-max normalise the account balance into the [0, 1] range.
from sklearn import preprocessing

balance_values = bankData[['balance']].values.astype(float)
# Build the scaler and apply it in one step.
minmaxScaler = preprocessing.MinMaxScaler()
bankData['balanceTran'] = minmaxScaler.fit_transform(balance_values)
# Show the first rows of the augmented frame.
bankData.head()
# + id="oISJ1v9sTg_S" colab_type="code" colab={}
# Adding a small numerical constant to eliminate 0 values (the asset index
# built below multiplies features, so an exact zero would zero out the product)
bankData['balanceTran'] = bankData['balanceTran'] + 0.00001
# + id="GsDGKLQzTy9O" colab_type="code" outputId="d89e4bc9-09e9-4f54-d071-b2edc3784914" colab={"base_uri": "https://localhost:8080/", "height": 224} executionInfo={"status": "ok", "timestamp": 1573003030600, "user_tz": -660, "elapsed": 1190, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Loan indicator weight: start everyone at 1 (has a loan) and bump
# customers without a loan up to 5 (no loan => better asset position).
bankData['loanTran'] = 1
no_loan_mask = bankData['loan'] == 'no'
bankData.loc[no_loan_mask, 'loanTran'] = 5
bankData.head()
# + id="tPrwaWORT2wt" colab_type="code" outputId="3687fb10-b25c-4ad4-d818-8f0ba4b6965c" colab={"base_uri": "https://localhost:8080/", "height": 224} executionInfo={"status": "ok", "timestamp": 1573003040146, "user_tz": -660, "elapsed": 1415, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Housing indicator weight: owning a house scores 5, not owning scores 1.
bankData['houseTran'] = 5
no_house_mask = bankData['housing'] == 'no'
bankData.loc[no_house_mask, 'houseTran'] = 1
bankData.head()
# + id="C3a2zadTUGLq" colab_type="code" outputId="ce60c876-3080-43c3-b3fa-8e44d17a123f" colab={"base_uri": "https://localhost:8080/", "height": 224} executionInfo={"status": "ok", "timestamp": 1573003043755, "user_tz": -660, "elapsed": 1023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Let us now create the new variable which is a product of all these
# weighted features; balances were shifted away from zero earlier so the
# product cannot collapse to zero.
bankData['assetIndex'] = bankData['balanceTran'] * bankData['loanTran'] * bankData['houseTran']
bankData.head()
# + id="I2eFF9GLUSn0" colab_type="code" outputId="68c4ab47-ac24-4e42-9588-b6414d6826fd" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1573003063439, "user_tz": -660, "elapsed": 1199, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Finding the quartile cut points (25th, 50th, 75th percentiles) of the
# asset index; these values are hard-coded into the binning cell below.
np.quantile(bankData['assetIndex'],[0.25,0.5,0.75])
# + id="dklJXEaNUbf7" colab_type="code" outputId="646550ac-4c22-494c-a792-fa8e5c7d9b97" colab={"base_uri": "https://localhost:8080/", "height": 224} executionInfo={"status": "ok", "timestamp": 1573003081578, "user_tz": -660, "elapsed": 1500, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Creating quantiles from the assetIndex data. The cut points (0.38, 0.57,
# 1.9) are the quartiles printed above. Using ">"/"<=" pairs makes the bins
# contiguous — the original strict "<" upper bounds silently dropped values
# exactly equal to a boundary back into 'Quant1'.
bankData['assetClass'] = 'Quant1'
bankData.loc[(bankData['assetIndex'] > 0.38) & (bankData['assetIndex'] <= 0.57), 'assetClass'] = 'Quant2'
bankData.loc[(bankData['assetIndex'] > 0.57) & (bankData['assetIndex'] <= 1.9), 'assetClass'] = 'Quant3'
bankData.loc[bankData['assetIndex'] > 1.9, 'assetClass'] = 'Quant4'
bankData.head()
# + id="JAt_inPjSo75" colab_type="code" colab={}
# Calculating total of each asset class (named aggregation: one 'assetTot'
# row count per class)
assetTot = bankData.groupby('assetClass')['y'].agg(assetTot='count').reset_index()
# Calculating the category wise counts: one 'assetCat' count per
# (assetClass, y) pair
assetProp = bankData.groupby(['assetClass', 'y'])['y'].agg(assetCat='count').reset_index()
# + id="tTGT5nBGSzgP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="5a262e17-17ba-420e-ec7d-e9b470723f7f" executionInfo={"status": "ok", "timestamp": 1573003235697, "user_tz": -660, "elapsed": 1143, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Merging both the data frames on the asset-class key
assetComb = pd.merge(assetProp, assetTot, on=['assetClass'])
# Share (in %) of each outcome ('yes'/'no') within its asset class
assetComb['catProp'] = (assetComb.assetCat / assetComb.assetTot)*100
assetComb
# + id="iJG_qZRzlVc1" colab_type="code" colab={}
# Categorical variables, removing loan and housing (both are already folded
# into the asset index built above); one-hot encode the rest.
bankCat1 = pd.get_dummies(bankData[['job','marital','education','default','contact','month','poutcome']])
# + id="N6j-jur1ljLP" colab_type="code" outputId="567676f2-e3e5-48cf-8308-6a7276c09051" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1573003254182, "user_tz": -660, "elapsed": 1007, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Take an explicit copy: later cells assign new scaled columns onto
# bankNum1, and writing into a slice view of bankData would trigger
# pandas' SettingWithCopyWarning (and the writes may not stick).
bankNum1 = bankData[['age','day','duration','campaign','pdays','previous','assetIndex']].copy()
bankNum1.head()
# + id="rhbHeJ3PlyiM" colab_type="code" colab={}
# Normalise some of the numerical variables
from sklearn import preprocessing
# + id="zG-wd9qgl4xW" colab_type="code" colab={}
# Creating the scaling function
minmaxScaler = preprocessing.MinMaxScaler()
# + id="3wDY6fPfmAki" colab_type="code" colab={}
# Creating the transformation variables (2-D float arrays, one column each,
# as the scaler expects)
ageT1 = bankNum1[['age']].values.astype(float)
dayT1 = bankNum1[['day']].values.astype(float)
durT1 = bankNum1[['duration']].values.astype(float)
# + id="5MT1CP24mDBx" colab_type="code" outputId="628d9b83-87b0-476a-9331-6a09ed5eb071" colab={"base_uri": "https://localhost:8080/", "height": 323} executionInfo={"status": "ok", "timestamp": 1573003270320, "user_tz": -660, "elapsed": 1121, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Transforming each column by normalising it with minmaxScaler; note that
# fit_transform re-fits the scaler per column, which is what independent
# per-column min-max scaling requires.
bankNum1['ageTran'] = minmaxScaler.fit_transform(ageT1)
bankNum1['dayTran'] = minmaxScaler.fit_transform(dayT1)
bankNum1['durTran'] = minmaxScaler.fit_transform(durT1)
# + id="L2LfnZDWmNJ3" colab_type="code" outputId="6e72f0da-1dfc-4376-c9e5-c4a261e265f6" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1573003276269, "user_tz": -660, "elapsed": 1345, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Let us create a new numerical variable set by selecting the scaled
# variables plus the already-normalised asset index
bankNum2 = bankNum1[['ageTran','dayTran','durTran','campaign','pdays','previous','assetIndex']]
# Printing the head of the data
bankNum2.head()
# + id="hD6suIpqmYTd" colab_type="code" outputId="58fc69cb-ff8b-483e-8286-5b3f863a7f2f" colab={"base_uri": "https://localhost:8080/", "height": 275} executionInfo={"status": "ok", "timestamp": 1573003278578, "user_tz": -660, "elapsed": 1430, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Preparing the X (feature) matrix: one-hot categorical columns
# concatenated column-wise with the numerical columns
X = pd.concat([bankCat1, bankNum2], axis=1)
print(X.shape)
# Preparing the Y variable (the 'y' subscription label)
Y = bankData['y']
print(Y.shape)
X.head()
# + id="4c7Jyxymmiaj" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# Splitting the data into train and test sets (70/30 split; fixed seed for
# reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=123)
# + id="EVSGc5Lom2vj" colab_type="code" outputId="28442c3a-4832-4df5-f84d-3d5ef637ed02" colab={"base_uri": "https://localhost:8080/", "height": 156} executionInfo={"status": "ok", "timestamp": 1573003281602, "user_tz": -660, "elapsed": 1265, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
from sklearn.linear_model import LogisticRegression
# Defining the LogisticRegression function (library defaults; no class
# weighting, so minority-class performance may be weak on this imbalanced data)
bankModel = LogisticRegression()
bankModel.fit(X_train, y_train)
# + id="GhFXSRnknr3p" colab_type="code" outputId="e88952b2-01bc-497d-be39-8c2ad85f00bc" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1573003282951, "user_tz": -660, "elapsed": 838, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Predict on the held-out set and report the mean accuracy.
pred = bankModel.predict(X_test)
# Fixed typo in the printed message: "Logisticr" -> "logistic".
print('Accuracy of logistic regression model prediction on test set: {:.2f}'.format(bankModel.score(X_test, y_test)))
# + id="DVBvXZJknuJ3" colab_type="code" outputId="d865240b-fdbc-4526-bd3f-01b9e42792de" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1573003284105, "user_tz": -660, "elapsed": 1127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Confusion Matrix for the model (rows: true class, columns: predicted class)
from sklearn.metrics import confusion_matrix
confusionMatrix = confusion_matrix(y_test, pred)
print(confusionMatrix)
# + id="BqcxgdLun0HY" colab_type="code" outputId="2956690b-bee6-42db-826b-670f4c47d5ae" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1573003287122, "user_tz": -660, "elapsed": 1461, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCYY-iGjUIqBSnlLoszfZTN7rU7FRNg05Rdt9Ii3A=s64", "userId": "11809607246124237079"}}
# Per-class precision / recall / F1 summary
from sklearn.metrics import classification_report
print(classification_report(y_test, pred))
|
Chapter03/Activity3.02/Activity3_02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit
# name: python3
# ---
# # [Codewars](https://www.codewars.com/kata/5545f109004975ea66000086/train/python)
#
# - Refactoring
# # Is n divisible by x and y?
#
# > <br>
# >
# > Create a function that checks if a number `n` is divisible by two numbers: <br>
# >
# > `x` **AND** `y`.
# >
# >All inputs are positive, non-zero digits.
# >
# > Examples:
# >
# > 1) n = 3, x = 1, y = 3 ===> `true` because 3 is divisible by 1 and 3<br>
# > <br>
# > 2) n = 12, x = 2, y = 6 ===> `true` because 12 is divisible by 2 and 6<br>
# ><br>
# > 3) n = 100, x = 5, y = 3 ===> `false` because 100 is not divisible by 3<br>
# ><br>
# > 4) n = 12, x = 7, y = 5 ===> `false` because 12 is neither divisible by 7 nor 5 <br>
# > <br>
# >
# +
# Tests
import codewars_test as test
from solution import is_divisible
@test.describe("Sample Tests")
def basic_tests():
    """Codewars sample test group (executed by the codewars_test runner)."""
    @test.it('Basic Test Cases')
    def basic_test_cases():
        test.assert_equals(is_divisible(3,2,2),False)
        test.assert_equals(is_divisible(3,3,4),False)
        test.assert_equals(is_divisible(12,3,4),True)
        test.assert_equals(is_divisible(8,3,4),False)
# -
# # My Solution
#
# ### My Understanding
#
# I will be given three numbers. The first one is `n`, I need to check if `n` can be divided by the next two numbers given, `x` and `y`. The result needs to be a boolean - `True` or `False`.<br> I believe I can use Mod % here.<br> If `n` has a remainder of 0 for both `x` and `y`, then it is divisible by `x` and `y` and the result should be `True`.<br>
# <br>
# - [x] Create an `if` statement
# - [x] Add rule for `x` % to equal 0
# - [x] Add rule for `y` % to equal 0
# - [x] Return a Boolean
def is_divisible(n, x, y):
    """Return True when n is divisible by both x and y."""
    # Guard-clause form: bail out as soon as one divisor fails.
    if n % x != 0:
        return False
    if n % y != 0:
        return False
    return True
# Spot-check with the kata's sample inputs; the notebook shows each cell's
# value, so no assertions are needed here.
is_divisible(3,2,2)   # False: 3 % 2 == 1
is_divisible(3,3,4)   # False: 3 % 4 == 3
is_divisible(12,3,4)  # True: 12 % 3 == 0 and 12 % 4 == 0
is_divisible(8,3,4)   # False: 8 % 3 == 2
# # Other Solutions
def is_divisible(n, x, y):
    """Return True when n is divisible by both x and y."""
    divisible_by_x = n % x == 0
    divisible_by_y = n % y == 0
    return divisible_by_x and divisible_by_y
def is_divisible(n, x, y):
    """Return True when n is divisible by both x and y.

    Both remainders are zero exactly when their sum is zero, since
    remainders of positive divisors are non-negative.
    """
    return (n % x) + (n % y) == 0
def is_divisible(n, x, y):
    """Return True when n is divisible by both x and y (truthiness form:
    any non-zero remainder makes the inner expression truthy)."""
    return not (n % x or n % y)
def is_divisible(n, x, y):
    """Return True when n is divisible by both x and y.

    Uses logical ``and`` instead of the original bitwise ``&`` so the
    second modulo is short-circuited and the expression reads as boolean
    logic rather than a bit operation.
    """
    return (n % x == 0) and (n % y == 0)
|
2021/October/Tuesday-Oct-12-Divisible-x-y.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Saving and Loading Models
#
# In this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import fc_model
# +
# Define a transform to normalize the data: convert images to tensors, then
# shift/scale the single grayscale channel from [0, 1] to [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# -
# Here we can see one of the images (helper is a course-provided module).
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
# # Train a network
#
# To make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.
# Create the network, define the criterion and optimizer
model = fc_model.Network(784, 10, [512, 256, 128])  # 784 = 28*28 inputs, 10 classes, three hidden layers
criterion = nn.NLLLoss()  # assumes the model outputs log-probabilities — TODO confirm in fc_model
optimizer = optim.Adam(model.parameters(), lr=0.001)
fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
# ## Saving and loading networks
#
# As you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.
#
# The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.
# Show the architecture and the parameter-tensor names held in state_dict.
print("Our model: \n\n", model, '\n')
print("The state dict keys: \n\n", model.state_dict().keys())
# The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.
torch.save(model.state_dict(), 'checkpoint.pth')
# Then we can load the state dict with `torch.load`.
state_dict = torch.load('checkpoint.pth')
print(state_dict.keys())
# And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.
model.load_state_dict(state_dict)
# Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.
# Try this: a deliberately mismatched architecture to demonstrate the failure.
model = fc_model.Network(784, 10, [400, 200, 100])
# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)
# This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to compeletely rebuild the model.
# +
# Bundle the architecture hyperparameters with the weights so the network
# can be reconstructed before load_state_dict is called.
checkpoint = {'input_size': 784,
              'output_size': 10,
              'hidden_layers': [each.out_features for each in model.hidden_layers],
              'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
# -
# -
# Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints.
def load_checkpoint(filepath):
    """Rebuild an fc_model.Network from a saved checkpoint and load its weights."""
    ckpt = torch.load(filepath)
    net = fc_model.Network(ckpt['input_size'],
                           ckpt['output_size'],
                           ckpt['hidden_layers'])
    net.load_state_dict(ckpt['state_dict'])
    return net
# Round-trip check: rebuild the model from the checkpoint file and show it.
model = load_checkpoint('checkpoint.pth')
print(model)
|
Part 6 - Saving and Loading Models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problema de selección de portafolio con preferencias media-varianza
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTuJvMFl4gvu5hxxkiWSANNVUAYO-oVpCJCivsl6Vllt6m6J9qUNw" width="400px" height="400px" />
#
# En la clase pasada hablamos acerca de:
# - preferencias,
# - funciones de utilidad,
# - la actitud de los inversionistas de cara al riesgo,
# - la aversión al riesgo, entre otros.
#
# Todas ellas son piezas que necesitamos para responder la pregunta de ¿cómo un inversionista toma la decisión óptima de selección de portafolio?
#
# En esta clase al fin estamos listos para ensamblar estos conceptos y escribir el problema de selección de portafolios.
#
# En el camino aprenderemos acerca del concepto de **utilidad esperada**, que nos permite trabajar con incertidumbre en el modelado económico (una de las ideas más importantes en economía). Esta idea tiene más de 60 años, y básicamente dice que los individuos, cuando están de cara a incertidumbre, maximizan el valor esperado de su utilidad (solo cierto si somos homo economicus).
#
# Además del concepto de utilidad esperada, aprenderemos acerca de **preferencias media-varianza**. Es decir, supondremos que los inversionistas toman decisiones basados en un tipo particular de preferencias.
#
# Con lo anterior, estableceremos el problema de selección de portafolios.
#
# **Objetivos:**
#
# - ¿Qué es utilidad esperada?
# - ¿Qué son las preferencias media-varianza?
# - Funciones de utilidad media-varianza.
# - Enunciado y solución del problema básico de selección de portafolio.
#
# *Referencia:*
# - Notas del curso "Portfolio Selection and Risk Management", Rice University, disponible en Coursera.
# ___
# ## 1. Utilidad esperada
# - Básicamente combina las probabilidades de los resultados con cómo los inversionistas se sienten con dichos resultados.
# - En otras palabras, la utilidad esperada multiplica la probabilidad de suceso de un evento con la utilidad que genera dicho evento.
#
# Recordemos que las *funciones de utilidad* permiten a los inversionistas expresar cómo se sienten con los resultados, especialmente en los malos ratos.
#
# Entonces la *utilidad esperada* es una herramienta que nos permite cuantificar cómo nos sentimos en nuestros malos momentos económicos, capturando el riesgo con la probabilidad de ocurrencia de dichos malos momentos.
# Dado este marco de trabajo, cualquier decisión se puede escribir como la maximización de la utilidad esperada:
# \begin{align}
# \max_{\theta} & \quad E[U(W)], \\
# \end{align}
# mediante la escogencia de cierta variable $\theta$ (gastos, planes de ahorro, compra de activos, planes de producción, etc.).
#
# Para nuestros propósitos, la variable de decisión serán los pesos o ponderaciones del portafolio.
#
# Adicionalmente, en el contexto de la decisión de distribución de la riqueza entre activos, el problema de maximización tendrá comúnmente las siguientes restricciones:
# - universo de inversión,
# - posición en los activos dados.
# **Ejemplo.**
#
# Supongamos que un inversionista debe determinar la composición óptima de su portafolio, que contiene activos y bonos. Supongamos que son los únicos instrumentos disponibles.
#
# Sean:
# - $w_s$: peso o ponderación de activos en el portafolio,
# - $w_b$: peso o ponderación de bonos en el portafolio,
# - $r_s$: rendimiento de los activos, y
# - $r_b$: rendimiento de los bonos.
#
# De manera que podemos escribir el problema de selección de portafolios como la maximización de la utilidad esperada de nuestra riqueza futura, la cual dependerá de nuestros rendimientos:
#
# \begin{align}
# \max_{w_s,w_b} &\quad E[U(W)]\\
# \text{s. a.} &\quad W=W_0(1+w_sr_s+w_br_b)\\
# &\quad w_s+w_b=1
# \end{align}
#
#
# Preguntas:
# - ¿Qué significan las restricciones?
# - Ya que tenemos planteado este problema básico, ¿qué haría falta para empezar a resolverlo?
# ___
# ## 2. Preferencias media-varianza
#
# ### 2.1. Utilidad media-varianza
#
# Entonces, ¿qué funciones de utilidad deberíamos de usar en este problema de selección de portafolios?
#
# - La respuesta es: **preferencias media-varianza**.
# - Éstas serán representadas en términos de funciones de utilidad como: **utilidad media-varianza**.
#
# Usamos la *utilidad media-varianza* en el problema de selección de portafolios dado que ésta describe el "trade-off" entre riesgo y rendimiento que enfrentan los inversionistas. La *utilidad media-varianza* está dada por la siguiente expresión:
#
# $$U=E[r_p]-\frac{1}{2}\gamma\sigma_p^2,$$
#
# donde
# - $E[r_p]$ es el rendimiento esperado del portafolio,
# - $\sigma_p^2$ es la varianza del portafolio, y
# - $\gamma$ es el coeficiente de aversión al riesgo.
# #### Intuición acerca de la función de utilidad media-varianza:
# - Sólo se preocupa por medias :) y varianzas :(.
# - Incrementa con: rendimiento esperado del portafolio.
# - Decrece con: varianza del portafolio.
# - Malos tiempos: rendimientos son bajos y las volatilidades son altas.
# - Conecta bastante bien con la teoría moderna de portafolios, la cual caracteriza los rendimientos con medias y varianzas únicamente.
# - Criticada por su limitación: supone que los inversionistas sólo se preocupan por medias y varianzas.
# ### 2.2. Curvas de indiferencia
#
# *¿Recuerdan las curvas de nivel que se ven en cálculo de varias variables?*
# - Bien, acá nos servirán para representar la utilidad media-varianza gráficamente.
# - En el contexto de utilidad media-varianza, las curvas de nivel se llaman **curvas de indiferencia**.
#
# Dados ciertos niveles de utilidad $U_1>U_2>U_3$, las curvas de indiferencia relativas a estos niveles de utilidad, son los lugares geométricos en el espacio de rendimiento esperado vs. volatilidad representados por las siguientes expresiones
#
# $$U_1=E[r_p]-\frac{1}{2}\gamma\sigma_p^2\Rightarrow E[r_p]=\frac{1}{2}\gamma\sigma_p^2+U_1,$$
#
# $$U_2=E[r_p]-\frac{1}{2}\gamma\sigma_p^2\Rightarrow E[r_p]=\frac{1}{2}\gamma\sigma_p^2+U_2,$$
#
# $$U_3=E[r_p]-\frac{1}{2}\gamma\sigma_p^2\Rightarrow E[r_p]=\frac{1}{2}\gamma\sigma_p^2+U_3.$$
# **Gráficamente**
# Importar numpy y pyplot
# +
# Coeficiente de aversión al riesgo (entre 1 y 10 comúnmente)
# Niveles de utilidad
# Vector de volatilidades (sugerido 1%-60%)
# Curvas de indiferencia
# -
# Gráfica
# Bueno, ¿y por qué se llaman curvas de indiferencia?, ¿qué representa una curva de indiferencia?
#
# - Porque sobre una misma curva el nivel de utilidad es el mismo (es indiferente).
# - Son todas las combinaciones de riesgo y rendimiento que producen un mismo nivel de utilidad.
# Volviendo al problema de selección de portafolios, queremos la utilidad más alta.
# - ¿Cuál de las anteriores curvas de indiferencia corresponde a la utilidad más alta?
# - Intuitivamente, ¿por qué?
# - Curvas de indiferencia para niveles de utilidad más altos, estarán...
#
# Notamos además que las anteriores curvas de indiferencia son *paralelas* una con otra. Claro, las dibujamos con el mismo coeficiente de aversión al riesgo.
#
# ¿Cómo cambian estas curvas para coeficientes de aversión al riesgo más altos?
# +
# Coeficientes de aversión al riesgo (entre 1 y 10 comúnmente)
# Nivel de utilidad
# Vector de volatilidades (sugerido 1%-60%)
# Curvas de indiferencia
# -
# Gráfica
# ¿Cómo interpretamos las anteriores gráficas?, ¿qué pasa con las personas más aversas al riesgo?
# - Se puede ver de dos maneras: para un mismo nivel de rendimiento esperado, una persona más aversa al riesgo soporta un nivel menor de riesgo; equivalentemente, para un mismo nivel de riesgo, una persona más aversa al riesgo requerirá un nivel de rendimiento esperado más alto.
# Con todo lo anterior, el problema de selección de portafolios se puede plantear como *encontrar la curva de indiferencia más alta dado el conjunto de oportunidades de inversión y restricciones*.
# ## 3. Problema de selección de portafolios: una ilustración
#
# Ahora ilustraremos el problema de selección de portafolios con algunos datos.
# - Por ahora solo queremos ilustrar gráficamente cómo se resuelve este problema. Trabajar en la intuición.
# - En las siguientes dos clases nos enfocaremos en cómo resolverlo analíticamente.
#
# Acá tenemos el rendimiento medio anual y la volatilidad para dos instrumentos usando datos de EU: instrumentos de deuda (bonos) y acciones. Supondremos que el inversionista solo puede invertir en estas dos clases de instrumentos.
# Importamos pandas
import pandas as pd
# Annual mean return and volatility for US stocks and bonds; the 'CorrSB'
# row stores the stock-bond correlation in the 'Mean' column.
data = pd.DataFrame(
    {'Mean': [0.119, 0.0591, 0.113],
     'Std': [0.1915, 0.0833, None]},
    index=['Stocks', 'Bonds', 'CorrSB'],
)
data
# Entonces, ¿cuál es la distribución de riqueza óptima?, o más bien, ¿cuál es la composición óptima del portafolio para un inversionista dado su nivel de aversión al riesgo?
#
# **Primero.** Recordamos que, para dos activos, podemos trazar la frontera de mínima varianza tomando todas las posibles combinaciones de los dos activos.
#
# De nuevo, sean:
# - $w_s=w$: peso o ponderación de activos en el portafolio,
# - $w_b=1-w$: peso o ponderación de bonos en el portafolio,
# - $r_s$: rendimiento de los activos, y
# - $r_b$: rendimiento de los bonos.
#
# Entonces
#
# $$E[r_p]=wE[r_{s}]+(1-w)E[r_b]$$
#
# $$\sigma_p^2=w^2\sigma_{s}^2+(1-w)^2\sigma_b^2+2w(1-w)\rho_{s,b}\sigma_s\sigma_b$$
# +
# Vector de w variando entre 0 y 1 con n pasos
# Rendimientos esperados individuales
# Volatilidades individuales
# Correlacion
# -
# Crear un DataFrame cuyas columnas sean rendimiento
# y volatilidad del portafolio para cada una de las w
# generadas
# Gráfica
# **Segundo.** Graficamos en la misma ventana, curvas de indiferencia.
# +
# Niveles de utilidad
# Coeficiente de aversión al riesgo
# Curvas de indiferencia
# -
# Gráfica
# **Tercero.** La elección óptima está dada por la curva de indiferencia para el nivel de utilidad más alto que es tangente a la frontera media-varianza.
# - Claramente, esta selección depende del coeficiente de aversión al riesgo.
# Gráfica con zoom
# # Anuncios parroquiales
#
# ## 1. Quiz la siguiente clase.
# ## 2. Un par de artículos del WSJ y el NYT que discuten herramientas disponibles para la medición de su propia tolerancia al riesgo:
# - [Artículo 1](https://www.nytimes.com/2016/02/13/your-money/as-stocks-fall-its-time-to-measure-your-risk-tolerance.html)
# - [Artículo 2](https://www.wsj.com/articles/check-your-tolerance-for-investment-risk-now-before-markets-sag-1405619939)
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
|
Modulo3/Clase11_ProblemaSeleccionPortafolio.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="636929bc-001" colab_type="text"
# #1. Install Dependencies
# First install the libraries needed to execute recipes, this only needs to be done once, then click play.
#
# + id="636929bc-002" colab_type="code"
# !pip install git+https://github.com/google/starthinker
# + [markdown] id="636929bc-003" colab_type="text"
# #2. Get Cloud Project ID
# To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
#
# + id="636929bc-004" colab_type="code"
# Google Cloud project the recipe will run against (replace the placeholder
# before executing).
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
# + [markdown] id="636929bc-005" colab_type="text"
# #3. Get Client Credentials
# To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
#
# + id="636929bc-006" colab_type="code"
# OAuth client credentials JSON (replace the placeholder before executing).
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
# + [markdown] id="636929bc-007" colab_type="text"
# #4. Enter Storage Bucket Parameters
# Create and permission a bucket in Storage.
# 1. Specify the name of the bucket and who will have owner permissions.
# 1. Existing buckets are preserved.
# 1. Adding a permission to the list will update the permissions but removing them will not.
# 1. You have to manualy remove grants.
# Modify the values below for your use case, can be done multiple times, then click play.
#
# + id="636929bc-008" colab_type="code"
# Recipe parameters — edit the values for your use case before running.
FIELDS = {
  'auth_write': 'service',  # Credentials used for writing data.
  'bucket_bucket': '',  # Name of Google Cloud Bucket to create.
  'bucket_emails': '',  # Comma separated emails.
  'bucket_groups': '',  # Comma separated groups.
}
print("Parameters Set To: %s" % FIELDS)
# + [markdown] id="636929bc-009" colab_type="text"
# #5. Execute Storage Bucket
# This does NOT need to be modified unless you are changing the recipe, click play.
#
# + id="636929bc-010" colab_type="code"
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
# Path where the Colab stores the user's OAuth token.
USER_CREDENTIALS = '/content/user.json'
# Recipe definition: a single 'bucket' task whose 'field' placeholders are
# filled in from FIELDS via json_set_fields below.
TASKS = [
    {
        'bucket': {
            'auth': 'user',
            'bucket': {'field': {'name': 'bucket_bucket','kind': 'string','order': 2,'default': '','description': 'Name of Google Cloud Bucket to create.'}},
            'emails': {'field': {'name': 'bucket_emails','kind': 'string_list','order': 3,'default': '','description': 'Comma separated emails.'}},
            'groups': {'field': {'name': 'bucket_groups','kind': 'string_list','order': 4,'default': '','description': 'Comma separated groups.'}}
        }
    }
]
# Substitute the user-supplied FIELDS values into the recipe placeholders.
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
|
colabs/bucket.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="d7aa1b0ad6979877450f9cd89e1e37289b51cf6e"
# # **Movies Recommender System**
# + [markdown] _uuid="f34fc4dcfba717692c620e1fdfa502ee910c6365"
# Here , we are building Movie Recommendation System using [TMDB 5000 Movie Dataset](https://www.kaggle.com/tmdb/tmdb-movie-metadata).
# + [markdown] _uuid="f9a9405b7e81c1da449bd2e96c2849fb86caa614"
# There are 2 types of recommender systems are implemented in this notebook:-
#
# > * **Demographic Filtering**- They offer generalized recommendations to every user, based on movie popularity and/or genre. The System recommends the same movies to users with similar demographic features. Since each user is different , this approach is considered to be too simple. The basic idea behind this system is that movies that are more popular and critically acclaimed will have a higher probability of being liked by the average audience.Weighted Average is used to make this simple recommender .
#
#
# + [markdown] _uuid="60a2df15abf82ba21918e3a42cb0ee46d22fa764"
# > * **Content Based Filtering**- They suggest similar items based on a particular item. This system uses item metadata, such as genre, director, description, actors, etc. for movies, to make these recommendations. The general idea behind these recommender systems is that if a person liked a particular item, he or she will also like an item that is similar to it.
# + [markdown] _uuid="6b418588e3f9139f74cb3a9546f5dca49729579b"
# Loading the Data
# + _uuid="c1fdd129c1cbab68ae3e6bf2062575f01f80b87c"
import pandas as pd
import numpy as np
# Credits (cast/crew) and movie-metadata halves of the TMDB 5000 dataset.
df1=pd.read_csv('tmdb_5000_credits.csv')
df2=pd.read_csv('tmdb_5000_movies.csv')
# + [markdown] _uuid="402a28d17c13bba3f2060d72c2ff75f5377a9f01"
# The first dataset contains the following features:-
#
# * movie_id - A unique identifier for each movie.
# * cast - The name of lead and supporting actors.
# * crew - The name of Director, Editor, Composer, Writer etc.
#
# The second dataset has the following features:-
#
# * budget - The budget in which the movie was made.
# * genre - The genre of the movie, Action, Comedy ,Thriller etc.
# * homepage - A link to the homepage of the movie.
# * id - This is infact the movie_id as in the first dataset.
# * keywords - The keywords or tags related to the movie.
# * original_language - The language in which the movie was made.
# * original_title - The title of the movie before translation or adaptation.
# * overview - A brief description of the movie.
# * popularity - A numeric quantity specifying the movie popularity.
# * production_companies - The production house of the movie.
# * production_countries - The country in which it was produced.
# * release_date - The date on which it was released.
# * revenue - The worldwide revenue generated by the movie.
# * runtime - The running time of the movie in minutes.
# * status - "Released" or "Rumored".
# * tagline - Movie's tagline.
# * title - Title of the movie.
# * vote_average - average ratings the movie recieved.
# * vote_count - the count of votes recieved.
#
# Let's join the two dataset on the 'id' column
#
# + _uuid="c87bda9d56a936be126d03eda0bc743ee35be461"
# Rename the credits columns ('tittle' [sic] conveniently avoids clashing with
# df2's own 'title' column during the merge), then join credits onto the movie
# metadata by TMDB id.
df1.columns = ['id','tittle','cast','crew']
df2= df2.merge(df1,on='id')
# + _uuid="71d266ed92947c51acf07189d3b42379134ef6e7"
df2.head(5)
# + [markdown] _uuid="ee603279675033fc397f0c94738e20b34f35312b"
# # **Demographic Filtering** -
#
# We can use the average ratings of the movie as the score but using this won't be fair enough since a movie with 8.9 average rating and only 3 votes cannot be considered better than the movie with 7.8 as as average rating but 40 votes.
# So, I'll be using IMDB's weighted rating (wr) which is given as :-
#
# 
# where,
# * v is the number of votes for the movie.
# * m is the minimum votes required to be listed in the chart.
# * R is the average rating of the movie.
# * C is the mean vote across the whole report.
#
# We already have v(**vote_count**) and R (**vote_average**) and C can be calculated as
# + _uuid="5799b99c5e5ed5b7723ae8b31e1fc9fb1e7b89ec"
# C: mean rating over all movies — the global prior used in the weighted rating.
C= df2['vote_average'].mean()
C
# + [markdown] _uuid="02fa8642f75bcef6d4a79e029af6dfeebf19e3a5"
# So, the mean rating for all the movies is approx 6 on a scale of 10.The next step is to determine an appropriate value for m, the minimum votes required to be listed in the chart. We will use 90th percentile as our cutoff. In other words, for a movie to feature in the charts, it must have more votes than at least 90% of the movies in the list.
# + _uuid="f2f1eaff1e4349b5d2d11dd5ce79c19a85561148"
# m: minimum vote count to qualify for the chart (90th percentile of counts).
m= df2['vote_count'].quantile(0.9)
m
# + [markdown] _uuid="b77dea5a38ca2c399e3abeac1487e784fe146078"
# Now, we can filter out the movies that qualify for the chart
# + _uuid="a22008df6d81d3b716d39a56efd3d547345bfbce"
# Keep only movies with at least m votes (work on a copy of df2).
q_movies = df2.copy().loc[df2['vote_count'] >= m]
q_movies.shape
# + [markdown] _uuid="cf1fe5a3692caee41a6e7a74c3fde6aeb8a62947"
# We see that there are 481 movies which qualify to be in this list. Now, we need to calculate our metric for each qualified movie. To do this, we will define a function, **weighted_rating()** and define a new feature **score**, of which we'll calculate the value by applying this function to our DataFrame of qualified movies:
# + _uuid="bb680ed0fb1c3020785d34152c57c6e2279d4424"
def weighted_rating(x, m=m, C=C):
    """IMDB-style weighted rating: blend a movie's own average rating with
    the global mean C, weighted by how many votes the movie received."""
    votes = x['vote_count']
    avg_rating = x['vote_average']
    # Many votes -> trust the movie's own average; few votes -> pull toward C.
    return (votes / (votes + m)) * avg_rating + (m / (m + votes)) * C
# + _uuid="d2d189929715237ab19a18fb8747239b86092968"
# Define a new feature 'score' and calculate its value with `weighted_rating()`
# (applied row-wise, so each qualified movie gets its own weighted rating).
q_movies['score'] = q_movies.apply(weighted_rating, axis=1)
# + [markdown] _uuid="4cdd60c146173606146ec4fc3a1c9d8c184cb81c"
# Finally, let's sort the DataFrame based on the score feature and output the title, vote count, vote average and weighted rating or score of the top 10 movies.
# + _uuid="a9a9fc3810ea67c31908bbdf8bb930daa918102b"
#Sort movies based on score calculated above
q_movies = q_movies.sort_values('score', ascending=False)
#Print the top 10 movies (the original comment said 15, but the code takes 10)
q_movies[['title', 'vote_count', 'vote_average', 'score']].head(10)
# + [markdown] _uuid="fe716df6e5e5a354ac53d556087147c0a64df2cc"
# # **Content Based Filtering**
# In this recommender system the content of the movie (overview, cast, crew, keyword, tagline etc) is used to find its similarity with other movies. Then the movies that are most likely to be similar are recommended.
# + [markdown] _uuid="b0a813c803b0ba1f0204188ab2a63dc7f59ce2eb"
# ## **Plot description based Recommender**
#
# We will compute pairwise similarity scores for all movies based on their plot descriptions and recommend movies based on that similarity score. The plot description is given in the **overview** feature of our dataset.
# + _uuid="5e676c38ace04a24205b76b16dac0fa3e058027f"
df2['overview'].head(5)
# + [markdown] _uuid="277a9bb5b00a6bd2469c45777f9c659066f402b3"
# Now we'll compute Term Frequency-Inverse Document Frequency (TF-IDF) vectors for each overview.
#
# term frequency , it is the relative frequency of a word in a document and is given as
# **(term instances/total instances)**.
# Inverse Document Frequency is the relative count of documents containing the term is given as
# **log(number of documents/documents with term)**
# The overall importance of each word to the documents in which they appear is equal to **TF * IDF**
#
# This will give a matrix where each column represents a word in the overview vocabulary (all the words that appear in at least one document) and each row represents a movie, as before.This is done to reduce the importance of words that occur frequently in plot overviews and therefore, their significance in computing the final similarity score.
# + _uuid="a92da8cde39c61deef5a1b8efa31ed84cda7f5fe"
# Vectorize every plot overview as TF-IDF so frequent, uninformative words
# are down-weighted before computing similarities.
#Import TfIdfVectorizer from scikit-learn
from sklearn.feature_extraction.text import TfidfVectorizer
#Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'
tfidf = TfidfVectorizer(stop_words='english')
#Replace NaN with an empty string
df2['overview'] = df2['overview'].fillna('')
#Construct the required TF-IDF matrix by fitting and transforming the data
tfidf_matrix = tfidf.fit_transform(df2['overview'])
#Output the shape of tfidf_matrix (movies x vocabulary)
tfidf_matrix.shape
# + [markdown] _uuid="6bde57434bf9a0e8f8b229d36901d75b77ff962f"
# We see that over 20,000 different words were used to describe the 4800 movies in our dataset.
#
# With this matrix in hand, we can now compute a similarity score. There are several candidates for this; such as the euclidean, the Pearson and the [cosine similarity scores](https://en.wikipedia.org/wiki/Cosine_similarity). There is no right answer to which score is the best. Different scores work well in different scenarios and it is often a good idea to experiment with different metrics.
#
# We will be using the cosine similarity to calculate a numeric quantity that denotes the similarity between two movies. We use the cosine similarity score since it is independent of magnitude and is relatively easy and fast to calculate. Mathematically, it is defined as follows:
# 
# + [markdown] _uuid="c1c138ae64648cb5a94127a06441d88dddc2bd9a"
# Since we have used the TF-IDF vectorizer, calculating the dot product will directly give us the cosine similarity score. Therefore, we will use sklearn's **linear_kernel()** instead of cosine_similarities() since it is faster.
# + _uuid="5eb17d12220eecab4faf01bbfd13e79d8e446537"
# Import linear_kernel
from sklearn.metrics.pairwise import linear_kernel
# Compute the cosine similarity matrix. TF-IDF rows are L2-normalized,
# so the plain dot product (linear_kernel) IS the cosine similarity — and faster.
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
# + [markdown] _uuid="f5ca112fbbe25b11f0f3356a31d1604727242700"
# We are going to define a function that takes in a movie title as an input and outputs a list of the 10 most similar movies. Firstly, for this, we need a reverse mapping of movie titles and DataFrame indices. In other words, we need a mechanism to identify the index of a movie in our metadata DataFrame, given its title.
# + _uuid="55df2df36be98e6dec5f617a5aa51b77c500faa4"
#Construct a reverse map of indices and movie titles
# (title -> row position; used by get_recommendations to find a movie's row).
indices = pd.Series(df2.index, index=df2['title']).drop_duplicates()
# + [markdown] _uuid="da5896c6ccfd44c3347af3097275d0aa707c1001"
# We are now in a good position to define our recommendation function. These are the following steps we'll follow :-
# * Get the index of the movie given its title.
# * Get the list of cosine similarity scores for that particular movie with all movies. Convert it into a list of tuples where the first element is its position and the second is the similarity score.
# * Sort the aforementioned list of tuples based on the similarity scores; that is, the second element.
# * Get the top 10 elements of this list. Ignore the first element as it refers to self (the movie most similar to a particular movie is the movie itself).
# * Return the titles corresponding to the indices of the top elements.
# + _uuid="9c383fcbb916dce464b01adf980d26ad96aebe0e"
# Given a movie title, return the titles of the 10 most similar movies.
def get_recommendations(title, cosine_sim=cosine_sim):
    """Return the 10 movies most similar to `title` under `cosine_sim`."""
    # Look up the row index for this title.
    movie_idx = indices[title]
    # Pair every movie's index with its similarity to the query movie,
    # then rank highest-similarity first.
    ranked = sorted(enumerate(cosine_sim[movie_idx]),
                    key=lambda pair: pair[1], reverse=True)
    # Skip rank 0 (the movie itself) and keep the next 10 indices.
    top_indices = [pair[0] for pair in ranked[1:11]]
    return df2['title'].iloc[top_indices]
# + _uuid="14d722124f82e69cb444adcc589e396c75cbb4ff"
# Sanity-check the plot-based recommender on two well-known titles.
get_recommendations('The Dark Knight Rises')
# + _uuid="902b9f1ab91921889c85e9008818dcc0b4710ccd"
get_recommendations('The Avengers')
# + [markdown] _uuid="146302c25776b2c0076e64663a5e2e41e977fd2c"
# While our system has done a decent job of finding movies with similar plot descriptions, the quality of recommendations is not that great. "The Dark Knight Rises" returns all Batman movies while it is more likely that the people who liked that movie are more inclined to enjoy other Christopher Nolan movies. This is something that cannot be captured by the present system.
# + [markdown] _uuid="fcfe9db9c2fdd9334538256d233c6acf33c1c049"
# ## **Credits, Genres and Keywords Based Recommender**
# We are going to build a recommender based on the following metadata: the 3 top actors, the director, related genres and the movie plot keywords.
#
# From the cast, crew and keywords features, we need to extract the three most important actors, the director and the keywords associated with that movie. Right now, our data is present in the form of "stringified" lists , we need to convert it into a safe and usable structure
# + _uuid="59a8d0991e3cae9a44a4b351e154fd1000724448"
# Parse the stringified features into their corresponding python objects
# (literal_eval safely parses the list-of-dict strings without using eval).
from ast import literal_eval
features = ['cast', 'crew', 'keywords', 'genres']
for feature in features:
    df2[feature] = df2[feature].apply(literal_eval)
# + [markdown] _uuid="47d6062d1622a163f2bcf80b79eb7b1454003739"
# Next, we'll write functions that will help us to extract the required information from each feature.
# + _uuid="783b0e89f1c04a12ff51eb29cc68e93c818896cd"
# Get the director's name from the crew feature. If director is not listed, return NaN
def get_director(x):
    """Return the first crew member whose job is 'Director', or NaN if none."""
    for member in x:
        if member['job'] == 'Director':
            return member['name']
    return np.nan
# + _uuid="86c4e9f4e6ef1e5ff287f58f3a1119fbddbdae09"
# Returns the list top 3 elements or entire list; whichever is more.
def get_list(x):
    """Return up to the first three 'name' values from a list of dicts."""
    if not isinstance(x, list):
        # Missing/malformed data -> empty list.
        return []
    names = [entry['name'] for entry in x]
    # Keep at most the first three entries.
    return names[:3] if len(names) > 3 else names
# + _uuid="dd060c3c1d724de71555218f30cccafd4a8ad6af"
# Define new director, cast, genres and keywords features that are in a suitable form.
df2['director'] = df2['crew'].apply(get_director)
features = ['cast', 'keywords', 'genres']
for feature in features:
    df2[feature] = df2[feature].apply(get_list)
# + _uuid="87a96f835470aa3df590b74322c2717ff529d6ae"
# Print the new features of the first 3 films
df2[['title', 'cast', 'director', 'keywords', 'genres']].head(3)
# + [markdown] _uuid="0bcb2c2e99ffd9ce73205c9c6ef6687d16caa31f"
# The next step would be to convert the names and keyword instances into lowercase and strip all the spaces between them. This is done so that our vectorizer doesn't count the Johnny of "<NAME>" and "<NAME>" as the same.
# + _uuid="86af764c406a8b6184b37b57cfe499d20ce45f9c"
# Function to convert all strings to lower case and strip names of spaces
def clean_data(x):
    """Lowercase and de-space a string, or each string in a list.

    Non-string scalars (e.g. a NaN director) become the empty string so the
    vectorizer treats multi-word names as single atomic tokens.
    """
    if isinstance(x, list):
        return [item.replace(" ", "").lower() for item in x]
    if isinstance(x, str):
        return x.replace(" ", "").lower()
    return ''
# + _uuid="5728cc017ff6ed1dcd79da05b1dd57a60557e853"
# Apply clean_data function to your features.
# (Lowercase + de-space so "Johnny Depp" becomes one token, 'johnnydepp'.)
features = ['cast', 'keywords', 'director', 'genres']
for feature in features:
    df2[feature] = df2[feature].apply(clean_data)
# + [markdown] _uuid="b6b3e1c480a7c280fbe81e63c5c4cf3ce308dc28"
# We are now in a position to create our "metadata soup", which is a string that contains all the metadata that we want to feed to our vectorizer (namely actors, director and keywords).
# + _uuid="20aef87703c408926f7617573ed043605207767f"
def create_soup(x):
    """Concatenate keywords, cast, director and genres into one space-separated
    metadata string for the CountVectorizer."""
    parts = [' '.join(x['keywords']),
             ' '.join(x['cast']),
             x['director'],
             ' '.join(x['genres'])]
    return ' '.join(parts)
# Build the per-movie "metadata soup" fed to the CountVectorizer below.
df2['soup'] = df2.apply(create_soup, axis=1)
# + [markdown] _uuid="7b79886883806b8fb58098f9f803dabeaa0cadf6"
# The next steps are the same as what we did with our plot description based recommender. One important difference is that we use the **CountVectorizer()** instead of TF-IDF. This is because we do not want to down-weight the presence of an actor/director if he or she has acted or directed in relatively more movies. It doesn't make much intuitive sense.
# + _uuid="b66a1afc1083917d5ef136ccdcd9b50cca087e2b"
# Import CountVectorizer and create the count matrix.
# CountVectorizer (not TF-IDF) on purpose: a prolific actor/director should
# not be down-weighted for appearing in many movies.
from sklearn.feature_extraction.text import CountVectorizer
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(df2['soup'])
# + _uuid="3fa5539ed1680ed5323f8351ac7e4840f629e958"
# Compute the Cosine Similarity matrix based on the count_matrix.
# Raw counts are not L2-normalized, so cosine_similarity (not linear_kernel) is needed.
from sklearn.metrics.pairwise import cosine_similarity
cosine_sim2 = cosine_similarity(count_matrix, count_matrix)
# + _uuid="b2b8565a04f4bda92d3ba9d15c348af1cd8f8b4d"
# Reset index of our main DataFrame and construct reverse mapping as before
# so row positions line up with cosine_sim2's rows.
df2 = df2.reset_index()
indices = pd.Series(df2.index, index=df2['title'])
# + [markdown] _uuid="3c5a3a44893f63aa558030d03e228a365d10d91f"
# We can now reuse our **get_recommendations()** function by passing in the new **cosine_sim2** matrix as your second argument.
# + _uuid="d1e0e02be7a9e71422d3a492834cb4f8434d1464"
# Metadata-based recommendations using the count-matrix similarity.
get_recommendations('The Dark Knight Rises', cosine_sim2)
# + _uuid="d6c4df85a80d830b2905f69e0e59ebb3461db3b7"
get_recommendations('The Godfather', cosine_sim2)
# + [markdown] _uuid="4d963ff547ee4980c0f23840394046d805fda574"
# We see that our recommender has been successful in capturing more information due to more metadata and has given us (arguably) better recommendations.
|
RS_All (1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import time
import yaml
import torch
import logging
import pickle
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
import seaborn as sns
import torch.nn.functional as F
import seaborn as sns
from tqdm.notebook import tqdm
from torch.optim import Adam, Optimizer
from collections import defaultdict
from torch_geometric.data import Data, InMemoryDataset
from model import APGCN
from seeds import test_seeds, gen_seeds, quick_seeds
from data import get_dataset, set_train_val_test_split
# +
def save_obj(obj, name):
    """Pickle `obj` to results/<name>.pkl, creating results/ if needed.

    Fix: previously raised FileNotFoundError when the results/ directory
    did not exist yet.
    """
    import os
    os.makedirs('results', exist_ok=True)
    with open('results/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return the pickled object stored at results/<name>.pkl."""
    path = 'results/' + name + '.pkl'
    with open(path, 'rb') as f:
        return pickle.load(f)
def summary(results):
    """Aggregate per-run metrics into mean +/- 95% bootstrap confidence interval.

    `results` maps metric names to lists of per-run values; the special keys
    'steps'/'probs' hold per-run torch tensors and are reduced to their scalar
    means first. Returns a flat dict with '<metric>' and '<metric>_ci' entries.
    """
    report={}
    for k, v in results.items():
        if k != 'steps' and k != 'probs':
            # Bootstrap the mean (1000 resamples) for a symmetric 95% CI.
            boots_series = sns.algorithms.bootstrap(results[k], func=np.mean, n_boot=1000)
            report[k] = np.mean(results[k])
            report[f'{k}_ci'] = np.max(np.abs(sns.utils.ci(boots_series, 95) - report[k]))
        else:
            # Reduce each run's tensor to its mean. Note: the comprehension
            # variable shadows the outer `k` but is scoped to the comprehension
            # in Python 3, so `report[k]` below still uses the metric name.
            array = np.array([k.mean().cpu().detach().numpy() for k in results['steps']])
            boots_series = sns.algorithms.bootstrap(array, func=np.mean, n_boot=1000)
            report[k] = np.mean(array)
            report[f'{k}_ci'] = np.max(np.abs(sns.utils.ci(boots_series, 95) - report[k]))
    return report
def plot_density(results):
    """Plot a KDE of the per-node number of propagation steps, averaged over runs."""
    fig, ax = plt.subplots()
    # One int array per run, stacked then averaged across runs per node.
    per_run = [(t.cpu().numpy()).astype(int) for t in results['steps']]
    mean_steps = np.mean(np.vstack(per_run), axis=0)
    sns.distplot(mean_steps, hist = False, kde = True,
                 kde_kws = {'shade': True, 'linewidth': 3},
                 ax=ax)
    plt.xlabel('Number of Steps')
    plt.ylabel('Density')
    plt.tight_layout()
    plt.show()
    return
# +
def train(model: torch.nn.Module, optimizer: Optimizer, data: Data, train_halt, weight_decay: float):
    """Run one optimization step on the training nodes.

    `train_halt` toggles whether the adaptive-halting unit's parameters
    (model.prop) receive gradients this step — the caller enables it only
    every 5th epoch.
    """
    model.train()
    for param in model.prop.parameters():
        param.requires_grad = train_halt
    optimizer.zero_grad()
    logits, steps, reminders = model(data)
    loss = F.nll_loss(logits[data.train_mask], data.y[data.train_mask])
    # Explicit L2 penalty over model.reg_params plus a propagation-cost
    # penalty that discourages unnecessary halting steps.
    l2_reg = sum((torch.sum(param ** 2) for param in model.reg_params))
    loss += weight_decay/2 * l2_reg + model.prop_penalty *(
        steps[data.train_mask] + reminders[data.train_mask]).mean()
    loss.backward()
    optimizer.step()
    return
def evaluate(model: torch.nn.Module, data: Data, test: bool, weight_decay: float):
    """Compute the regularized training loss plus train/val accuracy.

    Returns (eval_dict, loss): eval_dict holds 'train_acc', 'val_acc' and the
    per-node halting 'steps' tensor. NOTE: the `test` argument is accepted for
    signature symmetry with run() but is not used here.
    """
    model.eval()
    with torch.no_grad():
        logits, steps, reminders = model(data)
        # Same objective as train() so early stopping compares like with like.
        loss = F.nll_loss(logits[data.train_mask], data.y[data.train_mask])
        l2_reg = sum((torch.sum(param ** 2) for param in model.reg_params))
        loss += weight_decay/2 * l2_reg + model.prop_penalty *(
            steps[data.train_mask] + reminders[data.train_mask]).mean()
        eval_dict = {}
        keys = ['train','val']
        eval_dict['steps'] = steps
        for key in keys:
            mask = data[f'{key}_mask']
            pred = logits[mask].max(1)[1]
            # Fraction of correctly labelled nodes under this mask.
            acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
            eval_dict[f'{key}_acc'] = acc
        return eval_dict, loss
def test_acc(model: torch.nn.Module, data: Data):
    """Return classification accuracy over the nodes in data['test_mask']."""
    model.eval()
    with torch.no_grad():
        logits, steps, reminders = model(data)
        test_mask = data['test_mask']
        predictions = logits[test_mask].max(1)[1]
        correct = predictions.eq(data.y[test_mask]).sum().item()
        return correct / test_mask.sum().item()
# -
def run(dataset: InMemoryDataset,
        model: torch.nn.Module,
        seeds: np.ndarray,
        test: bool = False,
        max_epochs: int = 10000,
        patience: int = 100,
        lr: float = 0.01,
        weight_decay: float = 0.01,
        num_development: int = 1500,
        device: str = 'cuda'):
    """Train and evaluate `model` over many (split-seed, init) repetitions.

    For each seed, runs config['niter_per_seed'] independent trainings
    (NOTE(review): reads the module-level `config` rather than a parameter —
    confirm that is intended), early-stops on validation accuracy with loss
    as tie-breaker, and returns {metric: [per-run values]}.
    """
    best_dict = defaultdict(list)
    for seed in tqdm(seeds):
        for _ in range(config['niter_per_seed']):
            # `seed` fixes the data split; torch_seed varies the initialization.
            torch_seed = gen_seeds()
            torch.manual_seed(seed=torch_seed)
            dataset.data = set_train_val_test_split(
                seed,
                dataset.data,
                num_development=num_development,
                num_per_class=20
            ).to(device)
            model.to(device).reset_parameters()
            optimizer = Adam(model.parameters(),lr=lr)
            patience_counter = 0
            best_loss = 999
            tmp_dict = {'val_acc': 0}
            start_time = time.perf_counter()
            for epoch in range(1, max_epochs + 1):
                if patience_counter == patience:
                    break
                # Halting-unit parameters are only trained every 5th epoch.
                train(model, optimizer, dataset.data, epoch%5==0, weight_decay)
                eval_dict, loss = evaluate(model, dataset.data, test, weight_decay)
                if(eval_dict['val_acc'] > tmp_dict['val_acc']) or (
                    (eval_dict['val_acc'] == tmp_dict['val_acc']) and loss < best_loss):
                    # New best (or tie with lower loss): snapshot metrics and weights.
                    patience_counter = 0
                    tmp_dict['epoch'] = epoch
                    tmp_dict['runtime'] = time.perf_counter() - start_time
                    for k, v in eval_dict.items():
                        tmp_dict[k] = v
                    best_state = {key: value.cpu() for key, value
                                  in model.state_dict().items()}
                else:
                    patience_counter += 1
                # A loss improvement alone also resets the patience counter.
                if loss < best_loss:
                    best_loss = loss
                    patience_counter = 0
            # Restore the best checkpoint before measuring test accuracy.
            model.load_state_dict(best_state)
            tmp_dict['test_acc'] = test_acc(model,dataset.data)
            print("Epoch: {:.1f}"" Train: {:.2f}"" Val: {:.2f}"" Test: {:.2f}".format(
                tmp_dict['epoch'],
                tmp_dict['train_acc'] * 100,
                tmp_dict['val_acc'] * 100,
                tmp_dict['test_acc'] * 100))
            for k, v in tmp_dict.items():
                best_dict[k].append(v)
    return dict(best_dict)
# +
# NOTE(review): device is hard-coded to 'cuda'; this notebook fails on a
# CPU-only machine — confirm a fallback is not needed.
device = 'cuda'
if torch.cuda.is_available():
    torch.cuda.synchronize()
#Datasets: 'citeseer', 'cora_ml' 'pubmed' 'ms_academic', 'amazon_electronics_computers', 'amazon_electronics_photo'
#Num Developent: 1500,1500,1500,5000,1500,1500
# weight_decay 0 for Amazon Datasets 8e-03 for the others
# Experiment configuration — see the per-dataset notes directly above.
config = {'dataset_name': 'amazon_electronics_computers',
          'test': True,
          'use_lcc': True,  # restrict the graph to its largest connected component
          'num_development': 1500,
          'niter_per_seed': 5,
          'hidden_units': 64,
          'lr': 0.01,
          'dropout': 0.5,
          'weight_decay': 0
          }
dataset = get_dataset(
    name=config['dataset_name'],
    use_lcc=config['use_lcc']
)
dataset.data = dataset.data.to(device)
# +
# AP-GCN with at most 10 propagation steps and halting penalty 0.05.
model = APGCN(dataset,10, prop_penalty=0.05)
# NOTE(review): `val_seeds` is not imported above (only test_seeds, gen_seeds,
# quick_seeds) — running with config['test'] = False would raise NameError.
results = run(
    dataset,
    model,
    seeds=test_seeds if config['test'] else val_seeds,
    #seeds= quick_seeds,
    lr=config['lr'],
    weight_decay=config['weight_decay'],
    test=config['test'],
    num_development=config['num_development'],
    device=device
)
#save_obj(results,'results_' + config['dataset_name'])
# Summarize all runs as mean +/- 95% bootstrap CI and print the final report.
report = summary(results)
print("FINAL\n"
      "Train Accuracy: {:.2f} ± {:.2f}%\n"
      "Stopping Accuracy: {:.2f} ± {:.2f}%\n"
      "Test Accuracy: {:.2f} ± {:.2f}%\n"
      "Steps: {:.2f} ± {:.2f}\n"
      "Epochs: {:.2f} ± {:.2f}\n"
      "Runtime: {:.4f} ± {:.4f}\n"
      .format(
          report['train_acc'] * 100,
          report['train_acc_ci'] * 100,
          report['val_acc'] * 100,
          report['val_acc_ci'] * 100,
          report['test_acc']*100,
          report['test_acc_ci']*100,
          report['steps'],
          report['steps_ci'],
          report['epoch'],
          report['epoch_ci'],
          report['runtime'],
          report['runtime_ci']))
plot_density(results)
# Release GPU memory once the experiment is done.
del model, dataset
torch.cuda.empty_cache()
|
AP-GCN_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# ** Two stage problem. **
# * Default Classifier: Use AUC or F1-score to evaluate performance because of class imbalance.
# * Default Loss: Use MAE ( Evaluation metric ) to evaluate performance
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import os, sys
from sklearn.cross_validation import train_test_split, StratifiedKFold
from sklearn.metrics import mean_absolute_error
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import MinMaxScaler, Imputer
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, f1_score
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# Fix the RNG so the shuffle/splits below are reproducible.
np.random.seed(3)
basepath = os.path.expanduser('~/Desktop/src/Loan_Default_Prediction/')
# Make the project's src/ package (data, features, models) importable.
sys.path.append(os.path.join(basepath, 'src'))
from data import *
from features import *
from models import *
# -
# Load the (large, all-float) training table indexed by loan id.
train = pd.read_csv(os.path.join(basepath, 'data/raw/train_v2.csv'),
                    index_col='id',
                    dtype=np.float32
                    )
# test = pd.read_csv(os.path.join(basepath, 'data/raw/test_v2.csv'), index_col='id')
# sample_sub = pd.read_csv(os.path.join(basepath, 'data/raw/sampleSubmission.csv'))
# loss target: binary flag for whether any loss was incurred.
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use builtin int.
train['is_default'] = (train.loss > 0).astype(int)
# ** Training examples are in the chronological order but not in the test set. **
train = train.iloc[np.random.permutation(len(train))] # shuffle training set.
# +
# features to remove from training set
# NOTE(review): presumably leaky/duplicated columns identified during EDA —
# confirm provenance. The two target columns are also excluded.
features_to_remove = ['f33', 'f678', 'f37', 'f764', 'f700', \
                      'f34', 'f38', 'f702', 'f701', 'f736', 'f35']
features_to_remove.extend(['is_default', 'loss'])
# +
# Stratified 30% sample for training, preserving the default/non-default ratio.
itrain, itest = get_stratified_sample(train, train.is_default, train_size=.3, random_state=11)
X_train = train.iloc[itrain][train.columns.drop(features_to_remove)]
X_test = train.iloc[itest][train.columns.drop(features_to_remove)]
y_train = train.is_default.iloc[itrain]
y_test = train.is_default.iloc[itest]
# -
print(X_train.shape, X_test.shape)
# Free the full frame; only the splits are needed from here on.
del train
# ** Two Step Modelling. **
# * Pipeline for predicting whether there was any loss.
# * Pipeline to predict the actual value in case any loss was incurred.
# Stage 1: classify whether a loan defaults at all.
# NOTE(review): Imputer was removed in scikit-learn 0.22 (SimpleImputer replaces
# it); this notebook targets the older API imported above.
pipeline_default = Pipeline([
    ('feature_union', FeatureUnion([
        ('golden_feature', GoldenFeatures())
    ])),
    ('imputer', Imputer()),
    ('scaler', MinMaxScaler()),
    ('select', TreeBasedSelection(ExtraTreesClassifier(), y_train, n_features_to_select=30)),
    ('union', FeatureUnion([
        ('feature_interaction', FeatureInteraction())
    ])),
    ('model', RandomForestClassifier(n_estimators=25, n_jobs=2, random_state=5))
])
pipeline_default.fit(X_train, y_train)
# Predicted probability of default for the held-out split.
yhat = pipeline_default.predict_proba(X_test)[:, 1]
print('AUC score: %f'%(roc_auc_score(y_test, yhat)))
best_f1_score = bestF1(y_test, yhat)
# Stage 2 pipeline (intended to predict loss magnitude). Currently identical to
# the classifier pipeline and never fitted — see the NOTE below.
pipeline_loss = Pipeline([
    ('feature_union', FeatureUnion([
        ('golden_feature', GoldenFeatures())
    ])),
    ('imputer', Imputer()),
    ('scaler', MinMaxScaler()),
    ('select', TreeBasedSelection(ExtraTreesClassifier(), y_train, n_features_to_select=30)),
    ('union', FeatureUnion([
        ('feature_interaction', FeatureInteraction())
    ])),
    ('model', RandomForestClassifier(n_estimators=25, n_jobs=2, random_state=5))
])
# ** Submission **
sample_sub = pd.read_csv(os.path.join(basepath, 'data/raw/sampleSubmission.csv'))
# NOTE(review): `loss` is never defined in this notebook — pipeline_loss is not
# fitted or used for prediction, so this cell raises NameError as written.
sample_sub['loss'] = loss
sample_sub.to_csv(os.path.join(basepath, 'submissions/baseline.csv'), index=False)
|
notebooks/BenchmarkModel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
# Monthly NICS background-check counts, one row per state/territory per year.
df = pd.read_csv('NICS_Firearm_Checks.csv', index_col='State/Territory')
# -
df.shape
df.info()
df.fillna(0, inplace=True)
df
# Strip thousands separators, then coerce every column to integer counts.
df = df.apply(lambda x: pd.to_numeric(x.astype(str).str.replace(',','')))
df=df.astype(int)
df.info()
# Group rows by calendar year for the per-year slices below.
year_grp = df.groupby(['Year'])
# +
# 2016: total checks per month, summed across all states/territories.
df_2016 = year_grp.get_group(2016)
month_sum=[]
for month in df.columns[1:13]:
    month_sum.append(df_2016[month].sum())
df_2016.columns[1:13]
fig, ax = plt.subplots(figsize=(8,7))
plt.xlabel('Month')
plt.ylabel('Background Checks')
plt.title('2016 Background Checks by Month')
ax.plot(df_2016.columns[1:13], month_sum)
plt.show()
# +
# 2015: same monthly plot as the 2016 cell above.
df_2015 = year_grp.get_group(2015)
month_sum=[]
for month in df.columns[1:13]:
    month_sum.append(df_2015[month].sum())
df_2015.columns[1:13]
fig, ax = plt.subplots(figsize=(8,7))
plt.xlabel('Month')
plt.ylabel('Background Checks')
plt.title('2015 Background Checks by Month')
ax.plot(df_2015.columns[1:13], month_sum)
plt.show()
# +
# 2020: same monthly plot as the cells above.
df_2020 = year_grp.get_group(2020)
month_sum=[]
for month in df.columns[1:13]:
    month_sum.append(df_2020[month].sum())
df_2020.columns[1:13]
fig, ax = plt.subplots(figsize=(8,7))
plt.xlabel('Month')
plt.ylabel('Background Checks')
plt.title('2020 Background Checks by Month')
ax.plot(df_2020.columns[1:13], month_sum)
plt.show()
# -
# Per-year totals for each month column (drop the precomputed Totals column).
df_yr = df.groupby('Year').sum().drop(columns='Totals')
df_yr
# +
# Month-by-year totals; flatten into one chronological series for a 2000-2020 timeline.
df_new = pd.read_csv('NICS_Firearm_Checks_Month_Year.csv', index_col='Year')
df_new.dropna(inplace=True)
# Strip thousands separators and coerce every column to numeric.
df_new = df_new.apply(lambda x: pd.to_numeric(x.astype(str).str.replace(',','')))
df_new.drop(columns='Totals', inplace=True)
# Row-major flatten: values in Year order, months left-to-right within each year.
df_new_list = df_new.values.tolist()
sums = [elem for items in df_new_list for elem in items]
# Matching "Month-Year" label per data point.
labels = []
for i in df_new.index:
    for j in df_new.columns:
        labels.append(j+"-"+str(int(i)))
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(labels, sums)
# One tick at January of each of the 21 years.
# (Fix: removed an unused local `labs` list that duplicated the tick labels.)
ax.set_xticks([i*12-11 for i in range(1,22)])
ax.set_xticklabels([i for i in df_new.index.astype(int)], rotation = 50)
plt.xlabel('Year')
plt.ylabel('Background Checks (millions)')
plt.title('Firearm Background Checks - 2000 to 2020')
# -
# States ranked by total 2016 checks.
df_2016.sort_values(by='Totals', ascending=False)
#Yearly Sums
df1 = year_grp['Totals'].sum()
df1 = pd.DataFrame(df1)
df1 = df1.rename(columns = {'Totals': 'Yearly Sum'})
df1
fig, ax = plt.subplots()
plt.xlabel('Year')
plt.ylabel('Firearm Background Checks')
plt.title('Background Checks by Year')
ax.plot(df1.index, df1['Yearly Sum'])
# Avoid scientific notation on the y axis.
ax.ticklabel_format(style='plain')
# Bar-chart view of the same yearly totals.
df1.plot(kind='bar', title = 'Background Checks by Year', xlabel = 'Year', ylabel = "Background Checks (tens of millions)", legend=None)
plt.show()
# +
#Percentage Increase from Previous Year
# Fix: the original looped over range(2001, 2020) re-assigning this same column
# 19 times with identical results; a single pct_change() call is equivalent.
df1['Pct Change'] = df1['Yearly Sum'].pct_change()*100
df1
# -
#looking at population change in 18-24 - 2020 population number not finalized - used an approximation of 7% growth.
# NOTE(review): the comment says "18-24" but the variable is named popover18 —
# confirm which population these 2000-2020 counts actually represent.
popover18 = [209786222, 212297780, 214688736, 217007175, 219507563, 221992930, 224622198, 227211802, 229989364, 232637362,
             235201000, 237649350, 240134326, 242425013, 244737285, 247017112, 249291898, 251400193, 253368356, 255200373, 273064399]
df1['popover18'] = popover18
df1
# Year-over-year percentage change in that population, for comparison with checks.
df1['Pct Change Pop'] = df1['popover18'].pct_change()*100
df1
# Which states had the highest total number of checks
df.sort_values(by=['Totals'], ascending=[False]).head(20)
# +
#Which state had the lowest quantity
#remove territories since those are lowest outside of states
bad_index = ['Virgin Islands', 'Mariana Islands', 'District of Columbia', 'Guam', 'Puerto Rico']
df[~df.index.isin(bad_index)].sort_values(by=['Totals'], ascending=[True]).head(20)
# -
# Reload the state/year checks data for the political-lean analysis.
df2 = pd.read_csv('NICS_Firearm_Checks.csv', index_col='Year')
df2.fillna(0, inplace=True)
# Strip thousands separators and coerce every column to numeric.
# BUG FIX: the original line applied the conversion to `df` (a different frame)
# and assigned the result to df2, silently discarding the CSV just read above
# along with the fillna(0) — convert df2 itself instead.
df2 = df2.apply(lambda x: pd.to_numeric(x.astype(str).str.replace(',','')))
df2=df2.astype(int)
df2
# +
#defining buckets for red/blue/swing states based on 2020 election results
red_states = ['Idaho', 'Utah', 'Montana', 'Wyoming', 'North Dakota', 'South Dakota', 'Nebraska', 'Kansas', 'Oklahoma',
              'Texas', 'Iowa', 'Missouri', 'Arkansas', 'Louisiana', 'Indiana', 'Kentucky', 'Tennessee', 'Mississippi',
              'Alabama', 'North Carolina', 'South Carolina', 'West Virginia', 'Alaska', 'New Hampshire']
swing_states = ['Nevada', 'Arizona', 'Minnesota', 'Wisconsin', 'Ohio', 'Florida', 'Georgia', 'Pennsylvania', 'Michigan',
                'Maine']
blue_states = ['Washington', 'Oregon', 'California', 'Hawaii', 'Colorado', 'New Mexico', 'Illinois', 'Virginia', 'New York',
               'Vermont', 'Massachusetts', 'Rhode Island', 'Connecticut', 'New Jersey', 'Maryland', 'Delaware']
# +
# Drop territories. NOTE(review): the boolean mask is built from df's index but
# applied to df2 — assumes both frames have identical row order; confirm.
df2_states = df2[~df.index.isin(bad_index)]
def lean(x):
    """Map a state name to a numeric political-lean code.

    1 = red, 2 = blue, 3 = swing, using the 2020-election buckets
    defined above. A name found in none of the three lists is
    returned unchanged.
    """
    for code, bucket in enumerate((red_states, blue_states, swing_states), start=1):
        if x in bucket:
            return code
    return x
df2_states
df2_states.reset_index(inplace=True)
df2_states
# Tag each row with its political-lean code (1=red, 2=blue, 3=swing).
df2_states['Political'] =df2_states['State/Territory'].apply(lean)
df2_states[['State/Territory','Totals', 'Political']]
# +
# Sum yearly totals within each (year, lean) bucket.
pltc_grp = df2_states.groupby(['Year', 'Political'])
pltc_states = pltc_grp['Totals'].sum()
df3 = pd.DataFrame(pltc_states)
df3.sort_values(by='Year', ascending=False)  # NOTE(review): result discarded (no assignment/inplace)
print(df3)
# Pivot lean codes to columns: one 'Totals' column per political bucket.
plot_df3 = df3.unstack('Political').loc[:, 'Totals']
plot_df3.index = plot_df3.index.tolist()
# +
import matplotlib.ticker
plt.rcParams['figure.figsize'] = [10, 10]
df4 = pd.DataFrame(plot_df3)
# Columns 1/2/3 are the lean codes assigned by lean() above.
red = [i for i in df4[1]]
blue = [i for i in df4[2]]
swing = [i for i in df4[3]]
index = [int(i) for i in plot_df3.index]
plt.plot(index, red, color='r', label = 'Red States')
plt.plot(index, blue, color='#0111f9', label = 'Blue States')
plt.plot(index, swing, color='#196D36', label = 'Swing States')
# Tick every second year, rendered as plain integers (no scientific notation).
locator = matplotlib.ticker.MultipleLocator(2)
plt.gca().xaxis.set_major_locator(locator)
formatter = matplotlib.ticker.StrMethodFormatter("{x:.0f}")
plt.gca().xaxis.set_major_formatter(formatter)
plt.xlabel('Years')
plt.ylabel('# Background Checks')
plt.title('Yearly Background Checks by State Political Lean')
plt.legend()
plt.show()
# -
#normalize state totals by population
df_pop = pd.read_csv('state_pop_data_2010_to_2019.csv')
filter1 = ['District of Columbia', 'Puerto Rico']
# NOTE(review): the filter result is discarded (no assignment); df_pop still
# contains DC and Puerto Rico here. The filtered copy is rebuilt below as `cleaned`.
df_pop[~df_pop['NAME'].isin(filter1)]
bad_index = ['Virgin Islands', 'Mariana Islands', 'District of Columbia', 'Guam', 'Puerto Rico']
# NOTE(review): also discarded — df is not actually filtered by this line.
df[~df.index.isin(bad_index)]
df.reset_index(inplace=True)
df_statetotals = df[['State/Territory', 'Year','Totals']].copy()
df_statetotals.set_index('State/Territory', inplace=True)
df_statetotals
#2019 Totals
df_statetotals.groupby(['Year']).get_group(2019)
# Population frame without DC/PR; states become the index for joining below.
cleaned = pd.DataFrame(df_pop[~df_pop['NAME'].isin(filter1)])
state_normalized = pd.DataFrame(cleaned['NAME'])
state_normalized.set_index('NAME', inplace=True)
# +
#2010:
# 2010 uses the decennial census count; 2011-2019 use yearly estimates.
years=[2010]
for i in years:
    state_normalized[str(i) + ' Checks'] = (df_statetotals.groupby(['Year']).get_group(i))['Totals']
    state_normalized[str(i) + ' Pop'] = cleaned.set_index('NAME')['CENSUS'+str(i)+'POP']
    state_normalized[str(i) + ' Normalized'] = state_normalized[str(i) + ' Checks']/state_normalized[str(i) + ' Pop']
#2011-2019:
years=[2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]
for i in years:
    state_normalized[str(i) + ' Checks'] = (df_statetotals.groupby(['Year']).get_group(i))['Totals']
    state_normalized[str(i) + ' Pop'] = cleaned.set_index('NAME')['POPESTIMATE'+str(i)]
    state_normalized[str(i) + ' Normalized'] = state_normalized[str(i) + ' Checks']/state_normalized[str(i) + ' Pop']
state_normalized
# -
#Normalized background checks by state population
# idxmax per column: which state maximizes each checks/population ratio.
maxValue = state_normalized.idxmax()
print(maxValue)
#Actual values for Kentucky's ratio
state_normalized.loc['Kentucky', ['2010 Normalized', '2011 Normalized', '2012 Normalized',
'2013 Normalized', '2014 Normalized', '2015 Normalized', '2016 Normalized',
'2017 Normalized', '2018 Normalized', '2019 Normalized']]
|
Firearm Background Checks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #Making multipanel plots with matplotlib
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# +
# Sample sin/cos curves over one full period.
x=np.linspace(0,2*np.pi,100)
print(x[-1],2*np.pi)
y=np.sin(x)
z=np.cos(x)
w=np.sin(4*x)
v=np.cos(4*x)
# +
# Two side-by-side panels: sin on the left, cos on the right.
f, axarr=plt.subplots(1,2)
axarr[0].plot(x,y)
axarr[0].set_xlabel('x')
axarr[0].set_ylabel('sin(x)')
axarr[0].set_title(r'$\sin(x)$')
axarr[1].plot(x,z)
axarr[1].set_xlabel('x')
axarr[1].set_ylabel('cos(x)')
axarr[1].set_title(r'$\cos(x)$')
f.subplots_adjust(wspace=0.4)  # widen the gap between the panels
axarr[0].set_aspect('equal')
axarr[1].set_aspect(np.pi)
# A second, single-panel figure with all four curves and a legend.
fig=plt.figure(figsize=(6,6))
plt.plot(x,y,label=r'$y=\sin(x)$')
plt.plot(x,z,label=r'$y=\cos(x)$')
plt.plot(x,w,label=r'$y=\sin(4x)$')
plt.plot(x,v,label=r'$y=\cos(4x)$')
plt.xlabel(r'$x$')
plt.ylabel(r'$y(x)$')
plt.xlim([0,2*np.pi])
plt.ylim([-1.2,1.2])
plt.legend(loc=1,framealpha=0.95)  # upper-right, nearly opaque legend box
plt.gca().set_aspect(np.pi/1.2)  # square-looking axes given the x/y ranges
plt.savefig('legendx.png',bbox_inches="tight",dpi=650)
# -
|
multiplanel_figures.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analyzing IMDB Data in Keras
# +
# Imports
import numpy as np
import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
# %matplotlib inline
np.random.seed(42)
# -
# ## 1. Loading the data
# This dataset comes preloaded with Keras, so one simple command will get us training and testing data. There is a parameter for how many words we want to look at. We've set it at 1000, but feel free to experiment.
# +
# Loading the data (it's preloaded in Keras)
# num_words=1000 keeps only the 1000 most frequent words in the vocabulary.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)
print(x_train.shape)
print(x_test.shape)
# -
# ## 2. Examining the data
# Notice that the data has been already pre-processed, where all the words have numbers, and the reviews come in as a vector with the words that the review contains. For example, if the word 'the' is the first one in our dictionary, and a review contains the word 'the', then there is a 1 in the corresponding vector.
#
# The output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review, and 0 is negative.
print(x_train[0])
print(y_train[0])
# ## 3. One-hot encoding the output
# Here, we'll turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.
# One-hot encoding the output into vector mode, each of length 1000
# Each review becomes a 1000-dim binary bag-of-words vector.
tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print(x_train[0])
# And we'll also one-hot encode the output.
# One-hot encoding the output
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train.shape)
print(y_test.shape)
# ## 4. Building the model architecture
# Build a model here using sequential. Feel free to experiment with different layers and sizes! Also, experiment adding dropout to reduce overfitting.
# +
# TODO: Build the model architecture
# Simple MLP: 512 ReLU units with dropout, softmax over the 2 sentiment classes.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(1000,)))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
# TODO: Compile the model using a loss function and an optimizer.
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.summary()
# -
# ## 5. Training the model
# Run the model here. Experiment with different batch_size, and number of epochs!
# TODO: Run the model. Feel free to experiment with different batch sizes and number of epochs.
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=0)  # verbose=0 suppresses per-epoch logs
# ## 6. Evaluating the model
# This will give you the accuracy of the model, as evaluated on the testing set. Can you get something over 85%?
score = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: ", score[1])  # score = [loss, accuracy]
|
IMDB-keras/IMDB_In_Keras.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# TODO: Set weight1, weight2, and bias
# The target truth table below is (NOT input2): output is True exactly when the
# second input is 0. So input1 carries no weight, input2 gets a negative weight,
# and the bias keeps the activation non-negative when input2 is absent.
# FIX: the original values (-0.9, -0.9, 1.1) misclassified the (0, 1) case
# (linear combination 0.2 >= 0 fired True where False was expected).
weight1 = 0.0
weight2 = -1.0
bias = 0.5
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
    linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
    output = int(linear_combination >= 0)  # step activation
    is_correct_string = 'Yes' if output == correct_output else 'No'
    outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
# -
|
Deep Learning/Logical-Operator-Percetron.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Conjugate Priors
# + [markdown] tags=["remove-cell"]
# Think Bayes, Second Edition
#
# Copyright 2020 <NAME>
#
# License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
# + tags=["remove-cell"]
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
# NOTE(review): in this .py (jupytext) form the `!pip`/`!wget` magics below are
# comments, leaving these `if` statements with empty bodies — the script form is
# not directly runnable; it only works round-tripped back into a notebook.
if IN_COLAB:
# !pip install empiricaldist
# + tags=["remove-cell"]
# Get utils.py
import os
if not os.path.exists('utils.py'):
# !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py
# + tags=["remove-cell"]
from utils import set_pyplot_params
set_pyplot_params()
# -
# In the previous chapters we have used grid approximations to solve a variety of problems.
# One of my goals has been to show that this approach is sufficient to solve many real-world problems.
# And I think it's a good place to start because it shows clearly how the methods work.
#
# However, as we saw in the previous chapter, grid methods will only get you so far.
# As we increase the number of parameters, the number of points in the grid grows (literally) exponentially.
# With more than 3-4 parameters, grid methods become impractical.
#
# So, in the remaining three chapters, I will present three alternatives:
#
# 1. In this chapter we'll use **conjugate priors** to speed up some of the computations we've already done.
#
# 2. In the next chapter, I'll present Markov chain Monte Carlo (MCMC) methods, which can solve problems with tens of parameters, or even hundreds, in a reasonable amount of time.
#
# 3. And in the last chapter we'll use Approximate Bayesian Computation (ABC) for problems that are hard to model with simple distributions.
#
# We'll start with the World Cup problem.
# ## The World Cup problem
#
# In Chapter xxx, we solved the World Cup problem using a Poisson process to model goals in a soccer game as random events that are equally likely to occur at any point during a game.
#
# We used a gamma distribution to represent the prior distribution of $\lambda$, the goal-scoring rate. And we used a Poisson distribution to compute the probability of $k$, the number of goals scored.
#
# Here's a gamma object that represents the prior distribution.
# +
from scipy.stats import gamma
# Prior for the goal-scoring rate lambda: one-parameter gamma (scale defaults to 1).
alpha = 1.4
dist = gamma(alpha)
# -
# And here's a grid approximation.
# +
import numpy as np
from utils import pmf_from_dist
# Discretize lambda on [0, 10] with 101 points to build the grid prior.
lams = np.linspace(0, 10, 101)
prior = pmf_from_dist(dist, lams)
# -
# Here's the likelihood of scoring 4 goals for each possible value of `lam`.
# +
from scipy.stats import poisson
k = 4
likelihood = poisson(lams).pmf(k)
# -
# And here's the update.
# Bayes: posterior ∝ prior × likelihood, then renormalize.
posterior = prior * likelihood
posterior.normalize()
# So far, this should be familiar.
# And here's the posterior distribution of the goal-scoring rate.
# ## The conjugate prior
#
# In Chapter xxx, I presented three reasons to use a gamma distribution for the prior and said there was a fourth reason I would reveal later.
# Well, now is the time.
#
# The other reason I chose the gamma distribution is that it is the "conjugate prior" of the Poisson distribution, so-called because the two distributions are connected or coupled, which is what "conjugate" means.
#
# In the next section I'll explain *how* they are connected, but first I'll show you the consequence of this connection, which is that there is a remarkably simple way to compute the posterior distribution.
#
# However, in order to demonstrate it, we have to switch from the one-parameter version of the gamma distribution to the two-parameter version. Since the first parameter is called `alpha`, you might guess that the second parameter is called `beta`.
#
# The following function takes `alpha` and `beta` and makes an object that represents a gamma distribution with those parameters.
def make_gamma_dist(alpha, beta):
    """Build a two-parameter gamma distribution.

    `alpha` is the shape and `beta` the rate parameter; SciPy's gamma
    takes a scale, which is the reciprocal of the rate.  Both parameters
    are attached to the returned frozen distribution so conjugate
    updates can read them back later.
    """
    frozen = gamma(alpha, scale=1/beta)
    frozen.alpha, frozen.beta = alpha, beta
    return frozen
# Here's the prior distribution with `alpha=1.4` again and `beta=1`.
# +
alpha = 1.4
beta = 1
prior_gamma = make_gamma_dist(alpha, beta)
prior_gamma.mean()  # gamma mean is alpha/beta = 1.4
# -
# Now I claim without proof that we can do a Bayesian update with `k` goals just by making a gamma distribution with parameters `alpha+k` and `beta+1`.
def update_gamma(prior, data):
    """Conjugate update of a gamma prior.

    `data` is a (k, t) pair: k events observed during elapsed time t.
    The posterior is gamma(alpha + k, beta + t).
    """
    events, elapsed = data
    return make_gamma_dist(prior.alpha + events, prior.beta + elapsed)
# Here's how we update it with `k=4` goals in `t=1` game.
data = 4, 1
posterior_gamma = update_gamma(prior_gamma, data)
# After all the work we did with the grid, it might seem absurd that we can do a Bayesian update by adding two pairs of numbers.
# So let's confirm that it works.
#
# I'll make a `Pmf` with a discrete approximation of the posterior distribution.
# Evaluate the closed-form posterior on the same lambda grid for comparison.
posterior_conjugate = pmf_from_dist(posterior_gamma, lams)
# The following figure shows the result along with the posterior we computed using the grid algorithm.
# + tags=["hide-cell"]
from utils import decorate
def decorate_rate(title=''):
    """Add the standard axis labels for a goal-scoring-rate plot."""
    decorate(title=title, xlabel='Goal scoring rate (lam)', ylabel='PMF')
# + tags=["hide-input"]
# Overlay the grid posterior and the conjugate (closed-form) posterior.
posterior.plot(label='grid posterior', color='C1')
posterior_conjugate.plot(label='conjugate posterior',
                         color='C4', linestyle='dotted')
decorate_rate('Posterior distribution')
# -
# They are the same other than small differences due to floating-point approximations.
# + tags=["hide-cell"]
np.allclose(posterior, posterior_conjugate)
# -
# ## What the Actual?
#
# To understand how that works, we'll write the PDF of the gamma prior and the PMF of the Poisson likelihood, then multiply them together, because that's what the Bayesian update does.
# We'll see that the result is a gamma distribution, and we'll derive its parameters.
#
# Here's the PDF of the gamma prior, which is the probability density for each value of $\lambda$, given parameters $\alpha$ and $\beta$:
#
# $$\lambda^{\alpha-1} e^{-\lambda \beta}$$
#
# I have omitted the normalizing factor; since we are planning to normalize the posterior distribution anyway, we don't really need it.
#
# Now suppose a team scores $k$ goals in $t$ games.
# The probability of this data is given by PMF of the Poisson distribution, which is a function of $k$ with $\lambda$ and $t$ as parameters.
#
# $$\lambda^k e^{-\lambda t}$$
#
# Again, I have omitted the normalizing factor, which makes it clearer that the gamma and Poisson distributions have the same functional form.
# When we multiply them together, we can pair up the factors and add up the exponents.
# The result is the unnormalized posterior distribution,
#
# $$\lambda^{\alpha-1+k} e^{-\lambda(\beta + t)}$$
#
# which we can recognize as an unnormalized gamma distribution with parameters $\alpha + k$ and $\beta + t$.
#
# This derivation provides insight into what the parameters of the posterior distribution mean: $\alpha$ reflects the number of events that have occurred; $\beta$ reflects the elapsed time.
# ## Binomial likelihood
#
# As a second example, let's look again at the Euro problem.
# When we solved it with a grid algorithm, we started with a uniform prior:
# +
from utils import make_uniform
# Uniform grid prior on x, the coin's probability of heads.
xs = np.linspace(0, 1, 101)
uniform = make_uniform(xs, 'uniform')
# -
# We used the binomial distribution to compute the likelihood of the data, which was 140 heads out of 250 attempts.
# +
from scipy.stats import binom
k, n = 140, 250
xs = uniform.qs
likelihood = binom.pmf(k, n, xs)
# -
# Then we computed the posterior distribution in the usual way.
# + tags=["hide-output"]
posterior = uniform * likelihood
posterior.normalize()
# -
# We can solve this problem more efficiently using the conjugate prior of the binomial distribution, which is the beta distribution.
#
# The beta distribution is bounded between 0 and 1, so it works well for representing the distribution of a probability like `x`.
# It has two parameters, called `alpha` and `beta`, that determine the shape of the distribution.
#
# SciPy provides an object called `beta` that represents a beta distribution.
# The following function takes `alpha` and `beta` and returns a new `beta` object.
# +
import scipy.stats
def make_beta(alpha, beta):
    """Build a beta distribution and remember its parameters.

    The `alpha` and `beta` shape parameters are stored on the frozen
    distribution so conjugate updates can retrieve them later.
    """
    frozen = scipy.stats.beta(alpha, beta)
    frozen.alpha, frozen.beta = alpha, beta
    return frozen
# -
# It turns out that the uniform distribution, which we used as a prior, is the beta distribution with parameters `alpha=1` and `beta=1`.
# So we can make a `beta` object that represents a uniform distribution, like this:
# +
# beta(1, 1) is the uniform distribution on [0, 1].
alpha = 1
beta = 1
prior_beta = make_beta(alpha, beta)
# -
# Now let's figure out how to do the update. As in the previous example, we'll write the PDF of the prior distribution and the PMF of the likelihood function, and multiply them together. We'll see that the product has the same form as the prior, and we'll derive its parameters.
#
# Here is the PDF of the beta distribution, which is a function of $x$ with $\alpha$ and $\beta$ as parameters.
#
# $$x^{\alpha-1} (1-x)^{\beta-1}$$
#
# Again, I have omitted the normalizing factor, which we don't need because we are going to normalize the distribution after the update.
#
# And here's the PMF of the binomial distribution, which is a function of $k$ with $n$ and $x$ as parameters.
#
# $$x^{k} (1-x)^{n-k}$$
#
# Again, I have omitted the normalizing factor.
# Now when we multiply the beta prior and the binomial likelihood, the result is
#
# $$x^{\alpha-1+k} (1-x)^{\beta-1+n-k}$$
#
# which we recognize as an unnormalized beta distribution with parameters $\alpha+k$ and $\beta+n-k$.
#
# So if we observe `k` successes in `n` trials, we can do the update by making a beta distribution with parameters `alpha+k` and `beta+n-k`.
# That's what this function does:
def update_beta(prior, data):
    """Conjugate update of a beta prior.

    `data` is a (k, n) pair: k successes out of n trials.
    The posterior is beta(alpha + k, beta + n - k).
    """
    successes, trials = data
    return make_beta(prior.alpha + successes,
                     prior.beta + trials - successes)
# Again, the conjugate prior gives us insight into the meaning of the parameters; $\alpha$ is related to the number of observed successes; $\beta$ is related to the number of failures.
#
# Here's how we do the update with the observed data.
# 140 heads in 250 spins -> posterior beta(alpha+140, beta+110).
data = 140, 250
posterior_beta = update_beta(prior_beta, data)
# To confirm that it works, I'll evaluate the posterior distribution for the possible values of `xs` and put the results in a `Pmf`.
posterior_conjugate = pmf_from_dist(posterior_beta, xs)
# And we can compare the posterior distribution we just computed with the results from the grid algorithm.
# + tags=["hide-cell"]
def decorate_euro(title):
    """Add the standard axis labels for a Euro-problem plot."""
    decorate(title=title, xlabel='Proportion of heads (x)', ylabel='Probability')
# + tags=["hide-input"]
# Overlay grid vs conjugate posteriors for the Euro problem.
posterior.plot(label='grid posterior', color='C1')
posterior_conjugate.plot(label='conjugate posterior',
                         color='C4', linestyle='dotted')
decorate_euro(title='Posterior distribution of x')
# -
# They are the same other than small differences due to floating-point approximations.
#
# The examples so far are problems we have already solved, so let's try something new.
# + tags=["hide-cell"]
np.allclose(posterior, posterior_conjugate)
# -
# ## Lions and tigers and bears
#
# Suppose we visit a wild animal preserve where we know that the only animals are lions and tigers and bears, but we don't know how many of each there are.
# During the tour, we see 3 lions, 2 tigers, and one bear. Assuming that every animal had an equal chance to appear in our sample, what is the probability that the next animal we see is a bear?
#
# To answer this question, we'll use the data to estimate the prevalence of each species, that is, what fraction of the animals belong to each species.
# If we know the prevalences, we can use the multinomial distribution to compute the probability of the data.
# For example, suppose we know that the fraction of lions, tigers, and bears is 0.4, 0.3, and 0.3, respectively.
#
# In that case the probability of the data is:
# +
from scipy.stats import multinomial
# Observed animals: 3 lions, 2 tigers, 1 bear.
data = 3, 2, 1
n = np.sum(data)
ps = 0.4, 0.3, 0.3
multinomial.pmf(data, n, ps)
# -
# Now, we could choose a prior for the prevalences and do a Bayesian update using the multinomial distribution to compute the probability of the data.
#
# But there's an easier way, because the multinomial distribution has a conjugate prior: the Dirichlet distribution.
# ## The Dirichlet distribution
#
# The Dirichlet distribution is a multivariate distribution, like the multivariate normal distribution we used in Chapter xxx to describe the distribution of penguin measurements.
#
# In that example, the quantities in the distribution are pairs of flipper length and culmen length, and the parameters of the distribution are a vector of means and a matrix of covariances.
#
# In a Dirichlet distribution, the quantities are vectors of probabilities, $\pmb{x}$, and the parameter is a vector, $\pmb{\alpha}$.
#
# An example will make that clearer. SciPy provides a `dirichlet` object that represents a Dirichlet distribution.
# Here's an instance with $\pmb{\alpha} = 1, 2, 3$.
# +
from scipy.stats import dirichlet
# A Dirichlet over 3 probabilities with parameter vector (1, 2, 3).
alpha = 1, 2, 3
dist = dirichlet(alpha)
# -
# Since we provided three parameters, the result is a distribution of three variables.
# If we draw a random value from this distribution, like this:
dist.rvs()
# + tags=["hide-cell"]
dist.rvs().sum()  # components of a draw always sum to 1
# -
# The result is an array of three values.
# They are bounded between 0 and 1, and they always add up to 1, so they can be interpreted as the probabilities of a set of outcomes that are mutually exclusive and collectively exhaustive.
#
# Let's see what the distributions of these values look like. I'll draw 1000 random vectors from this distribution, like this:
sample = dist.rvs(1000)
# + tags=["hide-cell"]
sample.shape  # (1000, 3)
# -
# The result is an array with 1000 rows and three columns. I'll compute the `Cdf` of the values in each column.
# +
from empiricaldist import Cdf
# One empirical CDF per component of the sampled probability vectors.
cdfs = [Cdf.from_seq(col)
        for col in sample.transpose()]
# -
# The result is a list of `Cdf` objects that represent the marginal distributions of the three variables. Here's what they look like.
# + tags=["hide-input"]
for i, cdf in enumerate(cdfs):
    label = f'Column {i}'
    cdf.plot(label=label)
decorate()
# -
# Column 0, which corresponds to the lowest parameter, contains the lowest probabilities.
# Column 2, which corresponds to the highest parameter, contains the highest probabilities.
#
# As it turns out, these marginal distributions are beta distributions.
# The following function takes a sequence of parameters, `alpha`, and computes the marginal distribution of variable `i`:
def marginal_beta(alpha, i):
    """Return the ith marginal of a Dirichlet(alpha) distribution.

    If the parameter vector sums to `total`, the ith marginal is
    beta(alpha[i], total - alpha[i]).
    """
    a_i = alpha[i]
    return make_beta(a_i, np.sum(alpha) - a_i)
# We can use it to compute the marginal distribution for the three variables.
# Analytic beta marginal for each component of the Dirichlet.
marginals = [marginal_beta(alpha, i)
             for i in range(len(alpha))]
# The following plot shows the CDF of these distributions as gray lines and compares them to the CDFs of the samples.
# + tags=["hide-input"]
xs = np.linspace(0, 1, 101)
for i in range(len(alpha)):
    label = f'Column {i}'
    pmf = pmf_from_dist(marginals[i], xs)
    pmf.make_cdf().plot(label='_nolegend', color='C5')
    cdf = cdfs[i]
    cdf.plot(label=label, linestyle='dotted')
decorate()
# -
# This confirms that the marginals of the Dirichlet distribution are beta distributions.
#
# And that's useful because the Dirichlet distribution is the conjugate prior for the multinomial likelihood function.
#
# If the prior distribution is Dirichlet with parameter vector `alpha` and the data is a vector of observations, `data`, the posterior distribution is Dirichlet with parameter vector `alpha + data`.
#
# As an exercise at the end of this chapter, you can use this method to solve the Lions and Tigers and Bears problem.
# ## Summary
#
# After reading this chapter, if you feel like you've been tricked, I understand. It turns out that many of the problems in this book can be solved with just a few arithmetic operations. So why did we go to all the trouble of using grid algorithms?
#
# Sadly, there are only a few problems we can solve with conjugate priors; in fact, this chapter includes most of the ones that are useful in practice.
#
# For the vast majority of problems, there is no conjugate prior and no shortcut to compute the posterior distribution.
# That's why we need grid algorithms and the methods in the next two chapters, Approximate Bayesian Computation (ABC) and Markov chain Monte Carlo methods (MCMC).
# ## Exercises
#
# **Exercise:** In the second version of the World Cup problem, the data we use for the update is not the number of goals in a game, but the time until the first goal.
# So the probability of the data is given by the exponential distribution rather than the Poisson distribution.
#
# But it turns out that the gamma distribution is *also* the conjugate prior of the exponential distribution, so there is a simple way to compute this update, too.
# The PDF of the exponential distribution is a function of $t$ with $\lambda$ as a parameter.
#
# $$\lambda e^{-\lambda t}$$
#
# Multiply the PDF of the gamma prior by this likelihood, confirm that the result is an unnormalized gamma distribution, and see if you can derive its parameters.
#
# Write a few lines of code to update `prior_gamma` with the data from this version of the problem, which was a first goal after 11 minutes and a second goal after an additional 12 minutes.
# + [markdown] tags=["hide-cell"]
# Remember to express these quantities in units of games, which are approximately 90 minutes.
# +
# Solution
"""
The unnormalized posterior is
\lambda^{\alpha-1+1} e^{-(\beta + t) \lambda}
which is an unnormalized gamma distribution with parameters
`alpha+1` and `beta+t`, which means that we observed 1 goal
in elapsed time `t`.
So we can use the same update function and call it like this:
"""
# First goal at 11 minutes, expressed in games (~90 minutes each).
data = 1, 11/90
posterior1 = update_gamma(prior_gamma, data)
# +
# Solution
# Here's the second update
# Second goal 12 more minutes later.
data = 1, 12/90
posterior2 = update_gamma(posterior1, data)
# +
# Solution
# Each observed goal in a short elapsed time pushes the mean rate up.
prior_gamma.mean(), posterior1.mean(), posterior2.mean()
# +
# And here's what the posteriors look like
pmf_from_dist(prior_gamma, lams).plot(color='C5', label='prior')
pmf_from_dist(posterior1, lams).plot(label='after 1 goal')
pmf_from_dist(posterior2, lams).plot(label='after 2 goals')
decorate_rate(title='World Cup Problem, Germany v Brazil')
# -
# -
# **Exercise:** For problems like the Euro problem where the likelihood function is binomial, we can do a Bayesian update with just a few arithmetic operations, but only if the prior is a beta distribution.
#
# If we want a uniform prior, we can use a beta distribution with `alpha=1` and `beta=1`.
# But what can we do if the prior distribution we want is not a beta distribution?
# For example, in Chapter xxx we also solved the Euro problem with a triangle prior, which is not a beta distribution.
#
# In these cases, we can often find a beta distribution that is a good-enough approximation for the prior we want.
# See if you can find a beta distribution that fits the triangle prior, then update it using `update_beta`.
#
# Use `pmf_from_dist` to make a `Pmf` that approximates the posterior distribution and compare it to the posterior we just computed using a grid algorithm. How big is the largest difference between them?
# + [markdown] tags=["hide-cell"]
# Here's the triangle prior again.
# + tags=["hide-cell"]
from empiricaldist import Pmf
# Triangle-shaped weights: ramp up over 0..49, then down over 50..0
ramp_up = np.arange(50)
ramp_down = np.arange(50, -1, -1)
a = np.append(ramp_up, ramp_down)
xs = uniform.qs
triangle = Pmf(a, xs, name='triangle')
triangle.normalize()
# + [markdown] tags=["hide-cell"]
# And here's the update.
# + tags=["hide-cell"]
# Binomial likelihood: 140 heads in 250 spins
k, n = 140, 250
likelihood = binom.pmf(k, n, xs)
posterior = triangle * likelihood
posterior.normalize()
# + [markdown] tags=["hide-cell"]
# To get you started, here's the beta distribution that we used as a uniform prior.
# + tags=["hide-cell"]
# Beta(1, 1) is the uniform distribution on [0, 1]
alpha = 1
beta = 1
prior_beta = make_beta(alpha, beta)
prior_beta.mean()
# + [markdown] tags=["hide-cell"]
# And here's what it looks like compared to the triangle prior.
# + tags=["hide-cell"]
prior_pmf = pmf_from_dist(prior_beta, xs)
triangle.plot(label='triangle')
prior_pmf.plot(label='beta')
decorate_euro('Prior distributions')
# + [markdown] tags=["hide-cell"]
# Now you take it from there.
# +
# Solution
# Conjugate update: data is (heads, spins)
data = 140, 250
posterior_beta = update_beta(prior_beta, data)
posterior_beta.mean()
# +
# Solution
# Discretize the conjugate posterior onto the same grid as `posterior`
posterior_conjugate = pmf_from_dist(posterior_beta, xs)
# +
# Solution
posterior.plot(label='grid posterior', linestyle='dotted')
posterior_conjugate.plot(label='conjugate posterior')
decorate(xlabel='Proportion of heads (x)',
         ylabel='Probability',
         title='Posterior distribution of x')
# +
# Solution
# allclose returns True, confirming the grid and conjugate posteriors
# agree within numerical tolerance (i.e. the largest difference is tiny)
np.allclose(posterior, posterior_conjugate)
# -
# **Exercise:** [3Blue1Brown](https://en.wikipedia.org/wiki/3Blue1Brown) is a YouTube channel about math; if you are not already aware of it, I recommend it highly.
# In [this video](https://www.youtube.com/watch?v=8idr1WZ1A7Q) the narrator presents this problem:
#
# > You are buying a product online and you see three sellers offering the same product at the same price. One of them has a 100% positive rating, but with only 10 reviews. Another has a 96% positive rating with 50 total reviews. And yet another has a 93% positive rating, but with 200 total reviews.
# >
# >Which one should you buy from?
#
# Let's think about how to model this scenario. Suppose each seller has some unknown probability, `x`, of providing satisfactory service and getting a positive rating, and we want to choose the seller with the highest value of `x`.
#
# This is not the only model for this scenario, and it is not necessarily the best. An alternative would be something like item response theory, where sellers have varying ability to provide satisfactory service and customers have varying difficulty of being satisfied.
#
# But the first model has the virtue of simplicity, so let's see where it gets us.
#
# 1. As a prior, I suggest a beta distribution with `alpha=8` and `beta=2`. What does this prior look like and what does it imply about sellers?
#
# 2. Use the data to update the prior for the three sellers and plot the posterior distributions. Which seller has the highest posterior mean?
#
# 3. How confident should we be about our choice? That is, what is the probability that the seller with the highest posterior mean actually has the highest value of `x`?
#
# 4. Consider a beta prior with `alpha=0.7` and `beta=0.5`. What does this prior look like and what does it imply about sellers?
#
# 5. Run the analysis again with this prior and see what effect it has on the results.
# + [markdown] tags=["hide-cell"]
# Note: When you evaluate the beta distribution, you should restrict the range of `xs` so it does not include 0 and 1. When the parameters of the beta distribution are less than 1, the probability density goes to infinity at 0 and 1. From a mathematical point of view, that's not a problem; it is still a proper probability distribution. But from a computational point of view, it means we have to avoid evaluating the PDF at 0 and 1.
# +
# Solution
# The first prior implies that most sellers are
# satisfactory most of the time, but none are perfect.
prior = make_beta(8, 2)
# Avoid evaluating the beta PDF exactly at 0 and 1 (can diverge there)
xs = np.linspace(0.005, 0.995, 199)
prior_pmf = pmf_from_dist(prior, xs)
prior_pmf.plot(color='C5', label='prior')
decorate(xlabel='Probability of positive rating',
         ylabel='PDF')
# +
# Solution
# Data for each seller: (positive reviews, total reviews)
data1 = 10, 10
data2 = 48, 50
data3 = 186, 200
# +
# Solution
seller1 = update_beta(prior, data1)
seller2 = update_beta(prior, data2)
seller3 = update_beta(prior, data3)
# +
# Solution
seller1_pmf = pmf_from_dist(seller1, xs)
seller2_pmf = pmf_from_dist(seller2, xs)
seller3_pmf = pmf_from_dist(seller3, xs)
# +
# Solution
seller1_pmf.plot(label='seller 1')
seller2_pmf.plot(label='seller 2')
seller3_pmf.plot(label='seller 3')
decorate(xlabel='Probability of positive rating',
         ylabel='PDF',
         xlim=(0.65, 1.0))
# +
# Solution
seller1.mean(), seller2.mean(), seller3.mean()
# +
# Solution
# Monte Carlo: sample x for each seller and see who is best most often
iters = 10000
a = np.empty((3, iters))
a[0] = seller1.rvs(iters)
a[1] = seller2.rvs(iters)
a[2] = seller3.rvs(iters)
# +
# Solution
from empiricaldist import Pmf
# Fraction of samples in which each seller has the highest x
best = np.argmax(a, axis=0)
Pmf.from_seq(best)
# -
# **Exercise:** Use a Dirichlet prior with parameter vector `alpha = [1, 1, 1]` to solve the Lions and Tigers and Bears problem:
#
# >Suppose we visit a wild animal preserve where we know that the only animals are lions and tigers and bears, but we don't know how many of each there are.
# >
# >During the tour, we see 3 lions, 2 tigers, and one bear. Assuming that every animal had an equal chance to appear in our sample, estimate the prevalence of each species.
# >
# >What is the probability that the next animal we see is a bear?
#
# +
# Solution
# Uniform Dirichlet prior over (lion, tiger, bear) prevalences
prior_alpha = np.array([1, 1, 1])
data = 3, 2, 1
# +
# Solution
# Dirichlet update: add the observed counts to the parameter vector
posterior_alpha = prior_alpha + data
# +
# Solution
# Marginal for bear (index 2); its mean is P(next animal is a bear)
marginal_bear = marginal_beta(posterior_alpha, 2)
marginal_bear.mean()
# +
# Solution
dist = dirichlet(posterior_alpha)
# +
# Solution
import pandas as pd
index = ['lion', 'tiger', 'bear']
pd.DataFrame(dist.mean(), index, columns=['prob'])
# -
|
soln/chap18.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Character-Level LSTM in PyTorch
#
# In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
#
# This network is based off of <NAME>'s [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
#
# <img src="assets/charseq.jpeg" width="500">
# First let's load in our required resources for data loading and model creation.
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
# ## Load in Data
#
# Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
# open text file and read in data as 'text'
# NOTE(review): relies on a relative path; run from the notebook's directory
with open('data/anna.txt', 'r') as f:
    text=f.read()
# Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
text[:100]
# ### Tokenization
#
# In the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
# encode the text and map each character to an integer and vice versa
# we create 2 dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii,ch in int2char.items()}
# encode the text as an integer array using the char2int mapping
encoded = np.array([char2int[ch] for ch in text])
# And we can see those same characters from above, encoded as integers.
encoded[:100]
# ## Pre-processing the data
#
# As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
#
def one_hot_encode(arr, n_labels):
    """One-hot encode an integer array.

    NOTE: this draft is immediately shadowed by the identical definition
    in the next cell; the misspelled parameter `n_lables` is corrected to
    `n_labels` here for consistency with that surviving definition.

    Arguments
    ---------
    arr: integer array (e.g. shape (batch, seq)) of class indices
    n_labels: number of classes (size of the appended one-hot axis)

    Returns a float32 array of shape (*arr.shape, n_labels).
    """
    # initialize the encoded array: one row per element of `arr`
    one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)
    # Fill the appropriate elements with ones
    one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
    # Finally reshape it to get back to the original array
    one_hot = one_hot.reshape((*arr.shape, n_labels))
    return one_hot
def one_hot_encode(arr, n_labels):
    """Convert an integer array into a one-hot encoded float32 array.

    The result has shape (*arr.shape, n_labels): each integer in `arr`
    becomes a length-`n_labels` vector with a single 1 at its index.
    """
    # One row per element of `arr`, all zeros to start.
    n_rows = np.multiply(*arr.shape)
    encoded = np.zeros((n_rows, n_labels), dtype=np.float32)
    # Set the column matching each element's integer value to 1.
    encoded[np.arange(n_rows), arr.flatten()] = 1.
    # Restore the original shape, with the label axis appended last.
    return encoded.reshape((*arr.shape, n_labels))
# +
# check that the function works as expected
# (a 1x3 integer sequence becomes a 1x3x8 one-hot array)
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
# -
# ## Making training mini-batches
#
#
# To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
#
# <img src="assets/sequence_batching@1x.png" width=500px>
#
#
# <br>
#
# In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
#
# ### Creating Batches
#
# **1. The first thing we need to do is discard some of the text so we only have completely full mini-batches. **
#
# Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
#
# **2. After that, we need to split `arr` into $N$ batches. **
#
# You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
#
# **3. Now that we have this array, we can iterate through it to get our mini-batches. **
#
# The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
#
# > **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
def get_batches(arr, batch_size, seq_length):
    '''Yield (input, target) mini-batches of shape (batch_size, seq_length).

       Arguments
       ---------
       arr: Array you want to make batches from
       batch_size: Batch size, the number of sequences per batch
       seq_length: Number of encoded chars in a sequence
    '''
    chars_per_batch = batch_size * seq_length
    # Trim the array so it divides evenly into full batches only.
    n_batches = len(arr) // chars_per_batch
    arr = arr[:n_batches * chars_per_batch]
    # One row per sequence; columns are the concatenated time steps.
    arr = arr.reshape((batch_size, -1))
    total_steps = arr.shape[1]
    # Slide a seq_length-wide window across the columns.
    for start in range(0, total_steps, seq_length):
        stop = start + seq_length
        x = arr[:, start:stop]
        # Targets are the inputs shifted left by one character; the final
        # column is the character that follows the window (wrapping to the
        # start of each row for the very last window).
        y = np.zeros_like(x)
        y[:, :-1] = x[:, 1:]
        if stop < total_steps:
            y[:, -1] = arr[:, stop]
        else:
            y[:, -1] = arr[:, 0]
        yield x, y
# ### Test Your Implementation
#
# Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
# Smoke test: batches of 8 sequences, 50 steps each
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
# If you implemented `get_batches` correctly, the above output should look something like
# ```
# x
# [[25 8 60 11 45 27 28 73 1 2]
# [17 7 20 73 45 8 60 45 73 60]
# [27 20 80 73 7 28 73 60 73 65]
# [17 73 45 8 27 73 66 8 46 27]
# [73 17 60 12 73 8 27 28 73 45]
# [66 64 17 17 46 7 20 73 60 20]
# [73 76 20 20 60 73 8 60 80 73]
# [47 35 43 7 20 17 24 50 37 73]]
#
# y
# [[ 8 60 11 45 27 28 73 1 2 2]
# [ 7 20 73 45 8 60 45 73 60 45]
# [20 80 73 7 28 73 60 73 65 7]
# [73 45 8 27 73 66 8 46 27 65]
# [17 60 12 73 8 27 28 73 45 27]
# [64 17 17 46 7 20 73 60 20 80]
# [76 20 20 60 73 8 60 80 73 17]
# [35 43 7 20 17 24 50 37 73 36]]
# ```
# although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
# ---
# ## Defining the network with PyTorch
#
# Below is where you'll define the network.
#
# <img src="assets/charRNN.png" width=500px>
#
# Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
# ### Model Structure
#
# In `__init__` the suggested structure is as follows:
# * Create and store the necessary dictionaries (this has been done for you)
# * Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
# * Define a dropout layer with `dropout_prob`
# * Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
# * Finally, initialize the weights (again, this has been given)
#
# Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
# ---
# ### LSTM Inputs/Outputs
#
# You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
#
# ```python
# self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
# dropout=drop_prob, batch_first=True)
# ```
#
# where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell.
#
# We also need to create an initial hidden state of all zeros. This is done like so
#
# ```python
# self.init_hidden()
# ```
# check if GPU is available; later cells consult this flag before
# moving tensors and the model to CUDA
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
    print('Training on GPU!')
else:
    print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
    """Character-level LSTM: one-hot characters in, per-character scores out."""

    def __init__(self, tokens, n_hidden=256, n_layers=2,
                 drop_prob=0.5, lr=0.001):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr

        # Character vocabulary and its integer <-> character mappings.
        self.chars = tokens
        self.int2char = dict(enumerate(self.chars))
        self.char2int = {ch: ii for ii, ch in self.int2char.items()}
        self.input_size = len(self.chars)

        # Stacked LSTM over one-hot character vectors.
        self.lstm = nn.LSTM(self.input_size, self.n_hidden, self.n_layers,
                            dropout=self.drop_prob, batch_first=True)
        # Regularization between the LSTM output and the classifier.
        self.dropout = nn.Dropout(self.drop_prob)
        # Maps each hidden state to one score per character.
        self.fc = nn.Linear(self.n_hidden, self.input_size)

    def forward(self, x, hidden):
        """Run one forward pass; return (character scores, new hidden state)."""
        lstm_out, hidden = self.lstm(x, hidden)
        dropped = self.dropout(lstm_out)
        # Flatten to (batch*seq, n_hidden) so the linear layer scores
        # one time step per row.
        flat = dropped.contiguous().view(-1, self.n_hidden)
        return self.fc(flat), hidden

    def init_hidden(self, batch_size):
        """Return zeroed (hidden, cell) LSTM states sized for `batch_size`."""
        # Borrow dtype/device defaults from an existing parameter tensor.
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.n_hidden)
        if (train_on_gpu):
            return (weight.new(*shape).zero_().cuda(),
                    weight.new(*shape).zero_().cuda())
        return (weight.new(*shape).zero_(),
                weight.new(*shape).zero_())
# ## Time to train
#
# The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
#
# Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
#
# A couple of details about training:
# >* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
# * We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
    ''' Train a CharRNN network, periodically reporting training and
        validation loss.

        NOTE: this draft is shadowed by the refined `train` defined in a
        later cell; the validation-targets bug is fixed here regardless.

        Arguments
        ---------
        net: CharRNN network
        data: integer-encoded text data to train the network on
        epochs: number of epochs
        batch_size: number of mini-sequences per mini-batch, aka batch size
        seq_length: number of character steps per mini-batch
        lr: learning rate
        clip: gradient clipping threshold
        val_frac: fraction of data to hold out for validation
        print_every: number of steps between training/validation loss reports
    '''
    net.train()
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    # create training and validation data
    val_idx = int(len(data)*(1-val_frac))
    data, val_data = data[:val_idx], data[val_idx:]
    if train_on_gpu:
        net.cuda()
    counter = 0
    n_chars = len(net.chars)
    for e in range(epochs):
        # initialize hidden state once per epoch
        h = net.init_hidden(batch_size)
        for x, y in get_batches(data, batch_size, seq_length):
            counter += 1
            # one-hot encode our data and make them torch tensors
            x = one_hot_encode(x, n_chars)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            if train_on_gpu:
                inputs, targets = inputs.cuda(), targets.cuda()
            # Detach the hidden state from its history, otherwise
            # we'd backprop through the entire training history.
            h = tuple([each.data for each in h])
            # zero accumulated gradients
            net.zero_grad()
            # get the output from the model
            output, h = net(inputs, h)
            # calculate the loss and perform backprop
            loss = criterion(output, targets.view(batch_size*seq_length))
            loss.backward()
            # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs/LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()
            # loss stats
            if counter % print_every == 0:
                # get validation loss
                val_h = net.init_hidden(batch_size)
                val_losses = []
                net.eval()
                for x, y in get_batches(val_data, batch_size, seq_length):
                    # one-hot encode our data and make them torch tensors
                    x = one_hot_encode(x, n_chars)
                    x, y = torch.from_numpy(x), torch.from_numpy(y)
                    # detach the hidden state here too
                    val_h = tuple([each.data for each in val_h])
                    # BUG FIX: this previously read `inputs, target = x, y`
                    # (singular `target`), so `targets` still held the last
                    # *training* batch's labels and the validation loss was
                    # computed against the wrong data.
                    inputs, targets = x, y
                    if train_on_gpu:
                        inputs, targets = inputs.cuda(), targets.cuda()
                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output, targets.view(batch_size*seq_length))
                    val_losses.append(val_loss.item())
                net.train()  # reset to train mode after iterating through validation data
                # message typos fixed ("Epcoh" -> "Epoch", "{:4f}" -> "{:.4f}")
                # for consistency with the refined train() defined later
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Val Loss: {:.4f}".format(np.mean(val_losses))
                      )
# NOTE: this definition supersedes the draft `train` defined in the
# earlier cell (Python keeps the last binding of the name).
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
    ''' Training a network

        Arguments
        ---------
        net: CharRNN network
        data: text data (integer-encoded) to train the network
        epochs: Number of epochs to train
        batch_size: Number of mini-sequences per mini-batch, aka batch size
        seq_length: Number of character steps per mini-batch
        lr: learning rate
        clip: gradient clipping threshold
        val_frac: Fraction of data to hold out for validation
        print_every: Number of steps for printing training and validation loss
    '''
    net.train()
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    # create training and validation data (last val_frac of the text)
    val_idx = int(len(data)*(1-val_frac))
    data, val_data = data[:val_idx], data[val_idx:]
    if(train_on_gpu):
        net.cuda()
    counter = 0
    n_chars = len(net.chars)
    for e in range(epochs):
        # initialize hidden state once per epoch
        h = net.init_hidden(batch_size)
        for x, y in get_batches(data, batch_size, seq_length):
            counter += 1
            # One-hot encode our data and make them Torch tensors
            x = one_hot_encode(x, n_chars)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            if(train_on_gpu):
                inputs, targets = inputs.cuda(), targets.cuda()
            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            h = tuple([each.data for each in h])
            # zero accumulated gradients
            net.zero_grad()
            # get the output from the model
            output, h = net(inputs, h)
            # calculate the loss and perform backprop
            loss = criterion(output, targets.view(batch_size*seq_length).long())
            loss.backward()
            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()
            # loss stats
            if counter % print_every == 0:
                # Get validation loss over the whole held-out split
                val_h = net.init_hidden(batch_size)
                val_losses = []
                net.eval()
                for x, y in get_batches(val_data, batch_size, seq_length):
                    # One-hot encode our data and make them Torch tensors
                    x = one_hot_encode(x, n_chars)
                    x, y = torch.from_numpy(x), torch.from_numpy(y)
                    # Creating new variables for the hidden state, otherwise
                    # we'd backprop through the entire training history
                    val_h = tuple([each.data for each in val_h])
                    inputs, targets = x, y
                    if(train_on_gpu):
                        inputs, targets = inputs.cuda(), targets.cuda()
                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output, targets.view(batch_size*seq_length).long())
                    val_losses.append(val_loss.item())
                net.train()  # reset to train mode after iterating through validation data
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Val Loss: {:.4f}".format(np.mean(val_losses)))
# ## Instantiating the model
#
# Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
# +
## TODO: set your model hyperparameters
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
# -
# ### Set your training hyperparameters!
# +
# 128 sequences per batch, 100 characters per sequence
batch_size = 128
seq_length = 100
n_epochs = 5 # start small if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
# -
# ## Getting the best model
#
# To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
# ## Hyperparameters
#
# Here are the hyperparameters for the network.
#
# In defining the model:
# * `n_hidden` - The number of units in the hidden layers.
# * `n_layers` - Number of hidden LSTM layers to use.
#
# We assume that dropout probability and learning rate will be kept at the default, in this example.
#
# And in training:
# * `batch_size` - Number of sequences running through the network in one pass.
# * `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
# * `lr` - Learning rate for training
#
# Here's some good advice from <NAME> on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
#
# > ## Tips and Tricks
#
# >### Monitoring Validation Loss vs. Training Loss
# >If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
#
# > - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
# > - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
#
# > ### Approximate number of parameters
#
# > The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
#
# > - The number of parameters in your model. This is printed when you start training.
# > - The size of your dataset. 1MB file is approximately 1 million characters.
#
# >These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
#
# > - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
# > - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
#
# > ### Best models strategy
#
# >The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
#
# >It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
#
# >By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
# ## Checkpoint
#
# After training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
# +
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'
# Save everything needed to rebuild the model later: architecture
# hyperparameters, learned weights, and the character vocabulary.
checkpoint = {'n_hidden': net.n_hidden,
              'n_layers': net.n_layers,
              'state_dict': net.state_dict(),
              'tokens': net.chars}
with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)
# -
# ---
# ## Making Predictions
#
# Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
#
# ### A note on the `predict` function
#
# The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
#
# > To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
#
# ### Top K sampling
#
# Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with less variability) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
#
def predict(net, char, h=None, top_k=None):
    ''' Given a character, predict the next character.

        Returns the predicted character and the new hidden state.
        NOTE(review): despite the default, `h` must not be None here —
        it is detached below without a None check; callers always pass
        a hidden state.
    '''
    # Encode the input character as a (1, 1, n_chars) one-hot tensor.
    encoded = np.array([[net.char2int[char]]])
    encoded = one_hot_encode(encoded, len(net.chars))
    inputs = torch.from_numpy(encoded)
    if train_on_gpu:
        inputs = inputs.cuda()

    # Detach the hidden state so we don't backprop through history.
    h = tuple(state.data for state in h)
    out, h = net(inputs, h)

    # Softmax turns the class scores into a probability distribution.
    p = F.softmax(out, dim=1).data
    if train_on_gpu:
        p = p.cpu()  # move to cpu for numpy sampling

    # Restrict to the top-k most probable characters if requested.
    if top_k is None:
        candidates = np.arange(len(net.chars))
    else:
        p, candidates = p.topk(top_k)
        candidates = candidates.numpy().squeeze()

    # Sample the next character, weighting by predicted probability.
    p = p.numpy().squeeze()
    choice = np.random.choice(candidates, p=p/p.sum())

    # return the decoded predicted char and the hidden state
    return net.int2char[choice], h
# ### Priming and generating text
#
# Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
def sample(net, size, prime='The', top_k=None):
    """Generate `size` characters from `net`, seeded with the `prime` string.

    The prime characters are fed through the network first so a hidden
    state is built up before free-running generation starts; the first
    generated characters are otherwise essentially random.
    """
    if train_on_gpu:
        net.cuda()
    else:
        net.cpu()
    net.eval()  # eval mode: disable training-only behavior while sampling
    # warm up the hidden state on the prime characters
    generated = list(prime)
    hidden = net.init_hidden(1)
    for seed_ch in prime:
        next_ch, hidden = predict(net, seed_ch, hidden, top_k=top_k)
    generated.append(next_ch)
    # free-run: feed the previous character back in, one step at a time
    for _ in range(size):
        next_ch, hidden = predict(net, generated[-1], hidden, top_k=top_k)
        generated.append(next_ch)
    return ''.join(generated)
print(sample(net, 1000, prime='Anna', top_k=5))
# ## Loading a checkpoint
# +
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# -
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
|
recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import confusion_matrix
import nltk
from nltk.corpus import stopwords
import string
# -
data = pd.read_csv('C:/Users/sk@softech/Desktop/mlDataSet/spam_or_not_spam.csv')
data.head()
data.shape
data.columns
data.drop_duplicates(inplace=True)
data.isnull().sum()
#Tokenization (a list of tokens), will be used as the analyzer
#1.Remove punctuation
#2.Remove stopwords.
#3. Return list of clean text words
def process_text(text):
    """Tokenize a message: strip punctuation, drop English stopwords.

    :param text: raw message string.
    :returns: list of cleaned word tokens.
    """
    # Perf fix: the original re-fetched stopwords.words('english') (a list of
    # ~180 words) for every single token and scanned it linearly; build a set
    # once so each membership test is O(1).
    stop_words = set(stopwords.words('english'))
    punct = set(string.punctuation)
    # 1. remove punctuation characters
    nopunc = ''.join(ch for ch in text if ch not in punct)
    # 2./3. drop stopwords and return the clean tokens
    return [word for word in nopunc.split() if word.lower() not in stop_words]
from sklearn.feature_extraction.text import CountVectorizer
msg = CountVectorizer(analyzer=process_text).fit_transform(data['email'])
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(msg, data['label'], test_size = 0.20, random_state = 1)
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression( random_state = 0).fit(X_train, Y_train)
# +
#Print the predictions
print(log_reg.predict(X_train))
#Print the actual values
print(Y_train.values)
# -
#Evaluate the model on the training data set
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
pred = log_reg.predict(X_train)
print(classification_report(Y_train ,pred ))
print('Confusion Matrix: \n',confusion_matrix(Y_train,pred))
print()
print('Accuracy: ', accuracy_score(Y_train,pred))
# +
#Print the predictions
print('Predicted value: ',log_reg.predict(X_test))
#Print Actual Label
print('Actual value: ',Y_test.values)
# +
#Evaluate the model on the test data set
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
pred = log_reg.predict(X_test)
print(classification_report(Y_test ,pred ))
print('Confusion Matrix: \n', confusion_matrix(Y_test,pred))
print()
print('Accuracy: ', accuracy_score(Y_test,pred))
# -
|
4thAssesed.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from hwt.synthesizer.interface import Interface
from hwt.interfaces.std import VectSignal, Signal
from hwt.synthesizer.unit import Unit
from hwt.hdl.constants import DIRECTION
from hwtLib.amba.axis import AxiStreamAgent
# An Interface is a base class of HWT interfaces, it's usage is straight forward
# Note that hwtLib.amba.axis already contains definition of AxiStream
class AxiStream(Interface):
    """
    An example of interface definition in HWT
    (AMBA4 AXI-4 Stream https://static.docs.arm.com/ihi0051/a/IHI0051A_amba4_axi4_stream_v1_0_protocol_spec.pdf)
    """
    def _declr(self):
        """
        Interface has the _declr() method with the same meaning as Unit._declr shown in the previous tutorial;
        it is the place where publicly visible interfaces should be declared.
        """
        DATA_WIDTH = 64
        # self.<interface name> = <interface object>
        self.data = VectSignal(DATA_WIDTH)
        # byte strobe: one bit per data byte
        self.strb = VectSignal(DATA_WIDTH//8)
        self.last = Signal()
        self.valid = Signal()
        self.ready = Signal(masterDir=DIRECTION.IN)  # ready will be input to master
    def _getIpCoreIntfClass(self):
        """
        An optional method where you can override how the interface should be represented in exported IP-cores.
        """
        # NOTE(review): IP_AXIStream is not defined anywhere in this notebook —
        # presumably provided by hwtLib; confirm before exporting an IP-core.
        return IP_AXIStream
    def _initSimAgent(self, sim):
        """
        An optional method where you can override the simulation agent used in simulation
        to read/write from/to this interface.
        """
        # the ._ag has to be specified otherwise the simulator won't be able to communicate
        # with the circuit if this interface is on the top component
        self._ag = AxiStreamAgent(sim, self)
from hwt.interfaces.utils import addClkRstn
# simple wire with our interface
class AxiStreamWire(Unit):
    """A trivial component that forwards AxiStream input `a` to output `b`."""
    def _declr(self):
        # addClkRstn just adds self.clk = Clk(); self.rst_n = Rst_n()
        # we are adding it because the AxiStreamAgent needs it as AxiStream is a synchronous interface
        addClkRstn(self)
        self.a = AxiStream()        # slave (input) side
        self.b = AxiStream()._m()   # ._m() marks the master (output) side
    def _impl(self):
        # connect all signals of `a` to `b` at once
        self.b(self.a)
class AxiStreamWireSignalBySignal(AxiStreamWire):
    """Same wire as AxiStreamWire, but each signal is connected explicitly."""
    def _impl(self):
        a, b = self.a, self.b
        b.data(a.data)
        b.strb(a.strb)
        b.last(a.last)
        b.valid(a.valid)
        # note that here the direction is reversed
        # because the direction in the AxiStream definition is reversed as well
        a.ready(b.ready)
class AxiStreamWireSignalBySignal2(AxiStreamWire):
    """Wire variant that connects signals generically by iterating sub-interfaces."""
    def _impl(self):
        # each Interface/Unit instance has _interface list of children which can be
        # used for introspection etc.
        # NOTE(review): recent hwt versions spell this attribute `_interfaces` —
        # confirm against the hwt version actually in use.
        for a, b in zip(self.a._interface, self.b._interface):
            if a is self.a.ready:
                a(b)  # ready flows against the stream (master input)
            else:
                b(a)
# +
from hwt.simulator.simTestCase import SimTestCase
from hwtSimApi.constants import CLK_PERIOD
from pyMathBitPrecise.bit_utils import mask
# An example simulation with our interface
class AxiStreamWireWireTC(SimTestCase):
    """Simulation test: frames pushed into `a` must come out of `b` unchanged."""
    def test_simple(self):
        u = AxiStreamWire()
        self.compileSimAndStart(u)
        # The data format depends on the implementation of the simulation agent
        # and it can be found in documentation of the agent.
        # It is usually a tuple of integers corresponding to values of signals
        # in order in which they are defined.
        inputData = [
            # (data, strb, last) as AxiStreamAgent requires
            (i, mask(64//8), 1)
            for i in range(5)
        ]
        u.a._ag.data.extend(inputData)
        self.runSim(10 * CLK_PERIOD)
        # everything written into `a` should have been forwarded to `b`
        self.assertValSequenceEqual(u.b._ag.data, inputData)
# +
from jupyter_widget_hwt import HwtSignalDumpWidget
selected_test = AxiStreamWireWireTC('test_simple')
trace = HwtSignalDumpWidget(selected_test, width=1000, height=500)
display(trace)
# -
|
examples/hwt_tutorial_2_custom_interface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp features
# default_cls_lvl 2
# -
# %load_ext autotime
# # Feature Engineering und Target Variablen
# > In diesem Modul werden zusätzliche Features berechnet und die Zielvariablen sowie Test und Train split definiert.
#
# Laut [Kaggle Data Description](https://www.kaggle.com/c/santander-product-recommendation/data) haben wir 1.5 Jahre an Daten. Das besagte Ziel ist es, die zusätzlich erworbenen Produkte für die Periode 2016-05-28 vorherzusagen. Dies ist einer der wichtigsten Schritte im ganzen ML-Prozess. Für diesen PoC halten wir das Ganze recht einfach. In einem echten Projekt würden wir zusätzlich folgende Schritte machen:
#
# - entwickeln von zusätzlichen Features wie z.B. relatives Einkommen zur Altersgruppe und Lokation
# - berechnen von Differenzen zum Vormonat bzw. Vormonaten
# - mehrere Datasets definieren für ein Ensemble von Modellen.
#
#
# Das Ziel dieser Kaggle Challenge ist es die neuen Produkte für die Periode 2016-06-28 vorherzusagen. Dazu haben sie das Datenset in ein Testset (Grunddaten von der Periode 2016-06-28) und Trainingset (Daten von 2015-01-28 bis 2016-05-28) aufgeteilt. Leider kennen wir die wahren Werte von dem Testset der Periode 2016-06-28 nicht, weshalb wir diesen Datenpunkt ignorieren werden und Train und Testset wie folgt aufteilen werden.
# 
#export
import pandas as pd
import numpy as np
from fastscript import *
#export
def load_data(path='data/interim/02_train.csv'):
    """Read the prepared training data from `path` into a DataFrame."""
    frame = pd.read_csv(path)
    return frame
data = load_data()
data.head(10)
# +
#export
target_cols = ['ind_ahor_fin_ult1','ind_aval_fin_ult1','ind_cco_fin_ult1',
'ind_cder_fin_ult1','ind_cno_fin_ult1','ind_ctju_fin_ult1',
'ind_ctma_fin_ult1','ind_ctop_fin_ult1','ind_ctpp_fin_ult1',
'ind_deco_fin_ult1','ind_deme_fin_ult1','ind_dela_fin_ult1',
'ind_ecue_fin_ult1','ind_fond_fin_ult1','ind_hip_fin_ult1',
'ind_plan_fin_ult1','ind_pres_fin_ult1','ind_reca_fin_ult1',
'ind_tjcr_fin_ult1','ind_valo_fin_ult1','ind_viv_fin_ult1',
'ind_nomina_ult1','ind_nom_pens_ult1','ind_recibo_ult1']
target_cols.remove('ind_ahor_fin_ult1') #wenig häufig
target_cols.remove('ind_aval_fin_ult1') #wenig häufig
feature_cols = ['ind_empleado', 'sexo', 'age', 'renta', 'ind_nuevo',
'indrel', 'indrel_1mes', 'tiprel_1mes', 'indresi', 'indext',
'conyuemp', 'indfall', 'tipodom', 'ind_actividad_cliente',
'segmento', 'antiguedad', 'pais_residencia', 'canal_entrada']
# -
data[target_cols].sum()
# ## calculate target vars
#export
def calculate_targets(df:pd.DataFrame,
                      feature_cols:list=feature_cols,
                      target_products:list=target_cols,
                      remove_wrong_shift_entries=True,
                      shift_periods=1):
    """add the shifted product values and calculate target variables

    For each customer (id) the product-ownership columns are compared
    against their value `shift_periods` months earlier; a target of 1 then
    means the product was newly added.  Rows whose shifted value belongs to
    a *different* customer are optionally removed.
    Note: mutates `df` in place (sort + new `_s` columns) before returning it.
    NOTE(review): `feature_cols` is accepted but unused in this body.
    """
    # sort so that shift() moves values within each customer's own history
    df.sort_values(by = ['id', 'month_int'], inplace=True) #sort by id then by month_int
    df['id_shift'] = df['id'].shift(shift_periods).fillna(0).astype(np.int32)
    # mask of rows where the shifted value came from another customer
    idx_to_remove = ((df['id'] - df['id_shift']) != 0) #store index unwanted entries
    #add shifted target colums
    for col in target_products:
        name = col + '_s'
        df[name] = df[col].shift(shift_periods).fillna(0).astype(np.int8)
        df.loc[idx_to_remove, name] = 0 #set to 0 so that the difference works out
    # set 1 only for added products not for existing products
    for col in target_products:
        df[col] = (df[col] - df[col + '_s']).astype(np.int8)
        df[col] = (df[col] > 0).astype(np.int8)
    if remove_wrong_shift_entries:
        df = df[idx_to_remove == False] #remove illogical results
    return df
df1 = calculate_targets(data)
df_s_12 = calculate_targets(data, shift_periods = 12)
print(len(df1))
print(len(df_s_12))
assert len(df1) > len(df_s_12)
assert min(df_s_12.month_int.unique()) == 13 #first month 1 + 12 shift
assert min(calculate_targets(data, shift_periods = 6).month_int.unique()) == 7
assert 'ind_ctma_fin_ult1_s' in df1.columns #shifted columns
#export
def remove_rows_without_product(df:pd.DataFrame, target_products:list=target_cols):
    """removes all rows from a train set without new products"""
    # a row is kept only when at least one product indicator is non-zero
    new_product_count = df[target_products].sum(axis=1)
    return df[new_product_count != 0]
df2 = remove_rows_without_product(df1)
assert len(df2) < len(df1)
#export
def calculate_target_as_one_column(df:pd.DataFrame, feature_cols:list, target_cols:list):
    """create a row for every new product and give the product name as the
    target column `y`; this is done for the train set

    :param df: frame with 0/1 product indicator columns plus features
    :param feature_cols: feature column names to keep in the result
    :param target_cols: product indicator column names
    :returns: frame with one row per (customer-month, newly added product)
    """
    x = df[target_cols]
    # stack the 1-entries into (row index, product name) pairs;
    # drop(columns=0) removes the stacked value column.  Fix: the original
    # `.drop(0, 1)` relied on positional labels/axis arguments, which were
    # deprecated in pandas 1.4 and removed in pandas 2.0.
    x = x[x == 1].stack().reset_index().drop(columns=0)
    # merge back on the original row index so each match becomes one row
    df = pd.merge(df, x, left_on=df.index, right_on='level_0')
    df.rename(columns={'level_1': "y"}, inplace=True)
    keep_cols = feature_cols.copy()
    keep_cols += [col for col in df if col[-2:] == '_s']  # keep also shifted columns
    keep_cols.append('month_int')
    keep_cols.append('id')  # keep id
    keep_cols.append('y')   # keep target var
    return df[keep_cols]
df3 = calculate_target_as_one_column(df2, feature_cols, target_cols)
assert 'y' in df3.columns
assert len(df3) > len(df2) #additional rows as there are more than one product per month
# ## train_test_split
#export
def get_last_month_test_set(df:pd.DataFrame, feature_cols=feature_cols, target_cols=target_cols):
    """calculates the test set for all new products in the last month

    Keeps months >= 16 so the 1-month shift has earlier data to compare
    against, then returns only the final month (17).
    """
    df = df.loc[df.month_int >= 16,].copy()
    df = calculate_targets(df, feature_cols, target_cols, shift_periods=1, remove_wrong_shift_entries=False)
    df = df.loc[df.month_int == 17]
    return df
df4_test = get_last_month_test_set(data)
assert df4_test.month_int.unique()[0] == 17
assert data[data.month_int == 17]['month_int'].count() == len(df4_test)
#export
def get_train_set(df:pd.DataFrame, feature_cols=feature_cols, target_cols=target_cols, product_shift=1):
    """returns the train set. Take care that you don't include the test data in this function

    Months up to 16 are used; targets are computed against a
    `product_shift`-month lag, rows without any new product are dropped and
    the targets are reshaped into a single `y` column.
    """
    train = calculate_targets(df.loc[df.month_int <= 16,].copy(),
                              feature_cols, target_cols,
                              shift_periods=product_shift)
    train = remove_rows_without_product(train, target_cols)
    train = calculate_target_as_one_column(train, feature_cols, target_cols)
    return train
df5_train = get_train_set(data)
assert 'y' in df5_train.columns
assert 'id' in df5_train.columns
assert df5_train.month_int.max() == 16
df5_train
#export
@call_parse
def calculate_main(source:Param("source csv file", str)='data/interim/02_train.csv',
                   dest_train:Param("destination train csv file", str)='data/interim/03_train.csv',
                   dest_test:Param("destination test csv file", str)='data/interim/03_test.csv',
                   shift_periods:Param("how many periods to shift target_vars", int) = 1):
    """calculate target variables and delayed product features

    Reads `source`, builds the train/test split and writes both frames to
    the given destinations; returns (train, test) for interactive use.
    """
    # Fix: the CLI parameter was declared as str, so command-line invocations
    # passed a string into pandas' shift(); the Param type is now int and we
    # coerce defensively for direct callers as well.
    shift_periods = int(shift_periods)
    data = load_data(source)
    train = get_train_set(data, product_shift=shift_periods)
    test = get_last_month_test_set(data)
    train.to_csv(dest_train, index=False)
    test.to_csv(dest_test, index=False)
    return (train, test)
#slow
train, test = calculate_main()
train12, test12 = calculate_main(
dest_train='data/interim/03_train_shift12.csv',
dest_test='data/interim/03_test_shift12.csv',
shift_periods=12)
train6, test6 = calculate_main(
dest_train='data/interim/03_train_shift6.csv',
dest_test='data/interim/03_test_shift6.csv',
shift_periods=6)
#slow
pd.set_option('display.max_columns', None)
train
#slow
test
print(train.month_int.min())
print(train6.month_int.min())
print(train12.month_int.min())
print(len(train))
print(len(train6))
print(len(train12))
# ## Export
from nbdev.export import *
notebook2script()
len(train)
|
03_features.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GWEKvPCCxJke"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="l-m8KQ-nxK5l"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="O8FuVCLYxi_l"
# # TensorFlow Addons 이미지: 연산
#
# <table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://www.tensorflow.org/addons/tutorials/image_ops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab에서 실행하기</a></td>
# <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서소스 보기</a></td>
# <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/addons/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td>
# </table>
# + [markdown] id="2a5ksOt-xsOl"
# ## 개요
#
# 이 노트북은 TensorFlow Addons에서 일부 이미지 연산을 사용하는 방법을 보여줍니다.
#
# 예제에서 다룰 이미지 연산의 목록은 다음과 같습니다.
#
# - `tfa.image.mean_filter2d`
#
# - `tfa.image.rotate`
#
# - `tfa.image.transform`
#
# - `tfa.image.random_hsv_in_yiq`
#
# - `tfa.image.adjust_hsv_in_yiq`
#
# - `tfa.image.dense_image_warp`
#
# - `tfa.image.euclidean_dist_transform`
# + [markdown] id="DMbjxr4PyMPF"
# # 설정
# + id="o_QTX_vHGbj7"
# !pip install -U tensorflow-addons
# + id="5hVIKCrhWh4a"
import tensorflow as tf
import numpy as np
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
# + [markdown] id="Q6Z2rsP8yp2v"
# # 이미지 준비 및 검사
# + [markdown] id="9gbgJP10z9WO"
# ## 이미지 다운로드하기
# + id="IgUsVhBQ6dSg"
img_path = tf.keras.utils.get_file('tensorflow.png','https://tensorflow.org/images/tf_logo.png')
# + [markdown] id="uheQOL-y0Fj3"
# ## 이미지 검사하기
# + [markdown] id="MFGirRRZ0Y9k"
# ### TensorFlow 아이콘
# + id="NRlvNQdm1YI8"
img_raw = tf.io.read_file(img_path)
img = tf.io.decode_image(img_raw)
img = tf.image.convert_image_dtype(img, tf.float32)
img = tf.image.resize(img, [500,500])
plt.title("TensorFlow Logo with shape {}".format(img.shape))
_ = plt.imshow(img)
# + [markdown] id="clXQrFVa2nN7"
# ### 흑백 버전 만들기
# + id="tbaIkUCS2eNv"
bw_img = 1.0 - tf.image.rgb_to_grayscale(img)
plt.title("Mask image with shape {}".format(bw_img.shape))
_ = plt.imshow(bw_img[...,0], cmap='gray')
# + [markdown] id="UwqfpOm--vV2"
# # tfa.image 다루기
# + [markdown] id="jIa5HnomPds3"
# ## Mean filtering
#
# 평균 필터링은 이미지 또는 신호에서 노이즈를 제거하는 데 자주 사용되는 필터링 기술입니다. 이미지를 픽셀 단위로 실행하고 인접 픽셀의 평균값으로 대체하는 것입니다.
# + id="SutWnbRoHl6i"
mean = tfa.image.mean_filter2d(img, filter_shape=11)
_ = plt.imshow(mean)
# + [markdown] id="Mp6cU7I0-r2h"
# ## Rotate
#
# 이 연산은 사용자가 입력한 각도(라디안)로 주어진 이미지를 회전합니다.
# + id="9kxUES9sM8Jl"
rotate = tfa.image.rotate(img, tf.constant(np.pi/8))
_ = plt.imshow(rotate)
# + [markdown] id="WjMdSDKlBcPh"
# ## Transform
#
# 이 연산은 사용자가 제공한 변환 벡터를 기반으로 주어진 이미지를 변환합니다.
# + id="HTh1Qpps8Rg5"
transform = tfa.image.transform(img, [1.0, 1.0, -250, 0.0, 1.0, 0.0, 0.0, 0.0])
_ = plt.imshow(transform)
# + [markdown] id="O79BrK-bC8oh"
# ## Random HSV in YIQ
#
# 이 연산은 주어진 RGB 이미지의 색상 스케일을 YIQ로 변경하지만, 여기에서 델타 색조 및 채도 값은 주어진 범위에서 무작위로 선택됩니다.
# + id="zZBI-9XvBSuh"
delta = 0.5
lower_saturation = 0.1
upper_saturation = 0.9
lower_value = 0.2
upper_value = 0.8
rand_hsvinyiq = tfa.image.random_hsv_in_yiq(img, delta, lower_saturation, upper_saturation, lower_value, upper_value)
_ = plt.imshow(rand_hsvinyiq)
# + [markdown] id="ruyvVnmCDBgj"
# ## Adjust HSV in YIQ
#
# 이 연산은 주어진 RGB 이미지의 색상 스케일을 YIQ로 변경하지만, 여기에서는 임의로 선택하는 대신 델타 색조 및 채도 값이 사용자로부터 입력됩니다.
# + id="vbCdwGtYChnQ"
delta = 0.5
saturation = 0.3
value = 0.6
adj_hsvinyiq = tfa.image.adjust_hsv_in_yiq(img, delta, saturation, value)
_ = plt.imshow(adj_hsvinyiq)
# + [markdown] id="fdbCDYJkG8Gv"
# ## Dense Image Warp
#
# 이 연산은 오프셋 벡터의 flow 필드에서 지정된 이미지의 비선형 왜곡을 위한 것입니다(여기에서는 예를 들어, 임의의 값 사용).
# + id="dG557eQDDtSK"
input_img = tf.image.convert_image_dtype(tf.expand_dims(img, 0), tf.dtypes.float32)
flow_shape = [1, input_img.shape[1], input_img.shape[2], 2]
init_flows = np.float32(np.random.normal(size=flow_shape) * 2.0)
dense_img_warp = tfa.image.dense_image_warp(input_img, init_flows)
dense_img_warp = tf.squeeze(dense_img_warp, 0)
_ = plt.imshow(dense_img_warp)
# + [markdown] id="FcLMnSKYPcjA"
# ## Euclidian Distance Transform
#
# 이 연산은 전경 픽셀에서 배경 픽셀까지의 유클리드 거리로 픽셀값을 업데이트합니다.
#
# - 참고: 이진 이미지만 취하여 이미지로 변환됩니다. 다른 이미지가 주어지면 단일 값을 가진 이미지로 변환됩니다.
# + id="-OMh6oeRQaYQ"
gray = tf.image.convert_image_dtype(bw_img,tf.uint8)
# The op expects a batch of images, so add a batch dimension
gray = tf.expand_dims(gray, 0)
eucid = tfa.image.euclidean_dist_transform(gray)
eucid = tf.squeeze(eucid, (0, -1))
_ = plt.imshow(eucid, cmap='gray')
|
site/ko/addons/tutorials/image_ops.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vitaltavares/MQP2019/blob/master/Vital/Restructure_w_Implied_Vol_Surface.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="89t4J6Dmd-Eo"
# # BSM formula
#
# ## Abstract
#
# - create GBM class
# - define a method for BSM formula for a given option type
# + [markdown] colab_type="text" id="y5hyO8FseuLn"
# ## Analysis
#
# BS model assumes the distribution of stock as lognormal. In particular, it writes
# $$\ln \frac{S(T)}{S(0)} \sim \mathcal N((r - \frac 1 2 \sigma^2) T, \sigma^2 T)$$
# with respect to risk neutral measure. In the above, the parameters stand for
#
# * $S(0)$: The initial stock price
# * $S(T)$: The stock price at $T$
# * $r$: interest rate
# * $\sigma$: volatility
#
#
# + [markdown] colab_type="text" id="4BEWnmSve9oM"
#
# The call and put prices with maturity $T$ and strike $K$ will be denoted $C_0$ and $P_0$, given as below:
# $$C_0 = \mathbb E [e^{-rT} (S(T) - K)^+] = S_0 \Phi(d_1) - K e^{-rT} \Phi(d_2),$$
# and
# $$P_0 = \mathbb E [e^{-rT} (S(T) - K)^-] = K e^{-rT} \Phi(- d_2) - S_0 \Phi(- d_1),$$
# where $d_i$ are given as
# $$d_1 = \frac{1}{\sigma\sqrt{\left( T - t \right)}}
# \left[
# \ln\frac{S_{0}}{K}
# +
# \left(
# r + \frac{\sigma^2}{2}
# \right)
# \left(
# T-t
# \right)
# \right],$$
# and
# $$d_2 = \frac{1}{\sigma\sqrt{\left( T - t \right)}}
# \left[
# \ln\frac{S_{0}}{K}
# +
# \left(
# r - \frac{\sigma^2}{2}
# \right)
# \left(
# T-t
# \right)
# \right] = d_{1}-\sigma\sqrt{\left( T - t \right)}$$
#
# From $\textit{Stochastic Calculus for Finance II Continuous Time Models}$ by Shreve
#
#
# Put-call parity will be useful:
# $$C_0 - P_0 = S(0) - e^{-rT} K.$$
#
# + [markdown] colab_type="text" id="mewOxcQJfFnT"
# ## Code
# + colab_type="code" id="RXd_brmsfEs9" colab={}
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# + [markdown] colab_type="text" id="M40EwMCkfS21"
# We reload the european option class created before.
# + colab_type="code" id="czvpqtvId_3D" colab={}
class VanillaOption:
    """
    A European vanilla option priced under Black-Scholes-Merton.

    :param S: underlying spot price
    :param K: strike price
    :param t: time to expiration (years)
    :param r: risk-free interest rate (annualized)
    :param vol: volatility (annualized)
    :param otype: 1 for a call, -1 for a put
    """
    def __init__(self, S=100, K=100, t=0.5, r=0.019, vol=0.217, otype=1):
        self.otype = otype
        self.K = K
        self.S = S
        self.t = t
        self.r = r
        self.vol = vol

    def _d1(self):
        """Black-Scholes d1 term, shared by price and all greeks."""
        S, K, t, r, vol = self.S, self.K, self.t, self.r, self.vol
        return 1/(vol * np.sqrt(t)) * (np.log(S/K) + (r + np.power(vol, 2)/2) * t)

    def _d2(self):
        """Black-Scholes d2 = d1 - vol*sqrt(t)."""
        return self._d1() - self.vol * np.sqrt(self.t)

    def payoff(self, S):
        """Intrinsic payoff at exercise price S.

        Fix: the original overwrote the argument with the nonexistent
        attribute ``self.strike``, which raised AttributeError on every call.
        """
        return np.max([0, (S - self.K) * self.otype])

    def bsm_price(self):
        """BSM price; otype=1 prices the call, otype=-1 the put."""
        otype, S, K, t, r = self.otype, self.S, self.K, self.t, self.r
        d1, d2 = self._d1(), self._d2()
        return (otype * S * ss.norm.cdf(otype * d1)
                - otype * K * np.exp(-r * t) * ss.norm.cdf(otype * d2))

    def delta(self):
        """Sensitivity of the price to the underlying."""
        d1 = self._d1()
        if self.otype == 1:
            return ss.norm.cdf(d1)
        else:
            return ss.norm.cdf(d1) - 1

    def gamma(self):
        """Second derivative w.r.t. the underlying (identical for call/put)."""
        d1 = self._d1()
        return ss.norm.pdf(d1) / (self.S * self.vol * np.sqrt(self.t))

    def vega(self):
        """Price change per 1 percentage-point move in volatility."""
        return ss.norm.pdf(self._d1()) * np.sqrt(self.t) * self.S / 100.0

    def theta(self):
        """Time decay of the option price.

        Fix: the original bound ``r = self.vol``, so the carry term used the
        volatility in place of the interest rate (and d1/d2 were computed
        with the wrong rate as well).
        """
        S, K, t, r, vol = self.S, self.K, self.t, self.r, self.vol
        d1, d2 = self._d1(), self._d2()
        if self.otype == 1:
            return (-S * ss.norm.pdf(d1) * vol / (2 * np.sqrt(t))) - (r * K * np.exp(-r * t) * ss.norm.cdf(d2))
        else:
            return (-S * ss.norm.pdf(d1) * vol / (2 * np.sqrt(t))) + (r * K * np.exp(-r * t) * ss.norm.cdf(-d2))

    def rho(self):
        """Price change per unit move in the interest rate."""
        K, t, r = self.K, self.t, self.r
        d2 = self._d2()
        if self.otype == 1:
            return K * ss.norm.cdf(d2) * t * np.exp(-r * t)
        else:
            return -K * ss.norm.cdf(-d2) * t * np.exp(-r * t)
# + [markdown] colab_type="text" id="rdPRhkW0fhkn"
# Next, we create the gbm class, which is
# determined by three parameters. We shall initialize it
# as it is created.
# + [markdown] colab_type="text" id="6qcWtlDCgAO9"
# BSM formula is given by a method of Gbm class with an input of an option.
# + colab_type="code" id="iDswnsxjf_h5" outputId="35a74b95-5207-4fc4-d5d2-a75f8fe512ed" colab={"base_uri": "https://localhost:8080/", "height": 51}
'''===============
Test bsm_price
================='''
# At the money option w/ 6 months to expiry
option1 = VanillaOption(S= 100, K=100, t = 0.5, vol = 0.217, r = 0.019)
print('>>>>>>>>>>call value is ' + str(option1.bsm_price()))
option2 = VanillaOption(S= 100, K=100, t = 0.5, vol = 0.217, r = 0.019, otype=-1)
print('>>>>>>>>>>put value is ' + str(option2.bsm_price()))
# + id="WISFMhyayC2v" colab_type="code" outputId="535961d1-785a-423f-c272-57589f89fe94" colab={"base_uri": "https://localhost:8080/", "height": 153}
'''===============
Test Arbitrage Free Model
================='''
#Delta
call_delta = option1.delta()
put_delta = option2.delta()
print(">>>>>>The Call Delta is " + str(call_delta))
print(">>>>>>The Put Delta is " + str(put_delta))
#Gamma
option_gamma = option1.gamma()
print(">>>>>>The Option's Gamma is " + str(option_gamma))
#Vega
option_vega = option1.vega()
print(">>>>>>The Option's Vega is " + str(option_vega))
#Theta
call_theta = option1.theta()
put_theta = option2.theta()
print(">>>>>>The Call Theta is " + str(call_theta))
print(">>>>>>The Put Theta is " + str(put_theta))
#Rho
call_rho = option1.rho()
put_rho = option2.rho()
print(">>>>>>The Call Rho is " + str(call_rho))
print(">>>>>>The Put Rho is " + str(put_rho))
# + colab_type="code" id="BrvYN7v0gWK5" colab={}
###############
# Arbitrage-Free Model Object-Oriented
#############
class ArbitrageFree:
    """Checks put-call parity: C - P = S - K*exp(-r*t)."""
    def pc_parity(self, call_option, put_option):
        """Return a message saying whether the pair satisfies put-call parity.

        Both options must share the same strike K, rate r, maturity t and
        underlying S (only the call's attributes are read).
        """
        call_price = call_option.bsm_price()
        put_price = put_option.bsm_price()
        k = call_option.K #Note: Put and Call with same strike k
        r = call_option.r #and interest r
        t = call_option.t #and t
        s = call_option.S
        # Fixes vs. the original check:
        #  * parity is C - P = S - K*exp(-r*t); the K factor was missing,
        #  * `10^(-10)` is bitwise XOR in Python (== -4), not 1e-10,
        #  * the deviation must be compared in absolute value
        #    (give some space for machine precision error).
        if abs(call_price - put_price - (s - k * np.exp(-r * t))) <= 1e-10:
            return ">>>>>>>>>Option is arbitrage-free"
        else:
            return ">>>>>>>>>Option is not arbitrage-free"
# + [markdown] id="L9ftNyq1CeSt" colab_type="text"
#
# + id="yJecYrdO4pWp" colab_type="code" outputId="8843b0d7-6a7a-4dc4-b647-f7afbba18b0c" colab={"base_uri": "https://localhost:8080/", "height": 34}
'''===============
Test Arbitrage Free
================='''
arbFree = ArbitrageFree()
arbFree.pc_parity(option1,option2)
# + id="fljlDrT_zU9C" colab_type="code" colab={}
class ImpliedVolatility:
    """Backs out the BSM implied volatility via Newton-Raphson iteration."""
    def newtonImpliedVolCalc(self, vanillaoption, marketprice):
        """Return the implied volatility that reprices `vanillaoption`
        to `marketprice`.

        Newton's method starts from the option's stored vol and iterates
        until successive estimates differ by less than 1e-6.

        Fix: the original divided vega by 100 (the per-1%% reporting
        convention), which made the Newton step 100x too large and the
        iteration divergent; the raw derivative dPrice/dvol is used here.
        """
        otype = vanillaoption.otype
        t = vanillaoption.t
        K = vanillaoption.K
        r = vanillaoption.r
        S = vanillaoption.S
        tolerance = 0.000001
        xnew = vanillaoption.vol
        xold = xnew - 1
        while abs(xnew - xold) > tolerance:
            sqrt_t = np.sqrt(t)
            d1 = 1/(xnew*sqrt_t)*(np.log(S/K) + (r + np.power(xnew,2)/2)*(t))
            d2 = d1 - (xnew*sqrt_t)
            optionprice = otype * S * ss.norm.cdf(otype * d1) - otype * K * np.exp(-r * t) * ss.norm.cdf(otype * d2)
            vega = ss.norm.pdf(d1) * sqrt_t * S  # dPrice/dvol (not per 1%)
            xold = xnew
            xnew = xnew - ((optionprice - marketprice) / (vega))
        return abs(xnew)
# + id="6FeVJquU2RIv" colab_type="code" outputId="9fae0abd-53ab-448d-b555-32178e4c505f" colab={"base_uri": "https://localhost:8080/", "height": 51}
impliedVol = ImpliedVolatility()
call_implied_vol = impliedVol.newtonImpliedVolCalc(option1,6.5706)
print("Call implied vol is", call_implied_vol)
put_implied_vol = impliedVol.newtonImpliedVolCalc(option2, 5.6254)
print("Put implied vol is",put_implied_vol)
# + id="WUxGg_uncjRB" colab_type="code" colab={}
# Build grids of implied vols over (market price, time-to-expiry) for the
# later surface plots.  Prices step in cents, maturities in 0.01-year steps.
marketpriceCall = 6.57
marketpricePut = 5.62
impliedVolCallArray = []
# NOTE(review): the upper bound uses marketpricePut — presumably this should
# be marketpriceCall + 2.0 to center the grid on the call price; confirm.
for i in range(int(100*(marketpriceCall-2.0)), int(100*(marketpricePut+2.0)), 1):
    x_call_array = []
    for j in range(int(100*(0.01)),int(100*(2.51)),1):
        s = i * (0.01)  # candidate market price in dollars
        t = j * (0.01)  # time to expiry in years
        optionImpVol = VanillaOption(t = t)
        x_call_array.append(impliedVol.newtonImpliedVolCalc(optionImpVol, s))
    impliedVolCallArray.append(x_call_array)
impliedVolPutArray = []
for i in range(int(100*(marketpricePut-3.0)), int(100*(marketpricePut+3.0)), 1):
    x_put_array = []
    for j in range(int(100*(0.01)),int(100*(2.5)),1):
        s = i * (0.01)
        t = j * (0.01)
        optionImpVolPut = VanillaOption(t = t, otype = -1)
        x_put_array.append(impliedVol.newtonImpliedVolCalc(optionImpVolPut, s))
    impliedVolPutArray.append(x_put_array)
# + id="frS3wKGy-IgF" colab_type="code" outputId="0d99a904-916c-463d-ea1d-0fab9ec74030" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(len(impliedVolCallArray))
print(len(impliedVolCallArray[0]))
|
Vital/Restructure_w_Implied_Vol_Surface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a name="top"></a>
# <div style="width:1000 px">
#
# <div style="float:right; width:98 px; height:98px;">
# <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/src/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
# </div>
#
# <h1>Advanced Surface Observations: Working with Mesonet Data</h1>
# <h3>Unidata Python Workshop</h3>
#
# <div style="clear:both"></div>
# </div>
#
# <hr style="height:2px;">
#
# <div style="float:right; width:250 px"><img src="http://weather-geek.net/images/metar_what.png" alt="METAR" style="height: 200px;"></div>
#
# ### Questions
# 1. How do I read in complicated mesonet data with Pandas?
# 1. How do I merge multiple Pandas DataFrames?
# 1. What's the best way to make a station plot of data?
# 1. How can I make a time series of data from one station?
#
# ### Objectives
# 1. <a href="#reading">Read Mesonet data with Pandas</a>
# 2. <a href="#merge">Merge multiple Pandas DataFrames together </a>
# 3. <a href="#plot">Plot mesonet data with MetPy and CartoPy</a>
# 4. <a href="#timeseries">Create time series plots of station data</a>
# <a name="reading"></a>
# # Reading Mesonet Data
# In this notebook, we're going to use the Pandas library to read text-based data. Pandas is excellent at handling text, csv, and other files. However, you have to help Pandas figure out how your data is formatted sometimes. Lucky for you, mesonet data frequently comes in forms that are not the most user-friendly. Through this notebook, we'll see how these complicated datasets can be handled nicely by Pandas to create useful station plots for hand analysis or publication.
# Import Pandas
import pandas as pd
# ### West Texas Mesonet
# The [West Texas Mesonet](http://www.depts.ttu.edu/nwi/research/facilities/wtm/index.php) is a wonderful data source for researchers and storm chasers alike! We have some 5-minute observations from the entire network on 22 March 2019 that we'll analyze in this notebook.
# Pandas can parse time into a nice internal storage format as we read in the file. If the time is specified in the file in a somewhat standard form, pandas will even guess at the format if you tell it which column to use. However, in this case the time is reported in a horrible format: between one and four characters that, if there are four characters, represent hours and minutes as HHMM. Let's take a character string, turn it into an integer, and then use integer string formatting to write out a four character string.
for t in ['0', '05', '100', '1005']:
print('{0:04d}'.format(int(t)))
# Pandas can be told how to parse non-standard dates formats by writing an arbitrary function that takes a string and returns a datetime. Here's what that function looks like in this case. We can use timedelta to convert hours and minutes, and then add them to the start date using date math.
def parse_tx_date(v, start_date=None):
    """Convert a West Texas time string (1-4 characters, HHMM) to a datetime.

    The raw value is zero-padded to four characters, split into an hour part
    and a minute part, and both are added to *start_date* as time offsets.
    """
    hhmm = '{0:04d}'.format(int(v))  # regularize to a four character HHMM string
    hours = pd.to_timedelta(int(hhmm[:2]), 'hour')
    minutes = pd.to_timedelta(int(hhmm[2:]), 'minute')
    return start_date + hours + minutes
# +
# Read in the data and handle the lines that cause issues

# Get a nice date variable corresponding to the start time.
# FIX: `pd.datetime` was deprecated and then removed from pandas;
# pd.to_datetime returns an equivalent Timestamp on every pandas version.
start_date = pd.to_datetime('2019-03-22')
print(start_date)

# Pre-apply the start date to our date parsing function, so that pandas only passes one value
from functools import partial
date_parser = partial(parse_tx_date, start_date=start_date)

# NOTE(review): error_bad_lines/warn_bad_lines were replaced by
# on_bad_lines='skip' in pandas >= 1.3 and removed in 2.0 -- update these
# keywords when the teaching environment's pandas is upgraded.
filename = 'West_Texas_data/FIVEMIN_82.txt'
tx_data = pd.read_csv(filename, delimiter=',', header=None, error_bad_lines=False, warn_bad_lines=False,
                      parse_dates=[2], date_parser=date_parser
                      )
tx_data
# -
# Rename columns to be understandable
# (the list order must match the 19 raw columns of the FIVEMIN file)
tx_data.columns = ['Array_ID', 'QC_flag', 'Time', 'Station_ID', '10m_scalar_wind_speed',
                   '10m_vector_wind_speed', '10m_wind_direction',
                   '10m_wind_direction_std', '10m_wind_speed_std',
                   '10m_gust_wind_speed', '1.5m_temperature',
                   '9m_temperature', '2m_temperature',
                   '1.5m_relative_humidity', 'station_pressure', 'rainfall',
                   'dewpoint', '2m_wind_speed', 'solar_radiation']
tx_data
# The West Texas mesonet provides data on weather, agriculture, and radiation. These different observations are encoded 1, 2, and 3, respectively in the Array ID column. Let's parse out only the meteorological data for this exercise.

# Remove non-meteorological rows (Array_ID 1 == weather observations)
tx_data = tx_data[tx_data['Array_ID'] == 1]
tx_data

# Station pressure is 600 hPa lower than it should be, so let's correct that as well!

# Correct pressure (raw values are reported 600 hPa low)
tx_data['station_pressure'] += 600
tx_data['station_pressure']

# Finally, let's read in the station metadata file for the West Texas mesonet, so that we can have coordinates to plot data later on.
tx_stations = pd.read_csv('WestTexas_stations.csv')
tx_stations
# ### Oklahoma Data
# Try reading in the Oklahoma Mesonet data located in the `201903222300.mdf` file using Pandas. Check out the documentation on Pandas if you run into issues! Make sure to handle missing values as well. Also read in the Oklahoma station data from the `Oklahoma_stations.csv` file. Only read in the station ID, latitude, and longitude columns from that file.
# Your code here
def parse_ok_date(v, start_date=None):
    """Convert an Oklahoma time value (minutes since *start_date*) to a datetime."""
    # The value is a whole number of minutes past the start of the day.
    minutes_since_start = pd.to_timedelta(int(v), 'minute')
    return start_date + minutes_since_start
# # %load solutions/read_ok.py
# <a name="merge"></a>
# # Merging DataFrames
# We now have two data files per mesonet - one for the data itself and one for the metadata. It would be really nice to combine these DataFrames together into one for each mesonet. Pandas has some built in methods to do this - see [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html). For this example, we'll be using the `merge` method. First, let's rename columns in the Oklahoma station DataFrame to be more understandable.

# Rename columns so merging can occur
# NOTE: `ok_data` and `ok_stations` are created by the %load-ed solution cell above.
ok_stations.columns = ['STID', 'LAT', 'LON']

# Conveniently, we have a `STID` column in both DataFrames. Let's base our merge on that and see what we get!

# Merge the two data frames based on the Station ID (default inner join on 'STID')
ok_data = pd.merge(ok_data, ok_stations, on='STID')
ok_data

# That was nice! But what if our DataFrames don't have the same column name, and we want to avoid renaming columns? Check out the documentation for `pd.merge` and see how we can merge the West Texas DataFrames together. Also, subset the data to only be from 2300 UTC, which is when our Oklahoma data was taken. Call the new DataFrame `tx_one_time`.

# Your code here

# # %load solutions/merge_texas.py
# <a name="plot"></a>
# # Creating a Station Plot
# Let's say we want to plot temperature, dewpoint, and wind barbs. Given our data from the two mesonets, do we have what we need? If not, use MetPy to calculate what you need!

# +
import metpy.calc as mpcalc
from metpy.units import units

# Your code here
# -

# # %load solutions/data_conversion.py
# Now, let's make a Station Plot with our data using MetPy and CartoPy.
from metpy.plots import StationPlot
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt

# +
# Set up a plot with map features
fig = plt.figure(figsize=(12, 12))
proj = ccrs.Stereographic(central_longitude=-100, central_latitude=35)
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='black')
ax.gridlines()

# Create a station plot pointing to an Axes to draw on as well as the location of points
# NOTE: `ok_dewpoint`, `ok_u`, `ok_v` are produced by the data_conversion solution cell.
stationplot = StationPlot(ax, ok_data['LON'].values, ok_data['LAT'].values, transform=ccrs.PlateCarree(),
                          fontsize=10)
stationplot.plot_parameter('NW', ok_data['TAIR'], color='red')
stationplot.plot_parameter('SW', ok_dewpoint, color='green')
stationplot.plot_barb(ok_u, ok_v)

# Texas Data
# NOTE: `tx_one_time`, `tx_u`, `tx_v` come from the merge_texas / data_conversion solutions.
stationplot = StationPlot(ax, tx_one_time['Long'].values, tx_one_time['Lat'].values, transform=ccrs.PlateCarree(),
                          fontsize=10)
stationplot.plot_parameter('NW', tx_one_time['2m_temperature'], color='red')
stationplot.plot_parameter('SW', tx_one_time['dewpoint'], color='green')
stationplot.plot_barb(tx_u, tx_v)
# -
# This is an informative plot, but is rather crowded. Using MetPy's `reduce_point_density` function, try cleaning up this plot to something that would be presentable/publishable. This function will return a mask, which you'll apply to all arrays in the plotting commands to filter down the data.

# +
# Oklahoma
# Project station lon/lat into the map projection so distances below are in meters
xy = proj.transform_points(ccrs.PlateCarree(), ok_data['LON'].values, ok_data['LAT'].values)

# Reduce point density so that there's only one point within a 50km circle
ok_mask = mpcalc.reduce_point_density(xy, 50000)

# Texas
# Your code here

# Plot
# Your code here
# -

# # %load solutions/reduce_and_plot.py
# <a name="timeseries"></a>
# # Creating Time Series for Stations
# What if we want to take data from all times from a single station to make a time series (or meteogram) plot? How can we easily do that with Pandas without having to aggregate the data by hand?

# +
import numpy as np

# Select daylight hours
tx_daytime = tx_data[(tx_data['Time'] >= '2019-03-22 06:00') & (tx_data['Time'] <= '2019-03-22 20:00')]

# Create sub-tables for each station
# FIX: the station column was renamed to 'Station_ID' earlier in this
# notebook, so grouping on 'ID' raises a KeyError.
tx_grp = tx_daytime.groupby('Station_ID')

# Get data from station DIMM
station_data = tx_grp.get_group('DIMM')

# Create hourly averaged data
# time_bins = pd.cut(station_data['Time'], np.arange(600, 2100, 100))
# xarray has groupby_bins, but pandas has cut
station_data.index = station_data['Time']
station_hourly = station_data.resample('H')
# station_hourly = station_data.groupby(time_bins)

station_hourly_mean = station_hourly.mean()
station_hourly_mean = station_hourly_mean.reset_index()  # no longer index by time so that we get it back as a regular variable.

# The times are reported at the beginning of the interval, but really represent
# the mean symmetric about the half hour. Let's fix that.
# from datetime import timedelta timedelta(minutes=30) #
station_hourly_mean['Time'] += pd.to_timedelta(30, 'minutes')

print(station_hourly_mean['Time'])
print(station_data['Time'])
# -

# Use the data above to make a time series plot of the instantaneous data and the hourly averaged data:

# Your code here

# # %load solutions/mesonet_timeseries.py
|
pages/workshop/Surface_Data/Advanced StationPlots with Mesonet Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: capstone_nf
# language: python
# name: capstone_nf
# ---
import pandas as pd
from dotenv import load_dotenv, find_dotenv
import os
from pathlib import Path

# Load environment variables
load_dotenv(find_dotenv())

# Base directory for data files, configured via the DATA_DIR_BASE_PATH env var.
# NOTE(review): os.environ.get returns None when the variable is unset, which
# would make os.path.join below raise -- confirm the .env file is present.
data_base_dir = os.environ.get('DATA_DIR_BASE_PATH')
fname = os.path.join(data_base_dir, 'raw', 'ISO10383_MIC.csv')
fname
#fname = os.path.join(data_base_dir, 'raw', 'ISO10383_MIC.csv')
#fname = Path(fname)
pd.read_csv(fname)
with open(fname, 'rb') as f:
    text = f.read()
# NOTE(review): pd.DataFrame.from_items was removed in pandas 1.0, and `text`
# is a raw bytes blob rather than (key, values) pairs -- this line looks like
# broken exploratory code; the pd.read_csv call above already parses the file.
pd.DataFrame.from_items(text)
from atomm.DataManager.main import MSDataManager
from dateutil.relativedelta import relativedelta
from datetime import datetime

# Pull one trailing year of AAPL data through the project's data manager.
todaydate = datetime.now()
dh = MSDataManager()
# presumably maps an index ticker to its constituent symbols -- verify in atomm
options = dh.IndexConstituentsDict('SPY')
dh.ReturnData(
    'AAPL',
    start_date=(todaydate - relativedelta(weeks=52)),
    end_date=todaydate,
)
dh.ReturnIndexConstituents('DJIA')
|
notebooks/01_Business_Understanding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
LICENSE MIT
2020
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphes.
I'm currently cleaning the code, please ask me if something is not clear enough.
The charts are exported to 'charts/images/france'.
Data is download to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
def nbWithSpaces(nb):
    """Format a number with spaces as thousand separators (French convention).

    The value is rounded to the nearest integer first, e.g. 1234.6 -> '1 235'.

    FIX: the previous hand-rolled slicing skipped the separator at the exact
    boundaries (1000, 10000, 100000) and inserted only a single space for
    values of one million or more; standard ','-grouping swapped to spaces
    handles every magnitude correctly.
    """
    return '{:,}'.format(int(round(nb))).replace(',', ' ')
# +
import pandas as pd
import plotly.graph_objects as go
import france_data_management as data
from datetime import datetime
from datetime import timedelta
from plotly.subplots import make_subplots
import plotly
import math
import os
import json

# Repository-relative output roots for charts and stats exports.
PATH = "../../"
PATH_STATS = "../../data/france/stats/"

import locale
# French locale so strftime('%d %b' / '%d %B %Y') below renders French month
# names in chart annotations.
# NOTE(review): raises locale.Error if fr_FR.UTF-8 is not installed on the host.
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# -

# Download/import the national datasets via the project's data module.
df, df_confirmed, dates, df_new, df_tests, df_deconf, df_sursaud, df_incid, df_tests_viros = data.import_data()
data.download_data_variants_deps()
df_variants = data.import_data_variants_deps()
# +
# Aggregate hospital data to one row per (day, departement).
df_departements = df.groupby(["jour", "departmentName"]).sum().reset_index()
# Keep only the all-ages class (cl_age90 == 0) of the testing data.
df_incid_departements = df_incid[df_incid["cl_age90"]==0].groupby(["jour", "departmentName", "dep"]).sum().reset_index()
df_new_departements = df_new.groupby(["jour", "departmentName"]).sum().reset_index()

# Ordered, de-duplicated lists of departement names / observation dates
# (dict.fromkeys preserves first-seen order).
departements = list(dict.fromkeys(list(df_departements['departmentName'].values)))
dates_incid = list(dict.fromkeys(list(df_incid['jour'].values)))
last_day_plot = (datetime.strptime(max(dates), '%Y-%m-%d') + timedelta(days=1)).strftime("%Y-%m-%d")
# NOTE(review): despite the name, this adds 3 days, not 2 -- confirm intent.
last_day_plot_plus2 = (datetime.strptime(max(dates), '%Y-%m-%d') + timedelta(days=3)).strftime("%Y-%m-%d")
departements_nb = list(dict.fromkeys(list(df_tests_viros['dep'].values)))
# +
# ICU bed counts per departement, joined onto the hospital data.
lits_reas = pd.read_csv(PATH+'data/france/lits_rea.csv', sep=",")
df_departements_lits = df_departements.merge(lits_reas, left_on="departmentName", right_on="nom_dpt")
# -
data.download_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement()
df_vaccination = data.import_donnees_vaccination_par_tranche_dage_type_de_vaccin_et_departement()
# Drop the all-ages aggregate rows; keep only the individual age classes.
df_vaccination = df_vaccination[df_vaccination["libelle_classe_age"] != "Tout âge"]
# +
def cas_journ(departement):
    """Export the daily-positive-cases dashboard chart for one departement.

    Plots the 7-day centered rolling mean of positive tests (line, right axis)
    over the rolling mean of tests performed (bars, left axis), annotates the
    latest values, and writes the figure to
    images/charts/france/departements_dashboards/cas_journ_<departement>.jpeg.
    Reads module-level globals: df_incid_departements, dates, dates_incid,
    last_day_plot, PATH.
    """
    df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
    # 7-day centered rolling means of positives (P) and tests performed (T).
    df_incid_dep_rolling = df_incid_dep["P"].rolling(window=7, center=True).mean()
    df_incid_tests_dep_rolling = df_incid_dep["T"].rolling(window=7, center=True).mean()
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "cas_journ_"+departement, [0, df_incid_dep["P"].max()]
    title = "<b>Cas positifs</b> au Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Rebind fig to a 1x1 subplot grid so a secondary y-axis is available.
    fig = make_subplots(rows=1, cols=1, shared_yaxes=True, subplot_titles=[""], vertical_spacing = 0.08, horizontal_spacing = 0.1, specs=[[{"secondary_y": True}]])
    # Filled line of rolling positive cases (secondary/right axis).
    fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"],
        y = df_incid_dep_rolling,
        name = "Nouveaux décès hosp.",
        marker_color='rgb(8, 115, 191)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(8, 115, 191, 0.3)",
        showlegend=False
    ), secondary_y=True)
    # Grey bars of rolling tests performed (primary/left axis).
    fig.add_trace(go.Bar(
        x = df_incid_dep["jour"],
        y = df_incid_tests_dep_rolling,
        name = "Tests réalisés",
        marker_color='rgba(0, 0, 0, 0.2)',
        opacity=0.8,
        showlegend=False,
    ), secondary_y=False)
    # Single marker highlighting the latest reliable rolling value
    # (index -4: the centered window is incomplete for the last 3 days).
    fig.add_trace(go.Scatter(
        x = [dates_incid[-4]],
        y = [df_incid_dep_rolling.values[-4]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ), secondary_y=True)
    # Disabled raw-points trace kept for reference (string literal, no effect).
    """fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"],
        y = df_incid_dep["P"],
        name = "",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ), secondary_y=True)"""
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18), secondary_y=True)
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18), secondary_y=False)
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout annotations pointing at the latest rolling case and test values.
    fig['layout']['annotations'] += (dict(
        x = dates_incid[-4], y = df_incid_dep_rolling.values[-4], # annotation point
        xref='x1',
        yref='y2',
        text=" <b>{} {}".format('%d' % df_incid_dep_rolling.values[-4], "cas quotidiens<br></b>en moyenne du {} au {}.".format(datetime.strptime(dates_incid[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates_incid[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(8, 115, 191)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-250,
        ay=-70,
        arrowcolor="rgb(8, 115, 191)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),dict(
        x = dates_incid[-4], y = df_incid_tests_dep_rolling.values[-4], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_incid_tests_dep_rolling.values[-4], "tests réalisés<br></b>en moyenne du {} au {}.".format(datetime.strptime(dates_incid[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates_incid[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=0,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgba(0, 0, 0, 0.5)",
            size=15
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-250,
        ay=-70,
        arrowcolor="rgba(0, 0, 0, 0.5)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ))
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
#cas_journ("Savoie")
# -
def nombre_variants(departement):
    """Export a stacked-area chart splitting the departement's detected cases
    into L452R-mutation (Delta) cases vs. all other cases.

    Uses the variant screening share (tx_C1, in percent) to apportion the
    7-day rolling mean of positive cases, and writes the figure to
    images/charts/france/departements_dashboards/variants_nombre_<departement>.jpeg.
    Reads module-level globals: df_incid_departements, df_variants, dates, PATH.
    """
    df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
    # NOTE(review): assigning onto a filtered slice may emit a pandas
    # SettingWithCopyWarning -- consider .copy() upstream.
    df_incid_dep["P_rolling"] = df_incid_dep["P"].rolling(window=7).mean()
    df_variants_dep = df_variants[df_variants["dep"] == df_incid_dep["dep"].values[0]]
    fig = go.Figure()
    n_days = len(df_variants_dep)
    # Cases NOT carrying the L452R mutation: rolling cases * (100 - tx_C1)%.
    y=df_incid_dep["P_rolling"].values[-n_days:] * (100 - df_variants_dep.tx_C1.values)/100
    # Latest share as a percentage string with a French decimal comma.
    proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
    fig.add_trace(
        go.Scatter(
            x=df_variants_dep.jour,
            y=y,
            name="<b>Autres </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
            stackgroup='one'
        )
    )
    # Cases carrying the L452R mutation (Delta): rolling cases * tx_C1%.
    y=df_incid_dep["P_rolling"].values[-n_days:] * df_variants_dep.tx_C1.values/100
    proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
    fig.add_trace(
        go.Scatter(
            x=df_variants_dep.jour,
            y=y,
            name="Mutation L452R, dont <b>Delta </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
            showlegend=True,
            stackgroup='one'
        )
    )
    fig.update_yaxes(ticksuffix="")
    fig.update_layout(
        title={
            'text': "Nombre de variants dans les cas détectés - " + departement,
            'y':0.97,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top',
            'font': {'size': 20}
        },
        annotations = [
            dict(
                x=0.5,
                y=1.1,
                xref='paper',
                yref='paper',
                text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier - covidtracker.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')),
                showarrow = False
            )]
    )
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format("variants_nombre_"+departement), scale=1.5, width=750, height=500)
"""import numpy as np
def cas_journ_departements_couvre_feu(departements):
fig = go.Figure()
normalisation = True
range_x, name_fig, range_y, n = ["2020-10-29", last_day_plot], "impact_couvre_feu", [0, df_incid_departements["P"].max()*0.7], 30
title = "<b>Taux d'incidence</b>"
deps_couvre_feu_2_janvier = ["Hautes-Alpes", "Alpes-Maritimes", "Ardennes", "Doubs", "Jura", "Marne", "Haute-Marne", "Meurthe-et-Moselle", "Meuse", "Haute-Saône", "Vosges", "Territoire de Belfort", "Moselle", "Nièvre", \
"Saône-et-Loire"]
deps_couvre_feu_8_janvier = ["Bas-Rhin", "Haut-Rhin", "Côte-d'or", "Cher", "Allier", "Bouches-du-Rhône", "Vaucluse", "Alpes-de-Haute-Provence"]
#deps_couvre_feu_8_janvier = []
df_incid_dep_couvre_feu = [0]*n
df_incid_dep_couvre_feu_8 = [0]*n
df_incid_dep_autres = [0]*n
df_incid_dep_couvre_feu_ecart = [0]*n
df_incid_dep_couvre_feu_ecart_8 = [0]*n
df_incid_dep_autres_ecart = [0]*n
n_deps_couvre_feu = 0
n_deps_couvre_feu_8 = 0
n_autres_deps = 0
pop_deps_couvre_feu = 0
pop_deps_couvre_feu_8 = 0
pop_autres_deps = 0
for departement in departements:
df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
n_days= (datetime.strptime(max(df_incid_dep["jour"]), '%Y-%m-%d') - datetime.strptime("2021-01-02", '%Y-%m-%d')).days
df_incid_dep_rolling = df_incid_dep["P"].rolling(window=7, center=True).sum()*100000#/df_incid_dep["T"].rolling(window=7, center=False).mean() * 100
values = df_incid_dep_rolling.values[-n:]
if departement in deps_couvre_feu_2_janvier:
df_incid_dep_couvre_feu += values
n_deps_couvre_feu += 1
pop_deps_couvre_feu += df_incid_dep["pop"].values[0]
elif departement in deps_couvre_feu_8_janvier:
df_incid_dep_couvre_feu_8 += values
n_deps_couvre_feu_8 += 1
pop_deps_couvre_feu_8 += df_incid_dep["pop"].values[0]
else:
df_incid_dep_autres += values
n_autres_deps += 1
pop_autres_deps += df_incid_dep["pop"].values[0]
df_incid_dep_couvre_feu_mean = np.array(df_incid_dep_couvre_feu)/pop_deps_couvre_feu
df_incid_dep_couvre_feu_8_mean = np.array(df_incid_dep_couvre_feu_8)/pop_deps_couvre_feu_8
df_incid_dep_autres_mean = np.array(df_incid_dep_autres)/pop_autres_deps
suffix = ""
if normalisation:
suffix = " %"
df_incid_dep_couvre_feu_8_mean=df_incid_dep_couvre_feu_8_mean/df_incid_dep_couvre_feu_8_mean[-n_days-1]*100-100
df_incid_dep_couvre_feu_mean=df_incid_dep_couvre_feu_mean/df_incid_dep_couvre_feu_mean[-n_days-1]*100-100
df_incid_dep_autres_mean=df_incid_dep_autres_mean/df_incid_dep_autres_mean[-n_days-1]*100-100
#df_incid_dep_autres_mean/=df_incid_dep_autres_mean[-n_days-1]
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_couvre_feu_mean,
name = "Départements en couvre-feu renforcé (02/01)",
marker_color='rgb(8, 115, 191)',
line_width=5,
opacity=0.8,
showlegend=True
))
if len(deps_couvre_feu_8_janvier)>0:
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_couvre_feu_8_mean,
name = "Départements en couvre-feu renforcé (08/01)",
marker_color='orange',
line_width=5,
opacity=0.8,
showlegend=True
))
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_autres_mean,
name = "Départements en couvre-feu classique",
marker_color='black',
line_width=5,
opacity=0.8,
showlegend=True
))
###
max_value = max(max(df_incid_dep_autres_mean), max(df_incid_dep_couvre_feu_8_mean), max(df_incid_dep_couvre_feu_mean))
min_value = min(0, min(df_incid_dep_autres_mean), min(df_incid_dep_couvre_feu_8_mean), min(df_incid_dep_couvre_feu_mean))
fig.add_shape(type="line",
x0="2021-01-12", y0=min_value*1.5, x1="2021-01-12", y1=max_value*1.5,
line=dict(color="rgba(8, 115, 191, 1)",width=2, dash="dot")
)
fig.add_shape(type="line",
x0="2021-01-02", y0=min_value*1.5, x1="2021-01-02", y1=max_value*1.5,
line=dict(color="rgba(8, 115, 191, 1)",width=2, dash="dot")
)
### Orange
annots = []
if len(deps_couvre_feu_8_janvier)> 0:
fig.add_shape(type="line",
x0="2021-01-08", y0=min_value*1.5, x1="2021-01-08", y1=max_value*1.5,
line=dict(color="orange",width=2, dash="dot")
)
fig.add_shape(type="line",
x0="2021-01-18", y0=min_value*1.5, x1="2021-01-18", y1=max_value*1.5,
line=dict(color="orange",width=2, dash="dot")
)
annots = [dict(
x=df_incid_dep["jour"].values[-3],
y=df_incid_dep_autres_mean[-4],
xref='x1',
yref='y1',
ax=150,
ay=200,
font=dict(size=12, color="black"),
arrowcolor='black',
text= ["+"+str(round(value, 1))+" %" if value>0 else str(round(value, 1)) + " %" for value in [df_incid_dep_autres_mean[-4]]][0], #str(round(df_incid_dep_couvre_feu_8_mean[-4], 1))+" %",
showarrow = False
),
dict(
x=df_incid_dep["jour"].values[-3],
y=df_incid_dep_couvre_feu_8_mean[-4],
xref='x1',
yref='y1',
ax=150,
ay=200,
font=dict(size=12, color="orange"),
arrowcolor='orange',
text= ["+"+str(round(value, 1))+" %" if value>0 else str(round(value, 1))+" %" for value in [df_incid_dep_couvre_feu_8_mean[-4]]][0], #str(round(df_incid_dep_couvre_feu_8_mean[-4], 1))+" %",
showarrow = False
),
dict(
x=df_incid_dep["jour"].values[-3],
y=df_incid_dep_couvre_feu_mean[-4],
xref='x1',
yref='y1',
ax=150,
ay=200,
font=dict(size=12, color='rgb(8, 115, 191)'),
arrowcolor='rgb(8, 115, 191)',
text= ["+"+str(round(value, 1))+" %" if value>0 else str(round(value, 1))+" %" for value in [df_incid_dep_couvre_feu_mean[-4]]][0], #str(round(df_incid_dep_couvre_feu_8_mean[-4], 1))+" %",
showarrow = False
),
dict(
x="2021-01-08",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="orange"),
arrowcolor="orange",
text="Couvre feu 08/01",
showarrow = True
),
dict(
x="2021-01-18",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="orange"),
text='J+10',
arrowcolor="orange",
showarrow = True
),]
fig.update_yaxes(zerolinecolor='Grey', range=[min_value*1.5, max_value*1.5], tickfont=dict(size=18), ticksuffix=suffix)
fig.update_xaxes( ticks='inside', tickangle=0, tickfont=dict(size=18))
fig.update_layout(legend=dict(
yanchor="top",
y=0.2,
xanchor="left",
x=0.1
))
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig.update_layout(
margin=dict(
l=50,
r=10,
b=50,
t=70,
pad=0
),
barmode='group',
title={
'text': title,
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont = dict(
size=20),
xaxis=dict(
title='',
tickformat='%d/%m'),
annotations = annots + [
dict(
x=0.45,
y=1.07,
xref='paper',
yref='paper',
xanchor="center",
font=dict(size=14),
text='{}</b>'.format("Nb de cas/semaine/100k hab. Moyennes pondérées à la population de chaque dép."),
showarrow = False
),
dict(
x=0,
y=1.0,
xref='paper',
yref='paper',
font=dict(size=14),
text='{}. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b'), "Nb de cas/semaine/100k hab."),
showarrow = False
),
dict(
x="2021-01-02",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="rgba(8, 115, 191, 1)"),
arrowcolor="rgba(8, 115, 191, 1)",
text='Couvre feu 02/01'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b'), "Nb de cas/semaine/100k hab."),
showarrow = True
),
dict(
x="2021-01-12",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="rgb(8, 115, 191)"),
text='J+10',
arrowcolor="rgba(8, 115, 191, 1)",
showarrow = True
),
]
)
fig.write_image(PATH+"images/charts/france/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/{}.html'.format(name_fig), auto_open=False)
print("> " + name_fig)
cas_journ_departements_couvre_feu(departements)"""
"""import numpy as np
def cas_journ_departements_couvre_feu_hosp(departements):
fig = go.Figure()
normalisation = True
range_x, name_fig, range_y, n = ["2020-10-29", last_day_plot], "impact_couvre_feu", [0, df_incid_departements["P"].max()*0.7], 20
title = "<b>Taux d'incidence</b>"
deps_couvre_feu_2_janvier = ["Hautes-Alpes", "Alpes-Maritimes", "Ardennes", "Doubs", "Jura", "Marne", "Haute-Marne", "Meurthe-et-Moselle", "Meuse", "Haute-Saône", "Vosges", "<NAME>", "Moselle", "Nièvre", \
"Saône-et-Loire"]
deps_couvre_feu_8_janvier = ["Bas-Rhin", "Haut-Rhin", "Côte-d'or", "Cher", "Allier", "Bouches-du-Rhône", "Vaucluse", "Alpes-de-Haute-Provence"]
deps_couvre_feu_8_janvier = []
df_incid_dep_couvre_feu = [0]*n
df_incid_dep_couvre_feu_8 = [0]*n
df_incid_dep_autres = [0]*n
df_incid_dep_couvre_feu_ecart = [0]*n
df_incid_dep_couvre_feu_ecart_8 = [0]*n
df_incid_dep_autres_ecart = [0]*n
n_deps_couvre_feu = 0
n_deps_couvre_feu_8 = 0
n_autres_deps = 0
pop_deps_couvre_feu = 0
pop_deps_couvre_feu_8 = 0
pop_autres_deps = 0
for departement in departements:
#df_dep = df_departements[df_departements["departmentName"] == departement]
df_new_dep = df_new_departements[df_new_departements["departmentName"] == departement]
print(max(df_new_dep["jour"]))
n_days= (datetime.strptime(max(df_new_dep["jour"]), '%Y-%m-%d') - datetime.strptime("2021-01-05", '%Y-%m-%d')).days
df_incid_dep_rolling = df_new_dep["incid_hosp"].rolling(window=7, center=True).mean() #/df_incid_dep["T"].rolling(window=7, center=False).mean() * 100
values = df_incid_dep_rolling.values[-n:]
if departement in deps_couvre_feu_2_janvier:
df_incid_dep_couvre_feu += values
n_deps_couvre_feu += 1
pop_deps_couvre_feu += df_incid_dep["pop"].values[0]
elif departement in deps_couvre_feu_8_janvier:
df_incid_dep_couvre_feu_8 += values
n_deps_couvre_feu_8 += 1
pop_deps_couvre_feu_8 += df_incid_dep["pop"].values[0]
else:
df_incid_dep_autres += values
n_autres_deps += 1
pop_autres_deps += df_incid_dep["pop"].values[0]
df_incid_dep_couvre_feu_mean = np.array(df_incid_dep_couvre_feu)/pop_deps_couvre_feu
df_incid_dep_couvre_feu_8_mean = np.array(df_incid_dep_couvre_feu_8)/pop_deps_couvre_feu_8
df_incid_dep_autres_mean = np.array(df_incid_dep_autres)/pop_autres_deps
suffix = ""
if normalisation:
suffix = " %"
df_incid_dep_couvre_feu_8_mean=df_incid_dep_couvre_feu_8_mean/df_incid_dep_couvre_feu_8_mean[-n_days-1]*100-100
df_incid_dep_couvre_feu_mean=df_incid_dep_couvre_feu_mean/df_incid_dep_couvre_feu_mean[-n_days-1]*100-100
df_incid_dep_autres_mean=df_incid_dep_autres_mean/df_incid_dep_autres_mean[-n_days-1]*100-100
#df_incid_dep_autres_mean/=df_incid_dep_autres_mean[-n_days-1]
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_couvre_feu_mean,
name = "Départements en couvre-feu renforcé (02/01)",
marker_color='rgb(8, 115, 191)',
line_width=5,
opacity=0.8,
showlegend=True
))
if len(deps_couvre_feu_8_janvier)>0:
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_couvre_feu_8_mean,
name = "Départements en couvre-feu renforcé (08/01)",
marker_color='orange',
line_width=5,
opacity=0.8,
showlegend=True
))
fig.add_trace(go.Scatter(
x = df_incid_dep["jour"].values[-n:],
y = df_incid_dep_autres_mean,
name = "Départements en couvre-feu classique",
marker_color='black',
line_width=5,
opacity=0.8,
showlegend=True
))
###
max_value = max(max(df_incid_dep_autres_mean), max(df_incid_dep_couvre_feu_8_mean), max(df_incid_dep_couvre_feu_mean))
min_value = min(0, min(df_incid_dep_autres_mean), min(df_incid_dep_couvre_feu_8_mean), min(df_incid_dep_couvre_feu_mean))
fig.add_shape(type="line",
x0="2021-01-12", y0=min_value*1.5, x1="2021-01-12", y1=max_value*1.5,
line=dict(color="rgba(8, 115, 191, 1)",width=2, dash="dot")
)
fig.add_shape(type="line",
x0="2021-01-02", y0=min_value*1.5, x1="2021-01-02", y1=max_value*1.5,
line=dict(color="rgba(8, 115, 191, 1)",width=2, dash="dot")
)
### Orange
annots = []
if len(deps_couvre_feu_8_janvier)> 0:
fig.add_shape(type="line",
x0="2021-01-08", y0=min_value*1.5, x1="2021-01-08", y1=max_value*1.5,
line=dict(color="orange",width=2, dash="dot")
)
fig.add_shape(type="line",
x0="2021-01-18", y0=min_value*1.5, x1="2021-01-18", y1=max_value*1.5,
line=dict(color="orange",width=2, dash="dot")
)
annots = [dict(
x="2021-01-08",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="orange"),
arrowcolor="orange",
text="Couvre feu 08/01",
showarrow = True
),
dict(
x="2021-01-18",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="orange"),
text='J+10',
arrowcolor="orange",
showarrow = True
),]
fig.update_yaxes(zerolinecolor='Grey', range=[min_value*1.5, max_value*1.5], tickfont=dict(size=18), ticksuffix=suffix)
fig.update_xaxes( ticks='inside', tickangle=0, tickfont=dict(size=18))
fig.update_layout(legend=dict(
yanchor="top",
y=0.2,
xanchor="left",
x=0.1
))
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig.update_layout(
margin=dict(
l=50,
r=10,
b=50,
t=70,
pad=0
),
barmode='group',
title={
'text': title,
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont = dict(
size=20),
xaxis=dict(
title='',
tickformat='%d/%m'),
annotations = annots + [
dict(
x=0.45,
y=1.07,
xref='paper',
yref='paper',
xanchor="center",
font=dict(size=14),
text='{}</b>'.format("Nb de cas/semaine/100k hab. Moyennes pondérées à la population de chaque dép."),
showarrow = False
),
dict(
x=0,
y=1.0,
xref='paper',
yref='paper',
font=dict(size=14),
text='{}. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b'), "Nb de cas/semaine/100k hab."),
showarrow = False
),
dict(
x="2021-01-02",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="rgba(8, 115, 191, 1)"),
arrowcolor="rgba(8, 115, 191, 1)",
text='Couvre feu 02/01'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b'), "Nb de cas/semaine/100k hab."),
showarrow = True
),
dict(
x="2021-01-12",
y=max_value*0.95,
xref='x1',
yref='y1',
font=dict(size=9, color="rgb(8, 115, 191)"),
text='J+10',
arrowcolor="rgba(8, 115, 191, 1)",
showarrow = True
),
]
)
fig.write_image(PATH+"images/charts/france/{}_hosp.jpeg".format(name_fig), scale=1.5, width=750, height=500)
print("> " + name_fig)
cas_journ_departements_couvre_feu_hosp(departements)"""
def incid_dep(departement):
    """Plot the weekly incidence curve (cases/week/100k hab.) for one
    département, save it as an SVG into a trend-classification folder,
    and return the classification string.

    Classification ("higher"/"lower" = latest incidence above/below 50;
    "high"/"low" = incidence rising/falling vs one week earlier):
    "higher_low", "higher_high", "lower_low" or "lower_high".
    """
    # Restrict the national incidence table to this département.
    df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
    # 7-day centred rolling sum of positives, normalised per 100 000 inhabitants.
    df_incid_dep_rolling = df_incid_dep["P"].rolling(window=7, center=True).sum()/df_incid_dep["pop"] * 100000
    dep_nb = df_incid_dep["dep"].values[0]
    range_x, name_fig, range_y = ["2020-09-29", last_day_plot], "incid_"+departement, [0, df_incid_dep_rolling.max()]
    title = "<b>" + departement + " (" + dep_nb + ")" + "</b>"
    fig = go.Figure()
    # Horizontal alert-threshold line at 50 cases/week/100k.
    fig.add_shape(type="line",
        x0="2019-03-17", y0=50, x1="2021-03-17", y1=50,
        line=dict(color="Red",width=1.5, dash="dot")
    )
    # Historical part of the curve (thin line), excluding the last 13 days
    # whose centred rolling values are incomplete / less reliable.
    fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"][:len(df_incid_dep["jour"])-13],
        y = df_incid_dep_rolling[:len(df_incid_dep_rolling)-13],
        name = "",
        marker_color='rgb(8, 115, 191)',
        line_width=0.5,
        mode="lines",
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(8, 115, 191, 0.2)",
        showlegend=False
    ))
    # Most recent 14 days, drawn thicker and with a denser fill.
    fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"][len(df_incid_dep["jour"])-14:],
        y = df_incid_dep_rolling[len(df_incid_dep_rolling)-14:],
        name = "",
        marker_color='rgb(8, 115, 191)',
        mode="lines",
        line_width=2,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(8, 115, 191, 0.4)",
        showlegend=False
    ))
    # Marker on the 4th-to-last day: last value for which the centred
    # 7-day window is fully available.
    fig.add_trace(go.Scatter(
        x = [dates_incid[-4]],
        y = [df_incid_dep_rolling.values[-4]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        marker_size=30,
        opacity=1,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18), visible=False)
    fig.update_xaxes(nticks=10, ticks='inside', range=range_x, tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=50),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
    )
    # Large numeric label on the latest reliable value.
    fig['layout']['annotations'] += (dict(
        x = dates_incid[-4], y = df_incid_dep_rolling.values[-4], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_incid_dep_rolling.values[-4], "".format()),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(8, 115, 191)",
            size=50
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-40,
        ay=-50,
        arrowcolor="rgb(8, 115, 191)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    # Classify the trend: compare the latest available value with the one
    # from a week earlier, against the 50 cases/100k threshold.
    incid_j0 = df_incid_dep_rolling.dropna().values[-1]
    incid_j7 = df_incid_dep_rolling.dropna().values[-8]
    if incid_j0 > 50:
        if (incid_j0 - incid_j7) < 0:
            class_dep = "higher_low"
        else:
            class_dep = "higher_high"
    else:
        if (incid_j0 - incid_j7) < 0:
            class_dep = "lower_low"
        else:
            class_dep = "lower_high"
    # The output folder encodes the classification, so the website can
    # group départements by trend.
    folder = "covidep/"+class_dep
    fig.write_image(PATH+"images/charts/france/{}/{}.svg".format(folder, name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
    return class_dep
#incid_dep("Savoie")
def comparaison_cas_dc(departement):
    """Mirror chart comparing cases (positive half) with hospital deaths
    (negative half, shifted 12 days earlier and scaled up) for one
    département, saved as a JPEG dashboard image.
    """
    df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
    df_dep = df_new_departements[df_new_departements["departmentName"] == departement]
    y1 = df_incid_dep.incidence
    # Deaths per inhabitant, 7-day mean, advanced by 12 days so the deaths
    # curve visually lines up with the cases curve that preceded it.
    y2 = (df_dep.incid_dc/df_dep.departmentPopulation).rolling(window=7).mean().shift(-12)
    # Fixed scaling factor so both curves share a comparable magnitude.
    coef_normalisation = 50000000 #y1.max()/y2.max()
    # Symmetric y range, rounded up to the next hundred.
    max_y = math.ceil(max(y1.max(), y2.max()*coef_normalisation)*1.1 / 100) * 100
    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=df_incid_dep.jour,
        y=y1,
        name="Cas pour 100 k",
        marker_color='rgb(8, 115, 191)',
        fillcolor="rgba(8, 115, 191, 0.3)",
        fill='tozeroy'))
    # Dotted mirror image of the cases curve, to compare against the
    # (negated) deaths curve below zero.
    fig.add_trace(go.Scatter(
        x=df_incid_dep.jour,
        y=-y1,
        name="Miroir des cas",
        marker_color='rgba(8, 115, 191, 0.2)',
        line=dict(
            dash="dot")
    ))
    fig.add_trace(go.Scatter(
        x=df_dep.jour,
        y=-y2*coef_normalisation,
        marker_color='black',
        fillcolor="rgba(0,0,0,0.3)",
        name="Décès hospitaliers<br>décalés de 12 j.<br>pour {} Mio".format(round(coef_normalisation/1000000)),
        fill='tozeroy'))
    # Tick labels are shown unsigned on both halves of the mirror chart.
    fig.update_yaxes(range=[-max_y, max_y], tickvals=[-max_y, -max_y/2, 0, max_y/2, max_y], ticktext=[max_y, max_y/2, 0, max_y/2, max_y])
    fig.update_layout(
        title={
            'text': "Cas vs. Décès hospitaliers - {}".format(departement),
            'y':0.97,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=30),
        annotations = [
            dict(
                x=0.5,
                y=1.12,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text="Cas pour 100 000 habitants et décès hospitaliers avancés de 12 j. pour {} Millions d'habitants<br>{} - @GuillaumeRozier - covidtracker.fr".format(round(coef_normalisation/1000000), datetime.strptime(df.jour.max(), '%Y-%m-%d').strftime('%d %B %Y')),#'Date : {}. Source : Santé publique France. Auteur : GRZ - covidtracker.fr.'.format(), showarrow = False
                showarrow=False
            ),
        ]
    )
    fig.write_image(PATH + "images/charts/france/departements_dashboards/comparaison_cas_dc_{}.jpeg".format(departement), scale=2, width=900, height=600)
#plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/{}.html'.format(name_fig), auto_open=False)
#comparaison_cas_dc("Pyrénées-Orientales")
def hosp_journ(departement):
    """Plot current hospitalisations (area curve) plus daily admissions
    (bars + 7-day mean line) for one département, save as JPEG.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    df_new_dep = df_new_departements[df_new_departements["departmentName"] == departement]
    #df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    range_x, name_fig = ["2020-03-29", last_day_plot], "hosp_journ_"+departement
    title = "Personnes <b>hospitalisées</b> pour Covid19 - <b>" + departement +"</b>"
    fig = go.Figure()
    # Main area curve: number of people currently hospitalised.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = df_dep["hosp"],
        name = "Nouveaux décès hosp.",
        marker_color='rgb(209, 102, 21)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Marker highlighting the latest value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_dep["hosp"].values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(209, 102, 21)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    # Daily new hospital admissions as bars.
    fig.add_trace(go.Bar(
        x = df_new_dep["jour"],
        y = df_new_dep["incid_hosp"],
        name = "Admissions hosp.",
        marker_color='rgb(209, 102, 21)',
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # 7-day rolling mean of admissions, overlaid on the bars.
    fig.add_trace(go.Scatter(
        x = df_new_dep["jour"],
        y = df_new_dep["incid_hosp"].rolling(window=7).mean(),
        name = "Admissions hosp.",
        marker_color='rgb(209, 102, 21)',
        #mode="lines"
        line_width=2,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout annotations: latest hospitalised count and latest admissions.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_dep["hosp"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_dep["hosp"].values[-1], "personnes<br>hospitalisées</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(209, 102, 21)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="rgb(209, 102, 21)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),
    dict(
        x = df_new_dep["jour"].values[-1], y = (df_new_dep["incid_hosp"].values[-1]), # annotation point
        xref='x1',
        yref='y1',
        text="<b>{}</b> {}".format('%d' % df_new_dep["incid_hosp"].values[-1], "<br>admissions"),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(209, 102, 21)",
            size=10
        ),
        opacity=0.8,
        ax=-20,
        ay=-40,
        arrowcolor="rgb(209, 102, 21)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
# +
def hosp_comparaison_vagues(departement):
    """Stacked-bar chart comparing the second Covid19 wave against the peak
    of the first wave for one département: the portion of hospitalisations
    above the first-wave peak is drawn in red. Saves a JPEG.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    #df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    range_x, name_fig = ["2020-03-29", last_day_plot], "hosp_comp_vagues_"+departement
    title = ""#"<b>Personnes hospitalisées</b> pour Covid19 - " + departement
    fig = go.Figure()
    # First wave = peak hospitalisations before August 2020;
    # second wave = peak after September 2020.
    premiere_vague = df_dep[ df_dep["jour"] < "2020-08"]["hosp"].max()
    premiere_vague_date = df_dep[ df_dep["hosp"] == premiere_vague]["jour"].min()
    deuxieme_vague = df_dep[ df_dep["jour"] > "2020-09"]["hosp"].max()
    deuxieme_vague_date = df_dep[ (df_dep["hosp"] == deuxieme_vague) & (df_dep["jour"] > "2020-09")]["jour"].min()
    # Second-wave label is red if it exceeded the first wave, green otherwise.
    color_deuxieme_vague = "green"
    if deuxieme_vague > premiere_vague:
        color_deuxieme_vague = "red"
    hosp_values = df_dep["hosp"].values
    # Portion of each day's hospitalisations exceeding the first-wave peak.
    trace_to_add = [max(0, hosp - premiere_vague) for hosp in hosp_values]
    #deuxieme_vague += [df_dep[ df_dep["jour"] > "2020-09"]["hosp"].max()]
    color = ["red" if hosp > premiere_vague else "rgb(209, 102, 21)" for hosp in df_dep["hosp"].values]
    # Base bars: hospitalisations capped at the first-wave peak.
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = df_dep["hosp"].values - trace_to_add,
        name = "Nouveaux décès hosp.",
        marker_color="orange",
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Stacked red bars: the excess above the first-wave peak.
    # NOTE(review): the "<NAME>" placeholder below looks like scrubbed text
    # (legend is hidden anyway) — confirm against the original source.
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = trace_to_add,
        name = "<NAME> hosp.",
        marker_color="red",
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Dashed reference line at the first-wave peak, spanning the whole axis.
    fig.add_shape(
        type="line",
        x0="2000-01-01",
        y0=premiere_vague,
        x1="2030-01-01",
        y1=premiere_vague,
        opacity=1,
        #fillcolor="orange",
        line=dict(
            dash="dash",
            color="black",
            width=1,
        )
    )
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18), range=["2020-03-15", last_day_plot])
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        paper_bgcolor='rgba(255,255,255,1)',
        plot_bgcolor='rgba(255,255,255,1)',
        bargap=0,
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='stack',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=-0.08,
                xref='paper',
                yref='paper',
                text="Date : {}. Source : Santé publique France. Auteur : <NAME> - covidtracker.fr - nombre d'hospitalisations".format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
            ),
        ]
    )
    # Arrow annotations marking the two wave peaks.
    fig['layout']['annotations'] += (dict(
        x = deuxieme_vague_date, y = deuxieme_vague, # annotation point
        xref='x1',
        yref='y1',
        text="Deuxième vague",
        xshift=-5,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color=color_deuxieme_vague,
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-150,
        ay=-50,
        arrowcolor=color_deuxieme_vague,
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),dict(
        x = premiere_vague_date, y = premiere_vague, # annotation point
        xref='x1',
        yref='y1',
        text="Première vague",
        xshift=0,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="black",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=0,
        ay=-50,
        arrowcolor="black",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ))
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=1000, height=700)
    print("> " + name_fig)
#hosp_comparaison_vagues("Savoie")
# +
def hosp_journ_elias(dep):
    """Hospital inflow/outflow chart for one département: 7-day means of
    admissions (red) and discharges+deaths (green); the area between the two
    curves shows the daily balance of occupied beds. Saves a JPEG.
    """
    df_new_dep = df_new[df_new["departmentName"]==dep]
    # Cap the y axis below the admissions peak to keep the balance readable.
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "hosp_journ_flux_"+dep, [0, df_new_dep["incid_hosp"].max()*0.9]
    title = "<b>Entrées et sorties de l'hôpital</b> pour Covid19 • <b>" + dep + "</b>"
    # Single-variant loop kept from a former ["", "log"] linear/log pattern.
    for i in [""]:
        if i=="log":
            title+= " [log.]"
        fig = go.Figure()
        entrees_rolling = df_new[df_new["departmentName"]==dep]["incid_hosp"].rolling(window=7).mean().values
        # Admissions area (red fill down to zero).
        fig.add_trace(go.Scatter(
            x = dates,
            y =entrees_rolling,
            name = "",
            marker_color='red',
            line_width=6,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(235, 64, 52,0.5)",
            showlegend=False
        ))
        # Discharges = returns home + hospital deaths (7-day means).
        rad_rolling = df_new_dep["incid_rad"].rolling(window=7).mean()
        dc_rolling = df_new_dep["incid_dc"].rolling(window=7).mean()
        sorties_rolling = (rad_rolling + dc_rolling).values
        # Discharges area (green fill down to zero).
        fig.add_trace(go.Scatter(
            x = dates,
            y = sorties_rolling,
            name = "",
            marker_color='green',
            line_width=0,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(12, 161, 2, 0.5)",
            showlegend=False
        ))
        # White mask up to min(entrées, sorties): only the area BETWEEN the
        # two curves stays coloured, showing the signed balance.
        fig.add_trace(go.Scatter(
            x = dates,
            y = [entrees_rolling[i] if entrees_rolling[i]<sorties_rolling[i] else sorties_rolling[i] for i in range(len(entrees_rolling))],
            name = "",
            marker_color='yellow',
            line_width=0,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(255, 255, 255, 1)",
            showlegend=False
        ))
        # Re-draw both lines on top of the mask.
        fig.add_trace(go.Scatter(
            x = dates,
            y = sorties_rolling,
            name = "",
            marker_color='green',
            line_width=6,
            opacity=1,
            showlegend=False
        ))
        fig.add_trace(go.Scatter(
            x = dates,
            y =entrees_rolling,
            name = "",
            marker_color='red',
            line_width=6,
            opacity=1,
            showlegend=False
        ))
        # Vertical markers for lockdown / reopening milestone dates.
        fig.add_shape(type="line",
            x0="2020-03-17", y0=0, x1="2020-03-17", y1=300000,
            line=dict(color="Red",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-05-11", y0=0, x1="2020-05-11", y1=300000,
            line=dict(color="Green",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-10-30", y0=0, x1="2020-10-30", y1=300000,
            line=dict(color="Red",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-11-28", y0=0, x1="2020-11-28", y1=300000,
            line=dict(color="Orange",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-12-15", y0=0, x1="2020-12-15", y1=300000,
            line=dict(color="green",width=0.5, dash="dot")
        )
        # End-point markers for the two curves.
        fig.add_trace(go.Scatter(
            x = [dates[-1]],
            y = [sorties_rolling[-1]],
            name = "",
            mode="markers",
            marker_color='green',
            marker_size=13,
            opacity=1,
            showlegend=False
        ))
        fig.add_trace(go.Scatter(
            x = [dates[-1]],
            y = [entrees_rolling[-1]],
            name = "",
            mode="markers",
            marker_color='red',
            marker_size=13,
            opacity=1,
            showlegend=False
        ))
        ###
        fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18), ) #range=["2020-03-17", last_day_plot_dashboard]
        fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18), range=range_y)
        # Here we modify the tickangle of the xaxis, resulting in rotated labels.
        fig.update_layout(
            paper_bgcolor='rgba(255,255,255,1)',
            plot_bgcolor='rgba(255,255,255,1)',
            margin=dict(
                l=50,
                r=150,
                b=50,
                t=70,
                pad=0
            ),
            legend_orientation="h",
            barmode='group',
            title={
                'text': title,
                'y':0.95,
                'x':0.5,
                'xanchor': 'center',
                'yanchor': 'top'},
            titlefont = dict(
                size=30),
            xaxis=dict(
                title='',
                tickformat='%d/%m'),
            annotations = [
                dict(
                    x=0.5,
                    y=1.01,
                    font=dict(size=14),
                    xref='paper',
                    yref='paper',
                    text="Moyenne mobile 7 jours. Données Santé publique France. Auteurs @eorphelin @guillaumerozier - <b>covidtracker.fr</b>.", #'Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')),
                    showarrow = False
                ),
            ]
        )
        # Stagger the two end-point callouts so they never overlap.
        if entrees_rolling[-1]<sorties_rolling[-1]:
            y_e = -20
            y_s = -100
        else:
            y_e = -100
            y_s = -20
        fig['layout']['annotations'] += (
            dict(
                x = "2020-05-20", y = (entrees_rolling[62]+sorties_rolling[62])/2, # annotation point
                xref='x1',
                yref='y1',
                text="L'aire représente le solde.<br>Si elle est <span style='color:green'>verte</span>, il y a plus de sorties que d'entrées,<br>le nombre de lits occupés diminue.",
                xshift=0,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="black",
                    size=10
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=80,
                ay=-100,
                arrowcolor="black",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=6,
                showarrow=True
            ),
            dict(
                x = dates[-1], y = (entrees_rolling[-1]), # annotation point
                xref='x1',
                yref='y1',
                text=" <b>{} {}".format(round(entrees_rolling[-1], 1), "entrées à l'hôpital</b><br>en moyenne le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
                xshift=-2,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="red",
                    size=12
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=100,
                ay=y_e,
                arrowcolor="red",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=0,
                showarrow=True
            ),
            dict(
                x = dates[-1], y = (sorties_rolling[-1]), # annotation point
                xref='x1',
                yref='y1',
                text=" <b>{} {}".format(round(sorties_rolling[-1], 1), "sorties de l'hôpital</b><br>en moyenne le {}.<br>dont {} décès et<br>{} retours à domicile".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'), round(dc_rolling.values[-1], 1), round(rad_rolling.values[-1], 1))),
                xshift=-2,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="green",
                    size=12
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=100,
                ay=y_s,
                arrowcolor="green",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=0,
                showarrow=True
            ),
            dict(
                x = "2020-10-30", y = 40000, # annotation point
                xref='x1',
                yref='y1',
                text="Confinement",
                xanchor="left",
                yanchor="top",
                align='center',
                font=dict(
                    color="red",
                    size=8
                ),
                showarrow=False
            ),
            dict(
                x = "2020-05-11", y = 40000, # annotation point
                xref='x1',
                yref='y1',
                text="Déconfinement",
                xanchor="left",
                yanchor="top",
                align='center',
                font=dict(
                    color="green",
                    size=8
                ),
                showarrow=False
            ),
            dict(
                x=0.5,
                y=-0.1,
                font=dict(size=10),
                xref='paper',
                yref='paper',
                text="",#'Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
                showarrow=False
            ))
        fig.write_image(PATH + "images/charts/france/departements_dashboards/{}.jpeg".format(name_fig+i), scale=1.5, width=1100, height=600)
        #plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/departements_dashboards/{}.html'.format(name_fig+i), auto_open=False)
        print("> " + name_fig)
#hosp_journ_elias("Savoie")
# +
def rea_journ(departement):
    """Plot current ICU ("réanimation") occupancy plus daily ICU admissions
    (bars + 7-day mean) for one département, save as JPEG.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    df_new_dep = df_new_departements[df_new_departements["departmentName"] == departement]
    range_x, name_fig = ["2020-03-29", last_day_plot], "rea_journ_" + departement
    title = "Personnes en <b>réanimation</b> pour Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Main area curve: people currently in ICU.
    fig.add_trace(go.Scatter(
        x = dates,
        y = df_dep["rea"],
        name = "Nouveaux décès hosp.",
        marker_color='rgb(201, 4, 4)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(201, 4, 4,0.3)",
        showlegend=False
    ))
    # Marker on the latest value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_dep["rea"].values[-1]],
        name = "<NAME>.",
        mode="markers",
        marker_color='rgb(201, 4, 4)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    # Daily ICU admissions as bars.
    fig.add_trace(go.Bar(
        x = df_new_dep["jour"],
        y = df_new_dep["incid_rea"],
        name = "Admissions",
        marker_color='rgb(201, 4, 4)',
        opacity=0.8,
        showlegend=False
    ))
    # 7-day rolling mean of ICU admissions.
    fig.add_trace(go.Scatter(
        x = df_new_dep["jour"],
        y = df_new_dep["incid_rea"].rolling(window=7).mean(),
        name = "Admissions",
        marker_color='rgb(201, 4, 4)',
        marker_size=2,
        opacity=0.8,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=10,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callouts: latest ICU occupancy and latest admissions count.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_dep["rea"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_dep["rea"].values[-1], "personnes<br>en réanimation</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(201, 4, 4)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="rgb(201, 4, 4)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),
    dict(
        x = df_new_dep["jour"].values[-1], y = (df_new_dep["incid_rea"].values[-1]), # annotation point
        xref='x1',
        yref='y1',
        text="<b>{}</b> {}".format('%d' % df_new_dep["incid_rea"].values[-1], "<br>admissions"),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color='rgb(201, 4, 4)',
            size=10
        ),
        opacity=0.8,
        ax=-20,
        ay=-40,
        arrowcolor='rgb(201, 4, 4)',
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
#rea_journ("Isère")
# +
def dc_journ(departement):
    """Plot daily hospital deaths for one département: 7-day rolling mean
    (black area) with raw daily values as faint markers. Saves a JPEG.
    """
    df_dep = df_new_departements[df_new_departements["departmentName"] == departement]
    dc_new_rolling = df_dep["incid_dc"].rolling(window=7).mean()
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "dc_journ_"+departement, [0, df_dep["incid_dc"].max()]
    title = "Décès <b>hospitaliers quotidiens</b> du Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Smoothed curve (7-day mean) with area fill.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = dc_new_rolling,
        name = "Nouveaux décès hosp.",
        marker_color='black',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(0,0,0,0.3)",
        showlegend=False
    ))
    # Marker on the latest smoothed value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [dc_new_rolling.values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    #
    # Raw daily death counts, semi-transparent markers.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = df_dep["incid_dc"],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout showing the average daily deaths over the last 7 days.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = dc_new_rolling.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % math.trunc(round(dc_new_rolling.values[-1], 2)), "décès quotidiens</b><br>en moyenne<br>du {} au {}.".format(datetime.strptime(dates[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="black",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="black",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
#dc_journ("Paris")
# +
def saturation_rea_journ(dep):
    """Bar chart of Covid19 ICU bed occupancy (% of pre-crisis ICU capacity)
    for one département. Bars are green (<40 %), orange (40–80 %) or
    red (>80 %). Saves a JPEG and returns the latest occupancy percentage.
    """
    df_dep = df_departements_lits[df_departements_lits["departmentName"] == dep]
    # Occupancy in percent: Covid ICU patients over available ICU beds.
    df_saturation = 100 * df_dep["rea"] / df_dep["LITS_y"]
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "saturation_rea_journ_"+dep, [0, df_saturation.max()]
    title = "<b>Occupation des réa.</b> par les patients Covid19 - " + dep
    fig = go.Figure()
    # Per-day colour depending on the saturation level.
    colors_sat = ["green" if val < 40 else "red" if val > 80 else "orange" for val in df_saturation.values]
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = df_saturation,
        name = "Saturation",
        marker_color=colors_sat,
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(8, 115, 191, 0.3)",
        showlegend=False
    ))
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                text='Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
            ),
        ]
    )
    # Callout on the latest value, coloured like its bar.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_saturation.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % round(df_saturation.values[-1]), " %</b> des lits de réa. occupés par<br>des patients Covid19 le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color=colors_sat[-1],
            size=20
        ),
        opacity=1,
        ax=-250,
        ay=-70,
        arrowcolor=colors_sat[-1],
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
    return df_saturation.values[-1]
# -
#for dep in departements:
#comparaison_cas_dc(dep)
# +
# Rebuild the per-trend "covidep" chart folders from scratch, then generate
# every département's chart set and assemble the 2x2 daily dashboard image.
import cv2
import shutil

covidep_dir = PATH+"images/charts/france/covidep"
# ignore_errors: on a fresh checkout the folder does not exist yet and the
# original bare rmtree would raise FileNotFoundError.
shutil.rmtree(covidep_dir, ignore_errors=True)
# exist_ok makes reruns idempotent even if a previous run was interrupted.
for sub in ["", "/lower_low", "/higher_low", "/lower_high", "/higher_high"]:
    os.makedirs(covidep_dir + sub, exist_ok=True)
# Trend buckets filled by incid_dep(); "update" is the DD/MM of the last data day.
stats = {"higher_low": [], "higher_high": [], "lower_low": [], "lower_high": [], "update": dates[-1][-2:] + "/" + dates[-1][-5:-3]}
for dep in departements:
    hosp_journ_elias(dep)
    class_dep = incid_dep(dep)  # draws the incidence chart AND classifies the trend
    stats[class_dep] += [dep]
    cas_journ(dep)
    hosp_journ(dep)
    rea_journ(dep)
    dc_journ(dep)
    hosp_comparaison_vagues(dep)
    comparaison_cas_dc(dep)
    # Stitch the four daily charts into a single 2x2 dashboard image.
    im1 = cv2.imread(PATH+'images/charts/france/departements_dashboards/cas_journ_{}.jpeg'.format(dep))
    im2 = cv2.imread(PATH+'images/charts/france/departements_dashboards/hosp_journ_{}.jpeg'.format(dep))
    im3 = cv2.imread(PATH+'images/charts/france/departements_dashboards/rea_journ_{}.jpeg'.format(dep))
    im4 = cv2.imread(PATH+'images/charts/france/departements_dashboards/dc_journ_{}.jpeg'.format(dep))
    im_haut = cv2.hconcat([im1, im2])
    im_bas = cv2.hconcat([im3, im4])
    im_totale = cv2.vconcat([im_haut, im_bas])
    cv2.imwrite(PATH+'images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg'.format(dep), im_totale)
# Persist the trend classification for the website.
with open(PATH + 'images/charts/france/covidep/stats.json', 'w') as outfile:
    json.dump(stats, outfile)
# -
# Generate the variants chart for every département, logging progress.
for departement in departements:
    print("variants " + departement)
    nombre_variants(departement)
# +
# Enrich the per-département incidence stats file with the latest ICU
# occupancy percentage (regenerating each saturation chart on the way).
stats_path = PATH_STATS + 'incidence_departements.json'
with open(stats_path, 'r') as handle:
    incidence_departements = json.load(handle)
for departement in departements:
    donnees = incidence_departements["donnees_departements"][departement]
    donnees["saturation_rea"] = saturation_rea_journ(departement)
with open(stats_path, 'w') as handle:
    json.dump(incidence_departements, handle)
# +
# Only render the most recent day of the scatter chart below (the loop
# supports rendering several past offsets when n_tot > 1).
n_tot=1
# French locale so strftime('%B') in annotations yields French month names.
# NOTE(review): requires fr_FR.UTF-8 to be installed on the host.
import locale
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
for i in range(0, n_tot):
evol_tests_deps, evol_hosp_deps = [], []
fig = go.Figure()
fig.add_shape(type="rect",
x0=-1000, y0=0, x1=0, y1=1000,
line=dict(color="orange",width=0.5, dash="dot"), fillcolor="orange", opacity=0.2,
layer="below"
)
fig.add_shape(type="rect",
x0=0, y0=-1000, x1=1000, y1=0,
line=dict(color="orange",width=0.5, dash="dot"), fillcolor="orange", opacity=0.2,
layer="below"
)
fig.add_shape(type="rect",
x0=0, y0=0, x1=1000, y1=1000,
line=dict(color="Red",width=0.5, dash="dot"), fillcolor="red", opacity=0.2,
layer="below"
)
fig.add_shape(type="rect",
x0=-1000, y0=-1000, x1=0, y1=0,
line=dict(color="red",width=0.5, dash="dot"), fillcolor="green", opacity=0.2,
layer="below"
)
### Reds
"""for (color, x_sign, y_sign, translation_x, translation_y) in [("red", "+", "+", 0, 0), ("orange", "+", "+", -200, 0), ("orange", "+", "+", 0, -200), ("green", "-", "-", 0, 0)]:
for j in range(4):
x0=j*50+translation_x
y0=0+translation_y
x1=50+j*50+translation_x
y1=50+j*50+translation_y
if y_sign == "-":
y0 = -y0
y1 = -y1
if x_sign == "-":
x0 = -x0
x1 = -x1
fig.add_shape(type="rect",
x0=x0, y0=y0, x1=x1, y1=y1,
line=dict(color="red",width=0.5, dash="dot"), fillcolor=color, opacity=0.07+0.07*j,
layer="below"
)
x0=0+translation_x
y0=0+j*50+translation_y
x1=0+j*50+translation_x
y1=50+j*50+translation_y
if y_sign == "-":
y0 = -y0
y1 = -y1
if x_sign == "-":
x0 = -x0
x1 = -x1
fig.add_shape(type="rect",
x0=x0, y0=y0, x1=x1, y1=y1,
line=dict(color="red",width=0.5, dash="dot"), fillcolor=color, opacity=0.07+0.07*j,
layer="below"
)"""
deps_vert, deps_orange, deps_rouge = [], [], []
nb_vert, nb_orange, nb_rouge = 0, 0, 0
for dep in departements:
df_incid_dep = df_incid_departements[df_incid_departements["departmentName"]==dep]
tests_dep_rolling = df_incid_dep["P"].rolling(window=7).mean().values
evol_tests_dep = (tests_dep_rolling[-1-i] - tests_dep_rolling[-8-i]) / tests_dep_rolling[-8] * 100
evol_tests_deps += [evol_tests_dep]
hosp_dep_rolling = df_new_departements[df_new_departements["departmentName"]==dep]["incid_hosp"].rolling(window=7).mean().values
evol_hosp_dep = ( hosp_dep_rolling[-1-i] - hosp_dep_rolling[-8-i]) / hosp_dep_rolling[-8] * 100
evol_hosp_deps += [evol_hosp_dep]
if (evol_tests_dep < 0) & (evol_hosp_dep<0):
color = "green"
deps_vert += [df_incid_dep["dep"].values[0]]
nb_vert += 1
elif (evol_tests_dep > 0) & (evol_hosp_dep > 0):
color = "red"
deps_rouge += [df_incid_dep["dep"].values[0]]
nb_rouge += 1
else:
color = "orange"
deps_orange += [df_incid_dep["dep"].values[0]]
nb_orange += 1
fig.add_trace(go.Scatter(
x = [evol_tests_dep],
y = [evol_hosp_dep],
name = dep,
text=["<b>"+df_incid_dep["dep"].values[0]+"</b>"],
textfont=dict(size=10),
marker=dict(size=15,
color = color,
line=dict(width=0.3,
color='DarkSlateGrey')),
line_width=8,
opacity=0.8,
fill='tozeroy',
mode='markers+text',
fillcolor="rgba(8, 115, 191, 0.3)",
textfont_color="white",
showlegend=False,
textposition="middle center"
))
def make_string_deps(deps_list):
deps_list = sorted(deps_list)
list_string = [""]
for idx,dep in enumerate(deps_list):
list_string[-1] += dep
if (idx==len(deps_list)-1) or (len(list_string[-1])/150 >= 1):
list_string += [""]
else:
list_string[-1] += ", "
return_string=""
for idx,liste in enumerate(list_string):
return_string += liste
if idx < len(list_string)-1:
return_string += "<br>"
if len(return_string)==0:
return_string = "aucun"
return return_string
# Build the HTML legend listing the départements of each colour category.
liste_deps_str_vert = f"<span style='color: green;'>Vert ({nb_vert})</span> : {make_string_deps(deps_vert)}<br>"
liste_deps_str_orange = f"<span style='color: orange;'>Orange ({nb_orange})</span> : {make_string_deps(deps_orange)}<br>"
liste_deps_str_rouge = f"<span style='color: red;'>Rouge ({nb_rouge})</span> : {make_string_deps(deps_rouge)}<br>"
liste_deps_str = "".join([liste_deps_str_vert, liste_deps_str_orange, liste_deps_str_rouge])
# Add the four quadrant explanations, the data-source caption and the
# colour legend; then configure axes/title and export the chart
# (plus a stable "_0" alias and an interactive HTML file when i == 0).
fig['layout']['annotations'] += (dict(
    x=100, y=100,  # upper-right quadrant
    xref='x1', yref='y1',
    text="Les cas augmentent.<br>Les admissions à l'hôpital augmentent.",
    xanchor="center", align='center',
    font=dict(
        color="black", size=10
    ),
    showarrow=False
), dict(
    x=-50, y=-50,  # lower-left quadrant
    xref='x1', yref='y1',
    text="Les cas baissent.<br>Les admissions à l'hôpital baissent.",
    xanchor="center", align='center',
    font=dict(
        color="black", size=10
    ),
    showarrow=False
), dict(
    x=-50, y=100,  # upper-left quadrant
    xref='x1', yref='y1',
    text="Les cas baissent.<br>Les admissions à l'hôpital augmentent.",
    xanchor="center", align='center',
    font=dict(
        color="black", size=10
    ),
    showarrow=False
), dict(
    x=100, y=-50,  # lower-right quadrant
    xref='x1', yref='y1',
    text="Les cas augmentent.<br>Les admissions à l'hôpital baissent.",
    xanchor="center", align='center',
    font=dict(
        color="black", size=10
    ),
    showarrow=False
), dict(
    x=0.5,
    y=1.05,
    xref='paper',
    yref='paper',
    font=dict(size=14),
    text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow=False
),
dict(
    x=-0.08,
    y=-0.3,
    xref='paper',
    yref='paper',
    font=dict(size=14),
    align="left",
    text=liste_deps_str, showarrow=False
),)
fig.update_xaxes(title="Évolution hebdomadaire des cas positifs", range=[-100, 200], ticksuffix="%")
# BUGFIX: corrected the typo "hedbomadaire" -> "hebdomadaire" in the axis title.
fig.update_yaxes(title="Évolution hebdomadaire des admissions à l'hôpital", range=[-100, 200], ticksuffix="%")
fig.update_layout(
    title={
        'text': "<b>Évolution des cas et hospitalisations dans les départements</b> • {}".format(datetime.strptime(dates[-i-1], '%Y-%m-%d').strftime('%d %b')),
        'y': 0.95,
        'x': 0.5,
        'xanchor': 'center',
        'yanchor': 'top'},
    titlefont=dict(
        size=20),
    margin=dict(
        b=200
    ),
)
fig.write_image(PATH+"images/charts/france/evolution_deps/{}_{}.jpeg".format("evolution_deps", dates_incid[-(i+1)]), scale=3, width=1000, height=900)
if i==0:
    # Also export the latest frame under a stable "_0" name and as an
    # interactive HTML page.
    fig.write_image(PATH+"images/charts/france/evolution_deps/{}_{}.jpeg".format("evolution_deps", 0), scale=3, width=1000, height=900)
    plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/evolution_deps/evolution_deps_0.html', auto_open=False)
# +
#import glob
n_tot = 40
import cv2
# Assemble the daily scatter charts into an MP4 animation, oldest -> newest.
for (folder, n, fps) in [("evolution_deps", n_tot, 3)]:
    img_array = []
    size = None
    for i in range(n-1, 0-1, -1):
        print(i)
        try:
            img = cv2.imread((PATH + "images/charts/france/{}/evolution_deps_{}.jpeg").format(folder, dates_incid[-(i+1)]))
            # cv2.imread returns None for a missing file; .shape then raises
            # AttributeError, handled as "image manquante" below.
            height, width, layers = img.shape
            size = (width, height)
            img_array.append(img)
            # BUGFIX: i runs from n-1 down to 0, so the original conditions
            # (i == -n, i == -1) could never be true and the frame-hold logic
            # was dead code.
            if i == n-1:
                # Hold the first (oldest) frame a little longer.
                for k in range(4):
                    img_array.append(img)
            if i == 0:
                # Hold the final (most recent) frame noticeably longer.
                for k in range(12):
                    img_array.append(img)
        except AttributeError:
            print("image manquante")
    if size is None:
        # No image could be read at all: nothing to encode for this folder.
        continue
    out = cv2.VideoWriter(PATH + 'images/charts/france/{}/evolution_deps.mp4'.format(folder), cv2.VideoWriter_fourcc(*'MP4V'), fps, size)
    for frame in img_array:
        out.write(frame)
    out.release()
    try:
        import subprocess
        # Re-encode with ffmpeg for a smaller, web-friendly file, then remove
        # the intermediate video.
        subprocess.run(["ffmpeg", "-y", "-i", PATH + "images/charts/france/{}/evolution_deps.mp4".format(folder), PATH + "images/charts/france/{}/evolution_deps_opti.mp4".format(folder)])
        subprocess.run(["rm", PATH + "images/charts/france/{}/evolution_deps.mp4".format(folder)])
    except Exception:
        print("error conversion h265")
# -
"""for idx,dep in enumerate(departements):
numero_dep = df[df["departmentName"] == dep]["dep"].values[-1]
heading = "<!-- wp:heading --><h2 id=\"{}\">{}</h2><!-- /wp:heading -->\n".format(dep, dep + " (" + numero_dep + ")")
string = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg\" width=\"75%\"> </a></p>\n".format(dep, dep)
string2 = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/heatmaps_deps/heatmap_taux_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/heatmaps_deps/heatmap_taux_{}.jpeg\" width=\"60%\"> </a></p>\n".format(numero_dep, numero_dep)
string_saturation = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/saturation_rea_journ_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/saturation_rea_journ_{}.jpeg\" width=\"60%\"> </a></p>\n".format(dep, dep)
space = "<!-- wp:spacer {\"height\":50} --><div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer\"></div><!-- /wp:spacer -->"
retourmenu="<a href=\"#Menu\">Retour au menu</a>"
print(space+retourmenu+heading+string+string2+string_saturation)
"""
"""#print("<!-- wp:buttons --><div class=\"wp-block-buttons\">\n")
output = ""
for dep in departements:
numero_dep = df[df["departmentName"] == dep]["dep"].values[-1]
output+= "<a href=\"#{}\">{} ({})</a> • ".format(dep, dep, numero_dep)
#print(output[:-2])
"""
#print("<!-- /wp:buttons -->")
|
src/france/covid19_departements_dashboards.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MicroPython - USB
# language: micropython
# name: micropython
# ---
%serialconnect /dev/ttyUSB0 --baudrate=115200 --user='micro' --password='<PASSWORD>' --wait=0
%lsmagic
import os
print(os.listdir())
|
resources/example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Out-of-core Learning - Large Scale Text Classification for Sentiment Analysis
# ## Scalability Issues
# The `sklearn.feature_extraction.text.CountVectorizer` and `sklearn.feature_extraction.text.TfidfVectorizer` classes suffer from a number of scalability issues that all stem from the internal usage of the `vocabulary_` attribute (a Python dictionary) used to map the unicode string feature names to the integer feature indices.
#
# The main scalability issues are:
#
# - **Memory usage of the text vectorizer**: all the string representations of the features are loaded in memory
# - **Parallelization problems for text feature extraction**: the `vocabulary_` would be a shared state: complex synchronization and overhead
# - **Impossibility to do online or out-of-core / streaming learning**: the `vocabulary_` needs to be learned from the data: its size cannot be known before making one pass over the full dataset
#
#
# To better understand the issue let's have a look at how the `vocabulary_` attribute works. At `fit` time the tokens of the corpus are uniquely identified by an integer index and this mapping is stored in the vocabulary:
# +
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
# Fitting learns the vocabulary_ mapping (token -> integer column index)
# from this one-document corpus.
vectorizer.fit([
    "The cat sat on the mat.",
])
# Display the learned token -> index dictionary.
vectorizer.vocabulary_
# -
# The vocabulary is used at `transform` time to build the occurrence matrix:
# +
X = vectorizer.transform([
"The cat sat on the mat.",
"This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
# -
# Let's refit with a slightly larger corpus:
# +
vectorizer = CountVectorizer(min_df=1)
vectorizer.fit([
"The cat sat on the mat.",
"The quick brown fox jumps over the lazy dog.",
])
vectorizer.vocabulary_
# -
# The `vocabulary_` grows (logarithmically) with the size of the training corpus. Note that we could not have built the vocabularies in parallel on the 2 text documents as they share some words, hence this would require some kind of shared data structure or synchronization barrier which is complicated to set up, especially if we want to distribute the processing on a cluster.
#
# With this new vocabulary, the dimensionality of the output space is now larger:
# +
X = vectorizer.transform([
"The cat sat on the mat.",
"This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
# -
# ## The IMDb movie dataset
# To illustrate the scalability issues of the vocabulary-based vectorizers, let's load a more realistic dataset for a classical text classification task: sentiment analysis on text documents. The goal is to tell apart negative from positive movie reviews from the [Internet Movie Database](http://www.imdb.com) (IMDb).
#
# In the following sections, with a [large subset](http://ai.stanford.edu/~amaas/data/sentiment/) of movie reviews from the IMDb that has been collected by Maas et al.
#
# - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Learning Word Vectors for Sentiment Analysis. In the proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 142–150, Portland, Oregon, USA, June 2011. Association for Computational Linguistics.
#
# This dataset contains 50,000 movie reviews, which were split into 25,000 training samples and 25,000 test samples. The reviews are labeled as either negative (neg) or positive (pos). Moreover, *positive* means that a movie received >6 stars on IMDb; negative means that a movie received <5 stars, respectively.
#
#
# Assuming that the `../fetch_data.py` script was run successfully the following files should be available:
# +
import os
train_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'train')
test_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'test')
# -
# Now, let's load them into our active session via scikit-learn's `load_files` function
# +
from sklearn.datasets import load_files
train = load_files(container_path=(train_path),
categories=['pos', 'neg'])
test = load_files(container_path=(test_path),
categories=['pos', 'neg'])
# -
# <div class="alert alert-warning">
# <b>NOTE</b>:
# <ul>
# <li>
# Since the movie datasets consists of 50,000 individual text files, executing the code snippet above may take ~20 sec or longer.
# </li>
# </ul>
# </div>
# The `load_files` function loaded the datasets into `sklearn.datasets.base.Bunch` objects, which are Python dictionaries:
train.keys()
# In particular, we are only interested in the `data` and `target` arrays.
# +
import numpy as np
for label, data in zip(('TRAINING', 'TEST'), (train, test)):
print('\n\n%s' % label)
print('Number of documents:', len(data['data']))
print('\n1st document:\n', data['data'][0])
print('\n1st label:', data['target'][0])
print('\nClass names:', data['target_names'])
print('Class count:',
np.unique(data['target']), ' -> ',
np.bincount(data['target']))
# -
# As we can see above the `'target'` array consists of integers `0` and `1`, where `0` stands for negative and `1` stands for positive.
# ## The Hashing Trick
# Remember the bag of word representation using a vocabulary based vectorizer:
#
# <img src="figures/bag_of_words.svg" width="100%">
# To workaround the limitations of the vocabulary-based vectorizers, one can use the hashing trick. Instead of building and storing an explicit mapping from the feature names to the feature indices in a Python dict, we can just use a hash function and a modulus operation:
# <img src="figures/hashing_vectorizer.svg" width="100%">
# More info and reference for the original papers on the Hashing Trick in the [following site](http://www.hunch.net/~jl/projects/hash_reps/index.html) as well as a description specific to language [here](http://blog.someben.com/2013/01/hashing-lang/).
# +
from sklearn.utils.murmurhash import murmurhash3_bytes_u32
# encode for python 3 compatibility
for word in "the cat sat on the mat".encode("utf-8").split():
print("{0} => {1}".format(
word, murmurhash3_bytes_u32(word, 0) % 2 ** 20))
# -
# This mapping is completely stateless and the dimensionality of the output space is explicitly fixed in advance (here we use a modulo `2 ** 20` which means roughly 1M dimensions). This makes it possible to work around the limitations of the vocabulary based vectorizer both for parallelizability and online / out-of-core learning.
# The `HashingVectorizer` class is an alternative to the `CountVectorizer` (or `TfidfVectorizer` class with `use_idf=False`) that internally uses the murmurhash hash function:
# +
from sklearn.feature_extraction.text import HashingVectorizer
h_vectorizer = HashingVectorizer(encoding='latin-1')
h_vectorizer
# -
# It shares the same "preprocessor", "tokenizer" and "analyzer" infrastructure:
analyzer = h_vectorizer.build_analyzer()
analyzer('This is a test sentence.')
# We can vectorize our datasets into a scipy sparse matrix exactly as we would have done with the `CountVectorizer` or `TfidfVectorizer`, except that we can directly call the `transform` method: there is no need to `fit` as `HashingVectorizer` is a stateless transformer:
docs_train, y_train = train['data'], train['target']
docs_valid, y_valid = test['data'][:12500], test['target'][:12500]
docs_test, y_test = test['data'][12500:], test['target'][12500:]
# The dimension of the output is fixed ahead of time to `n_features=2 ** 20` by default (nearly 1M features) to minimize the rate of collision on most classification problem while having reasonably sized linear models (1M weights in the `coef_` attribute):
h_vectorizer.transform(docs_train)
# Now, let's compare the computational efficiency of the `HashingVectorizer` to the `CountVectorizer`:
h_vec = HashingVectorizer(encoding='latin-1')
# %timeit -n 1 -r 3 h_vec.fit(docs_train, y_train)
count_vec = CountVectorizer(encoding='latin-1')
# %timeit -n 1 -r 3 count_vec.fit(docs_train, y_train)
# As we can see, the HashingVectorizer is much faster than the CountVectorizer in this case.
# Finally, let us train a LogisticRegression classifier on the IMDb training subset:
# +
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
h_pipeline = Pipeline([
('vec', HashingVectorizer(encoding='latin-1')),
('clf', LogisticRegression(random_state=1)),
])
h_pipeline.fit(docs_train, y_train)
# -
print('Train accuracy', h_pipeline.score(docs_train, y_train))
print('Validation accuracy', h_pipeline.score(docs_valid, y_valid))
# +
import gc
del count_vec
del h_pipeline
gc.collect()
# -
# # Out-of-Core learning
# Out-of-Core learning is the task of training a machine learning model on a dataset that does not fit into memory or RAM. This requires the following conditions:
#
# - a **feature extraction** layer with **fixed output dimensionality**
# - knowing the list of all classes in advance (in this case we only have positive and negative reviews)
# - a machine learning **algorithm that supports incremental learning** (the `partial_fit` method in scikit-learn).
#
# In the following sections, we will set up a simple batch-training function to train an `SGDClassifier` iteratively.
# But first, let us load the file names into a Python list:
# +
train_path = os.path.join('datasets', 'IMDb', 'aclImdb', 'train')
train_pos = os.path.join(train_path, 'pos')
train_neg = os.path.join(train_path, 'neg')
fnames = [os.path.join(train_pos, f) for f in os.listdir(train_pos)] +\
[os.path.join(train_neg, f) for f in os.listdir(train_neg)]
fnames[:3]
# -
# Next, let us create the target label array:
y_train = np.zeros((len(fnames), ), dtype=int)
y_train[:12500] = 1
np.bincount(y_train)
# Now, we implement the `batch_train function` as follows:
# +
from sklearn.base import clone
def batch_train(clf, fnames, labels, iterations=25, batchsize=1000, random_seed=1):
    """Incrementally train a clone of `clf` on randomly drawn mini-batches.

    Parameters
    ----------
    clf : estimator supporting `partial_fit`
    fnames : list of file paths to the raw text documents
    labels : 1-d array of 0/1 targets aligned with `fnames`
    iterations : number of mini-batches to draw (sampling WITH replacement)
    batchsize : number of documents per mini-batch
    random_seed : seed for the batch sampler (reproducible runs)

    Returns
    -------
    The trained clone; the original `clf` is left untouched.
    """
    vec = HashingVectorizer(encoding='latin-1')
    idx = np.arange(labels.shape[0])
    c_clf = clone(clf)
    rng = np.random.RandomState(seed=random_seed)
    for i in range(iterations):
        rnd_idx = rng.choice(idx, size=batchsize)
        documents = []
        # BUGFIX (idiom): the inner loop originally reused `i`, shadowing the
        # outer iteration counter; a distinct name avoids the confusion.
        for doc_idx in rnd_idx:
            with open(fnames[doc_idx], 'r', encoding='latin-1') as f:
                documents.append(f.read())
        # HashingVectorizer is stateless, so transform() alone is enough;
        # `classes` must be passed on every partial_fit call.
        X_batch = vec.transform(documents)
        batch_labels = labels[rnd_idx]
        c_clf.partial_fit(X=X_batch,
                          y=batch_labels,
                          classes=[0, 1])
    return c_clf
# -
# Note that we are not using `LogisticRegression` as in the previous section, but we will use an `SGDClassifier` with a logistic cost function instead. SGD stands for `stochastic gradient descent`, an optimization algorithm that optimizes the weight coefficients iteratively sample by sample, which allows us to feed the data to the classifier chunk by chunk.
# And we train the `SGDClassifier`; using the default settings of the `batch_train` function, it will train the classifier on 25*1000=25000 documents. (Depending on your machine, this may take >2 min)
# +
from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(loss='log', random_state=1)
sgd = batch_train(clf=sgd,
fnames=fnames,
labels=y_train)
# -
# Eventually, let us evaluate its performance:
vec = HashingVectorizer(encoding='latin-1')
sgd.score(vec.transform(docs_test), y_test)
# ### Limitations of the Hashing Vectorizer
# Using the Hashing Vectorizer makes it possible to implement streaming and parallel text classification but can also introduce some issues:
#
# - The collisions can introduce too much noise in the data and degrade prediction quality,
# - The `HashingVectorizer` does not provide "Inverse Document Frequency" reweighting (lack of a `use_idf=True` option).
# - There is no easy way to inverse the mapping and find the feature names from the feature index.
#
# The collision issues can be controlled by increasing the `n_features` parameters.
#
# The IDF weighting might be reintroduced by appending a `TfidfTransformer` instance on the output of the vectorizer. However computing the `idf_` statistic used for the feature reweighting will require to do at least one additional pass over the training set before being able to start training the classifier: this breaks the online learning scheme.
#
# The lack of inverse mapping (the `get_feature_names()` method of `TfidfVectorizer`) is even harder to workaround. That would require extending the `HashingVectorizer` class to add a "trace" mode to record the mapping of the most important features to provide statistical debugging information.
#
# In the mean time to debug feature extraction issues, it is recommended to use `TfidfVectorizer(use_idf=False)` on a small-ish subset of the dataset to simulate a `HashingVectorizer()` instance that have the `get_feature_names()` method and no collision issues.
# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>
# In our implementation of the batch_train function above, we randomly draw *k* training samples as a batch in each iteration, which can be considered as a random subsampling ***with*** replacement. Can you modify the `batch_train` function so that it iterates over the documents ***without*** replacement, i.e., that it uses each document ***exactly once*** per iteration?
# </li>
# </ul>
# </div>
# +
# # %load solutions/23_batchtrain.py
|
notebooks/23.Out-of-core_Learning_Large_Scale_Text_Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# # Simulating Data
# *Neural Time Series Data*
# ### Prerequisites
# For this chapter, you should be familiar with the following concepts and techniques:
# * Basic Python programming
# * Basic Math. **(recap your skills in Linear Algebra, Sine Waves and Euler's Formula)**
# ### Scope of this tutorial
#
# In this tutorial, you will learn the conceptual, mathematical, and implementational (via python programming) basis of time- and time-frequency-analysis of EEG recordings. Alternating between theoretical background knowledge and practical exercises you will learn the basics of how EEG Data is recorded, preprocessed and analysed. We will however only cover the **fundamental basics of EEG analysis**; but with this, you will then be prepared to dig deeper into the endless opportunities of Neural Time Series analysis.
#
#
# <div class="alert alert-block alert-warning">
# <b>General remark:</b> In order to make the most out of the exercises, we highly recommend you to exploit the interactive character of these notebooks; play around with the parameters and see how this affects your results... <span style=font-style:italic>(e.g. what happens if you use a lower transition bandwidth in your filter or if you don't extract the abs() values of the Fourier Transform?)</span> This way, it is much easier to really understand what you are doing!
# </div>
#
#
#
#
# ## 1. The Math behind EEG signals
#
# Before we are ready to work with real EEG data, we will first create artificial signals. This makes it much easier to understand the *maths* behind EEG signals, which in return will help you to understand the following analysis steps a lot better.
#
# Step by step, we will make our signal more complex until it approximates *'real'* EEG data. In the next section of this chapter, we will then start to use this knowledge in order to analyse EEG Data, recorded by a Neurobiopsychology research group of our institute.
#
# For the following exercises, we will use the signal processing toolbox from scipy. [This link](https://docs.scipy.org/doc/scipy/reference/signal.html) leads you to the documentation of the toolbox, where you can find almost all the functions that you need to solve the following tasks. Whenever you are working with a toolbox, I highly recommend to take some time to explore the corresponding documentation. It helps you to make the most of all the tools it supplies!
# !pip install mne
#run this if mne is not installed on the colab kernel
import matplotlib.pyplot as plt
import numpy as np
import random
from scipy import signal
#dont name any of your variables signal otherwise the package won't work!
# ### Simple signals
# One of the key concepts you need in order to understand the maths behind oscillatory signals (like neural signals in EEG) is the **sine wave**. Here you can find a short overview of the parameters that define a sine wave (more details have been covered in the video "Analysing Neural Time Series Data / EEG Intro" in Chapter 6.1. on studIP).
# 
# 
# With the parameters ```amplitude```,```frequency``` and ```phase```($\theta$), a sine wave can be described with the following formula:
#
# $$Asin(2*\pi ft + \theta)$$
# ### 1.1 Simple Sinewave
# With this information, we are now ready to create a simple signal as a combination of two sinusoids.
#
# For this:
# - Define a time scale of 1 second, i.e. 1000ms
# - Create THREE sinewaves with a length of 1sec: one with a frequency of 10Hz, 15Hz, and 20Hz (*for simplicity, we will for now ignore amplitude and phase, they will be used in the next step though*)
# - Add them together to create your first simple signal
# - Create a plot for each of the sinusoids
# +
t = np.linspace(0, 1, 1000, False) # 1 second
# Three unit-amplitude, zero-phase sinusoids at 10, 15 and 20 Hz.
signal_1 = np.sin(2*np.pi*10*t)
signal_2 = np.sin(2*np.pi*15*t)
signal_3 = np.sin(2*np.pi*20*t)
# The composite signal is the sample-wise sum of the three sinusoids.
signal_final = signal_1 + signal_2 + signal_3
# plot all three figures
fig, ax = plt.subplots(4, 1, sharex=True, sharey=True, figsize=(16,8))
# Plot each graph
ax[0].plot(t, signal_1)
ax[0].set_title('10 Hz sinusoid', fontsize = 14)
ax[1].plot(t, signal_2)
ax[1].set_title('15 Hz sinusoid', fontsize = 14)
ax[2].plot(t, signal_3)
ax[2].set_title('20 Hz sinusoid', fontsize = 14)
ax[3].plot(t, signal_final)
ax[3].set_title('Signal composed of 10, 15 and 20 Hz Sinusoids', fontsize = 14)
plt.show()
# -
# ### 1.2 More complex signal
#
# As a next step, we want to achieve something more 'complex'.
#
# For this, we select a list of frequencies, that we want our signal to be composed of, and define their amplitudes and phases. The exact values that you should use for this are already predefined; but play around with them and see how your results change!
#
# With the help of these parameters:
# - Create a new, a bit more complex signal by combining the resulting sinusoids (you should get 6 sinusoids with the respective ```freq```, ```amplit``` and ```phase```)
# - To make it more realistic, create some random Gaussian noise with the same length and add it to your signal
# - Then plot both, the clean and the noisy signal
#
# <div class="alert alert-block alert-warning">
# <b>The Nyquist Sampling Theorem states:</b> In order to prevent distortions of the underlying information, the minimum sampling frequency of a signal should be double the frequency of its highest frequency component (which is in our case 60Hz).
# </div>
#
# We will define it a little bit higher for our filter to properly work. But you can of course change it and see how this affects your plots (especially your filter in exercise 2.2).
# set parameters:
srate = 1000 # define sampling rate
nyq = srate/2 #nyquist frequency
freq = [3, 10, 5, 15, 35, 60] # define a list of frequencies
amplit = [5, 15, 10, 5, 7, 1] # define their amplitudes
phase = [np.pi/7, np.pi/8, np.pi, np.pi/2, -np.pi/4, np.pi/3] # and their respective phases
# +
# 1. create signal: sum of six sinusoids with the frequencies, amplitudes
# and phases defined in the previous cell.
t = np.linspace(0, 1, 1000, False) # 1 second
sig = []
for i in range(len(freq)):
    # BUGFIX: the phase list defined above is named `phase`, not `phases`;
    # the original line raised a NameError.
    sine = amplit[i] * np.sin(2*np.pi* freq[i]* t + phase[i])
    sig.append(sine)
# 2. add some random noise
# info: the third parameter defines the size (std-dev) of your noise
noise = np.random.normal(0,2.0,1000)
signal_final = sum(sig)
signal_noisy = sum(sig)+ noise
# 3. plot both figures (signal with and without noise)
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(15,8))
# add big shared axes, hide frame to share ylabel between subplots
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel('Time [s]',fontsize = 12)
plt.ylabel('Amplitude',fontsize = 12)
# Plot each graph
axs[0].plot(t, signal_final)
axs[0].set_title('Signal without noise',fontsize = 14)
axs[1].plot(t, signal_noisy)
axs[1].set_title('Noisy signal',fontsize = 14)
plt.show()
# -
# In reality, your EEG signal is roughly based on the same components: it typically contains a mixture of simultaneous neural oscillations at different frequencies plus some noise. This noise can be non-neural (caused by line noise or muscle activity); but also neural oscillations that are not of your interest can be considered as 'noise'. In order to be able to do your analysis as "clean" as possible, you want to isolate only the part of the signal that you are interested in, thereby increasing the **signal-to-noise-ratio (SNR)** of your signal. A way to do this, is by **filtering** your data - this will be the focus of the following exercises.
# ## 2. How to get rid of unwanted noise?
# ### The Fourier Transform
# Before telling you more about how EEG data can be filtered, you need to first learn about the **Fourier Transform (FT)**, which is a really useful and important mathematical tool to analyse EEG time series data (and any kind of time series data in general); with its help we can separate the different frequency components that compose our signal and thus get rid of unwanted frequencies. To get a more intuitive explanation of Fourier Transform watch this video by [3Blue1Brown](https://www.youtube.com/watch?v=spUNpyF58BY&ab_channel=3Blue1Brown).
# <span style=color:#1F618D;font-size:11pt>→ If you want to have a more thorough and more EEG-based intro, [check out this video](https://www.youtube.com/watch?v=d1Yj_7ti_IU&list=PLn0OLiymPak3lrIErlYVnIc3pGTwgt_ml&index=3) by <NAME>. Or [watch the whole playlist](https://www.youtube.com/playlist?list=PLn0OLiymPak3lrIErlYVnIc3pGTwgt_ml) for even more detailed explanations.</span>
# ### 2.1 Extracting the frequency spectrum with the FFT
# Now we are ready to apply the **fast Fourier Transform** [```fft.fft()```](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fft.html) on our signals, in order to get its frequency spectrum. Since we created the signal on our own, we can check whether it shows all the frequencies that we used to compose it.
# * We will have to compute the frequency spectrum several times, therefore it is useful to write a proper function that can compute the fourier transform of any signal and already selects the absolute part of the Fourier Transform. The explanation for why we only use the absolute values of the FT gets really mathematical; but if you are curious, I can highly recommend [this video!](https://www.youtube.com/watch?v=Nupda1rm01Y).
# * Then apply and plot the FFT of the noisy signal from 1.2. You will see a lot more, if you limit your x-axis to 100, since we are not interested in higher frequencies anyway.
#
#
# If you look at the amplitudes, you will realize that they are half the amplitudes that we predefined when creating the signal. This happens because we are only taking the absolute values of the FT-frequencies and the amplitudes for the negative frequencies have been "removed".
def getFT(sig):
    """Return the (length-normalised) amplitude spectrum of *sig*.

    The FFT is computed over the full signal, its magnitude taken, and the
    result divided by the number of samples so that the amplitudes are
    directly comparable across signals of different lengths.
    """
    spectrum = np.abs(np.fft.fft(sig))
    # normalise the frequency amplitudes by the number of time points
    return spectrum / len(sig)
# +
# compute and plot FFT of the noisy signal
frequencies = getFT(signal_noisy)
# Only the first half of the spectrum carries information for a real-valued
# signal (the second half mirrors the negative frequencies).
N = int(len(frequencies)/2)
fig, ax = plt.subplots(figsize=(8,8))
# NOTE: getFT() already normalises by the signal length, so dividing by
# len(signal_noisy) a second time here (as before) double-normalised the
# amplitudes and contradicted the half-amplitude explanation above.
ax.plot(frequencies[:N])
plt.suptitle('Fourier Transform of the Signal')
ax.set(
    title='Frequencies {} plus random Gaussian noise'.format(freq),
    xlim=(0,100),
    xlabel='Frequency (Hz)',
    ylabel='Amplitude'
)
plt.show()
print(N)
# -
# ### Filtering EEG Data
# Now that we have recovered the components of our signal with Fourier Transform, we will have a short look into how EEG data is filtered, in order to remove the noise. This knowledge will be important for the second half of the Notebook.
#
# ### 2.2 Filtering in the time-domain vs. filtering in the frequency-domain [5]
#
# In this part, we will see with some practical examples how we can filter our data in two different ways. Usually, you only filter in the frequency domain since this is computationally a lot faster. Yet, it is really useful to learn about both procedures in order to better understand the concept of filtering in general. In the video above you already got a first impression of filtering in the frequency domain. In order to better understand its time-domain equivalent, you need to first learn about the process of convolution, i.e. the (mathematical) procedure of applying your filter to your data in the time domain. This should however not be entirely new to you, you will find some similarities to the procedure behind the Fourier Transform:
#
# <div class="alert alert-block alert-success">
# <b>Convolution:</b> Convolution is used to isolate frequency-band-specific activity and to localize that frequency-band-specific activity in time.
# This is done by <b>convolving wavelets— i.e. time-limited sine waves—with EEG data.</b> As the wavelet (i.e. the convolution kernel) is dragged along the EEG data (the convolution signal): it reveals when and to what extent the EEG data contain features that look like the wavelet. When convolution is repeated on the same EEG data using wavelets of different frequencies, a time-frequency representation can be formed."
# <span style=font-style:italic>(<NAME>, "Analyzing Neural Time Series Data: Theory and Practice"</span>
#
# → If you want a more thorough and more visual explanation of convolution, I can highly recommend [this video](https://www.youtube.com/watch?v=9Hk-RAIzOaw) by <NAME>.
# </div>
#
# <div class="alert alert-block alert-success">
# <b>Convolution theorem:</b> Convolution in the time domain is the same as multiplication in the frequency domain.
# </div>
#
#
# 
#
#
# ### 2.2. a) Filter in the time domain
# According to the figure above, in order to filter our signal in the time domain, we need a windowed sinewave as a filter-kernel. The windowing helps to obtain **temporally localized frequency information**. We then convolve this wavelet with our signal, extracting the frequency bands that we want to work with.
# - First define your pass-band as 25Hz. Ideally everything above this frequency is filtered out; in reality however, we need a transition band of about 10 Hz, or a region between the pass-frequency ```f_p``` and stop-frequency ```f_s```. In this range, frequencies are only attenuated instead of being completely excluded. This is necessary in order to account for the trade-off between precision in the frequency-domain and precision in the time-domain.
# - Next, we define the gains at each frequency band: everything outside 0 and our pass-band of 25Hz should be attenuated, i.e. have a gain close to 0.
# - Using the function [```firwin2()```](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.firwin2.html) of the signal package and the parameters from above, we can now construct our filter-kernel ```h_win``` (the result should be a wavelet with a length/duration of 0.6 seconds)
# - Plot your kernel as well as its frequency spectrum. It should look like a step-function, that assigns a gain of 1 to all frequencies in our pass-band between 0 - 25Hz.
#
# Tip: Play around with the parameters of your filter (e.g. the filter's duration, its transition bandwidth or its stop- and passband, the sampling rate etc.) and see how the plots change. You can then also proceed with the whole filtering process and check out what different filters (with different parameters) do to your data. This way, you can properly understand how the different parameters are finally affecting your data.
# +
# Create a Low-pass Filter: Windowed 10-Hz transition
# 1. Define filtering parameters
filter_duration = 0.8
n = int(round(srate * filter_duration)+1) #odd number of filter coefficients for linear phase
f_p = 25. # define passband edge (Hz)
trans_bandwidth = 10.
f_s = f_p + trans_bandwidth # stopband = 35 Hz
print(f_s)
# define gains of each frequency band: pass everything up to f_p,
# attenuate everything from f_s to the Nyquist frequency
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
# 2. Compute filter kernel (windowed FIR design from the gain graph)
# NOTE(review): newer SciPy versions replaced the `nyq` argument of
# firwin2 with `fs` (= 2 * nyq) -- confirm against the installed version.
h_win = signal.firwin2(n, freq, gain, nyq=nyq)
# 3. Compute frequency spectrum of the filter
frequencies = getFT(h_win)
# 4. Plot filter in time and in frequency domain
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,8))
plt.subplots_adjust(hspace=0.5)
time = np.linspace(-1, 1, len(h_win))
ax1.plot(time,h_win)
ax1.set_title('Filter Coefficients, filter-duration = {}s'. format(filter_duration))
ax1.set_xlabel('Time [s]')
ax2.plot(frequencies[:100])
ax2.set_title('Frequency Response')
ax2.set_ylabel('Gain')
ax2.set_xlabel('Frequency [Hz]')
plt.show()
# -
# use inbuilt mne-function to plot filter characteristics (a lot more detailed)
import mne
flim = (1., nyq) # frequency limits for plotting
mne.viz.plot_filter(h_win, srate, freq, gain, 'Windowed {} Hz transition ({}s filter-duration)'.format(trans_bandwidth, filter_duration),flim=flim, compensate=True)
# Now we are ready to convolve our signal with our self-constructed FIR filter ```h_win```.
# - For this, we use the [```convolve()```](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve.html) function of the *signal* package.
# - Then plot both, the filtered and the unfiltered signal in order to see the effect of your filter.
# - Aftwards, we want to test in the frequency-spectrum of our signal whether our filter successfully attenuated the frequency-components above 25Hz. For this: compute and plot the FT of both, the filtered and the unfiltered signal.
# - In order to compare which filtering procedure is faster, record the computation-time of the time-domain convolution with the help of the magic function [```%timeit```](https://docs.python.org/2/library/timeit.html) (you can write an extra line for this, where you again perform your convolution).
# +
# 1. Convolve signal with the filter (time-domain low-pass filtering);
# mode='same' keeps the output the same length as the input signal
conv_time = signal.convolve(signal_noisy, h_win, mode='same')
# and calculate computation time of the convolution
# %timeit signal.convolve(signal_noisy, h_win, mode='same')
# -
# 2. Plot filtered and unfiltered signal
plt.plot(signal_noisy,'g--', label = 'unfiltered signal')
plt.plot(conv_time, 'r-',label = 'filtered signal (via convolution), filter-duration = {}s'. format(filter_duration))
plt.title('Compare signal before and after filtering (via convolution)')
plt.ylabel('Amplitude')
plt.xlabel('Time in ms')
plt.legend()
# +
# 3. Compute and plot frequency spectrum of the filtered and the unfiltered signal
# (only the first 100 bins are shown -- higher frequencies are not of interest)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,8))
plt.subplots_adjust(hspace=0.5)
ax1.plot(getFT(signal_noisy)[:100])
ax1.set_title('Frequency Spectrum of the unfiltered Signal')
ax1.set_ylabel('Gain')
ax2.plot(getFT(conv_time)[:100])
ax2.set_title('Frequency Spectrum of LP-filtered Signal (0-25Hz passband), filter-duration = {}s'. format(filter_duration))
ax2.set_ylabel('Gain')
plt.xlabel('Frequ(Hz)')
plt.show()
# -
# -
# ### 2.2 b) Filter in the frequency domain
# Filtering in the frequency domain is computationally much faster and easier. According to the convolution theorem (see above):
# - Multiply the frequency-spectrum of our filter-kernel with the frequency-spectrum of our signal.
# - In order to compare the filtered and the unfiltered signal, first compute the inverse Fourier-Transform of your filtering result with [```fft.ifft```](https://numpy.org/doc/stable/reference/generated/numpy.fft.ifft.html) (in order to translate it to the time domain) and then plot both signals, the unfiltered and filtered one, in one plot.
#
# <div class="alert alert-block alert-warning">
# <b>Note:</b> So far, every time we applied the fourier transform (FT) to our signal, we only used the absolute values of the FT-result, because this was what we were interested in. To visualize what that means, just plot the FT of any of our signals with and without the abs()-function. For the inverse FT to work properly however, we need the "whole" result of the FT, which is why we omit the abs() function this time.
# </div>
#
# - In a second plot, compare your result from filtering in the frequency domain with your convolution result from before in the time domain (from 2.2 a). According to relationship between frequency and time domain, both curves should look exactly the same!
#
# - In order to compare which filtering procedure is faster, again record the computation-time of the frequency-domain filtering with the help of the magic function [```%timeit```](https://docs.python.org/2/library/timeit.html). Compare the result to the computation time of the time-domain convolution. Which one is faster?
# +
# 1. Compute lengths of the result
# in order to make the inverse FFT return the correct number of time points:
# we need to make sure to compute the FFTs of the signal and the kernel using
# the appropriate number of time points. In other words:
# the length of the signal (=srate = 1000) plus the length of the kernel (= 801) minus one (= 1800)
# Afterwards the result has to be trimmed to its original length again (step 5)
n_signal = len(signal_noisy)
n_kernel = len(h_win)
print(n_signal, n_kernel)
nconv = n_signal + n_kernel -1
halfk = np.floor(n_kernel/2)  # half the kernel length, trimmed off each side in step 5
# 2. Compute FT of the kernel and the signal (both zero-padded to nconv points)
h_winX = np.fft.fft(h_win, nconv)
signalX = np.fft.fft(signal_noisy, nconv)
# 3. Multiply frequencies (convolution theorem: equals time-domain convolution)
result_frequencydomain = h_winX*signalX
# 4. Compute inverse FT (convert frequency-domain to the time-domain)
result_timedomain = np.fft.ifft(result_frequencydomain)
print(len(result_timedomain))
# 5. Cut the signal to original length
result_timedomain = result_timedomain[int(halfk):-int(halfk)]
# 6. Plot both signals (unfiltered and filtered) in one plot
plt.plot(result_timedomain, 'b-', label = 'Result of filter in Frequency Domain (frequency multiplication), filter-duration = {}s'. format(filter_duration))
plt.plot(signal_noisy, 'r',label = 'Unfiltered Signal')
plt.ylabel('Amplitude')
plt.xlabel('Time in ms')
plt.legend(bbox_to_anchor=[1.2, 1.3], ncol=2)
# +
# 7. Plot results of filtering in the frequency domain and filtering in the time domain
fig, ax = plt.subplots(3,1, figsize=(15,8), sharey=True)
ax[0].plot(
    signal_noisy,
    'b-',
    label = 'original noisy signal'
)
# NOTE: result_timedomain is the *frequency-domain* filtering result
# (frequency multiplication + inverse FFT) and conv_time is the
# *time-domain* convolution result -- the original legend labels were
# attached to the wrong curves and are fixed here.
ax[1].plot(
    result_timedomain,
    'b-',
    label = 'Result of filter in Frequency Domain (frequency multiplication), filter-duration = {}s'.format(filter_duration)
)
ax[2].plot(
    conv_time,
    'r:',
    label = 'Result of filter in Time Domain (convolution)'
)
plt.subplots_adjust(hspace=0.2)
# invisible full-figure axis used only to place shared x/y labels
fig.add_subplot(111, frameon=False)
plt.suptitle('Compare results of filtering in the frequency domain and filtering in the time domain')
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel('Time [ms]',fontsize = 12)
plt.ylabel('Amplitude',fontsize = 12)
plt.legend();
# -
# 8. calculate computation time
# NOTE(review): run_line_magic('timeit', ...) returns None unless '-o' is
# passed, so filtering_fd_time ends up as None -- confirm intent.
filtering_fd_time = get_ipython().run_line_magic('timeit', "conv_fd = h_winX*signalX")
# ## 3. Bonus Exercise: Time-Frequency Analysis
# FT alone does not describe the signal perfectly. For non-stationary signals (like EEG), we are interested in the evoked response of the brain. The result of the simple FT alone will not show us that. Hence, we rely on Time-Frequency Analysis in order to understand the temporal structure of the different frequencies in the signal. **Spectrograms** will do the trick! After applying a specific (time-resolved) version of the FT, they show us how much of each frequency component was present at a specific time point. [This section of one of Cohen's videos about the Fast Fourier Transform](https://youtu.be/T9x2rvdhaIE?t=118), nicely explains and visualizes how a spectrogram is computed.
# - Plot the power spectogram of the noisy signal using the function [```plt.specgram()```]
# (https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.specgram.html).
#
# - Also compare the results with the [stft](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html) method
# Plot a simple spectrogram of the noisy signal (Fs = sampling rate in Hz)
plt.specgram(signal_noisy, Fs=1000)
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.ylim(0,200)
plt.title('Power-Spectogram')
plt.show()
# ## Further reading
# In case you want to learn more about EEG processing, I highly recommend the following books:
# - <NAME>. (2014). *Analyzing neural time series data: Theory and practice*. MIT press.
# - <NAME>. (2014). *An introduction to the event-related potential technique, second edition*. Cambridge, Massachusetts: The MIT Press.
# ## Summary: What you have learned about Neural Time Series Data
# Congratulations, you've mastered the first chapter about neural time series data analysis!
#
# In this chapter you have learned:
# - The basic mathematical concepts behind EEG signals
# - How to first create an artificial signal and then decompose it into its parts with the Fourier Transform
# - How to apply this knowledge to the different filtering procedures, creating your own filter-kernel and then playing around with its parameters
# - How to filter your data in the frequency and the time domain and thereby smoothly move between the two spheres
|
Tutorials/Day-04/solutions/Data_simulation_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this script, we build CNN model2 (with Boston Train and Test Images as the training set), and predict on Toronto Images. This model could test whether increasing training images would boost model performance; the model would then generate more accurate predictions on Toronto Images. We also generated LIME predictions on some randomly selected Toronto images.
#
#
# + [markdown] _uuid="da035fe58e548e8b1b7e8e89725b9e6bc745aa7b"
# # Toronto Streetscore - CNN with Keras
# + _uuid="0d9c73ad23e6c2eae3028255ee00c3254fe66401"
import numpy as np
import pandas as pd
import os, sys
import matplotlib.pyplot as plt
import matplotlib.image as mplimg
from matplotlib.pyplot import imshow
import gc
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from keras import layers
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout
from keras.models import Sequential, Model, load_model, model_from_yaml
from keras.optimizers import SGD
from keras.constraints import maxnorm
from keras.callbacks import ModelCheckpoint
import keras
import keras.backend as K
from keras.models import Sequential
import lime
# This chunk of code was added to resolve the error "OSError: image file is truncated" in the below cell
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import warnings
warnings.simplefilter("ignore", category=DeprecationWarning)
# -
# Reading in Streetscore data from csv file, creating classification breaks based on qscore
# + _uuid="46a8839e13a14eb8d16ea6823de9927ea63d5001"
# importing the Boston streetscore data for training
df = pd.read_csv('/media/DATADRIVE/walkability/Toronto_Streetscore/bostonimage/bostonimage.csv')
df.columns =['id1', 'id2', 'id3', 'city', 'latitude',
    'longitude', 'qscore', 'safety_binary', 'Coordinates', 'objectid',
    'subdistric', 'score', 'location', 'pano_id', 'date', 'id', 'imageid']
# df.set_index('id1')
import pysal as ps
# Create a percentile-based classifier: bins qscore into classes at the
# given percentile cut points (rolling=True reuses the fitted breaks)
classifier = ps.Percentiles.make(pct=[5,15,25,35,45,55,65,75,85,95],rolling=True)
classifications = df[['qscore']].apply(classifier)
# Let's see what we have
df['percentile'] = classifications
# -
# Checking for images in the directory to ensure they exist for each orientation and setting values accordingly
df['id2'] = df["id2"].map(str) + '.jpg'
df.rename(columns = {'id2':'Image_Id'}, inplace = True)
df.rename(columns = {'percentile':'safety'}, inplace = True)
# map each id to its combined-image filename, then keep only rows whose file exists
df["Image_Id"] = df["Image_Id"].apply(lambda x: x.split('.')[0]+'_cmb.png')
df['exists']= df["Image_Id"].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/bo_combined_images/"+x))
df = df[df.exists == True]
train_df = df[["Image_Id", "safety"]]
train_df_sample = train_df#.sample(frac=.999)
train_df_sample.safety.value_counts()
# + _uuid="f46b24dbba74f22833cac6140e60348b15a8e047"
image_size = 100
image_dim = 4
# helper used to preprocess the training image set
def prepareImages(data, m, dataset, city):
    """Load the *m* images named in data['Image_Id'] into a
    (m, image_size, image_size, image_dim) array.

    Images are read as RGBA (4 channels), resized, converted to arrays
    and passed through Keras' preprocess_input.
    """
    print("Preparing images")
    batch = np.zeros((m, image_size, image_size, image_dim))
    for pos, fname in enumerate(data['Image_Id']):
        # load each image resized to image_size x image_size with 4 channels
        loaded = image.load_img(city + "/" + dataset + "/" + fname,
                                color_mode='rgba',
                                target_size=(image_size, image_size, image_dim))
        batch[pos] = preprocess_input(image.img_to_array(loaded))
        # progress report every 500 images
        if pos % 500 == 0:
            print("Processing image: ", pos + 1, ", ", fname)
    return batch
# + _uuid="6587a101b58af064af0f9c60a1070c6c8f52d45f"
# this will be used to generate labels for the training set
def prepare_labels(y):
    """One-hot encode the label series *y*.

    Returns the one-hot encoded matrix together with the fitted
    LabelEncoder, so predicted class indices can later be mapped back
    to the original safety labels.
    """
    label_encoder = LabelEncoder()
    int_labels = label_encoder.fit_transform(np.array(y))
    onehot_encoder = OneHotEncoder(sparse=False)
    encoded = onehot_encoder.fit_transform(int_labels.reshape(-1, 1))
    return encoded, label_encoder
# -
# + _uuid="4afe4128a0cd6859848c8a80686208082d647c39"
# now the training data will be imported and scaled to [0, 1]
X = prepareImages( train_df_sample, train_df_sample.shape[0], "bo_combined_images" ,"/media/DATADRIVE/walkability/Toronto_Streetscore")
X /= 255
# one-hot labels plus the fitted encoder (needed later to decode predictions)
y, label_encoder = prepare_labels(train_df_sample['safety'])
# + _uuid="675924f8863aef27cf90dc668e0a68cd609dfc1c"
# DenseNet201 trained from scratch (weights=None) on 100x100x4 RGBA inputs.
# NOTE(review): classes=9 must match the number of distinct safety labels
# produced by the percentile classifier above -- verify against y.shape[1].
model = keras.applications.densenet.DenseNet201(include_top=True, weights=None, input_tensor= layers.Input(shape=(100, 100, 4)), input_shape=(100, 100,4), pooling=None, classes=9)
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
# + _uuid="169f45e150c3a584e0f655a8eda523e0675da63a"
# this will train the CNN (33% of the data held out for validation)
history = model.fit(X, y, validation_split=0.33, epochs=125 ,batch_size=100, verbose=1)
gc.collect()
# +
# checkpoint logic to save the model with best Validation loss
#filepath="best_model_keras_cnn.h5"
#checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
#callbacks_list = [checkpoint]
# + _uuid="7bca48a1d0963cbf70685b75431435cef9499895"
# This will plot the accuracy of the CNN on the training/validation set after each epoch
# NOTE(review): newer Keras versions store these metrics under
# 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc' -- confirm against
# the installed Keras version.
plt.plot(history.history['acc'], label='Training acc')
plt.plot(history.history['val_acc'], label='Validation acc')
plt.legend()
plt.title('Training and Validation accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.show()
# -
# This will plot the loss on the training and validation set after each epoch
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
plt.title('Training and Validation loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.show()
# +
# # importing the Toronto data for testing the model
# toronto_md = pd.read_json("toronto/metadata/metadata.json")
# toronto_md.rename(columns = {'_file':'Image_Id'}, inplace = True)
# toronto_md.head()
# -
image_size = 100
image_dim = 4
# helper used to preprocess the Toronto test image set
def prepareImages_classification(image_path, data, m):
    """Load the *m* combined Toronto intersection images named in column
    *image_path* of *data* into a (m, image_size, image_size, image_dim)
    array, preprocessed for the network."""
    print("Preparing images")
    batch = np.zeros((m, image_size, image_size, image_dim))
    pos = 0
    for idx, row in data.iterrows():
        # load each image resized to image_size x image_size with 4 channels
        loaded = image.load_img("/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images/"+row[image_path],
                                color_mode='rgba',
                                target_size=(image_size, image_size, image_dim))
        batch[pos] = preprocess_input(image.img_to_array(loaded))
        # progress report every 500 images
        if pos % 500 == 0:
            print("Processing image: ", pos + 1, ", ", idx)
        pos += 1
    return batch
# +
toronto_df = pd.read_csv('/media/DATADRIVE/walkability/Toronto_Streetscore/toronto/toronto_street_intersections.csv')
# drop metadata columns that are not needed for prediction
toronto_df.drop(['intersecti', 'date_effec', 'date_expir',
    'elevation_', 'intersec_1', 'classifica', 'classifi_1', 'number_of_',
    'elevation1', 'elevatio_1', 'elevatio_2', 'elevation', 'elevatio_3',
    'height_res', 'height_r_1', 'state',
    'trans_id_c', 'trans_id_e', 'objectid', 'geom'], axis=1,inplace =True)
# toronto_df = toronto_df[toronto_df.['image_'+heading].notna()]
#toronto_df = toronto_df.dropna(subset=['image_0','image_90','image_180','image_270'],axis=0)
# derive the combined-image filename for each of the four camera headings
toronto_df['Image_Id_'+str(90)] = toronto_df['image_'+str(90)].apply(lambda x: x.split('/')[-1].split('.')[0]+'_cmb.png')
toronto_df['Image_Id_'+str(0)] = toronto_df['image_'+str(0)].apply(lambda x: x.split('/')[-1].split('.')[0]+'_cmb.png')
toronto_df['Image_Id_'+str(180)] = toronto_df['image_'+str(180)].apply(lambda x: x.split('/')[-1].split('.')[0]+'_cmb.png')
toronto_df['Image_Id_'+str(270)] = toronto_df['image_'+str(270)].apply(lambda x: x.split('/')[-1].split('.')[0]+'_cmb.png')
#toronto_df['90_exists'] = toronto_df['Image_Id_'+str(90)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_intersection_cropped/"+x))
# toronto_df = toronto_df[toronto_df['Image_Id_'+str(0)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_intersection_cropped//"+x))]
# toronto_df = toronto_df[toronto_df['Image_Id_'+str(180)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_intersection_cropped//"+x))]
# toronto_df = toronto_df[toronto_df['Image_Id_'+str(270)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_intersection_cropped/"+x))]
# flag, per heading, whether the combined image actually exists on disk
toronto_df['90_exists'] = toronto_df['Image_Id_'+str(90)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images//"+x))
toronto_df['180_exists'] = toronto_df['Image_Id_'+str(180)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images/"+x))
toronto_df['0_exists'] = toronto_df['Image_Id_'+str(0)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images/"+x))
toronto_df['270_exists'] = toronto_df['Image_Id_'+str(270)].apply(lambda x: os.path.isfile("/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images/"+x))
# -
# -
# this will preprocess the test data and predict one class per heading
heading = str(180)  # NOTE: immediately overwritten by the loop variable below
for heading in ['0','90','180','270']:
    # only rows whose combined image for this heading exists on disk
    images = toronto_df[toronto_df[heading+'_exists']== True]
    X1 = prepareImages_classification('Image_Id_'+heading,images , images.shape[0])
    X1 /= 255
    predictions = model.predict(np.array(X1), verbose=1)
    predictions1 = []
    for i, pred in enumerate(predictions):
        # take the index of the highest-probability class and decode it
        lp = label_encoder.inverse_transform(pred.argsort()[-1:][::-1])
        p2 = pred[:]  # NOTE(review): p2 is never used -- leftover?
        predictions1.append(' '.join(str(v) for v in lp))
    # NOTE(review): `images` is a slice of toronto_df, so these assignments
    # may trigger SettingWithCopyWarning; the .loc write-back below is what
    # actually updates toronto_df -- confirm.
    images['predictions_'+heading] = np.nan
    images['predictions_'+heading] =predictions1
    toronto_df.loc[images.index,'predictions_'+heading] = images['predictions_'+heading]
# +
# missing predictions become -1 so the max() below ignores them
toronto_df.fillna(-1,inplace=True)
toronto_df.predictions_0=toronto_df.predictions_0.astype(int,errors='raise')
toronto_df.predictions_90=toronto_df.predictions_90.astype(int,errors='ignore')
toronto_df.predictions_180=toronto_df.predictions_180.astype(int,errors='ignore')
toronto_df.predictions_270 =toronto_df.predictions_270.astype(int,errors='ignore')
# overall score per intersection = highest predicted class over all headings
toronto_df['highest_pred'] = toronto_df[['predictions_180','predictions_90','predictions_270','predictions_0']].max(axis=1)
toronto_df['highest_pred'].value_counts()
# -
# -
# Persist the mode
model_yaml = model.to_yaml()
with open("/media/DATADRIVE/densenet_urban_safety_0801_175_100x9.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("/media/DATADRIVE/densenet_urban_safety_0801_175_100x9.h5")
print("Saved model to disk")
yaml_file.close()
# +
# %%javascript
IPython.OutputArea.auto_scroll_threshold = 25;
toronto_df
# -
# displays up to ~26 images classified above class 5 from the test set
from PIL import Image
c = 0
torontona = toronto_df[toronto_df.predictions_180.notna()]
images = torontona[torontona.predictions_180.astype(int) >5 ]
base = '/media/DATADRIVE/walkability/Toronto_Streetscore/to_combined_intersection_images/'
for i, img in images.iterrows():
    # NOTE(review): `i` is the dataframe index, not a counter -- the i > 0
    # guard presumably skips only index 0; confirm intent.
    if i > 0:
        print("{} Classified as {}".format(img.Image_Id_180, img.predictions_180))
        img1 = Image.open(base+img.Image_Id_180)
        display(img1)
        c+=1
    if c > 25:
        break
# +
import geopandas as gpd
from shapely.geometry import Point
import matplotlib.pyplot as plt
import geopandas as gpd
import pysal as ps
# build point geometries from the intersection coordinates (WGS84)
geometry = [Point(xy) for xy in zip(toronto_df.longitude, toronto_df.latitude)]
crs = {'init': 'epsg:4326'}
gdf = gpd.GeoDataFrame(toronto_df, crs=crs, geometry=geometry)
#f, ax = plt.subplots(1,figsize=(24, 12))
#gdf[gdf.predictions_180.astype(int) >= 0].plot(markersize=.01,column='predictions_180',alpha=0.99, axes=ax, categorical =True,legend=True, cmap='RdYlBu')
# -
# export predictions as a shapefile (boolean helper columns dropped first)
gdf.drop(['90_exists','180_exists','0_exists','270_exists'],axis=1).to_file('/media/DATADRIVE/walkability/Toronto_Streetscore/to_intersection_predictions.shp')
# +
# map of the highest predicted class per intersection
f, ax = plt.subplots(1,figsize=(24, 12))
gdf.highest_pred= gdf.highest_pred.astype(int)
gdf[gdf.highest_pred >= 0].plot(markersize=.01,column='highest_pred',alpha=0.99, axes=ax, categorical =True,legend=True, cmap='RdYlBu')
# -
# -
# + _uuid="52262195fc0b8755cff78bf8c98e6116d50f79af"
# Move every downloaded image that is pixel-identical to a known "bad
# download" reference image into a reject folder.
import shutil  # was missing: shutil.move below needs it

filepath = r'/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_instersection_cropped/'
crap_folder = r'/media/DATADRIVE/walkability/Toronto_Streetscore/crap_toronto_downloads/'
test_image = '43644_90.jpg'
try:
    # Attempt to open the reference image
    testimage = Image.open(os.path.join(filepath, test_image))
except IOError:
    # Report error, and then skip to the next argument
    print("Problem opening", filepath, ":", IOError)
testimagedata = list(testimage.getdata())
# Loop through all files in the download folder
for filename in os.listdir(filepath):
    if "." not in filename:
        continue
    # if "_0_cropped" not in filename:
    #     continue
    try:
        # BUGFIX: bind to a fresh name -- the original rebound the keras
        # `image` module here, breaking later image.load_img() calls.
        candidate = Image.open(os.path.join(filepath, filename))
    except IOError:
        # Report error, and then skip to the next file
        print("Problem opening", filepath, ":", IOError)
        continue
    if list(candidate.getdata()) == testimagedata:
        print(filename)
        shutil.move(os.path.join(filepath, filename),os.path.join(crap_folder, filename))
# +
filepath = r'/media/DATADRIVE/walkability/Toronto_Streetscore/toronto/intersection_images/43644_90.jpg'
# NOTE(review): a previous cell rebinds the name `image` (keras module) to a
# PIL Image object, which would break this call -- confirm execution order.
img = image.load_img(filepath)#'/media/DATADRIVE/walkability/Toronto_Streetscore/toronto_intersection_cropped/45005_180_cropped.jpg')
img
# + _uuid="88c8d8ff98fbdb1df4218abb6bd51889e855a6fb"
# now the labels for the test data will be generated
# -
# +
# now the predictions will be saved in a csv file
# toronto_df9['predictions_90'] = np.nan
# toronto_df9['predictions_90'] =predictions1
#test_df['results_acc'] = test_df.apply(lambda x: int(x.safety) == int(x.Predictions),axis=1)
#test_df['results_diff'] = abs(test_df.safety.map(int) - test_df.Predictions.map(int))
#test_df['results_acc'].value_counts()
# -
# NOTE(review): toronto_df180 is never defined in this notebook -- this line
# will raise NameError; presumably leftover from an earlier version. Confirm.
toronto_df180.loc[45436]
# +
# %matplotlib inline
# Show the first four training images in a 2x2 grid.
xs = X[0:4]
n_cols = 2
n_rows = int(len(xs)/2)
plt.figure(figsize=(3*n_cols, 5*n_rows))
#plt.figure(figsize=(100, 100))
for i in range(len(xs)):
    plt.subplot(2, 2, i+1)
    # BUGFIX: X holds 4-channel (100, 100, 4) RGBA arrays (image_dim = 4),
    # so the original reshape to (100, 100, 3) raised a ValueError --
    # display each image as-is instead.
    plt.imshow(xs[i])
# -
from keras.models import Sequential, Model, load_model, model_from_yaml
# Load the persisted model architecture; a context manager guarantees the
# file handle is closed even if read() raises.
with open('/media/DATADRIVE/densenet_urban_safety_0801_175_100x9.yaml', 'r') as yaml_file:
    loaded_model_yaml = yaml_file.read()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("/media/DATADRIVE/densenet_urban_safety_0801_175_100x9.h5")
print("Loaded model from disk")
### Predictions on the selected input
# NOTE(review): predict_classes was removed in TF 2.6; the replacement is
# np.argmax(loaded_model.predict(input_x), axis=-1) -- confirm the installed
# Keras version still provides it.
input_x = X[0:4]
sample_prediction = loaded_model.predict_classes(input_x)
print(sample_prediction)
# + _uuid="e7af799d186a1b97b6aa325d7d576a1fb55a6c5d"
# Alternative hand-built CNN. NOTE: this rebinds `model`, replacing the
# DenseNet201 defined earlier in the notebook.
model = Sequential()
model.add(Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0', input_shape = (image_size, image_size,4)))
model.add(BatchNormalization(axis = 3, name = 'bn0'))
model.add(Activation('relu'))
model.add(Conv2D(32, (6, 6), strides = (1, 1), name = 'conv1'))
model.add(BatchNormalization(axis = 3, name = 'bn1'))
model.add(Activation('relu'))
model.add(MaxPooling2D((2, 2), name='max_pool'))
model.add(Conv2D(64, (3, 3), strides = (1,1), name="conv2"))
model.add(Activation('relu'))
model.add(AveragePooling2D((3, 3), name='avg_pool1'))
model.add(Conv2D(64, (5, 5), strides = (1,1), name="conv3"))
model.add(Activation('relu'))
model.add(AveragePooling2D((3, 3), name='avg_pool2'))
model.add(MaxPooling2D((2, 2), name='max_pool1'))
model.add(Flatten())
# dropout + max-norm constraints regularise the dense classifier head
model.add(Dropout(0.5))
model.add(Dense(500, kernel_initializer='normal', activation="relu", kernel_constraint=maxnorm(3), name='rl'))
model.add(Dropout(0.5))
# output layer sized to the number of one-hot label classes
model.add(Dense(y.shape[1], kernel_initializer='normal', activation='softmax', kernel_constraint=maxnorm(3), name='sm'))
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
#sgd = SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=False)
#model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.summary()
# -
# -
from lime import lime_image
explainer = lime_image.LimeImageExplainer()
import skimage
print(skimage.__version__)
# %%time
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(input_x[0], loaded_model.predict, top_labels=2, num_samples=1000)
from skimage.segmentation import mark_boundaries
temp, mask = explanation.get_image_and_mask(sample_prediction[0], positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
# %%time
explanation = explainer.explain_instance(input_x[1], loaded_model.predict, top_labels=2, num_samples=1000)
temp, mask = explanation.get_image_and_mask(sample_prediction[1], positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
# %%time
explanation = explainer.explain_instance(input_x[2], loaded_model.predict, top_labels=2, num_samples=1000)
temp, mask = explanation.get_image_and_mask(sample_prediction[2], positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
# %%time
explanation = explainer.explain_instance(input_x[3], loaded_model.predict, top_labels=2, num_samples=1000)
temp, mask = explanation.get_image_and_mask(sample_prediction[3], positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
# +
"""
Clean and simple Keras implementation of network architectures described in:
- (ResNet-50) [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf).
- (ResNeXt-50 32x4d) [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf).
Python 3.
"""
from keras import layers
from keras import models
#
# image dimensions
#
# NOTE(review): these three constants are never referenced below — the actual
# input shape is hard-coded as (200, 200, 4) at model construction time.
img_height = 224
img_width = 224
img_channels = 3
#
# network params
#
# Number of groups in each grouped convolution (ResNeXt "cardinality").
# Setting this to 1 recovers a plain ResNet bottleneck block.
cardinality = 32
def residual_network(x):
    """
    Build a ResNeXt-50 (32x4d) graph on top of the input tensor `x`.

    ResNeXt by default. For ResNet set `cardinality` = 1 above.

    Args:
        x: a 4-D Keras tensor (batch, height, width, channels).

    Returns:
        The 9-way logits tensor produced by the final dense layer.
    """
    def add_common_layers(y):
        # BatchNorm + LeakyReLU applied after (almost) every convolution.
        y = layers.BatchNormalization()(y)
        y = layers.LeakyReLU()(y)
        return y

    def grouped_convolution(y, nb_channels, _strides):
        # when `cardinality` == 1 this is just a standard convolution
        if cardinality == 1:
            return layers.Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides, padding='same')(y)

        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            # BUGFIX: bind the loop variables as default arguments. A bare
            # `lambda z: z[:, :, :, j * _d:j * _d + _d]` captures `j` by
            # reference (late binding), so if the stored Lambda function is
            # re-evaluated after the loop finishes (e.g. eager forward passes
            # or model deserialization), every group would slice the *last*
            # channel range instead of its own.
            group = layers.Lambda(lambda z, j=j, _d=_d: z[:, :, :, j * _d:j * _d + _d])(y)
            groups.append(layers.Conv2D(_d, kernel_size=(3, 3), strides=_strides, padding='same')(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)
        return y

    def residual_block(y, nb_channels_in, nb_channels_out, _strides=(1, 1), _project_shortcut=False):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:
        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.
        """
        shortcut = y

        # we modify the residual building block as a bottleneck design to make the network more economical
        y = layers.Conv2D(nb_channels_in, kernel_size=(1, 1), strides=(1, 1), padding='same')(y)
        y = add_common_layers(y)

        # ResNeXt (identical to ResNet when `cardinality` == 1)
        y = grouped_convolution(y, nb_channels_in, _strides=_strides)
        y = add_common_layers(y)

        y = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), strides=(1, 1), padding='same')(y)
        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)

        # identity shortcuts used directly when the input and output are of the same dimensions
        if _project_shortcut or _strides != (1, 1):
            # when the dimensions increase projection shortcut is used to match dimensions (done by 1x1 convolutions)
            # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2
            shortcut = layers.Conv2D(nb_channels_out, kernel_size=(1, 1), strides=_strides, padding='same')(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        y = layers.add([shortcut, y])

        # relu is performed right after each batch normalization,
        # expect for the output of the block where relu is performed after the adding to the shortcut
        y = layers.LeakyReLU()(y)

        return y

    # conv1
    x = layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same')(x)
    x = add_common_layers(x)

    # conv2
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    for i in range(3):
        # only the first block of the stage needs a projection shortcut
        project_shortcut = i == 0
        x = residual_block(x, 128, 256, _project_shortcut=project_shortcut)

    # conv3
    for i in range(4):
        # down-sampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 256, 512, _strides=strides)

    # conv4
    for i in range(6):
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 512, 1024, _strides=strides)

    # conv5
    for i in range(3):
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 1024, 2048, _strides=strides)

    # global pooling followed by a 9-way linear classifier head
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(9)(x)

    return x
# Instantiate the model on a 200x200, 4-channel input.
# NOTE(review): the img_height/img_width/img_channels constants above
# (224x224x3) are NOT used here — confirm which input shape is intended.
image_tensor = layers.Input(shape=(200, 200, 4))
network_output = residual_network(image_tensor)
model = models.Model(inputs=[image_tensor], outputs=[network_output])
print(model.summary())
# -
|
train_predict_urban_safety.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Settings
# +
# TF_KERAS=1 — presumably read by a dependency to select tf.keras over
# standalone Keras; confirm which package consumes it.
# %env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
# Make the project root importable from this notebook's location.
sys.path.append('..'+sep_local+'..')
print(sep_local)
# -
# Move the working directory five levels up (to the repository root).
os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
# # Dataset loading
dataset_name='Dstripes'
from training.generators.file_image_generator import create_image_lists, get_generators
# NOTE(review): `images_dir`, `validation_percentage` and `valid_format` are
# not defined in this chunk — they must come from an earlier cell; confirm.
# (`valid_imgae_formats` is the keyword name declared by the project helper.)
imgs_list = create_image_lists(
    image_dir=images_dir,
    validation_pct=validation_percentage,
    valid_imgae_formats=valid_format
)
# Dataset/model hyper-parameters.
inputs_shape= image_size=(200, 200, 3)
batch_size = 32//2          # 16 images per batch
latents_dim = 32            # size of the VAE latent space
intermediate_dim = 50       # width of the hidden dense layers
training_generator, testing_generator = get_generators(
    images_list=imgs_list,
    image_dir=images_dir,
    image_size=image_size,
    batch_size=batch_size,
    class_mode=None
)
import tensorflow as tf
# +
# Wrap the Python generators as tf.data pipelines with a fixed batch shape.
train_ds = tf.data.Dataset.from_generator(
    lambda: training_generator,
    output_types=tf.float32 ,
    output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
test_ds = tf.data.Dataset.from_generator(
    lambda: testing_generator,
    output_types=tf.float32 ,
    output_shapes=tf.TensorShape((batch_size, ) + image_size)
)
# -
# Peek at one batch to record the maximum pixel value of the first image.
_instance_scale=1.0
for data in train_ds:
    _instance_scale = float(data[0].numpy().max())
    break
_instance_scale
import numpy as np
from collections.abc import Iterable
# Flattened decoder output size: product of the image dimensions.
if isinstance(inputs_shape, Iterable):
    _outputs_shape = np.prod(inputs_shape)
_outputs_shape
# # Model's Layers definition
# +
# Encoder producing the latent mean.
menc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
             tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
             tf.keras.layers.Flatten(),
             tf.keras.layers.Dense(units=latents_dim)]
# Encoder producing the latent log-variance (same architecture as the mean encoder).
venc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
             tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
             tf.keras.layers.Flatten(),
             tf.keras.layers.Dense(units=latents_dim)]
# Decoder: latent vector back to a full-size image tensor.
dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'),
            tf.keras.layers.Dense(units=intermediate_dim, activation='relu'),
            tf.keras.layers.Dense(units=_outputs_shape),
            tf.keras.layers.Reshape(inputs_shape)]
# -
# # Model definition
# Experiment name encodes the loss: 1 * expected log-likelihood + 0.5 * SSIM.
model_name = dataset_name+'VAE_Dense_reconst_1ell_05ssmi'
experiments_dir='experiments'+sep_local+model_name
from training.autoencoding_basic.autoencoders.VAE import VAE as AE
inputs_shape=image_size
# Specification of the three sub-networks of the VAE (mean / log-variance
# encoders and the generative decoder).
variables_params = \
[
    {
        'name': 'inference_mean',
        'inputs_shape':inputs_shape,
        'outputs_shape':latents_dim,
        'layers': menc_lays
    },
    {
        'name': 'inference_logvariance',
        'inputs_shape':inputs_shape,
        'outputs_shape':latents_dim,
        'layers': venc_lays
    },
    {
        'name': 'generative',
        'inputs_shape':latents_dim,
        'outputs_shape':inputs_shape,
        'layers':dec_lays
    }
]
from utils.data_and_files.file_utils import create_if_not_exist
# Directory where model variables are saved / restored from.
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore
# +
#to restore trained model, set filepath=_restore
# -
ae = AE(
    name=model_name,
    latents_dim=latents_dim,
    batch_size=batch_size,
    variables_params=variables_params,
    filepath=None
)
from evaluation.quantitive_metrics.structural_similarity import prepare_ssim_multiscale
from statistical.losses_utilities import similarity_to_distance
from statistical.ae_losses import expected_loglikelihood_with_lower_bound as ellwlb
# Reconstruction loss on the decoder logits: expected log-likelihood plus
# 0.5 * (multiscale-SSIM converted from a similarity into a distance).
ae.compile(loss={'x_logits': lambda x_true, x_logits: ellwlb(x_true, x_logits)+ 0.5*similarity_to_distance(prepare_ssim_multiscale([ae.batch_size]+ae.get_inputs_shape()))(x_true, x_logits)})
# # Callbacks
# +
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
# -
# Stop when the training loss has not improved for 12 epochs.
es = tf.keras.callbacks.EarlyStopping(
    monitor='loss',
    min_delta=1e-12,
    patience=12,
    verbose=1,
    restore_best_weights=False
)
# Persist model variables to the restore directory during training.
ms = ModelSaver(filepath=_restore)
# Append per-epoch metrics to a CSV log file named after the model.
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, ae.name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
csv_dir
# Periodically (every 5 epochs) sample images and save them to disk.
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
# epochs=1e6 is effectively "train forever"; EarlyStopping is the real
# stopping condition.
ae.fit(
    x=train_ds,
    input_kw=None,
    steps_per_epoch=int(1e4),
    epochs=int(1e6),
    verbose=2,
    callbacks=[ es, ms, csv_log, sg],
    workers=-1,
    use_multiprocessing=True,
    validation_data=test_ds,
    validation_steps=int(1e4)
)
# # Model Evaluation
# ## inception_score
from evaluation.generativity_metrics.inception_metrics import inception_score
# Inception score of samples from the trained model (mean and spread).
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
# ## Frechet_inception_distance
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
# ## perceptual_path_length_score
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
# ## precision score
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
# ## recall score
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
# # Image Generation
# ## image reconstruction
# ### Training dataset
# %load_ext autoreload
# %autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Reconstruct one batch of *training* images and save the results to disk.
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Reconstruct one batch of *testing* images and save the results to disk.
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
# -
# ## with Randomness
from training.generators.image_generation_testing import generate_images_like_a_batch
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Generate images conditioned on a training batch (see project helper for
# the exact sampling behavior).
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Same, conditioned on a testing batch.
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
# -
# ### Complete Randomness
from training.generators.image_generation_testing import generate_images_randomly
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Sample images from random latent vectors (no conditioning batch).
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, save_dir)
# -
from training.generators.image_generation_testing import interpolate_a_batch
# +
from utils.data_and_files.file_utils import create_if_not_exist
# Interpolate between latent codes of a test batch and save the frames.
save_dir = os.path.join(experiments_dir, 'interpolate_dir')
create_if_not_exist(save_dir)
interpolate_a_batch(ae, testing_generator, save_dir)
# -
# -
|
notebooks/Dstripes/Basic/dense/VAE/DstripesVAE_Dense_reconst_1ellwlb_05ssim.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Line Plot
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
plt.style.use('ggplot')
np.random.seed(37)  # fixed seed so the error bands below are reproducible
# -
# ## Basic
# +
from scipy.special import expit as logistic
# Logistic (sigmoid) curve sampled on [-6, 6] with step 0.1.
x = np.arange(-6, 6.1, 0.1)
y = logistic(x)
s = pd.Series(y, x)
fig, ax = plt.subplots(figsize=(15, 3), dpi=100)
_ = s.plot.line(x, y, ax=ax)
_ = ax.set_title('Basic line plot')
_ = ax.set_xticks(np.arange(-6, 6.1, 1))
_ = ax.set_yticks(np.arange(0, 1.1, 0.1))
# -
# ## With error bands
# +
x = np.arange(-6, 6.1, 0.1)
y = logistic(x)
# Per-point standard deviation estimated from 10 noisy replicates of the curve.
sd = (y + (0.1 * np.random.randn(10, y.shape[0]))).std(axis=0)
y_p = y + sd
y_n = y - sd
s = pd.Series(y, x)
fig, ax = plt.subplots(figsize=(15, 3), dpi=100)
_ = s.plot.line(x, y, color='w', lw='3', ax=ax)
_ = ax.fill_between(x, y_p, y_n, color='m', alpha=0.5)  # shaded +/- 1 sd band
_ = ax.set_title('Line plot with error bands')
_ = ax.set_xticks(np.arange(-6, 6.1, 1))
_ = ax.set_yticks(np.arange(0, 1.1, 0.1))
# Hide the top/right spines, keep the bottom/left ones.
_ = ax.spines['top'].set_alpha(0)
_ = ax.spines['bottom'].set_alpha(1)
_ = ax.spines['right'].set_alpha(0)
_ = ax.spines['left'].set_alpha(1)
|
sphinx/scikit-intro/source/plot-line.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: datasci
# language: python
# name: datasci
# ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from scipy.linalg import svd
from scipy.stats import norm
from scipy.sparse.linalg import svds
import sys
import numpy as np
from graphstats.embed.svd import selectDim
from graphstats.simulations.simulations import binary_sbm
# 3-block stochastic block model: 30 nodes per block, with the given
# within/between-block edge probability matrix.
n = [30,30,30]
p = [[0.9,0.4,0.9],[0.4,0.9,0.4],[0.9,0.4,0.9]]
p = np.array(p)
bsbm = binary_sbm(n,p)
plt.matshow(bsbm)
plt.axis('off')
plt.savefig('bsbm.png')
plt.show()
# Automatic dimensionality selection — returns elbow indices, their
# likelihoods, the singular values, and per-elbow likelihood profiles.
elbows, e_l, sing, all_l = selectDim(bsbm, 8)
print(elbows+1)
plt.plot(e_l)
plt.title('likelihoods at each elbow')
plt.savefig('e_l.png')
plt.show()
plt.plot(sing)
plt.title('singular values')
plt.savefig('svals.png')
plt.show()
plt.plot(all_l[0])
plt.title('all elbows [0]')
plt.savefig('all_l_0.png')
plt.show()
print(len(all_l))
plt.plot(sing)
plt.plot(elbows,sing[elbows],'ro')  # mark detected elbows on the scree plot
plt.savefig('elbows_svals.png')
plt.show()
# Normalised likelihood profile for each successive elbow, offset so each
# profile starts where the previous elbow ended.
for elbow in range(len(elbows)):
    if elbow == 0:
        plt.plot(range(len(all_l[elbow])),all_l[elbow]/sum(all_l[elbow]), label='elbow {}'.format(elbow+1))
    else:
        plt.plot(range(elbows[elbow-1],len(all_l[elbow])+elbows[elbow-1]),all_l[elbow]/sum(all_l[elbow]), label='elbow {}'.format(elbow+1))
    plt.plot(elbows[elbow]-1,max(all_l[elbow]/sum(all_l[elbow])),'ro')
plt.legend(loc='best')
plt.savefig('elbows_all_l')
plt.show()
print(len(all_l))
import nibabel as nib
# Load a resting-state BOLD fMRI volume; keep one axial slice over time.
# NOTE(review): `file` shadows the (Python 2) builtin name; consider renaming.
file = nib.load('../921/sub-NDARAA075AMK_task-rest_bold.nii')
from matplotlib import animation, rc
from IPython.display import HTML
data = file.dataobj[:,:,0,:]
plt.imshow(data[:,:,0])
plt.show()
print(data[:,:,0].shape)
plt.plot(data[:,:,0].reshape(-1,1))
plt.show()
# Flatten each time point into a 6084-voxel row vector (420 time points).
# NOTE(review): shape is hard-coded — confirm against the NIfTI header.
flat = [data[:,:,i].reshape(-1,1) for i in range(data.shape[2])]
y=np.array([np.array(xi) for xi in flat]).reshape(420,6084)
print(y.shape)
elbows, e_l, sing, all_l = selectDim(y, 8)
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
y3 = pca.fit_transform(y)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(y3[:,0], y3[:,1], y3[:,2])
plt.show()
# Repeat the 3-D PCA embedding for several slices and overlay the scatters.
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d')
for j in [0,10,20,50]:
    data = file.dataobj[:,:,j,:]
    flat = [data[:,:,i].reshape(-1,1) for i in range(data.shape[2])]
    y=np.array([np.array(xi) for xi in flat]).reshape(data.shape[2],6084)
    pca = PCA(n_components = 3)
    y3 = pca.fit_transform(y)
    ax.scatter(y3[:,0], y3[:,1], y3[:,2], label=str(j))
plt.legend(loc='best')
plt.show()
|
notebooks/bvarjav1/928/test ZG2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -
# # Apache Spark on Dataproc - Bank Marketing Demo
#
# Demo video link - https://www.youtube.com/watch?v=RbLOi1Cgsmo&feature=emb_title
#
# 
#
# Apache Spark on Dataproc provides features to allow you to build an end-to-end solution for a wide number of use cases. This demo notebook focuses on using data from a financial services company that can build a data lake on Google Cloud, and shows how data engineers, data analysts and data scientists can work together using Apache Spark to build a model to predict if a customer campaign will result in a deposit being made.
#
# This notebook can be easily adapted to show to build a model to predict if a loan should be approved as shown in the video above.
# ## 1. Set-up Data Lake and Dataproc
#
# This demo is designed to be run on Google Cloud Dataproc. Follow these steps to create a Dataproc Cluster and then copy the notebook to your notebooks folder.
#
# These steps should be run in the [Google Cloud Shell](https://cloud.google.com/shell)
# ### 1.1 - Set env configuration
#
# ```
# export REGION=us-central1
# export PROJECT_ID=<project-id>
# ```
# ### 1.2 - Create GCS bucket
#
# #### GCS bucket for Dataproc Clusters and Hive Warehouse
#
# ```
# export BUCKET_NAME=${PROJECT_ID}-demo
#
# gsutil mb -l ${REGION} gs://${BUCKET_NAME}
# ```
# ### 1.3 - Create a Hive Metastore
#
# _Note: [Dataproc Metastore](https://cloud.google.com/blog/products/data-analytics/cloud-hive-metastore-now-available) is now available in private Alpha. This section can be replaced with Dataproc Metastore if you have access_
#
# #### 1.3.1. Create a Hive Cloud SQL database
#
# ```
# gcloud sql instances create hive-metastore-db \
# --database-version="MYSQL_5_7" \
# --activation-policy=ALWAYS \
# --region ${REGION}
# ```
#
# #### 1.3.2. Create a Hive Metastore Dataproc Cluster
#
# Once the Cloud SQL instance is created create a new Dataproc hive metastore cluster that connects to the cluster.
#
# ```
# gcloud dataproc clusters create hive-cluster \
# --async \
# --scopes sql-admin \
# --image-version 1.5 \
# --region ${REGION} \
# --initialization-actions gs://goog-dataproc-initialization-actions-${REGION}/cloud-sql-proxy/cloud-sql-proxy.sh \
# --properties hive:hive.metastore.warehouse.dir=gs://${BUCKET_NAME}/hive-warehouse \
# --metadata "hive-metastore-instance=${PROJECT_ID}:${REGION}:hive-metastore-db"
# ```
# ### 1.4 - Create Dataproc cluster with Jupyter, Rapids and GPUs
#
# #### 1.4.1. Create Cluster
#
# ```
# export CLUSTER_NAME=jupyter-gpu-cluster
# export NUM_GPUS=2
# export NUM_WORKERS=2
#
# gcloud dataproc clusters create $CLUSTER_NAME \
# --region $REGION \
# --image-version=preview-ubuntu \
# --master-machine-type n1-standard-4 \
# --num-workers $NUM_WORKERS \
# --worker-accelerator type=nvidia-tesla-t4,count=$NUM_GPUS \
# --worker-machine-type n1-highmem-4\
# --initialization-actions gs://goog-dataproc-initialization-actions-${REGION}/gpu/install_gpu_driver.sh,gs://goog-dataproc-initialization-actions-${REGION}/rapids/rapids.sh \
# --optional-components=ANACONDA,JUPYTER \
# --metadata gpu-driver-provider="NVIDIA" \
# --metadata rapids-runtime=SPARK \
# --scopes https://www.googleapis.com/auth/cloud-platform \
# --bucket $BUCKET_NAME \
# --enable-component-gateway \
# --properties="^#^spark:spark.yarn.unmanagedAM.enabled=false"
# ```
# ### 1.5 - Go to JupyterLab and copy this notebook
#
# Once your cluster is ready go follow these steps to copy this notebook:
#
# - On the Dataproc cluster UI go to web interfaces tab
# - Click on the link to open JupyterLab.
# - Go the Local Disk folder in JupyterLab
# - Click on the plus (+) button to open the launcher
# - Open terminal and run the cmd below to copy the notebook to your cluster
#
# ```
# wget https://raw.githubusercontent.com/tfayyaz/cloud-dataproc/master/notebooks/examples/Spark%20-%20Bank%20Marketing%20Demo.ipynb
# ```
#
#
# ### 1.6 - Run example code in this notebook broken down into these sections
#
# - Data Engineer - Convert CSV to Hive Tables (Parquet format)
# - Data Analyst - Run SQL on tables and plot data
# - Data Scientist - Create ML models with Spark
# - Data Ops - Deploy Spark pipeline using Dataproc Workflows
# +
# Run this to stop any currently running spark sessions
# spark.stop()
# -
# ## 2. Data Engineering - Process CSV files into Hive Tables (Parquet)
# ### Create Spark session with Hive Enabled
#
# Create a Spark session, connect to Hive Metastore and enable Hive support in Spark
# +
from pyspark.sql import SparkSession
from pyspark.sql.types import FloatType, IntegerType, StructField, StructType
# GCS warehouse location and the thrift endpoint of the external Hive
# metastore cluster created in section 1.3 above.
warehouse_location = 'gs://<project-id>-demo/hive-warehouse'
service_endpoint = 'thrift://hive-cluster-m.us-central1-f:9083'
from pyspark.sql import SparkSession
spark = SparkSession.builder \
    .appName('Spark - Data Eng Demo') \
    .config("hive.metastore.uris", service_endpoint) \
    .config("spark.sql.warehouse.dir", warehouse_location) \
    .enableHiveSupport() \
    .getOrCreate()
# -
# Check the first 1000 bytes of a file on GCS
# !gsutil cat -h -r 0-1000 gs://cloud-ml-tables-data/bank-marketing.csv
# ### Get Spark application ID
#
# This is useful for easily finding the application in the Spark History UI
spark.conf.get("spark.app.id")
# Check what databases are in the Hive Warehouse
spark.sql("""
SHOW DATABASES;
""").show()
spark.sql("DESCRIBE DATABASE EXTENDED default").show(5, False)
# Create a new database called bank_demo_db
spark.sql("""
CREATE DATABASE IF NOT EXISTS bank_demo_db;
""").show()
spark.sql("DESCRIBE DATABASE EXTENDED bank_demo_db").show(5, False)
# Load the CSV file into a Spark Dataframe
# +
df_bank_marketing = spark \
    .read \
    .option ( "inferSchema" , "true" ) \
    .option ( "header" , "true" ) \
    .csv ( "gs://cloud-ml-tables-data/bank-marketing.csv" )
df_bank_marketing.printSchema()
# -
df_bank_marketing.show(5)
# Run transformations on the data
# +
## Any transformations on your data can be done at this point
# -
# Save the dataframe as a Hive table in Parquet format
df_bank_marketing.write.mode('overwrite').format("parquet").saveAsTable("bank_demo_db.bank_marketing")
# Check that table was created
spark.sql("SHOW TABLES in bank_demo_db").show()
spark.sql("DESCRIBE TABLE EXTENDED bank_demo_db.bank_marketing").show(100, False)
# ### Compute statistics for columns in table
spark.sql("DESCRIBE TABLE EXTENDED bank_demo_db.bank_marketing Age").show()
# NOTE(review): `cols` is assigned but never used — the column list is
# repeated inline inside the ANALYZE statement below.
cols = "Age, Job, MaritalStatus"
analyzeTableSQL = "ANALYZE TABLE bank_demo_db.bank_marketing COMPUTE STATISTICS FOR COLUMNS Age, Job, MaritalStatus"
spark.sql(analyzeTableSQL).show()
cols = "Age, Job, MaritalStatus"
analyzeTableSQL = "ANALYZE TABLE bank_demo_db.bank_marketing COMPUTE STATISTICS FOR ALL COLUMNS"
spark.sql(analyzeTableSQL).show()
spark.sql("DESCRIBE TABLE EXTENDED bank_demo_db.bank_marketing Age").show()
spark.sql("DESCRIBE TABLE EXTENDED bank_demo_db.bank_marketing Job").show()
# You can now also see how many rows are in the table
spark.sql("DESCRIBE TABLE EXTENDED bank_demo_db.bank_marketing").show(100, False)
# ## 3. Data Analyst - Run SQL on tables and plot data
# Class balance of the target variable (Deposit yes/no).
spark.sql("""
SELECT
Deposit,
COUNT(*) as count
FROM bank_demo_db.bank_marketing
GROUP BY Deposit
""").show()
# Average age and balance per job category.
spark.sql("""
SELECT
Job,
AVG(Age) as avg_age,
AVG(Balance) as avg_balance
FROM bank_demo_db.bank_marketing
GROUP BY JOB
ORDER BY avg_age DESC
""").show()
# ### Spark to Pandas DataFrame
#
# Enable Apache Arrow to allow faster conversion from Spark DataFrame to Pandas DataFrame.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
df_jobs = spark.sql("""
SELECT
Job,
AVG(Age) as avg_age,
AVG(Balance) as avg_balance
FROM bank_demo_db.bank_marketing
GROUP BY JOB
ORDER BY avg_age DESC
""").toPandas()
df_jobs.head()
# ### Plot data using Pandas Plotting
import matplotlib.pyplot as plt
df_jobs.plot.bar(x='Job', y='avg_age', rot=90)
# Index by Job so subsequent plots use it as the x axis automatically.
df_jobs.set_index('Job', inplace=True)
df_jobs.head()
df_jobs.plot.bar(rot=90, subplots=True)
# ## 4. Data Scientist - Create ML models with Spark
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Create a Spark DataFrame from hive table
data = spark.sql("""
SELECT *
FROM bank_demo_db.bank_marketing
""")
# Cache the DataFrame in memory
data.cache()
data.groupBy("Deposit").count().show()
# ### Split training and test data
# Fixed seed so the 70/30 split is reproducible across runs.
(train_data, test_data) = data.randomSplit([0.7, 0.3], seed=42)
train_data.groupBy("Deposit").count().show()
train_data.count()
test_data.count()
# ## Create Spark ML Pipeline
# Train a RandomForestClassifier model
# +
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from pyspark.ml.classification import RandomForestClassifier
# All string columns except the label are index-encoded then one-hot-encoded.
categorical_cols = [field for (field, data_type) in train_data.dtypes
                    if ((data_type == "string") & (field != 'Deposit'))]
index_output_cols = [x + "_Index" for x in categorical_cols]
ohe_output_cols = [x + "_OHE" for x in categorical_cols]
categorical_string_indexer = StringIndexer(
    inputCols=categorical_cols,
    outputCols=index_output_cols,
    handleInvalid="skip")
ohe_encoder = OneHotEncoder(
    inputCols=index_output_cols,
    outputCols=ohe_output_cols)
# Numeric columns are passed through unchanged.
numeric_cols = [field for (field, data_type) in train_data.dtypes
                if (((data_type == "double") | (data_type == "int") | (data_type == "bigint"))
                    & (field != 'Deposit'))]
assembler_inputs = ohe_output_cols + numeric_cols
# Assemble all features into the single vector column Spark ML expects.
vec_assembler = VectorAssembler(
    inputCols=assembler_inputs,
    outputCol="features")
# Index the string label ("Deposit") into a numeric "label" column.
label_string_indexer = StringIndexer(). \
    setInputCol("Deposit"). \
    setOutputCol("label")
# Train a RandomForestClassifier model.
rf = RandomForestClassifier(labelCol="label", featuresCol="features")
pipeline = Pipeline(stages=[
    categorical_string_indexer,
    ohe_encoder,
    vec_assembler,
    label_string_indexer,
    rf
])
# Train model on training data
pipeline_model = pipeline.fit(train_data)
# Make predictions on test.
predictions = pipeline_model.transform(test_data)
# Select example rows to display.
predictions.select("prediction", "label", "features").show(5)
# -
# As the dataset is imbalanced a good metric is AUC: Area Under the ROC Curve. [Learn more about AUC here.](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc#AUC)
# +
from pyspark.ml.evaluation import BinaryClassificationEvaluator
binaryEvaluator = BinaryClassificationEvaluator(labelCol="label")
auc = binaryEvaluator.evaluate(predictions, {binaryEvaluator.metricName: "areaUnderROC"})
print(auc)
# +
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score, f1_score
# Collect (label, prediction) pairs to the driver so sklearn can score them.
predictions_np = np.array((predictions.select("label","prediction").collect()))
np_acc = accuracy_score(predictions_np[:,0], predictions_np[:,1])
np_f1 = f1_score(predictions_np[:,0], predictions_np[:,1])
np_precision = precision_score(predictions_np[:,0], predictions_np[:,1])
np_recall = recall_score(predictions_np[:,0], predictions_np[:,1])
np_auc = roc_auc_score(predictions_np[:,0], predictions_np[:,1])
# NOTE(review): np_acc and np_auc are computed but never printed.
print("f1:", np_f1)
print("precision:", np_precision)
print("recall:", np_recall)
# +
# import package that will generate the confusion matrix scores
from sklearn.metrics import confusion_matrix
# import packages that will help display the scores
import pandas as pd
confusion_matrix_scores = confusion_matrix(predictions_np[:,0],
                                           predictions_np[:,1],
                                           labels=[1, 0])
# display scores as a heatmap
df = pd.DataFrame(confusion_matrix_scores,
                  columns = ["Predicted True", "Predicted Not True"],
                  index = ["Actually True", "Actually Not True"])
df.head()
# -
# ## Improve model using XGBoost
# Train model using XGBoost
# +
# spark.stop()
# +
from pyspark.sql import SparkSession
warehouse_location = 'gs://dataproc-datalake-demo/hive-warehouse'
service_endpoint = 'thrift://hive-cluster-m.us-central1-f:9083'
# New session configured for GPU-accelerated XGBoost: one GPU and two cores
# per task, with the BigQuery connector on the classpath.
spark = SparkSession.builder \
    .appName('Hive and XGBoost - GPU') \
    .config('spark.jars.packages', 'com.google.cloud.spark:spark-bigquery-with-dependencies_2.12:0.17.1') \
    .config("spark.rapids.memory.gpu.pooling.enabled", "false") \
    .config("spark.executor.instances", "4") \
    .config("spark.executor.cores", "2") \
    .config("spark.task.cpus", "2") \
    .config("spark.task.resource.gpu.amount", "1") \
    .config("hive.metastore.uris", service_endpoint) \
    .config("spark.sql.warehouse.dir", warehouse_location) \
    .enableHiveSupport() \
    .getOrCreate()
# -
spark.conf.get("spark.app.id")
# +
data = spark.sql("""
SELECT *
FROM bank_demo_db.bank_marketing
""")
# Same 70/30 split and seed as the RandomForest section, for comparability.
(train_data, test_data) = data.randomSplit([0.7, 0.3], seed=42)
train_data.cache()
train_data.show(3)
test_data.cache()
test_data.show(3)
# -
# ## Create ML Pipeline with XGBoost model
# +
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from ml.dmlc.xgboost4j.scala.spark import XGBoostClassificationModel, XGBoostClassifier
# On the GPU path XGBoost consumes the string-indexed columns directly;
# no one-hot encoding or vector assembly stage is used here.
categorical_cols = [field for (field, data_type) in train_data.dtypes
                    if ((data_type == "string") & (field != 'Deposit'))]
string_index_output_cols = [x + "_Index" for x in categorical_cols]
categorical_string_indexer = StringIndexer(
    inputCols=categorical_cols,
    outputCols=string_index_output_cols,
    handleInvalid="skip")
numeric_cols = [field for (field, data_type) in train_data.dtypes
                if (((data_type == "double") | (data_type == "int") | (data_type == "bigint"))
                    & (field != 'Deposit'))]
features = string_index_output_cols + numeric_cols
label_string_indexer = StringIndexer(). \
    setInputCol("Deposit"). \
    setOutputCol("label")
# 'gpu_hist' selects the GPU histogram tree-construction algorithm.
params = {
    'treeMethod': 'gpu_hist',
    'maxDepth': 10,
    'maxLeaves': 256,
    'growPolicy': 'depthwise',
    'objective': 'binary:logistic',
    'numRound': 100,
    'numWorkers': 2
}
# For GPU you must use .setFeaturesCols(features) and pass in the list of columns that are the features
xgbc = XGBoostClassifier(**params).setLabelCol("label").setFeaturesCols(features)
# For CPU training you must use .setFeaturesCol('features') which
# expects the features to be vectorised into one column first
# xgbc = XGBoostClassifier(**params).setLabelCol('label').setFeaturesCol('features')
pipeline = Pipeline(stages=[
    categorical_string_indexer,
    label_string_indexer,
    xgbc
])
# -
# %%time
# Train model on training data
pipeline_model = pipeline.fit(train_data)
# +
# Make predictions on test
predictions = pipeline_model.transform(test_data)
predictions.select("prediction", "label").show(5)
# +
from pyspark.ml.evaluation import BinaryClassificationEvaluator
binaryEvaluator = BinaryClassificationEvaluator(labelCol="label")
auc = binaryEvaluator.evaluate(predictions, {binaryEvaluator.metricName: "areaUnderROC"})
print(auc)
# -
# ### View model stats using Numpy and Scikit-learn
#
# PySpark cannot be used to calculate the precision, recall, and f1_score for binary classification evaluation and therefore sklearn.metrics is used
# +
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score, f1_score
# Collect (label, prediction) pairs to the driver so sklearn can score them.
predictions_np = np.array((predictions.select("label","prediction").collect()))
np_acc = accuracy_score(predictions_np[:,0], predictions_np[:,1])
np_f1 = f1_score(predictions_np[:,0], predictions_np[:,1])
np_precision = precision_score(predictions_np[:,0], predictions_np[:,1])
np_recall = recall_score(predictions_np[:,0], predictions_np[:,1])
np_auc = roc_auc_score(predictions_np[:,0], predictions_np[:,1])
# NOTE(review): np_acc and np_auc are computed but never printed.
print("f1:", np_f1)
print("precision:", np_precision)
print("recall:", np_recall)
# +
# import package that will generate the confusion matrix scores
from sklearn.metrics import confusion_matrix
# import packages that will help display the scores
import pandas as pd
confusion_matrix_scores = confusion_matrix(predictions_np[:,0],
                                           predictions_np[:,1],
                                           labels=[1, 0])
# display scores as a heatmap
df = pd.DataFrame(confusion_matrix_scores,
                  columns = ["Predicted True", "Predicted Not True"],
                  index = ["Actually True", "Actually Not True"])
df.head()
# -
# ### Save model_pipeline
# +
from pyspark.ml import Pipeline, PipelineModel
# Persist the fitted pipeline to GCS so it can be reloaded for scoring.
model_path = 'gs://dataproc-datalake-examples/xgboost/pipeline_model/bank-marketing'
pipeline_model.write().overwrite().save(model_path)
# -
loaded_pipeline_model = PipelineModel.load(model_path)
# +
# Make predictions using loaded model
predictions = loaded_pipeline_model.transform(test_data)
predictions.show(5)
# -
# ### Save prediction results to a new table
predictions.write.mode('overwrite').format("parquet").saveAsTable("bank_demo_db.bank_marketing_predictions")
spark.sql("SHOW TABLES in bank_demo_db").show(10, False)
# ## 5. Data Ops - Deploy Spark pipeline using Dataproc Workflows
#
# ### Dataproc Workflows
#
# Dataproc Workflows has 2 types of workflow templates.
#
# 1. Managed cluster - Create a new cluster and delete the cluster once the job has completed.
# 2. Cluster selector - Select a pre-existing Dataproc cluster to the run the jobs (does not delete the cluster).
#
# This codelab will use option 1 to create a managed cluster workflow template.
# ### 5.1 Convert code above into 2 python files
#
# 1. Job to convert CSV to Hive Tables
# 2. Job to run predictions on Hive Tables
# +
# %%writefile job_csv_to_hive.py
## Job 1
print('Job 1')
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer
warehouse_location = 'gs://dataproc-datalake-demo/hive-warehouse'
service_endpoint = 'thrift://hive-cluster-m.us-central1-f:9083'
# Connect to the shared Hive metastore so tables are visible across clusters.
spark = SparkSession.builder \
    .appName('csv_to_hive') \
    .config("hive.metastore.uris", service_endpoint) \
    .config("spark.sql.warehouse.dir", warehouse_location) \
    .enableHiveSupport() \
    .getOrCreate()
#To-Do add code from notebook job 1
# +
# %%writefile job_xgboost_predictions.py
## Job 2
print('Job 2')
# NOTE(review): this stub references names (spark, XGBoostClassificationModel,
# PipelineModel) that are not imported here -- the SparkSession setup and
# imports still need to be added when the To-Do code is filled in.
# Load the data
data = spark.sql("""
SELECT *
FROM bank_demo_db.bank_marketing
""")
model_path = 'gs://dataproc-datalake-examples/xgboost/pipeline_model/bank-marketing'
loaded_model = XGBoostClassificationModel().load(model_path)
loaded_pipeline_model = PipelineModel.load(model_path)
#To-Do add code from notebook job 2
# -
# ### 5.2. Grant service account permission to deploy workflow from notebooks
#
# Dataproc's service accounts needs to be granted "Dataproc Editor" IAM role.
#
# Go to https://console.cloud.google.com/iam-admin/iam
#
# Look for the service account email under the name "Google Cloud Dataproc Service Agent". It will be in the format
#
# ```bash
# <EMAIL>
# ```
#
# Edit the roles and add the role "Dataproc Editor" and press save.
# +
## Alternatively run all of the below from the Google Cloud Shell
# -
# ### 5.3 Create Dataproc managed cluster workflow Template
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# echo $WORKFLOW_ID
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# export REGION=us-central1
#
# gcloud dataproc workflow-templates create $WORKFLOW_ID \
# --region $REGION
# -
# ### 5.4 Configure managed cluster for the workflow template
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
#
# export PROJECT_ID=dataproc-datalake
# export CLUSTER_NAME=spark-workflow-cluster
# export BUCKET_NAME=${PROJECT_ID}-demo
# export REGION=us-central1
# export ZONE=us-central1-f
#
# gcloud beta dataproc workflow-templates set-managed-cluster $WORKFLOW_ID \
# --cluster-name $CLUSTER_NAME \
# --region $REGION \
# --zone $ZONE \
# --image-version preview-ubuntu \
# --master-machine-type n1-standard-4 \
# --worker-machine-type n1-standard-4 \
# --optional-components=ANACONDA,JUPYTER \
# --initialization-actions gs://goog-dataproc-initialization-actions-${REGION}/rapids/rapids.sh \
# --metadata rapids-runtime=SPARK \
# --bucket $BUCKET_NAME
# -
# ### 5.5 Upload PySpark job to GCS
# + language="bash"
# export PROJECT_ID=dataproc-datalake
# export BUCKET_NAME=${PROJECT_ID}-demo
# gsutil cp job_csv_to_hive.py \
# gs://${PROJECT_ID}-demo/workflows/spark-bank-marketing/job_csv_to_hive.py
# + language="bash"
# export PROJECT_ID=dataproc-datalake
# export BUCKET_NAME=${PROJECT_ID}-demo
# gsutil cp job_xgboost_predictions.py \
# gs://${PROJECT_ID}-demo/workflows/spark-bank-marketing/job_xgboost_predictions.py
# -
# ### 5.6 Add job to workflow template
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# export REGION=us-central1
# export PROJECT_ID=dataproc-datalake
#
# gcloud dataproc workflow-templates add-job pyspark \
# gs://${PROJECT_ID}-demo/workflows/spark-bank-marketing/job_csv_to_hive.py \
# --region $REGION \
# --step-id csv_to_hive \
# --workflow-template $WORKFLOW_ID
# -
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# export REGION=us-central1
# export PROJECT_ID=dataproc-datalake
#
# gcloud dataproc workflow-templates add-job pyspark \
# gs://${PROJECT_ID}-demo/workflows/spark-bank-marketing/job_xgboost_predictions.py \
# --region $REGION \
# --start-after=csv_to_hive \
# --step-id xgboost_predictions \
# --workflow-template $WORKFLOW_ID
# -
# ### 5.7 Run workflow template
# + language="bash"
# export WORKFLOW_ID=bank-marketing-workflow
# export REGION=us-central1
#
# gcloud dataproc workflow-templates instantiate $WORKFLOW_ID \
# --region $REGION
# -
# ### 5.8 View Cluster, workflow and jobs tabs
#
# Go to the Dataproc UI and view the cluster page. You should see the new cluster spinning up
#
# Once the cluster is ready view the workflow and jobs tabs to check the progress of the jobs.
# ### 5.9 Check new predictions table was created
# ### 5.10 Schedule workflows
#
# View the guide on how to schedule Dataproc workflows
#
# https://cloud.google.com/dataproc/docs/concepts/workflows/workflow-schedule-solutions
|
notebooks/examples/Spark - Bank Marketing Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="W685A3V4qx1Q"
# # Python Functions
# -
from IPython.display import YouTubeVideo
# Embed the lecture video (id '9Os0o3wzS_I') in the notebook output.
YouTubeVideo('9Os0o3wzS_I', width=900, height=400)
# + [markdown] colab_type="text" id="iueQVnPGqx1U"
# Function is a group of related statements that perform a specific task.
# + [markdown] colab_type="text" id="wUB2YXtBqx1X"
# Functions help break our program into smaller and modular chunks. As our program grows larger and larger, functions make it more organized and manageable.
#
# It avoids repetition and makes code reusable.
# + [markdown] colab_type="text" id="LDZ6MpTwqx1a"
# # Syntax:
# + [markdown] colab_type="text" id="EyqjxCEwqx1c"
# def function_name(parameters):
#
# """
# Doc String
# """
#
# Statement(s)
# + [markdown] colab_type="text" id="k4qKflh4qx1e"
# 1. keyword "def" marks the start of function header
#
# 2. Parameters (arguments) through which we pass values to a function. These are optional
#
# 3. A colon (:) to mark the end of the function header
#
# 4. Doc string describe what the function does. This is optional
#
# 5. "return" statement to return a value from the function. This is optional
# + [markdown] colab_type="text" id="tHJhKoLgqx1h"
# # Example:
# + colab={} colab_type="code" id="Y8D1p1eAqx1i"
def print_name(name):
    """
    This function prints the name
    """
    greeting = "Hello " + str(name)
    print(greeting)
# + [markdown] colab_type="text" id="TDBBivd2qx1p"
# # Function Call
# + [markdown] colab_type="text" id="Dseptsvoqx1q"
# Once we have defined a function, we can call it from anywhere
# + colab={} colab_type="code" id="4CJ01FUpqx1t" outputId="183b4d32-4a77-4d0f-d475-2df249a8b621"
print_name('<NAME>')  # prints: Hello <NAME>
# + [markdown] colab_type="text" id="oljIs66Jqx11"
# # Doc String
# + [markdown] colab_type="text" id="7qnfLrtsqx12"
# The first string after the function header is called the docstring and is short for documentation string.
#
# + [markdown] colab_type="text" id="hQmE1qO8qx14"
# Although optional, documentation is a good programming practice, always document your code
# + [markdown] colab_type="text" id="HICWQwepqx15"
# Doc string will be written in triple quotes so that docstring can extend up to multiple lines
# + colab={} colab_type="code" id="hQCrIjvzqx18" outputId="35e42840-e897-46e0-ecf7-9ef1881534f4"
print(print_name.__doc__) # print doc string of the function
# + [markdown] colab_type="text" id="7bXpeRMXqx2D"
# # return Statement
# + [markdown] colab_type="text" id="GoR0PNUOqx2G"
# The return statement is used to exit a function and go back to the place from where it was called.
# + [markdown] colab_type="text" id="XPoedgmGqx2H"
# Syntax:
#
# return [expression]
# + [markdown] colab_type="text" id="IIkX4Y1gqx2J"
# -> return statement can contain an expression which gets evaluated and the value is returned.
#
# -> if there is no expression in the statement or the return statement itself is not present inside a function, then the function will return None Object
# + colab={} colab_type="code" id="e8JqjPNvqx2L"
def get_sum(lst):
    """
    Return the sum of all the elements in a list.

    Uses the built-in sum() instead of a manual accumulator loop;
    an empty list yields 0, matching the original behaviour.
    """
    return sum(lst)
# + colab={} colab_type="code" id="9tKmO0IQqx2T" outputId="c682c536-157f-449c-9e75-9568e9874636"
s = get_sum([1, 2, 3, 4])  # expected: 10
print(s)
# + colab={} colab_type="code" id="aJ2e31Jrqx2Y" outputId="c56387e6-ba8a-4428-dbb2-d8173e75649b"
#print doc string
print(get_sum.__doc__)
# + [markdown] colab_type="text" id="wKMztpt2qx2e"
# # How Function works in Python?
# + [markdown] colab_type="text" id="kKZtprN7qx2f"
# 
# + [markdown] colab_type="text" id="ngEFY7UVqx2i"
# # Scope and Life Time of Variables
# + [markdown] colab_type="text" id="Q_2YkKBlqx2l"
# -> Scope of a variable is the portion of a program where the variable is recognized
# + [markdown] colab_type="text" id="wwAGJbwKqx2n"
# -> variables defined inside a function is not visible from outside. Hence, they have a local scope.
# + [markdown] colab_type="text" id="oR9IZIIfqx2p"
# -> Lifetime of a variable is the period throughout which the variable exits in the memory.
#
# -> The lifetime of variables inside a function is as long as the function executes.
# + [markdown] colab_type="text" id="Sf0XTJGOqx2p"
# -> Variables are destroyed once we return from the function.
# + [markdown] colab_type="text" id="SdCba00Uqx2r"
# # Example:
# + colab={} colab_type="code" id="8jUaZMoiqx2r" outputId="fe6c64c2-4b9e-466d-eaf6-56938eae3a56"
global_var = "This is global variable"
def test_life_time():
    """
    This function test the life time of a variables
    """
    local_var = "This is local variable"
    print(local_var) #print local variable local_var
    print(global_var) #print global variable global_var
#calling function
test_life_time()
#print global variable global_var
print(global_var)
#print local variable local_var
# NOTE: local_var only exists inside test_life_time(), so this line
# deliberately raises NameError to demonstrate variable lifetime.
print(local_var)
# + [markdown] colab_type="text" id="m57Z9Aq1qx2z"
# # Python program to print Highest Common Factor (HCF) of two numbers
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" executionInfo={"elapsed": 1084, "status": "ok", "timestamp": 1544028099228, "user": {"displayName": "Applied AI Course", "photoUrl": "https://lh3.googleusercontent.com/-EsJzSyawCkQ/AAAAAAAAAAI/AAAAAAAAAWk/jhKHALKaHag/s64/photo.jpg", "userId": "06629147635963609455"}, "user_tz": -330} id="khyxyWWBqx20" outputId="c4883331-8436-4ef8-c939-0f7bc26a6229"
def computeHCF(a, b):
    """
    Return the highest common factor (HCF / GCD) of two integers.

    Uses the Euclidean algorithm, O(log min(a, b)), instead of trial
    division over every candidate up to min(a, b).

    Note: unlike the original trial-division loop (which returned 1 when
    either argument was 0 or negative), this correctly returns n for
    computeHCF(0, n) and works on the absolute values of its inputs.
    """
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    return a
num1 = 6
num2 = 36
# Expected output: H.C.F of 6 and 36 is: 6
print("H.C.F of {0} and {1} is: {2}".format(num1, num2, computeHCF(num1, num2)))
# + colab={} colab_type="code" id="en1Kc1Jjqx2_"
|
Lecture 18 Functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# 2011 circulation: one row per branch, monthly columns; LOCATION is the index.
df = pd.read_csv("/home/shared/sp18-is590dv/data/chicago-library-data/libraries-2011-circulation-by-location-1.csv",
                 index_col = "LOCATION")
# Drop address-style columns; errors="ignore" tolerates files lacking some of them.
df.drop(columns=["ADDRESS", "CITY", "ZIP CODE", "ZIP", "ZIPCODE", "YTD"], errors="ignore", inplace=True)
df = df.transpose()
df.head()
# Replace the month-name row labels with real month-end timestamps.
df.set_index(pd.date_range("2011-01-31", "2011-12-31", freq="M"), inplace=True)
df.head()
df2 = pd.read_csv("/home/shared/sp18-is590dv/data/chicago-library-data/libraries-2012-circulation-by-location-1.csv",
                  index_col = "LOCATION")
df2.drop(columns=["ADDRESS", "CITY", "ZIP CODE", "ZIP", "ZIPCODE", "YTD"], errors="ignore", inplace=True)
df2 = df2.transpose()
df2.set_index(pd.date_range("2012-01-31", "2012-12-31", freq="M"), inplace=True)
df2.head()
pd.concat([df, df2], sort=True)
# Repeat the same cleanup for every year and stack into one long frame.
dataframes = []
for year in range(2011, 2020):
    df = pd.read_csv("/home/shared/sp18-is590dv/data/chicago-library-data/libraries-%s-circulation-by-location-1.csv" % year,
                     index_col = "LOCATION")
    df.drop(columns=["ADDRESS", "CITY", "ZIP CODE", "ZIP", "ZIPCODE", "YTD"], errors="ignore", inplace=True)
    df = df.transpose()
    # Branch-name capitalisation varies between years; normalise to upper case.
    df.rename(lambda a: a.upper(), axis="columns", inplace = True)
    df.set_index(pd.date_range("%s-01-31" % year, "%s-12-31" % year, freq="M"), inplace=True)
    dataframes.append(df)
circulation = pd.concat(dataframes, sort=True)
circulation.index.name = "DATE"
circulation.reset_index(inplace = True)
circulation.to_json("output.json", orient="records", date_format="iso")
# Long format: one (LOCATION, DATE, CIRCULATION) record per row.
(circulation.unstack().
 reset_index().
 rename(columns = {'level_0': 'LOCATION', 0: 'CIRCULATION'}).
 to_json('unstacked_output.json', orient='records'))
|
week08/examples_week08.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generate spectrograms
# Script to generate spectrogram from raw audio for each call.
import pandas as pd
import os
import librosa
import sys
import numpy as np
import pickle
from pathlib import Path
import librosa.display
import matplotlib.pyplot as plt
from spectrogramming_functions import generate_mel_spectrogram, generate_stretched_mel_spectrogram, generate_freq_spectrogram, generate_ampli_spectrogram
# +
wd = os.getcwd()
# All paths are built relative to the repository root (one level up from cwd).
DATA = os.path.join(os.path.sep, str(Path(wd).parents[0]), "data", "processed")
RAW_DATA = os.path.join(os.path.sep, str(Path(wd).parents[0]), "data", "raw")
DF_FULL = os.path.join(os.path.sep, DATA, "meerkat_full_audio.pkl")
# -
# Spectrogramming parameters
FFT_WIN = 0.03 # FFT_WIN*samplerate = length of fft/n_fft (number of audio frames that go in one fft)
FFT_HOP = FFT_WIN/8 # FFT_HOP*samplerate = n of audio frames between successive ffts
N_MELS = 40 # number of mel bins
WINDOW = 'hann' # each frame of audio is windowed by a window function (its length can also be
# determined and is then padded with zeros to match n_fft. we use window_length = length of fft
FMAX = 4000 # upper frequency bound (Hz) for the mel filter bank
N_MFCC = 13 # number of MFCC coefficients to keep
# # Read in data
df = pd.read_pickle(DF_FULL)
df.shape
# # Generate spectrogram columns
df.samplerate_hz.value_counts()
# +
# One mel spectrogram per call, computed from the raw audio of each row.
spectrograms = df.apply(lambda row: generate_mel_spectrogram(row['raw_audio'],
                                                             row['samplerate_hz'],
                                                             N_MELS,
                                                             WINDOW,
                                                             FFT_WIN,
                                                             FFT_HOP,
                                                             FMAX),
                        axis=1)
df['spectrograms'] = spectrograms
# Simple denoising: subtract the per-column median from each spectrogram.
denoised = [(spectrogram - np.median(spectrogram, axis=0)) for spectrogram in df['spectrograms']]
df['denoised_spectrograms'] = denoised
# +
# Show example spectrograms
# +
df_subset = df.sample(n=25)
specs = df_subset.spectrograms.values
plt.figure(figsize=(8, 8))
for i,spec in enumerate(specs,1):
    plt.subplot(5, 5, i)
    librosa.display.specshow(spec)
    plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
# -
# Same examples rendered with plain imshow for comparison.
plt.figure(figsize=(8, 8))
for i,spec in enumerate(specs,1):
    plt.subplot(5, 5, i)
    plt.imshow(spec, interpolation='nearest', origin='lower', aspect='auto')
    plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
# -
# # Generate stretched spectrograms
# +
#df = pd.read_pickle(os.path.join(os.path.sep, DATA, "df_focal.pkl"))
# -
MAX_DURATION = 0.5  # calls are time-stretched to this common duration (s)
# +
spectrograms = df.apply(lambda row: generate_stretched_mel_spectrogram(row['raw_audio'],
                                                                       row['samplerate_hz'],
                                                                       row['duration_s'],
                                                                       N_MELS,
                                                                       WINDOW,
                                                                       FFT_WIN,
                                                                       FFT_HOP,
                                                                       MAX_DURATION),
                        axis=1)
df['stretched_spectrograms'] = spectrograms
df['stretched_denoised_spectrograms'] = [(spectrogram - np.median(spectrogram, axis=0)) for spectrogram in df['stretched_spectrograms']]
# -
# # Generate MFCCs
from preprocessing_functions import calc_zscore
mfccs = [librosa.feature.mfcc(S=spectro,n_mfcc=N_MFCC) for spectro in df['stretched_spectrograms']]
df['stretched_mfccs'] = mfccs
mfccs = [librosa.feature.mfcc(S=spectro,n_mfcc=N_MFCC) for spectro in df['spectrograms']]
df['mfccs'] = mfccs
# MFCCs from z-scored spectrograms.
specs = [calc_zscore(s) for s in df.spectrograms]
zmfccs = [librosa.feature.mfcc(S=spectro, n_mfcc=N_MFCC) for spectro in specs]
df['zmfccs'] = zmfccs
# # Generate spectrograms on frequency scale
# +
freq_spectrograms = df.apply(lambda row: generate_freq_spectrogram(row['raw_audio'],
                                                                   row['samplerate_hz'],
                                                                   WINDOW,
                                                                   FFT_WIN,
                                                                   FFT_HOP),
                             axis=1)
df['freq_spectrograms'] = freq_spectrograms
# -
shapes = [s.shape[0] for s in df.freq_spectrograms]
pd.Series(shapes).value_counts()
# Because of different sampling rate, resulting spectrograms have different range of frequency. Resolution is the same, but range is different.
# See this for explanations:
# http://localhost:8888/notebooks/Documents/MPI_work/projects/meerkat/master_thesis_analysis/meerkat_code/Spectrogram_Params.ipynb
# Crop the high-sample-rate spectrograms (721 bins) to the first 121 bins
# so every spectrogram covers the same frequency range.
freq_specs = [x[:121,:] if x.shape[0]==721 else x for x in df.freq_spectrograms]
df['freq_spectrograms'] = freq_specs
shapes = [s.shape[0] for s in df.freq_spectrograms]
pd.Series(shapes).value_counts()
# +
df_subset = df.sample(n=25)
specs = df_subset.freq_spectrograms.values
plt.figure(figsize=(8, 8))
for i,spec in enumerate(specs,1):
    plt.subplot(5, 5, i)
    #librosa.display.specshow(spec)
    plt.imshow(spec, interpolation='nearest', origin='lower', aspect='auto')
    plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
# +
specs = df_subset.spectrograms.values
plt.figure(figsize=(8, 8))
for i,spec in enumerate(specs,1):
    plt.subplot(5, 5, i)
    #librosa.display.specshow(spec)
    plt.imshow(spec, interpolation='nearest', origin='lower', aspect='auto')
    plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
# -
# # Generate spectrograms on magnitude scale
# +
ampli_specs = df.apply(lambda row: generate_ampli_spectrogram(row['raw_audio'],
                                                              row['samplerate_hz'],
                                                              WINDOW,
                                                              FFT_WIN,
                                                              FFT_HOP),
                       axis=1)
df['ampli_spectrograms'] = ampli_specs
# -
shapes = [s.shape[0] for s in df.ampli_spectrograms]
pd.Series(shapes).value_counts()
# Same 721 -> 121 bin cropping as for the frequency-scale spectrograms.
ampli_specs = [x[:121,:] if x.shape[0]==721 else x for x in df.ampli_spectrograms]
df['ampli_spectrograms'] = ampli_specs
shapes = [s.shape[0] for s in df.ampli_spectrograms]
pd.Series(shapes).value_counts()
# +
df_subset = df.sample(n=25)
specs = df_subset.ampli_spectrograms.values
plt.figure(figsize=(8, 8))
for i,spec in enumerate(specs,1):
    plt.subplot(5, 5, i)
    #librosa.display.specshow(spec)
    plt.imshow(spec, interpolation='nearest', origin='lower', aspect='auto')
    plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
# -
# # Save full labelfile
df.columns
df.to_pickle(os.path.join(os.path.sep, DATA, "meerkat_full_specs.pkl"))
|
notebooks/00_2_generate_spectrograms.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
using ClassicalOrthogonalPolynomials, Plots
# Plot the first five Chebyshev polynomials of the first kind, T_0..T_4
# (Julia is 1-based: column k holds the degree k-1 polynomial).
# blue curve is T_0(x) = 1
# red curve is T_1(x) = x
plot(ChebyshevT()[:,1:5]; legend=:bottomright)
# Column 100 of the Legendre quasi-matrix, i.e. the degree-99 polynomial.
plot(Legendre()[:,100:100]; legend=:bottomright)
|
lectures/Overview10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Read into Python
#
# Let's first read the required data from CSV file using Pandas library.
import pandas as pd
from sklearn.model_selection import train_test_split
import joblib
from sklearn.feature_extraction.text import CountVectorizer
data = pd.read_csv('google_play_store_apps_reviews_training.csv')
# Preview the first rows of the loaded reviews.
data.head()
# ## Pre-process Data
#
# We need to remove package name as it's not relevant. Then convert text to lowercase for CSV data. So, this is data pre-process stage.
def preprocess_data(data):
    """
    Drop the irrelevant package_name column and normalise review text.

    Reviews are stripped of surrounding whitespace and lower-cased so the
    vectorizer sees a consistent vocabulary.
    """
    trimmed = data.drop(columns='package_name')
    normalised_reviews = trimmed['review'].str.strip().str.lower()
    trimmed['review'] = normalised_reviews
    return trimmed
data = preprocess_data(data)
# ## Splitting Data
#
# First, separate the columns into dependent and independent variables (or features and label). Then you split those variables into train and test set.
# Split into training and testing data
x = data['review']
y = data['polarity']
# stratify=y keeps the class proportions identical in train and test splits.
x, x_test, y, y_test = train_test_split(x,y, stratify=y, test_size=0.25, random_state=42)
# Vectorize text reviews to numbers.
# Vectorize text reviews to numbers
vec = CountVectorizer(stop_words='english')
# fit_transform learns the vocabulary on the training set; the test set is
# transformed with that same vocabulary.
x = vec.fit_transform(x).toarray()
x_test = vec.transform(x_test).toarray()
# ## Model Generation
#
# After splitting and vectorizing the text reviews into numbers, we will train a multinomial Naive Bayes model on the training set and perform prediction on test set features.
# +
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
model.fit(x, y)
# -
# ## Evaluating Model
#
# After model generation, check the accuracy using actual and predicted values.
model.score(x_test, y_test)
# Then check prediction…
model.predict(vec.transform(['Love this app simply awesome!']))
# Save model
joblib.dump(model, 'model.pkl')
|
main.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
# FIX: `re` was only imported in a later cell, so the `clean` lambda below
# raised NameError at runtime; import it here where it is first used.
import re

report = "../report/report.txt"
# Column layout: header/fmt take the architecture-column width via .format(width).
header = " #%{0}s%10s%10s%10s%10s%10s%10s"
fields = "Architecture\tLambda\tDecay\tMomtm\tAct_Fn\tScore\tTime".split("\t")
fmt = "%2d%{0}s%10g%10g%10g%10s%10g%10g"
print(fields)
def print_header(width):
    """Print the table header, padding the architecture column to *width*."""
    print(header.format(width) % tuple(fields))
def print_table(lines):
    """Print one numbered result table; width adapts to the longest architecture."""
    max_width = max(map(lambda x: len(x[0]), lines))
    max_width += 1
    print_header(max_width)
    for i,line in enumerate(lines):
        print(fmt.format(max_width) % tuple([i+1] + line))
with open(report) as report:
    lines = []
    # Strip a trailing ", label" / ": label" / "| label" suffix from each field.
    clean = lambda x: re.sub(r"(\s+)?[,:|]\s+[a-z]+$", "", x)
    for line in report:
        line = line.strip()
        if not line:
            continue
        if line.startswith("##"):
            if lines:
                print_table(lines)
                lines = []
            print(line)
        elif line.startswith("arch"):
            # FIX: list() is required on Python 3 -- map() returns an
            # iterator, which does not support the item assignment below.
            line = list(map(clean, line.split('=')[1:]))
            for i in [1,2,3,5,6]:
                line[i] = float(line[i])
            lines.append(line)
        else:
            print(line)
    print_table(lines)
# -
# -
import re
# Example line from the report, split on '=' to isolate the values.
s = 'architecture=[50, 500, 2], lambda=5e-06, decay=5e-05, momentum=0.99, actfn=relu: score=0.816053507421 | time=11.3747119904'
ss = s.split('=')[1:]
def clean(t):
    """
    Strip the trailing ", <label>" / ": <label>" / "| <label>" suffix
    from one '='-split chunk, leaving only the value.

    FIX: the original was an empty stub (`return` alone, returning None);
    this implements the same regex used by the lambda in the cell above.
    """
    return re.sub(r"(\s+)?[,:|]\s+[a-z]+$", "", t)
# list() forces evaluation (map is lazy on Python 3) and shows the result.
list(map(clean, ss))
|
usc-csci-ml/hw4/src/report format.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Converting Slocum data to a standard DataFrame
from IPython.lib.pretty import pprint
import logging
# Route gutils log output to the notebook at DEBUG verbosity.
logger = logging.getLogger('gutils')
logger.handlers = [logging.StreamHandler()]
logger.setLevel(logging.DEBUG)
# +
import sys
from pathlib import Path

# Just a hack to be able to `import gutils`
sys.path.append(str(Path('.').absolute().parent.parent))

binary_folder = Path('.').absolute().parent.parent /  'gutils' / 'tests' / 'resources' / 'slocum' / 'real' / 'binary'
# -
bass_binary = binary_folder / 'bass-20160909T1733'
# !ls $bass_binary
# # SlocumMerger
#
# Convert binary (*.bd) files into ASCII
# ### Merge a subset of binary files
#
# If you know the flight/science pair you wish to merge
# +
import tempfile
from gutils.slocum import SlocumMerger

ascii_output = tempfile.mkdtemp()
merger = SlocumMerger(
    str(bass_binary),
    ascii_output,
    globs=[
        'usf-bass-2016-252-1-12.sbd',
        'usf-bass-2016-252-1-12.tbd'
    ]
)
# The merge results contain a reference to the new produced ASCII file
# as well as which binary files were involved in its creation
merge_results = merger.convert()
# -
# ### Merge all files in a directory
#
# This matches science and flight files together
# +
merger = SlocumMerger(
    str(bass_binary),
    ascii_output,
)
# The merge results contain a reference to the new produced ASCII file as well as what binary files went into it.
merge_results = merger.convert()
# -
# ### What does the ASCII file look like?
ascii_file = merge_results[0]['ascii']
# !cat $ascii_file
# # SlocumReader
#
# ### Load the ASCII file into a pandas DataFrame
# +
import json
from gutils.slocum import SlocumReader

slocum_data = SlocumReader(ascii_file)
print('Mode: ', slocum_data.mode)
print('ASCII: ', slocum_data.ascii_file)
print('Headers: ', json.dumps(slocum_data.metadata, indent=4))
# -
slocum_data.data.columns.tolist()
# Preview a few navigation/science columns of the raw frame.
slocum_data.data.head(20)[[
    'sci_m_present_time',
    'm_depth',
    'm_gps_lat',
    'm_gps_lon',
    'sci_water_pressure',
    'sci_water_temp'
]]
# ### Standardize into a glider-independent DataFrame
#
# * Lossless (adds columns)
# * Common axis names
# * Common variable names used in computations of density, salinity, etc.
# * Interpolates GPS coordinates
# * Converts to decimal degrees
# * Calculates depth from pressure if available
# * Calculates pressure from depth if need be
# * Calculates density and salinity
standard = slocum_data.standardize()
# Which columns were added?
set(standard.columns).difference(slocum_data.data.columns)
standard.head(20)[[
    't',
    'z',
    'y',
    'x',
    'pressure',
    'temperature'
]]
# ## Now the data can (should) be able to be compared and manipulated in the same way as other glider data
|
docs/notebooks/0001 - Converting Slocum data to a standard DataFrame.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learning Pandas
# # Objectives
# - Setup
# - Loading data
# - Inspect / Sort / Filter data
# - Analytical functions
# ## Functions used:
# - data.index returns information about the index.
# - data.info () returns information about the types and numbers of non-null observations in the DataFrame.
# - data.describe () returns some descriptive statistics (count, mean, std, min, 25%, 50%, 75%, max) regarding the numerical columns of the DataFrame
# - data.sort_values() orders the values ascending by default ("ascending" is True), i.e., the lowest value first
# - data.iloc allows you to select rows (and optionally columns) by position (ie, by row number).
# - idxmax or idxmin will return the index of the line where the first minimum / maximum is found.
# - grouṕ.by() group our data and calculate indicators for individual groups
# ### How Sovereign Debt are related to these variables?
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [10,8] # Set default figure size
import requests
from IPython.core.display import display, HTML #set preferences of view
# Widen the notebook container and cap the rows/columns pandas displays.
display(HTML("<style>.container {width:90% !important;}</style>"))
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 8)
# -
data= pd.read_csv('lifeexpentacy.csv')
data
# ## reviewing the data: the first five lines
# +
data.head(5)
# -
# Mean happiness score ("Life Ladder") per year and continent.
data.groupby(['Year','Continent'])['Life Ladder'].mean().reset_index().sample(5)
data.groupby(['Year','Continent'])['Life Ladder'].mean().reset_index()
# ## reviewing the data: the last five lines
data.tail(5)
# ## reviewing the data: five random lines
data.sample(5)
data.shape
data.columns
data.index
data.info()
# ## data.sort_values() orders the values ascending by default ("ascending" is True), i.e. the lowest value first
data.sort_values(by=['Country name','Year'])
data.sort_values(by='Year', ascending=True)
data.Year.head(5)
data.iloc[10]
data.set_index('Country name',inplace=True)
data.loc['United States']
data.describe()
data.sort_values(by=['Country name','Year'], ascending=[True,True]).head(5)
data.sort_values(by=['Country name','Year'], ascending=[False,True]).head(5)
data.sample(5)
# idxmax returns the index label (after set_index: the country) of each group's maximum.
data.groupby(['Year','Continent'])['Log GDP per capita'].idxmax()
data.groupby(['Year','Continent'])['Life Ladder'].idxmax()
data.groupby(['Year','Continent'])['Social support'].idxmax()
data.groupby(['Year','Continent'])['Healthy life expectancy at birth'].idxmax()
data.groupby(['Year','Continent'])['Confidence in national government'].idxmax()
# NOTE(review): this line duplicates the previous one.
data.groupby(['Year','Continent'])['Confidence in national government'].idxmax()
|
lifeexpentacy2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Analyzing the benchmark with one covariate high up the tree
# + pycharm={"name": "#%%\n"}
# Setup
import pandas as pd
import numpy as np
import os
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns

# Output directory for generated figures.
plot_path = "./plots/"
# -
# Functions for getting classification metrics for every result based on ground truth
# + pycharm={"name": "#%%\n"}
def get_scores(df, add=""):
    """
    Calculates extended binary classification summary statistics, such as TPR, TNR, youden index, f1-score, MCC

    Parameters
    ----------
    df: DataFrame
        Must contain columns tp, tn, fp, fn (suffixed with *add*)
    add: str
        Optional suffix appended to every input and output column name

    Returns
    -------
    df: DataFrame
        Same df with added columns tpr, tnr, precision, fdr, accuracy,
        youden, f1_score, mcc (each suffixed with *add*)
    """
    tp, tn, fp, fn = (df[f"{k}{add}"].astype("float64") for k in ("tp", "tn", "fp", "fn"))

    def _rate(num, den):
        # 0/0 yields NaN in pandas; undefined rates are reported as 0.
        return (num / den).fillna(0)

    recall = _rate(tp, tp + fn)
    df[f"tpr{add}"] = recall
    specificity = _rate(tn, tn + fp)
    df[f"tnr{add}"] = specificity
    ppv = _rate(tp, tp + fp)
    df[f"precision{add}"] = ppv
    df[f"fdr{add}"] = _rate(fp, tp + fp)
    df[f"accuracy{add}"] = _rate(tp + tn, tp + tn + fp + fn)
    df[f"youden{add}"] = recall + specificity - 1
    df[f"f1_score{add}"] = 2 * _rate(recall * ppv, recall + ppv)
    mcc_denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    df[f"mcc{add}"] = _rate((tp * tn) - (fp * fn), mcc_denominator)
    return df
def classify(row):
    """
    Derive per-level confusion-matrix counts for one benchmark run.

    Compares the entries reported by a model (``result_otus``) against the
    simulated ground truth (``effect_otus``) on three index ranges:
    level 1 (indices 0..p-1), level 2 (p..2p-1, whose ground truth is
    hard-coded to positions 13..23 by the simulation setup) and level 3
    (>= 2p, which contains no true effects by construction).

    Parameters
    ----------
    row: Series or Mapping
        Must provide "num_otus", "effect_otus" and "result_otus".

    Returns
    -------
    tuple of 12 ints
        (tp, tn, fp, fn, tp2, tn2, fn2, fp2, tn3, fp3, fn3, tp3) —
        note the element order of the level-2 and level-3 entries.
    """
    p = row["num_otus"]
    gt = np.zeros(p)
    gt2 = np.zeros(p)
    if type(row["effect_otus"]) == str:
        # numpy array repr like "[ 0  1  2]" -> valid python list literal
        eff = eval(row["effect_otus"].replace("[ ", "[").replace("  ", " ").replace(" ", ", "))
    else:
        eff = row["effect_otus"]
    gt[eff] = 1
    gt2[np.arange(13, 24, 1)] = 1  # level-2 ground truth fixed by the simulation setup
    pred = np.zeros(p)
    pred_2 = np.zeros(p)
    res = [int(x) for x in eval(row["result_otus"])]
    pred[[x for x in res if x < p]] = 1
    pred_2[[x-p for x in res if x >= p and x < 2*p]] = 1
    pred_3 = [x-2*p for x in res if x >= 2*p]
    # Count confusion-matrix cells directly with numpy instead of
    # sklearn.metrics.confusion_matrix(...).ravel(): ravel() only yields
    # four values when both classes occur in gt/pred, and would crash for
    # runs where ground truth or prediction is single-class.
    tp = int(np.sum((gt == 1) & (pred == 1)))
    tn = int(np.sum((gt == 0) & (pred == 0)))
    fp = int(np.sum((gt == 0) & (pred == 1)))
    fn = int(np.sum((gt == 1) & (pred == 0)))
    tp2 = int(np.sum((gt2 == 1) & (pred_2 == 1)))
    tn2 = int(np.sum((gt2 == 0) & (pred_2 == 0)))
    fp2 = int(np.sum((gt2 == 0) & (pred_2 == 1)))
    fn2 = int(np.sum((gt2 == 1) & (pred_2 == 0)))
    # Level 3 has no true positives by construction: everything found is a FP
    tn3 = p-len(pred_3)
    fp3 = len(pred_3)
    fn3 = 0
    tp3 = 0
    return tp, tn, fp, fn, tp2, tn2, fn2, fp2, tn3, fp3, fn3, tp3
# + [markdown] pycharm={"name": "#%% md\n"}
# Read benchmark data and add ground truth nodes and features. Then calculate classification metrics
# + pycharm={"name": "#%%\n"}
# Read data
# Concatenate every per-run result file ("result_df*") from the benchmark output directory
tree_agg_path = "../../../tascCODA_data/benchmarks/high_effect/results/"
tree_agg_res = []
for f in os.listdir(tree_agg_path):
    if f.startswith("result_df"):
        tree_agg_res.append(pd.read_csv(tree_agg_path + f, index_col=0))
tree_agg_df = pd.concat(tree_agg_res)
tree_agg_df
# + pycharm={"name": "#%%\n"}
# add ground truth
# Map: number of OTUs -> (true effect node indices, true effect OTU indices)
# used when simulating the benchmark data
ground_truth = {
    30: ([39], np.arange(0, 27))
}
tree_agg_df["effect_nodes"] = [ground_truth[x][0] for x in tree_agg_df["num_otus"]]
tree_agg_df["effect_otus"] = [ground_truth[x][1] for x in tree_agg_df["num_otus"]]
tree_agg_df
# + pycharm={"name": "#%%\n"}
# classification
# Unpack the 12-tuple returned by classify() for every run. The tuple is
# ordered (tp, tn, fp, fn, tp2, tn2, fn2, fp2, tn3, fp3, fn3, tp3); the
# original unpacking assumed (..., fp2, fn2, tp3, tn3, fp3, fn3), which
# swapped fp2/fn2 and misassigned all four level-3 counts. Fixed below.
classes = tree_agg_df.apply(classify, axis=1)
tree_agg_df["tp"] = [x[0] for x in classes]
tree_agg_df["tn"] = [x[1] for x in classes]
tree_agg_df["fp"] = [x[2] for x in classes]
tree_agg_df["fn"] = [x[3] for x in classes]
tree_agg_df["tp2"] = [x[4] for x in classes]
tree_agg_df["tn2"] = [x[5] for x in classes]
tree_agg_df["fn2"] = [x[6] for x in classes]
tree_agg_df["fp2"] = [x[7] for x in classes]
tree_agg_df["tn3"] = [x[8] for x in classes]
tree_agg_df["fp3"] = [x[9] for x in classes]
tree_agg_df["fn3"] = [x[10] for x in classes]
tree_agg_df["tp3"] = [x[11] for x in classes]
# Derive rates (tpr, fdr, mcc, ...) for each of the three count sets
tree_agg_df = get_scores(tree_agg_df)
tree_agg_df = get_scores(tree_agg_df, add="2")
tree_agg_df = get_scores(tree_agg_df, add="3")
tree_agg_df
# + pycharm={"name": "#%%\n"}
# add no. of effects and hamming distance
def get_num_eff(row):
    """Return how many effects a model reported for this run (nodes or OTUs)."""
    # Tree-based models report their effects on tree nodes, all others on OTUs
    column = "result_nodes" if row["model"] in ("tree_agg", "adaANCOM") else "result_otus"
    return len([int(entry) for entry in eval(row[column])])
# Number of reported effects per run. The original line accidentally also
# rebound the unrelated `classes` variable (num_eff = classes = ...); dropped.
num_eff = tree_agg_df.apply(get_num_eff, axis=1)
tree_agg_df["num_effects"] = num_eff + 0.001  # +0.001 presumably to avoid exact zeros (e.g. on log axes) — TODO confirm
tree_agg_df["hamming"] = tree_agg_df["fn"] + tree_agg_df["fp"]
# -
# Plots
# + pycharm={"name": "#%%\n"}
sns.set(style="ticks", font_scale=1.3)
# MCC vs. effect size, one dash pattern per value of phi
g = sns.lineplot(data=tree_agg_df[tree_agg_df["phi"].isin([-5, -1, 0, 1])], x="effect_size", y="mcc",
                 palette="tab10",
                 style="phi",
                 legend=True,
                 ci=False,
                 lw=3,
                 dashes=[(1,1), (1,1,1,1,1,4), "", (4,1,1,1)]
                 )
g.set(ylim=[0, 1.05], xticks=[0.3, 0.5, 0.7, 0.9])
sns.despine()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title=r"$\phi$")
plt.xlabel("Effect size")
plt.ylabel("MCC")
# plt.savefig(plot_path + "high_effect_mcc_v3.svg", bbox_inches="tight", format="svg")
plt.show()
# + pycharm={"name": "#%%\n"}
# False discovery rate vs. effect size
g = sns.lineplot(data=tree_agg_df[tree_agg_df["phi"].isin([-5, -1, 0, 1])], x="effect_size", y="fdr",
                 palette="tab10",
                 style="phi",
                 legend=True,
                 ci=False,
                 lw=3,
                 dashes=[(1,1), (1,1,1,1,1,4), "", (4,1,1,1)]
                 )
g.set(ylim=[-0.05, 1], xticks=[0.3, 0.5, 0.7, 0.9])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title=r"$\phi$")
plt.xlabel("Effect size")
plt.ylabel("FDR")
sns.despine()
# plt.savefig(plot_path + "high_effect_fdr_v3.svg", bbox_inches="tight", format="svg")
plt.show()
# + pycharm={"name": "#%%\n"}
# True positive rate vs. effect size
g = sns.lineplot(data=tree_agg_df[tree_agg_df["phi"].isin([-5, -1, 0, 1])], x="effect_size", y="tpr",
                 palette="tab10",
                 style="phi",
                 legend=True,
                 ci=False,
                 lw=3,
                 dashes=[(1,1), (1,1,1,1,1,4), "", (4,1,1,1)]
                 )
g.set(ylim=[0,1])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title=r"$\phi$")
g.set(ylim=[0,1.05], xticks=[0.3, 0.5, 0.7, 0.9])
plt.xlabel("Effect size")
plt.ylabel("TPR")
sns.despine()
# plt.savefig(plot_path + "high_effect_tpr_v3.svg", bbox_inches="tight", format="svg")
plt.show()
# + pycharm={"name": "#%%\n"}
# Hamming distance (fn + fp) vs. effect size
g = sns.lineplot(data=tree_agg_df[tree_agg_df["phi"].isin([-5, -1, 0, 1])], x="effect_size", y="hamming",
                 palette="tab10",
                 style="phi",
                 legend=True,
                 ci=False,
                 lw=3,
                 dashes=[(1,1), (1,1,1,1,1,4), "", (4,1,1,1)]
                 )
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., title=r"$\phi$")
# (g.set_axis_labels("Replicates per group", "TPR").tight_layout(w_pad=0))
g.set(xticks=[0.3, 0.5, 0.7, 0.9])
plt.ylabel("Hamming distance")
plt.xlabel("Effect size")
sns.despine()
# plt.savefig(plot_path + "high_effect_hamming_v3.svg", bbox_inches="tight", format="svg")
plt.show()
# + pycharm={"name": "#%%\n"}
|
benchmarks/high_effect/result_analysis_high_effect.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to RegEx
#
# RegEx is a small programming language to deal with text. It is hard to learn but sometimes can save the day with its smart search abilities. Is mainly useful for:
# - cleaning data,
# - finding selectors/attributes.
#
# This notebook introduces regex, but first provides the quotes scraper we know in a slightly changed way using:
# - select() instead of find_all,
# - find_next_sibling() instead of find_all,
# - attr instead of directly providing class,
# - returns dataframe instead of a list.
# +
import numpy as np
import pandas as pd
import time
import requests
from bs4 import BeautifulSoup
from pprint import pprint
import re #regular expressions
from textblob import TextBlob, Word
import nltk
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords, wordnet
# -
#select <=> find_all
#select_one <=> find
def my_scraper(url):
    """Scrape quotes, authors and author links from a quotes.toscrape-style page.

    Returns a DataFrame with columns "quotes", "authors" and "author_links".
    """
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    quote_tags = soup.select("div.quote span.text")
    author_tags = soup.find_all("small", attrs={"class": "author"})
    # The author's "(about)" link is the tag directly after each <small class="author">
    columns = {
        "quotes": [tag.get_text() for tag in quote_tags],
        "authors": [tag.get_text() for tag in author_tags],
        "author_links": [tag.find_next_sibling().get("href") for tag in author_tags],
    }
    return pd.DataFrame(columns)
my_data = my_scraper("http://quotes.toscrape.com/")
my_data.head()
# RegEx basics on a toy sentence
my_text = "I am Jack, I am 37 years old, I am earning $100"
re.findall("I",my_text)
re.findall("[0-9]+",my_text) #find one or more digits
re.findall(".*",my_text) #find anything any number of times
re.sub("37","73",my_text) #substitute
re.sub("\s[0-9]+"," 73",my_text)
expres = re.compile("\s[0-9]+") #compile the expression to be used somewhere else
re.findall(expres,my_text)
# Combine bs4 with regex: match class names and clean scraped prices
url = "http://books.toscrape.com/"
response = requests.get(url)
page = response.content
page = BeautifulSoup(page,"html.parser")
page.find_all("p",class_=re.compile("price.+"))
page_str = str(page)
prices = re.findall("£[0-9]+\S[0-9]+",page_str)
[re.sub("£","",i) for i in prices]
# Group references: \1, \2 refer to the parenthesized groups in the pattern
re.sub(r"(\S)([0-9].+)",r"$\2",prices[0])
my_t = "My name is Hrant, my e-mail is <EMAIL>"
re.sub(r"(\S+@)(.+)",r"\1harvard.edu",my_t)
|
2018/RegEx.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Simple fiber coupling analysis using Zemax's POP
# <img src="https://raw.githubusercontent.com/indranilsinharoy/PyZDDE/master/Doc/Images/articleBanner_02_fibercoupling.png" height="230">
# *Please feel free to [e-mail](mailto:<EMAIL>) any corrections, comments and suggestions to the author ([<NAME>](http://indranilsinharoy.com/))*
#
# Last updated: 12/27/2015
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/)
# **Reference**
#
# The source of this material is from a Zemax webinar called [Understanding Single-Mode Fiber Coupling with Rays and Physical Optics](http://www.zemax.com/support/resource-center/webinars/understanding-single-mode-fiber-coupling-with-rays) hosted by Dr. <NAME> on 1/29/2013.
#
# Please note that if there are any errors in concepts discussed here, it is mostly likely due to my lack of understanding and not made by Zemax, or the host of the above webniar. Also, please note that this article is a part of my own notes (as I learn Zemax), and it has not been sponsored by Zemax.
import os
import numpy as np
import matplotlib.pyplot as plt
import pyzdde.zdde as pyz
# %matplotlib inline
ln = pyz.createLink()  # open a DDE link to the running Zemax instance
# Load the lens file "Fiber Coupling.zmx" that comes with Zemax as an example of POP computation.
# Build the full path to the example lens file shipped with Zemax and load it
zmxfile = 'Fiber Coupling.zmx'
lensPath = ln.zGetPath()[1]
lensFile = os.path.join(lensPath, 'Physical Optics', zmxfile)
ln.zLoadFile(lensFile)
# In this example, we have a lens (see the LDE and the layout plot below) that has almost no aberrations, a Gaussian TEM00 beam that is propagating in free space starting from the ``STO`` surface to the lens, and then the beam is focused by the lens. The focused beam is then coupled into the receiving fiber at the right end.
ln.ipzGetLDE()  # show the Lens Data Editor contents
# There is just one wavelength defined for the system. The wavelength is 1 $\mu m$ as shown below
ln.zGetWaveTuple()
# We can also see the fields set
ln.zGetFieldTuple()
# The lens is truly diffraction limited, as shown by the Seidel aberration coefficients.
ln.zGetSeidelAberration()
# ##### Layout plot
# The layout plot shows where we will place the input Gaussian beam (at the location of STO). The beam, after passing through the lens should be focused into the fiber. The surface numbers for the stop and fiber are shown in parentheses.
# Capture the layout window as a pixel array and annotate it with matplotlib
ln.zPushLens(1) # was pushed to LDE, and made sure that the "Frame suppress" was checked on layout plot
lay = ln.ipzCaptureWindow('Lay', percent=15, gamma=0.1, retArr=True)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# Render the array
pyz.imshow(lay, cropBorderPixels=(2, 5, 60, 90), fig=fig, faxes=ax)
ax.text(85, 75, "Lens", fontsize=12)
ax.text(20, 75, "STO (#1)", fontsize=12, rotation='vertical')
ax.text(510, 80, "Fiber (#4)", fontsize=12, rotation='vertical')
col = (0.08,0.08,0.08)
ax.annotate("{}".format('Beam origin'), (25, 163), (25, 230), fontsize=12,
            arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col,
                            relpos=(0.0,0.5)))
ax.annotate("{}".format('Beam focus'), (516, 152), (448, 230), fontsize=12,
            arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col,
                            relpos=(1.0,0.5)))
#ax.set_title('Layout plot', fontsize=14)
plt.show()
# We will now set up the POP analysis and see the irradiance and phase distribution of the input beam at surfaces 1 and 4, using the function ``zSetPOPSettings()``. Please refer to the function's docstring for details on the function.
#
# The input beam is defined as follows:
#
# ```
# Beam type : Gaussian waist
# Mode : TEM00
# Waist : 2 mm
# Total power : 1 Watt
# ```
#
# The beam waist of the input beam is located at the ``STO`` surface (surface to beam = 0).
#
# The receiver fiber settings are as follows:
#
# ```
# Beam type : Gaussian waist
# Mode : TEM00
# Waist : 8 microns (0.008 mm)
# ```
#
# The modal radius that the receiving fiber supports is an 8 microns beam waist.
# POP settings: data=0 -> irradiance, 2 mm TEM00 Gaussian waist at the STO surface
sfile = ln.zSetPOPSettings(data=0, startSurf=1, endSurf=1, field=1, wave=1,
                           beamType=0, paramN=((1, 2), (2, 2)), tPow=1,
                           sampx=4, sampy=4, widex=40, widey=40)
# The above function creates .cfg file (if not provided with one) after setting the appropriate parameters for the POP analysis and returns the full name of the file.
# Note that we want to analyze the effect of the beam that is propagating from surface 1 to surface 4. However, in the above function we set the `endSurf` to 1 in order to retrieve the POP data at the ``STO`` surface. Also, in the above function call, we did not select the "compute fiber coupling integral" option. This is because we are interested in the fiber coupling computation at the surface 4.
# ##### Irradiance data at ``STO`` surface:
irr_sur1, irrGridDat_sur1 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# The function returns two data structures --- ``irr_sur1`` contains POP analysis data such as peak irradiance, total power, rayleigh, etc, and ``irrGridDat_sur1`` contains the 2D irradiance plot data. We will plot this data later along with the irradiance and phase data of surface 4 (at the coupling).
irr_sur1
# ##### Irradiance data at surface 4:
# To get the beam parameters at the surface 4, we will need to modify the ``endSurf`` to 4. Also, since we are interested in fiber coupling integral computation at this surface, we will specify the fiber type and other fiber parameters as shown below.
# Switch the analysis to surface 4 and enable the fiber coupling integral (8 micron waist)
ln.zModifyPOPSettings(settingsFile=sfile, endSurf=4, fibComp=1,
                      fibType=0, fparamN=((1, 2), (0.008, 0.008)))
# The tuple of zeros in the above indicate that all the settings were successfully modified.
irr_sur4, irrGridDat_sur4 = ln.zGetPOP(settingsFile=sfile, displayData=True)
irr_sur4
# From the above we see that the **system efficiency**, which is the energy transported by the optical system is 1. The **receiver efficiency** is 0.999955, and the **coupling efficiency** is 0.999955.
# ##### Phase data at ``STO`` surface:
# In order to get the phase information we need to use the function ``zSetPOPSettings()`` once again. This is mainly because there is (currently) no way to modify the "Data" parameter externally. So in PyZDDE, we create a new settings file every time we switch between "Phase" and "Irradiance" data. Since we have already retrieved the irradiance data, we will re-use the name of the previous settings file.
# data=1 -> phase data; a new settings file is needed to switch from irradiance
sfile = ln.zSetPOPSettings(data=1, startSurf=1, endSurf=1, field=1,
                           wave=1, beamType=0, paramN=((1, 2), (2, 2)),
                           tPow=1, sampx=4,sampy=4, widex=40, widey=40)
pha_sur1, phaGridDat_sur1 = ln.zGetPOP(settingsFile=sfile, displayData=True)
pha_sur1
# ##### Phase data at surface 4:
ln.zModifyPOPSettings(settingsFile=sfile, endSurf=4, fibComp=1, fibType=0,
                      fparamN=((1, 2), (0.008, 0.008))) # Change analysis surface to 4 and add fiber comp
pha_sur4, phaGridDat_sur4 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# ##### Plot of the irradiance and phase data at the two surfaces
# In the following sections we shall use matplotlib to render the raw data which we have grabbed above. Consequently, we will have to write code for generating the plots. The advantage is that we have complete control of how we want to analyze and present the data. For quick notes, you can always use ``zCaptureWindow()`` to grab the POP graphic window from Zemax as shown in an example at the end of this article.
# +
# 2x2 grid: irradiance (top row) and phase (bottom row) at the STO and fiber surfaces
fig = plt.figure(figsize=(8,8))
# irradiance data
ax = fig.add_subplot(2,2,1)
ax.set_title('Irradiance at STO', fontsize=14)
irrmax = np.max(irrGridDat_sur1)
ext = [-irr_sur1.widthX/2, irr_sur1.widthX/2,
       -irr_sur1.widthY/2, irr_sur1.widthY/2]
ax.imshow(irrGridDat_sur1, extent=ext, origin='lower',
          cmap=plt.cm.coolwarm, vmin=0, vmax=irrmax)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
ax = fig.add_subplot(2,2,2)
ax.set_title('Irradiance at fiber', fontsize=14)
irrmax = np.max(irrGridDat_sur4)
ext = [-irr_sur4.widthX/2, irr_sur4.widthX/2,
       -irr_sur4.widthY/2, irr_sur4.widthY/2]
ax.imshow(irrGridDat_sur4, extent=ext, origin='lower',
          cmap=plt.cm.coolwarm, vmin=0, vmax=irrmax)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
# phase data
ax = fig.add_subplot(2,2,3)
ax.set_title('Phase at source', fontsize=14)
ext = [-pha_sur1.widthX/2, pha_sur1.widthX/2,
       -pha_sur1.widthY/2, pha_sur1.widthY/2]
ax.imshow(phaGridDat_sur1, extent=ext, origin='lower',
          vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
ax = fig.add_subplot(2,2,4)
ax.set_title('Phase at fiber', fontsize=14)
ext = [-pha_sur4.widthX/2, pha_sur4.widthX/2,
       -pha_sur4.widthY/2, pha_sur4.widthY/2]
ax.imshow(phaGridDat_sur4, extent=ext, origin='lower',
          vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
fig.tight_layout()
plt.show()
# -
# From the above plots we can see that there is no <u>appreciable</u> change in the phase of the beam. In order to get a better picture, we will plot slices of the 2D data.
# +
# Central row (index 128 of 256 samples) slices of the same four data sets
fig = plt.figure(figsize=(8,8))
# irradiance data
ax = fig.add_subplot(2,2,1)
ax.set_title('Irradiance at STO', fontsize=14)
dx = irr_sur1[-2]/256
x = [-irr_sur1[-2]/2 + dx*i for i in range(256)]
ax.plot(x, irrGridDat_sur1[128])
ax.set_ylim(top=np.max(np.array(irrGridDat_sur1[128])))
ax.set_xlim(left=-irr_sur1[-2]/2, right=irr_sur1[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
ax = fig.add_subplot(2,2,2)
dx = irr_sur4[-2]/256
x = [-irr_sur4[-2]/2 + dx*i for i in range(256)]
ax.set_title('Irradiance at fiber', fontsize=14)
ax.plot(x, irrGridDat_sur4[128])
ax.set_ylim(top=np.max(np.array(irrGridDat_sur4[128])))
ax.set_xlim(left=-irr_sur4[-2]/2, right=irr_sur4[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
# phase data
ax = fig.add_subplot(2,2,3)
ax.set_title('Phase at STO', fontsize=14)
dx = pha_sur1[-2]/256
x = [-pha_sur1[-2]/2 + dx*i for i in range(256)]
ax.plot(x, phaGridDat_sur1[128])
ax.set_ylim(top=np.pi, bottom=-np.pi)
ax.set_xlim(left=-pha_sur1[-2]/2, right=pha_sur1[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Phase (radians)', fontsize=13)
ax = fig.add_subplot(2,2,4)
ax.set_title('Phase at fiber', fontsize=14)
dx = pha_sur4[-2]/256
x = [-pha_sur4[-2]/2 + dx*i for i in range(256)]
ax.plot(x, phaGridDat_sur4[128])
ax.set_ylim(top=np.pi, bottom=-np.pi)
ax.set_xlim(left=-pha_sur4[-2]/2, right=pha_sur4[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Phase (radians)', fontsize=13)
fig.tight_layout()
plt.show()
# -
# We can zoom-in into to the irradiance slice plot at the fiber and see that the fiber is "perfectly coupled" as the beam size (radius) defined by $1/e^2$ width at the point of focus is very close to 8 microns (indicated by the red dashed vertical lines), which is the modal radius of the fiber we have defined.
# Zoomed slice at the fiber; the red dashed lines mark the 8-micron fiber mode radius
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
dx = irr_sur4[-2]/256
x = [-irr_sur4[-2]/2 + dx*i for i in range(256)]
ax.set_title('Irradiance at fiber', fontsize=14)
ax.plot(x, irrGridDat_sur4[128])
ymax = np.max(np.array(irrGridDat_sur4[128]))
xlim = irr_sur4[-2]/16
ax.set_ylim(top=ymax)
ax.set_xlim(left=-xlim, right=xlim)
ax.axvline(x=-0.008, ymax=ymax, color='r', ls='--')
ax.axvline(x=0.008, ymax=ymax, color='r', ls='--')
ax.axhline(y=ymax/np.e**2, color='g', ls='--')  # 1/e^2 irradiance level defining the beam radius
ax.text(0.012, 1730, r'$1/e^2$', fontsize=17)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
plt.show()
# Now, if we change the input beam waist to be 1.5 mm instead of 2 mm, which was the perfect coupling case, we will see that the coupling efficiency will decrease.
# Same analysis with a 1.5 mm input waist instead of the perfectly-coupled 2 mm
sfile = ln.zSetPOPSettings(data=0, startSurf=1, endSurf=4, field=1,
                           wave=1, beamType=0, paramN=((1, 2), (1.5, 1.5)),
                           tPow=1, sampx=4, sampy=4, widex=40, widey=40,
                           fibComp=1, fibType=0, fparamN=((1, 2), (0.008, 0.008)))
irr_sur4_imperfect, irrGridDat_sur4_imperfect = ln.zGetPOP(settingsFile=sfile, displayData=True)
irr_sur4_imperfect
# We can see that the coupling efficiency decreased to 0.924587. We can also see that the $1/e^2$ beam size at the point of coupling with the fiber is greater than 8 microns (indicated by the red dashed vertical lines) resulting in a loss of energy.
# Zoomed slice for the imperfect case; the beam is now wider than the fiber mode
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
dx = irr_sur4_imperfect[-2]/256
x = [-irr_sur4_imperfect[-2]/2 + dx*i for i in range(256)]
ax.set_title('Irradiance at fiber', fontsize=14)
ax.plot(x, irrGridDat_sur4_imperfect[128])
ymax = np.max(np.array(irrGridDat_sur4_imperfect[128]))
xlim = irr_sur4[-2]/14
ax.set_ylim(top=ymax)
ax.set_xlim(left=-xlim, right=xlim)
ax.axvline(x=-0.008, ymax=ymax, color='r', ls='--')
ax.axvline(x=0.008, ymax=ymax, color='r', ls='--')
ax.axhline(y=ymax/np.e**2, color='g', ls='--')
ax.text(0.015, 930, r'$1/e^2$', fontsize=17)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
plt.show()
# ### Effect of moving the beam origin to the left
# We will now see the effect of changing the origin of the beam behind the stop by 3000 *mm*. In Zemax, under POP analysis, there is an option called "Surface to Beam" that specifies the distance from the starting surface to the beam position in lens units.
#
# Unfortunately, we cannot modify this POP parameter through the DDE. One way of achieving the same objective is to create a dummy surface of thickness 3000 *mm* between the ``OBJ`` and ``STO`` and specify this surface as the start position of the beam.
# Insert a 3000 mm dummy surface so the beam can start 3000 mm before the STO
ln.zInsertSurface(1)
ln.zSetSurfaceData(surfNum=1, code=ln.SDAT_COMMENT, value='dummy')
ln.zSetSurfaceData(surfNum=1, code=ln.SDAT_THICK, value=3000)
ln.ipzGetLDE()
# +
# Re-annotated layout: surface numbers shifted by one after the dummy insert
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# Render the array
pyz.imshow(lay, cropBorderPixels=(2, 5, 60, 90), fig=fig, faxes=ax)
ax.text(85, 75, "Lens", fontsize=12)
ax.text(20, 75, "STO (#2)", fontsize=12, rotation='vertical')
ax.text(510, 80, "Fiber (#5)", fontsize=12, rotation='vertical')
col = (0.08,0.08,0.08)
ax.annotate("{}".format('Beam origin (3000 mm left of STO)'), (0, 230), (35, 234), fontsize=12,
            arrowprops=dict(arrowstyle="-|>", linewidth=0.9, color='r',
                            relpos=(0.0,0.5)))
ax.annotate("{}".format('Beam focus'), (516, 152), (448, 230), fontsize=12,
            arrowprops=dict(arrowstyle="->", linewidth=0.45, color=col,
                            relpos=(1.0,0.5)))
#ax.set_title('Layout plot', fontsize=14)
plt.show()
# -
# ##### Intensity at surface ``STO``
# Note that the surface numbers have changed. We will create a new settings file by not passing a settings file-name to ``zSetPOPSettings()``. (Also, since it will have the same name, the old file will be overwritten.)
# Fresh settings file (surface numbers changed); irradiance at the new STO (surface 2)
sfile = ln.zSetPOPSettings(startSurf=1, endSurf=2, field=1, wave=1,
                           beamType=0, paramN=((1, 2), (2, 2)), tPow=1,
                           sampx=4, sampy=4, widex=40, widey=40)
new_irr_sur2, new_irrGridDat_sur2 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# ##### Intensity at Fiber
# Irradiance at the fiber, now surface 5
ln.zModifyPOPSettings(settingsFile=sfile, endSurf=5, fibComp=1,
                      fibType=0, fparamN=((1, 2), (0.008, 0.008)))
new_irr_sur5, new_irrGridDat_sur5 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# ##### Phase at surface ``STO``
# data= 1 -> phase data at the new STO surface
sfile = ln.zSetPOPSettings(data= 1, startSurf=1, endSurf=2, field=1, wave=1,
                           beamType=0, paramN=((1, 2), (2, 2)), tPow=1,
                           sampx=4, sampy=4, widex=40, widey=40)
new_pha_sur2, new_phaGridDat_sur2 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# ##### Phase at Fiber
# Phase at the fiber (surface 5)
ln.zModifyPOPSettings(settingsFile=sfile, endSurf=5, fibComp=1,
                      fibType=0, fparamN=((1, 2), (0.008, 0.008)))
new_pha_sur5, new_phaGridDat_sur5 = ln.zGetPOP(settingsFile=sfile, displayData=True)
# ##### Plots of irradiance and phase
# +
# 2x2 grid for the shifted-origin beam: irradiance (top) and phase (bottom)
fig = plt.figure(figsize=(8,8))
# irradiance data
ax = fig.add_subplot(2,2,1)
ax.set_title('Irradiance at STO', fontsize=14)
ext = [-new_irr_sur2[-2]/2, new_irr_sur2[-2]/2,
       -new_irr_sur2[-1]/2, new_irr_sur2[-1]/2]
ax.imshow(new_irrGridDat_sur2, extent=ext, origin='lower',
          cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
ax = fig.add_subplot(2,2,2)
ax.set_title('Irradiance at fiber', fontsize=14)
ext = [-new_irr_sur5[-2]/2, new_irr_sur5[-2]/2,
       -new_irr_sur5[-1]/2, new_irr_sur5[-1]/2]
ax.imshow(new_irrGridDat_sur5, extent=ext, origin='lower',
          cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
# phase data
ax = fig.add_subplot(2,2,3)
ax.set_title('Phase at STO', fontsize=14)
ext = [-new_pha_sur2[-2]/2, new_pha_sur2[-2]/2,
       -new_pha_sur2[-1]/2, new_pha_sur2[-1]/2]
ax.imshow(new_phaGridDat_sur2, extent=ext, origin='lower',
          vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
ax = fig.add_subplot(2,2,4)
ax.set_title('Phase at fiber', fontsize=14)
ext = [-new_pha_sur5[-2]/2, new_pha_sur5[-2]/2,
       -new_pha_sur5[-1]/2, new_pha_sur5[-1]/2]
ax.imshow(new_phaGridDat_sur5, extent=ext, origin='lower',
          vmin=-np.pi, vmax=np.pi, cmap=plt.cm.coolwarm)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('y (mm)', fontsize=13)
fig.tight_layout()
plt.show()
# +
# Overlay slices: gray = beam starting at STO, red = beam starting 3000 mm earlier
fig = plt.figure(figsize=(8,8))
# irradiance data
ax = fig.add_subplot(2,2,1)
ax.set_title('Irradiance at STO', fontsize=14)
dx = irr_sur1[-2]/256
x = [-irr_sur1[-2]/2 + dx*i for i in range(256)]
ax.plot(x, irrGridDat_sur1[128],'gray')
ax.plot(x, new_irrGridDat_sur2[128], 'r')
ax.set_ylim(top=np.max(np.array(irrGridDat_sur1[128])))
ax.set_xlim(left=-irr_sur1[-2]/2, right=irr_sur1[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
ax = fig.add_subplot(2,2,2)
dx = irr_sur4[-2]/256
x = [-irr_sur4[-2]/2 + dx*i for i in range(256)]
ax.set_title('Irradiance at fiber', fontsize=14)
ax.plot(x, irrGridDat_sur4[128],'gray')
ax.plot(x, new_irrGridDat_sur5[128], 'r')
ax.set_ylim(top=np.max(np.array(irrGridDat_sur4[128])))
ax.set_xlim(left=-irr_sur4[-2]/2, right=irr_sur4[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Irradiance (Watts per square mm)', fontsize=13)
# phase data
ax = fig.add_subplot(2,2,3)
ax.set_title('Phase at STO', fontsize=14)
dx = pha_sur1[-2]/256
x = [-pha_sur1[-2]/2 + dx*i for i in range(256)]
ax.plot(x, phaGridDat_sur1[128],'gray')
ax.plot(x, new_phaGridDat_sur2[128], 'r')
ax.set_ylim(top=np.pi, bottom=-np.pi)
ax.set_xlim(left=-pha_sur1[-2]/2, right=pha_sur1[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Phase (radians)', fontsize=13)
ax = fig.add_subplot(2,2,4)
ax.set_title('Phase at fiber', fontsize=14)
dx = pha_sur4[-2]/256
x = [-pha_sur4[-2]/2 + dx*i for i in range(256)]
ax.plot(x, phaGridDat_sur4[128],'gray')
ax.plot(x, new_phaGridDat_sur5[128], 'r')
ax.set_ylim(top=np.pi, bottom=-np.pi)
ax.set_xlim(left=-pha_sur4[-2]/2, right=pha_sur4[-2]/2)
ax.set_xlabel('x (mm)', fontsize=13)
ax.set_ylabel('Phase (radians)', fontsize=13)
fig.tight_layout()
plt.show()
# -
# In the above plots, the gray lines represent the intensity and phase data slices when the beam originated at the ``STO`` surface (i.e. surface to beam = 0). The red lines represent the intensity and phase data slices when the beam originated 3000 *mm* to the left of the ``STO`` surface.
#
# While the intensity plots for the two surfaces in both the cases are very similar, the phase plots show significant differences. Firstly, as the beam propagates from its waist position, 3000 mm behind the ``STO`` surface, to the ``STO`` surface its radius of curvature decreases from infinity to a finite value. Secondly, we can see the **Gouy shift** of phase profile of the Gaussian beam.
# ##### Coupling efficiency
new_irr_sur5  # show POP metadata incl. system/receiver/coupling efficiency
# The coupling efficiency reduced from 0.999955 to 0.985784.
# The coupling efficiency decreases because the fiber's mode which we have defined as having a flat phase profile doesn't match that of the input (to the fiber) beam.
# #### Grabbing POP window from Zemax
# I used matplotlib to create the above plots using the raw data extracted from Zemax. However, it is entirely possible to grab the Zemax plots as shown with the following example.
pyz.findZButtonCode('physical optics') # What is the button code?
# Grab Zemax's own POP graphic window instead of re-plotting the raw data
sfile = ln.zSetPOPSettings(startSurf=1, endSurf=2, field=1, wave=1,
                           beamType=0, paramN=((1, 2), (2, 2)), tPow=1,
                           sampx=4, sampy=4, widex=40, widey=40)
sfile
ln.ipzCaptureWindow('Pop', percent=15, gamma=0.9)
ln.close()  # close the DDE link to Zemax
|
Examples/IPNotebooks/02 Simple fiber coupling analysis using Zemax's POP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Getting started with SKIL from Python
#
# This notebook is a quick tour of the Skymind Intelligence Layer (SKIL), a tool for managing your deep learning model life-cycle from prototype to production. You will first download and start SKIL, then define and train a simple Keras model, upload the model to SKIL and start a production-ready service that you can use for model inference.
#
# Let's load the SKIL Python package, as well as Keras with Tensorflow backend, first:
# %%capture
# ! pip install skil --user
# ! pip install tensorflow keras --user
# To use SKIL from this notebook (or any other Python environment) we need to install SKIL first. We do this with Docker here, but you have other options as well. Head over to https://docs.skymind.ai/docs/installation to get detailed installation instructions for your platform.
#
# You pull the latest SKIL Community Edition (CE) from dockerhub as follows:
# ! docker pull skymindops/skil-ce
# Once the download is finished start SKIL from command line like this:
#
# **docker run --rm -it -p 9008:9008 -p 8080:8080 skymindops/skil-ce bash /start-skil.sh**
#
# To test this, you can open a browser on "localhost:9008" to see the SKIL login screen. User name and password are both "admin". We won't be using the UI much right now, but everything we do within this notebook can also be run and managed within the SKIL UI.
#
# Now that SKIL runs, we can return to the real focus: your deep learning models. Let's start by defining a Keras model that classifies MNIST handwritten digits first.
# +
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
batch_size = 128
num_classes = 10
epochs = 5
# Load MNIST, flatten 28x28 images to 784-vectors and scale pixels to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the digit labels
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Simple MLP: two 512-unit ReLU layers with dropout, softmax over 10 classes
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
# -
# To deploy a model with SKIL you train this model and persist it using "save".
# +
# Train with SGD on categorical cross-entropy and persist the model for SKIL
model.compile(loss='categorical_crossentropy',
              optimizer='sgd', metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
model.save("model.h5")
# -
# SKIL organizes your work in workspaces, the basis for all experiments you want to run. Once your experiment is set up, you can register your Keras model in it as SKIL Model.
# +
from skil import Skil, WorkSpace, Experiment, Model
# Connect to the running SKIL server and register the saved Keras model
# inside a workspace/experiment hierarchy
skil_server = Skil()
work_space = WorkSpace(skil_server)
experiment = Experiment(work_space)
model = Model('model.h5', id="keras_model", experiment=experiment)
# -
# SKIL now has access to your model and you can deploy it as a service, like this. (The deployment process might take a few seconds, but you'll get notified when the model server is up.)
# +
from skil import Deployment, Service
# Deploy the registered model as a production inference service
deployment = Deployment(skil_server, "keras_deployment")
service = model.deploy(deployment)
# -
# That's it! You can now get predictions from your deployed service. SKIL will make sure your service is up and running at all times.
print(service.predict(x_test[0]))  # run one test sample through the deployed service
|
examples/keras-skil-example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Predicting Student Admissions with Neural Networks in Keras
# In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
# - GRE Scores (Test)
# - GPA Scores (Grades)
# - Class rank (1-4)
#
# The dataset originally came from here: http://www.ats.ucla.edu/
#
# ## Loading the data
# To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://docs.scipy.org/
# +
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data
data[:10]
# -
# ## Plotting the data
#
# First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
# +
# Importing matplotlib
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
    """Scatter-plot students on GRE (x) vs. GPA (y), rejected in red, admitted in cyan."""
    scores = np.array(data[["gre", "gpa"]])
    outcomes = np.array(data["admit"])
    # Boolean masks select the rows of each outcome directly.
    admitted = scores[outcomes == 1]
    rejected = scores[outcomes == 0]
    plt.scatter(rejected[:, 0], rejected[:, 1], s=25, color='red', edgecolor='k')
    plt.scatter(admitted[:, 0], admitted[:, 1], s=25, color='cyan', edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
# -
# Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
# +
# Separating the ranks
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
# -
# This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
#
# ## One-hot encoding the rank
# For this, we'll use the `get_dummies` function in pandas.
# +
# Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# Drop the previous rank column
one_hot_data = one_hot_data.drop('rank', axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
# -
# ## Scaling the data
# The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
# +
# Copying our data — use an explicit deep copy: `one_hot_data[:]` returns a
# shallow slice, and assigning to its columns can raise pandas'
# SettingWithCopyWarning (and in older pandas silently write back).
processed_data = one_hot_data.copy()
# Scaling the columns into [0, 1]: GRE is out of 800, GPA out of 4.0
processed_data['gre'] = processed_data['gre']/800
processed_data['gpa'] = processed_data['gpa']/4.0
processed_data[:10]
# -
# ## Splitting the data into Training and Testing
# In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
# +
# Randomly choose 90% of the index *labels* (without replacement) for training.
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
# Select by label with .loc — `sample` holds index labels, not positions, so
# .iloc would only be correct by accident when the index is the default
# RangeIndex. `.drop(sample)` (label-based) yields the complementary 10%.
train_data, test_data = processed_data.loc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
# -
# ## Splitting the data into features and targets (labels)
# Now, as a final step before the training, we'll split the data into features (X) and targets (y).
#
# Also, in Keras, we need to one-hot encode the output. We'll do this with the `to_categorical function`.
# +
import keras
# Separate data and one-hot encode the output
# Note: We're also turning the data into numpy arrays, in order to train the model in Keras
features = np.array(train_data.drop('admit', axis=1))
targets = np.array(keras.utils.to_categorical(train_data['admit'], 2))
features_test = np.array(test_data.drop('admit', axis=1))
targets_test = np.array(keras.utils.to_categorical(test_data['admit'], 2))
print(features[:10])
print(targets[:10])
# -
# ## Defining the model architecture
# Here's where we use Keras to build our neural network.
# +
# Imports
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
# Building the model
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(6,)))
model.add(Dropout(.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(.1))
model.add(Dense(2, activation='softmax'))
# Compiling the model
model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# -
# ## Training the model
# Training the model
model.fit(features, targets, epochs=1000, batch_size=100, verbose=0)
# ## Scoring the model
# Evaluating the model on the training and testing set
score = model.evaluate(features, targets)
print("\n Training Accuracy:", score[1])
score = model.evaluate(features_test, targets_test)
print("\n Testing Accuracy:", score[1])
# ## Challenge: Play with the parameters!
# You can see that we made several decisions in our training. For instance, the number of layers, the sizes of the layers, the number of epochs, etc.
# It's your turn to play with parameters! Can you improve the accuracy? The following are other suggestions for these parameters. We'll learn the definitions later in the class:
# - Activation function: relu and sigmoid
# - Loss function: categorical_crossentropy, mean_squared_error
# - Optimizer: rmsprop, adam, ada
# +
# Challenge experiments.
# The original notebook repeated the same build/train/evaluate cell 13 times
# with different (loss, optimizer, output-activation) combinations. The
# copy-paste is collapsed into one documented helper plus a configuration
# sweep that runs the identical grid in the identical order.
# Imports
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils


def build_train_evaluate(loss, optimizer, final_activation='softmax',
                         epochs=1000, batch_size=100):
    """Build the 2-hidden-layer network, train on the global `features`/`targets`,
    and print training/testing accuracy.

    Args:
        loss: Keras loss identifier, e.g. 'categorical_crossentropy'.
        optimizer: Keras optimizer identifier, e.g. 'adam'.
        final_activation: activation of the 2-unit output layer.
        epochs, batch_size: training schedule (defaults match the original cells).

    Returns:
        The trained Keras model.
    """
    # Same architecture as every original experiment cell: 6 -> 128 -> 64 -> 2
    # with dropout after each hidden layer.
    model = Sequential()
    model.add(Dense(128, activation='relu', input_shape=(6,)))
    model.add(Dropout(.2))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(.1))
    model.add(Dense(2, activation=final_activation))
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    model.summary()
    model.fit(features, targets, epochs=epochs, batch_size=batch_size, verbose=0)
    # Evaluating the model on the training and testing set
    score = model.evaluate(features, targets)
    print("\n Training Accuracy:", score[1])
    score = model.evaluate(features_test, targets_test)
    print("\n Testing Accuracy:", score[1])
    return model


# (loss, optimizer, output activation) — exactly the grid of the original
# cells, in the original order; the last entry repeats the final cell so the
# module-level `model`/`score` end up in the same state as before.
configs = [
    ('categorical_crossentropy', 'adam', 'softmax'),
    ('categorical_crossentropy', 'rmsprop', 'softmax'),
    ('categorical_crossentropy', 'sgd', 'softmax'),
    ('mean_squared_error', 'adam', 'softmax'),
    ('mean_squared_error', 'rmsprop', 'softmax'),
    ('mean_squared_error', 'sgd', 'softmax'),
    ('categorical_crossentropy', 'adam', 'sigmoid'),
    ('categorical_crossentropy', 'rmsprop', 'sigmoid'),
    ('categorical_crossentropy', 'sgd', 'sigmoid'),
    ('mean_squared_error', 'adam', 'sigmoid'),
    ('mean_squared_error', 'rmsprop', 'sigmoid'),
    ('mean_squared_error', 'sgd', 'sigmoid'),
    ('categorical_crossentropy', 'rmsprop', 'sigmoid'),
]
for loss, opt, act in configs:
    model = build_train_evaluate(loss, opt, final_activation=act)
# -
|
student-admissions-keras/StudentAdmissionsKeras_mine.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# the matchArray variable contains the list of matches
def getMatches(regex_str, string):
    """Return all non-overlapping matches of `regex_str` in `string` (re.findall semantics)."""
    pattern = re.compile(regex_str)
    return pattern.findall(string)
# +
import re
# Match "<word> <word> <word> <number> <word>", e.g. "I AM BUYING 30 TTD".
# Raw string: the original non-raw literal relied on invalid escapes
# ("\w", "\s", "\ "), which raise SyntaxWarning/DeprecationWarning on
# modern Python; the trailing semicolon was also dropped.
buyingRegex = r"(\w\ \w{1,}\ \w{1,})\s(\d{1,})\ (\w{1,})"
testString = "I AM BUYING 30 TTD" # fill this in
getMatches(buyingRegex,testString)
# + pycharm={"name": "#%%\n"}
buyingRegex="(\w+) PORTFOLIO"
testString="DAILYMETER PORTFOLIO"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
testString="🚩🚩🚩🚩HIGH TO EXTREMELY HIGH RISK - GREAT COMPANY - RISKY ENTRYPOINT 🚩🚩🚩🚩"
buyingRegex="🚩+(.+) 🚩+"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
testString="THE TRADE DESK INC - TTD"
buyingRegex="(.+)-(.+)"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
testString="I AM BUYING 30 TTD"
buyingRegex="(I AM BUYING)\s(\d+)(.+)"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
testString="BUYING - $740+ TO $700, $650, $600, $560"
buyingRegex="(BUYING)\s+-\s(.+),{1}\s+(.+),{1}\s+(.+),{1}\s+(.+)"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
buyingRegex="(SELLING)\s+-\s(.+),{1}\s+(.+),{1}\s+(.+),{1}\s+(.+)"
testString="SELLING - $755, $760, $780, $800"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
buyingRegex="(TRL STOP)\s+-\s(.+)";
testString="TRL STOP - +$20"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
buyingRegex="(FARTHER STOP)\s+-\s(.+)"
testString="FARTHER STOP - $690"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
buyingRegex="(SDP)\s+-\s(.+)\.\.\.\.(ADDED.+AT\s+.+)"
testString="SDP - $744....ADDED SOME SOME AT $744+"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
buyingRegex="(FOMO)\s+-(.?)"
testString="FOMO -"
print(getMatches(buyingRegex,testString))
# + pycharm={"name": "#%%\n"}
|
scratch-pad/scratch-1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CIFAR-10 utils
#
# This notebook contains functions needed to create cifar-10 splits
# %reload_ext autoreload
# %autoreload 2
# +
# default_exp cifar_utils
# -
from nbdev.showdoc import *
from nbdev.test import test_eq
# +
#export
import numpy as np
import matplotlib.pyplot as plt
from fastai2.basics import L
from fastai2.data.external import untar_data, URLs
from fastai2.data.transforms import *
from fastai2.vision.core import *
from fastai2.vision.learner import *
from fastai2.vision.models.xresnet import xresnet34
from fastai2.data.core import Datasets, DataLoader, TfmdDL
from fastai2.callback.schedule import LRFinder
from collections import Counter
# -
cifar = untar_data(URLs.CIFAR)
fnames = get_image_files(cifar)
plt.imshow(PILImage.create(fnames[0]));
#export
def get_overlap(first, second, n_same):
    "Generate set with `n_same` random items from `first` and other from `second`"
    # Inputs must be disjoint, and the requested overlap must fit in `second`.
    assert not (set(first) & set(second)), "Two sets already overlap!"
    assert 0 <= n_same <= len(second), "Too many elements requested"
    # Two draws in the same order as before so RNG consumption is unchanged:
    # `n_same` items from `first`, the remaining slots filled from `second`.
    picked_from_first = list(np.random.choice(first, n_same, replace=False))
    picked_from_second = list(np.random.choice(second, len(second) - n_same, replace=False))
    return picked_from_first + picked_from_second
# +
a = ["bird", "cat", "deer", "dog", "frog", "horse"]
b = ["ship", "truck", "airplane", "automobile"]
for i in range(20):
np.random.seed(i)
test_eq(len(set(get_overlap(a, b, 4)) & set(a)), 4)
test_eq(len(set(get_overlap(a, b, 3)) & set(a)), 3)
test_eq(len(set(get_overlap(a, b, 2)) & set(a)), 2)
test_eq(len(set(get_overlap(a, b, 1)) & set(a)), 1)
test_eq(len(set(get_overlap(a, b, 0)) & set(a)), 0)
# -
#export
class CifarFactory():
    "Create different versions of cifar dataset splits"
    def __init__(self, seed=42, n_labeled=400, n_same_cls=1):
        """
        Initialize cifar split.

        Args:
            seed - numpy RNG seed used by `splits_from_path` for reproducibility
            n_labeled - number of labeled images kept per supervised class
            n_same_cls - how many of the unlabeled classes overlap with the
                labeled (animal) classes; the rest are drawn from `other`
        """
        self.seed = seed
        self.n_labeled = n_labeled
        self.n_same_cls = n_same_cls
        # Classes split as in paper
        self.animals = ["bird", "cat", "deer", "dog", "frog", "horse"]
        self.other = ["ship", "truck", "airplane", "automobile"]
    def splits_from_path(self, path):
        "Create all things needed to setup fastai datasets. returns: fnames, (train, test, unsup)"
        # Seed once so class overlap, shuffling, and the resulting splits are reproducible.
        np.random.seed(self.seed)
        # Get all cifar files
        fnames = get_image_files(path)
        labels = L([parent_label(f) for f in fnames])
        # Get only relevant classes: animals are always supervised; the
        # unsupervised pool shares `n_same_cls` classes with them.
        sup_cls = self.animals
        unsup_cls = get_overlap(self.animals, self.other, self.n_same_cls)
        mask = labels.map(lambda l: l in sup_cls + unsup_cls)
        fnames = fnames[mask]
        labels = labels[mask]
        # NOTE: indices in train/test/unsup refer to positions in the masked `fnames`.
        all_train, test = map(L, GrandparentSplitter("train", "test")(fnames))
        # Test set only contains supervised classes.
        test = test.filter(lambda i: labels[i] in sup_cls)
        # Shuffle so the per-class quota below samples randomly.
        all_train = all_train[np.random.permutation(len(all_train))]
        # Split on labeled and unlabeled parts: the first `n_labeled` items of
        # each class fill the labeled quota (only supervised classes are kept
        # as `train`); items beyond the quota from unsupervised classes go to
        # `unsup`. Surplus supervised-class items are dropped entirely.
        class_counts = Counter()
        n_labeled = self.n_labeled
        train, unsup = L(), L()
        for i in all_train:
            l = labels[i]
            if class_counts[labels[i]] < n_labeled:
                class_counts[labels[i]] += 1
                if l in sup_cls: train.append(i)
            elif l in unsup_cls:
                unsup.append(i)
        return fnames, (train, test, unsup)
# +
cifar = untar_data(URLs.CIFAR)
# Create the splits before inspecting them: the original cell referenced
# train/test/unsup without any prior definition, raising NameError on a
# fresh top-to-bottom run of the notebook.
files, (train, test, unsup) = CifarFactory().splits_from_path(cifar)
len(train), len(test), len(unsup)
# -
for i in range(5):
files, (train, test, unsup) = CifarFactory(n_same_cls=i, n_labeled=400).splits_from_path(cifar)
cls_train, cnt_train = zip(*Counter([parent_label(o) for o in files[train]]).items())
cls_unsup, cnt_unsup = zip(*Counter([parent_label(o) for o in files[unsup]]).items())
cls_test, cnt_test = zip(*Counter([parent_label(o) for o in files[test]]).items())
test_eq(len(set(cls_train) & set(cls_test)), 6)
test_eq(len(set(cls_train) & set(cls_unsup)), i)
test_eq(list(cnt_train), [400]*6)
test_eq(list(cnt_unsup), [5000 - 400]*4)
test_eq(list(cnt_test), [1000]*6)
Counter([parent_label(o) for o in files[train]])
Counter([parent_label(o) for o in files[test]])
Counter([parent_label(o) for o in files[unsup]])
test_eq(set(files[unsup]) & set(files[train]), set())
test_eq(set(files[unsup]) & set(files[test]), set())
test_eq(set(files[train]) & set(files[test]), set())
# Example of using the splitter defined above:
files, (train, test, unsup) = CifarFactory(n_same_cls=3, n_labeled=400).splits_from_path(cifar)
sup_ds = Datasets(files, [[PILImage.create], [parent_label, Categorize]], splits=(train, test))
unsup_ds = Datasets(files, [[PILImage.create]], splits=(unsup,))
sup_dl = sup_ds.dataloaders(after_item=[ToTensor], after_batch=[IntToFloatTensor, Normalize.from_stats(*cifar_stats)])
unsup_dl = unsup_ds.dataloaders(after_item=[ToTensor], after_batch=[IntToFloatTensor, Normalize.from_stats(*cifar_stats)])
sup_dl.show_batch(max_n=9)
unsup_dl.show_batch(max_n=9)
#hide
from nbdev.export import notebook2script
notebook2script()
|
nbs/00_cifar.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inference with baseline model (TF-IDF SVM classifier)
#
# +
# %load_ext autoreload
# %autoreload 2
import sys
import os
from pathlib import Path
import importlib
sys.path.append('..')
import pandas as pd
import numpy as np
import data.dataframe_preparation as preparation
from data.labels_postprocessing import process
from data.dataframe_preparation import get_counts_per_page, get_keywords_from_file, get_text_from_page, get_count_matrix
from data.preprocessing import DocumentPreprocessor
from data.inference_widgets import CroInferenceViewer
# +
############### CONFIG ###############
FIRM_METADATA = os.path.abspath("../input_files/Firm_Metadata.csv")
DATA_INPUT_PATH = os.path.abspath("../input_files/annual_reports/")
MASTER_DATA_PATH = os.path.abspath("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Data/stoxx_inference/Firm_AnnualReport.csv")
INFERENCE_PARAGRAPH_PATH = os.path.abspath("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Data/stoxx_inference/Firm_AnnualReport_Paragraphs_with_actual_back.pkl")
MODELS_PATH = os.path.abspath("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Models/stoxx_inference")
######################################
# Load master file
df = pd.read_csv(MASTER_DATA_PATH)
df = df.set_index("id")
# Load paragraphs file
if Path(INFERENCE_PARAGRAPH_PATH).is_file():
df_paragraphs = pd.read_pickle(INFERENCE_PARAGRAPH_PATH)
else:
df_paragraphs = pd.DataFrame()
# Load classifier
import pickle
with open(os.path.join(MODELS_PATH, 'multilabel_svm_cro.pkl'), 'rb') as f:
clf = pickle.load(f)
label_list = clf.label_list
# -
df
# ## Get paragraphs of all reports
# +
vocabulary = get_keywords_from_file("../data/keyword_vocabulary.txt")
def get_paragraphs_of_report(report_row, add_adjunct_pages=True):
    """Extract candidate paragraphs from a single annual report.

    Pages are selected by keyword hits against the module-level `vocabulary`;
    optionally, the pages directly before/after each hit page are also
    included and flagged via `is_adjunct`.

    Args:
        report_row: one row of the master DataFrame describing the report
            (must provide input_file, orig_report_type, year, output_file).
        add_adjunct_pages: also include the neighbouring page(s) of every
            keyword-hit page.

    Returns:
        list of dicts with keys: page_no, paragraph_no, text, is_adjunct.
    """
    result = []
    # Locate the parsed report file on disk.
    path = os.path.join(DATA_INPUT_PATH, report_row['input_file'])
    folder = os.path.dirname(path)
    parsed_report_file_path = os.path.join(
        folder,
        report_row['orig_report_type'] + '_' + str(int(report_row['year'])),
        report_row['output_file'])
    # Pages with keyword hits.
    pages = get_counts_per_page(parsed_report_file_path, vocabulary)
    page_indizes = set(pages.index)
    # Add adjunct pages if requested.
    if add_adjunct_pages:
        for p in pages.index:
            # BUG FIX: previously `page_indizes.add(p + 1)` sat inside the
            # `p > 0` branch, so page 0 got no adjunct pages at all. The
            # previous page is added only when it exists; the next page is
            # always added — an out-of-range page is skipped by the
            # IndexError handler below.
            if p > 0:
                page_indizes.add(p - 1)
            page_indizes.add(p + 1)
    # For each selected page, collect all paragraphs.
    for page_no in page_indizes:
        try:
            text = get_text_from_page(parsed_report_file_path, page_no)
            processed_doc = DocumentPreprocessor(text).process()
        except IndexError:
            # page_no lies past the end of the document (adjunct page).
            continue
        paragraphs = processed_doc.split('\n\n')
        for idx, p in enumerate(paragraphs):
            result.append({
                "page_no": page_no,
                "paragraph_no": idx,
                "text": p,
                # Adjunct pages are those added above, i.e. without own hits.
                "is_adjunct": page_no not in pages.index,
            })
        print(f"Page no: {page_no}")
    return result
from tqdm.notebook import trange, tqdm_notebook
# Loop through all reports, extracting paragraphs and persisting progress
# after each report so a crash does not lose earlier work.
for index, row in tqdm_notebook(df.iterrows(), total=df.shape[0]):
    # Skip reports that are not flagged for inference or already processed.
    if not row['should_infer'] or row['is_inferred']:
        continue
    paragraphs = get_paragraphs_of_report(row, add_adjunct_pages=True)
    if len(paragraphs):
        df_report_paragraphs = pd.DataFrame(paragraphs)
        # BUG FIX: this line previously assigned to the undefined name
        # `paragraphs_df`, raising NameError on the first report with
        # paragraphs; the new frame is `df_report_paragraphs`.
        df_report_paragraphs["report_id"] = index
        df_paragraphs = pd.concat([df_paragraphs, df_report_paragraphs], ignore_index=True)
    # Update progress
    df.loc[index, 'is_inferred'] = True
    # Save files
    df.to_csv(MASTER_DATA_PATH)
    df_paragraphs.to_pickle(INFERENCE_PARAGRAPH_PATH, protocol=4)
# -
# TODO: Add inference step here instead of above, i.e. it make dynamic
df_paragraphs["preds_svm_cro"] = clf.predict(df_paragraphs['text']).tolist()
df_paragraphs["preds_prob_svm_cro"] = clf.predict_proba(df_paragraphs['text']).tolist()
# +
# Prepare data/extract prob
df_paragraphs[[ l + "_predicted" for l in label_list]] = pd.DataFrame(df_paragraphs.preds_svm_cro.tolist())
df_paragraphs[[ l + "_prob" for l in label_list]] = pd.DataFrame(df_paragraphs.preds_prob_svm_cro.tolist())
# Merge dataset
df_paragraphs_merged = pd.merge(df_paragraphs, df, how="left", left_on="report_id", right_index=True)
# -
df_paragraphs.groupby("labelling_dataset", dropna=False).count()
# # Temporary: Combine from labels
# +
# Rerun once test is complete
df_labels_training = pd.read_pickle("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Labelling/annual reports/Firm_AnnualReport_Labels_Training_Positive.pkl")
df_labels_training_negative = pd.read_pickle("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Labelling/annual reports/Firm_AnnualReport_Labels_Training_Negative.pkl")
df_labels_test = pd.read_pickle("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Labelling/annual reports/Firm_AnnualReport_Labels_Test_Positive.pkl")
# Set ids
id_columns = ['report_id', 'page', 'paragraph_no']
df_labels_training["id"] = df_labels_training.apply(lambda row: "_".join([str(row[c]) for c in id_columns]), axis=1)
df_labels_training_negative["id"] = df_labels_training_negative.apply(lambda row: "_".join([str(row[c]) for c in id_columns]), axis=1)
df_labels_test["id"] = df_labels_test.apply(lambda row: "_".join([str(row[c]) for c in id_columns]), axis=1)
# Quick check that we do not have overlapping labels
assert len(set(df_labels_training.id).intersection(set(df_labels_training_negative.id))) == 0
assert len(set(df_labels_training.id).intersection(set(df_labels_test.id))) == 0
df_labels = pd.concat([df_labels_training, df_labels_test])
df_cro = pd.crosstab(df_labels.id, df_labels["cro"], dropna=False)
df_cro_sub_type = pd.crosstab(df_labels.id, df_labels["cro_sub_type_combined"], dropna=False)
df_cro = df_cro.add_suffix('_actual')
df_cro_sub_type = df_cro_sub_type.add_suffix('_actual')
df_cro = (df_cro > 0) * 1
df_cro_sub_type = (df_cro_sub_type > 0) * 1
id_columns = ['report_id', 'page_no', 'paragraph_no']
assert len(df_paragraphs_merged) == len(df_paragraphs_merged.groupby(id_columns).count()), "Should only have unique id's, something is not correct!"
id_columns = ['report_id', 'page_no', 'paragraph_no']
df_paragraphs_merged["id"] = df_paragraphs_merged.apply(lambda row: "_".join([str(row[c]) for c in id_columns]), axis=1)
df_paragraphs_merged = df_paragraphs_merged.merge(df_cro, how="left", left_on="id", right_index=True)
df_paragraphs_merged = df_paragraphs_merged.merge(df_cro_sub_type, how="left", left_on="id", right_index=True)
# -
df_paragraphs_merged.to_pickle("/Users/david/Nextcloud/Dokumente/Education/Uni Bern/Master Thesis/Analyzing Financial Climate Disclosures with NLP/Data/stoxx_inference/Firm_AnnualReport_Paragraphs_with_actual.pkl", protocol=4)
# # Inference explorer
# Interactive widget for browsing model inferences per paragraph (project module).
from data.inference_widgets import CroInferenceViewer
# NOTE(review): rebinds to df_paragraphs, discarding the '_actual' columns merged above -- confirm this is intentional.
df_paragraphs_merged = df_paragraphs
# label_list is defined elsewhere in the notebook
viewer = CroInferenceViewer(df_paragraphs_merged, label_list=label_list)
df_paragraphs_merged.columns # cro_sub_type.unique()
# Spot-check a single paragraph by positional index
print(df_paragraphs_merged.iloc[549581])
print(df_paragraphs_merged.iloc[549581].text)
# Paragraphs labelled with the REPUTATION category
df_paragraphs_merged.query("REPUTATION_actual == 1")
|
notebooks/archive/Inference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # `DSML_WS_04` - Introduction to Visualization with Matplotlib and Seaborn
# In this tutorial we will continue with our introduction Python programming for data science. Today we will focus on **visualization**.
#
# We will go through the following:
#
# - **Introduction to visualization with `Matplotlib`**: Learn how you can use `Matplotlib` to produce plots that effectively communicate your data
# - **Advanced visualization in `Seaborn`**: Use `Seaborn` for advanced visualization of complex relationships
# ## `Matplotlib`
# Matplotlib is a multi-platform data visualization library built on NumPy arrays. It was originally designed as a patch to IPython for enabling interactive MATLAB-style plotting from the IPython command line. To those of you with experience in working with MATLAB some of the functionality and styles may therefore seem familiar, although syntax varies.
#
# Matplotlib allows for abundant modification and personalization of graphs and plots which cannot all be covered in this tutorial. However, when working with the library the Matplotlib website can act as a powerful reference and source of inspiration. Please have a look [here](https://matplotlib.org/#installation).
#
# Despite being one of the oldest visualization libraries in Python, Matplotlib is still widely used and valued as a well proven, stable and easy to use tool. New add-ons building on the Matplotlib API and using largely the same code such as Seaborn have emerged which mainly exhibit slicker designs. These tools are for you to be checked out in your own time and are not part of this course.
#
# Today we will show you:
# * How to plot simple graphs
# * How to modify the size, shape and design of your graph
# * The different plot styles
# * How to apply these on real data by working through an exercise
# Just as we use the __np__ shorthand for NumPy and the __pd__ shorthand for Pandas, we will use some standard shorthands for Matplotlib imports:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Plotting interactively within an IPython notebook can be done with the %matplotlib command, and works in a similar way to the IPython shell. In the IPython notebook, you also have the option of embedding graphics directly in the notebook, with two possible options:
# * `%matplotlib notebook` will lead to interactive plots embedded within the notebook
# * `%matplotlib inline` will lead to static images of your plot embedded in the notebook
#
# For this book, we will generally opt for `%matplotlib inline`:
# %matplotlib inline
# ### Plots and Sub Plots
#
# We will illustrate the basic functionality of Matplotlib by plotting two simple exponential functions and a simple linear function (x, x^2 and x^3) as line plots.
#
# #### Creating a single (line) plot
#
# For defining the plot we use `plt.plot()`. The command `plt.show()` then starts an event loop by looking for all currently active figure objects, and opening one or more interactive windows that display your figure(s).
# +
# Common x grid plus a linear, a quadratic and a cubic curve.
x = np.linspace(0, 5, 100)
y1 = x
y2 = x ** 2
y3 = x ** 3
# Draw all three curves on the same axes, then render the figure.
for curve in (y1, y2, y3):
    plt.plot(x, curve)
plt.show()
# -
# #### Annotating and labelling plots
# We can optimize the resulting plot in various ways, e.g.:
#
# * Set a title: `plt.title('name')`
# * Name the axes: `plt.xlabel('name')`, `plt.ylabel('name')`
# * Include a legend: `plt.plot(label="xx")` and `plt.legend()`
# +
# Add labels to the plots (will show up in legend)
plt.plot(x, y1, label="linear")
plt.plot(x, y2, label="quadratic")
plt.plot(x, y3, label="cubic")
# Add diagram and axes titles
plt.xlabel('x', fontsize=16, fontname="Arial")
plt.ylabel('f(x)', fontsize=16)
plt.title('Test diagram', fontsize=20)
# Display legend
# Use the loc argument to choose the legend position (loc=0 means Matplotlib chooses the optimal position independently)
plt.legend(loc="upper left")
# Show plot
plt.show()
# -
# #### Creating multiple sub plots
#
# Sometimes we may wish to see multiple sub-plots next to each other. For this `plt.subplot()`is a useful tool. We will demonstrate with a quick example.
# +
# plt.subplot syntax: (# of rows, # of columns, plot number)
# subplot no. 1
plt.subplot(1,2,1)
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('diagram #1')
plt.plot(x, y2)
# subplot no. 2 -- same curve with the axes swapped, drawn in green ("g")
plt.subplot(1,2,2)
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('diagram #2')
plt.plot(y2, x, "g")
plt.show()
# -
# ### Object-based plotting
#
# Object-based plotting offers better control of your plot, particularly if you are working with multiple objects. While in the above examples the axes where created for us in the background, we have to define them here manually. While this might seem more complicated at first it offers better flexibility. For instance, we can add more than one axis.
#
# For object-oriented Matplotlib plots, we start by creating a figure and an axes. In their simplest form, a figure and axes can be created as follows:
#
# A Figure object is the top-level container for all plot elements
Fig_n = plt.figure(figsize=(10,4))
type(Fig_n)
# +
# Create a figure (empty work space)
Fig_1 = plt.figure(figsize=(10,4))
# Add axes to the figure; [left, bottom, width, height] in figure-relative coordinates
ax_1 = Fig_1.add_axes([0.5, 0.5, 0.51, 0.51])
# 'g-.' = green dash-dot line
ax_1.plot(x, y1, 'g-.')
# Add elements to axis
ax_1.set_xlabel('x') # Use set_ to start method
ax_1.set_ylabel('f(x)')
ax_1.set_title('Diagram title')
plt.show()
# +
# Same construction again, now plotting the quadratic curve
# Create a figure (empty work space)
Fig_1 = plt.figure(figsize=(10,4))
# Add axes to the figure
ax_1 = Fig_1.add_axes([0.5, 0.5, 0.51, 0.51])
# Onto these axes we can now plot and add labels
ax_1.plot(x, y2, 'g-.')
ax_1.set_xlabel('x') # Use set_ to start method
ax_1.set_ylabel('f(x)')
ax_1.set_title('Diagram title')
plt.show()
# -
# As mentioned above, one of the benefits of object-based Matplotlib plots is greater flexibility, e.g. by adding multiple axes.
# +
# empty work space
Fig_2 = plt.figure()
ax_2 = Fig_2.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
ax_3 = Fig_2.add_axes([0.2, 0.5, 0.4, 0.3]) # added axes (inset inside the main axes)
# Large diagram on the main axes
ax_2.plot(x, y2, 'b')
ax_2.set_xlabel('x-axis')
ax_2.set_ylabel('y-axis')
ax_2.set_title('Large Diagram')
# Small inset diagram on the second axes (axes swapped)
ax_3.plot(y2, x, 'r')
ax_3.set_xlabel('x-axis')
ax_3.set_ylabel('y-axis')
ax_3.set_title('Small Diagram')
plt.show()
# -
# We can also use object-based plotting for subplots. Here is an example:
# +
# instead of plt.figure() we use plt.subplots()
Fig_4, axes = plt.subplots(nrows = 1, ncols = 2, figsize= (10,4))
# this will create an array of Axes objects
#print(axes)
plt.show()
# +
# we can iterate through this array to set the axes and plot onto them
for ax in axes:
    ax.plot(x, y3, 'g')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('titel')
# or without loop
#axes[0].plot(x, y3, 'g')
#axes[0].set_xlabel('x')
#axes[0].set_ylabel('y')
#axes[0].set_title('titel')
#axes[1].plot(x, y3, 'g')
#axes[1].set_xlabel('x')
#axes[1].set_ylabel('y')
#axes[1].set_title('titel')
# Display
Fig_4.tight_layout() # avoid overlapping
Fig_4
# -
# __Object-based plotting using plt.subplots() is generally the most flexible and convenient way for producing high-quality plots, even with just one pane!__
# ### Plot appearance
# #### Modifying size and shape
#
# Matplotlib allows us to set aspect ratio, DPI and diagram size. For this we use the `figsize` and `dpi` arguments.
#
# * `figsize` is a Tupel of width and height in inch
# * `dpi` means dots-per-inch
#
# For example:
# +
# figsize is (width, height) in inches; add_axes([0,0,1,1]) spans the full figure
Fig_3 = plt.figure(figsize=(10,4))
ax = Fig_3.add_axes([0,0,1,1])
ax.plot(x,y3,label="cubic")
ax.set_xlabel('x-axis')
ax.set_ylabel('y-axis')
ax.set_title('Large Diagram', fontsize=16, fontname="Arial")
plt.legend(loc=0, fontsize=12)
plt.show()
# -
# #### Defining diagram area
#
# We can set diagram areas and axes with `set_ylim` and `set_xlim`. Alternatively you can use `axis("tight")` to automatically create a fitted axis:
# +
# Three panels comparing axis-limit strategies side by side
fig, axes = plt.subplots(1, 3, figsize=(16, 9))
axes[0].plot(x, y2, x, y3)
axes[0].set_title("Standard area")
axes[1].plot(x, y2, x, y3)
# axis('tight') shrinks the limits to the data range
axes[1].axis('tight')
axes[1].set_title("Fitted area")
axes[2].plot(x, y2, x, y3)
# explicit limits: y in [0, 50], x in [0, 3]
axes[2].set_ylim([0, 50])
axes[2].set_xlim([0, 3])
axes[2].set_title("User defined area")
plt.tight_layout()
plt.show()
# -
# #### Modifying colour and style
# We can manually adjust the appearance of the graph. For instance, we may:
# * Set line colour
# * Set line style
# +
# Add labels to the plots (will show up in legend)
# Additionally add MATLAB-style format strings: colour + marker + line style
plt.plot(x, y1,'b+-',label="x")
plt.plot(x, y2, 'g.--',label="x^2",)
# Add diagram and axes titles
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('Test diagram')
# Display legend
# Use loc function to choose optimal position of legend (loc=0 means Matplotlib chooses optimal position independently)
plt.legend() #loc=0
# Show plot
plt.show()
# -
# A whole range of different design options exist. In the following we will show you a few. Please refer to the Matplotlib repository for a comprehensive overview of design options and styles.
# +
# Gallery of line/marker styles; each call is offset vertically so all are visible
diag, ax = plt.subplots(figsize=(12,6))
# Line width variations
ax.plot(x, x+2, color="red", linewidth=0.25)
ax.plot(x, x+4, color="red", linewidth=0.50)
ax.plot(x, x+6, color="red", linewidth=1.00)
ax.plot(x, x+8, color="red", linewidth=2.00)
# User-defined line styles: '-', '--', '-.', ':', 'steps'
ax.plot(x, x+10, color="green", lw=3, linestyle='-')
ax.plot(x, x+12, color="green", lw=3, ls='-.')
ax.plot(x, x+14, color="green", lw=3, ls=':')
# Possible markers: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x+16, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x+18, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x+20, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x+22, color="blue", lw=3, ls='--', marker='1')
# Marker size and colour
ax.plot(x, x+24, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+26, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+28, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+30, color="purple", lw=1, ls='-', marker='+', markersize=8,
        markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green")
#plt.savefig("Test.pdf")
plt.show()
# -
# ### Other plot types
#
# In addition to normal `plot` methods there are further arguments to produce various types of diagrams. A holistic overview of diagram types can be viewed on the official Matplotlib website at: http://matplotlib.org/gallery.html
# +
# scatter plots:
# NOTE: this rebinds the module-level x (previously the linspace grid).
x = np.random.randn(100)
y = 0.5 * x ** 2 + 0.3 * np.random.randn(100)
# Figure with one full-size axes, scatter the noisy parabola onto it.
Scatter = plt.figure()
axscat = Scatter.add_axes([0.1, 0.1, 0.8, 0.8])
axscat.scatter(x, y)
axscat.axis("tight")
plt.show()
# +
# histogram plots:
from random import sample
# 500 distinct draws from 1..999 (sample() picks without replacement)
data = sample(range(1, 1000), 500)
plt.hist(data, bins=20, edgecolor='k')
plt.show()
# -
#
# ### <font color='green'> Exercise - Visualizing `iris.csv` </font>
# ### *Task 1:*
#
# Read in the `iris.csv` dataset. Handle missing data. Create a simple scatter plot on the "Sepal.Width" feature. Annotate your graph and size it appropriately.
#
#
# +
# YOUR CODE HERE. IF YOU ARE SEEING THE SAME AS BELOW, YOU CAN GO ON!
#import pandas
import pandas as pd
# read data (it is in the same directory, so no need to specify the file path)
iris = pd.read_csv("iris.csv")
# fill missing values by backward fill
# (fillna(method="bfill") is deprecated since pandas 2.1 -- DataFrame.bfill is the replacement)
iris = iris.bfill()
iris.info()
# +
# STEP 1 - DO NOT RUN THIS CELL, OTHERWISE THE OUTPUT IS LOST!
# +
# YOUR CODE HERE. IF YOU ARE SEEING THE SAME AS BELOW, YOU CAN GO ON!
# first let's create a figure object
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,6))
# define plot
# add axis and title labels
# save figure and show
# -
# ### *Task 2:*
#
# You may have noticed some striking patterns emerging from this first plot. So let's dig deeper. Plot Petal.Width vs. Petal.Length on a scatter plot. Colour-code the different species. Hint: Run `plt.scatter()` commands for each species. Use methods you know to differentiate between the species.
# print each species once to see the distinct categories
for species in list(iris["Species"].unique()):
    print(species)
iris["Species"].unique()
# +
# YOUR CODE HERE
#define species list and corresponding colors (one colour per species)
species = list(iris["Species"].unique())
colors = ["r","g","b"]
#create fig object
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,6))
#iterate over species, plot annotate
# +
# DO NOT RUN THIS CELL, OTHERWISE THE OUTPUT IS LOST!
# +
# and much easier with seaborn... (more details on seaborn later!)
import seaborn as sns
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize= (10,6))
# use the seaborn hue statement to differentiate the different species
sns.scatterplot(ax=ax, x="Petal.Length", y="Petal.Width",
                data = iris, hue="Species", palette=colors)
ax.set_xlabel("Petal Length")
ax.set_ylabel("Petal Width")
ax.set_title("Petal length per iris flower", fontsize=14)
ax.legend(fontsize=12)
plt.show()
# -
# ### *Task 3:*
#
# Finally let us plot a histogram, a common plot-type in data science, which shows the number of occurrences of a given observation in the sample. Use Petal.Width for illustration purposes.
#
# Hint: Use `histtype='barstacked'` for this plot type. To plot the data, create a list of petal widths for each species which you append into one list and pass as `x` to `plt.hist()`.
# +
# YOUR CODE HERE (in your own time)
# -
# ## `Seaborn`
# Seaborn is a library for making statistical graphics in Python. It is built on top of `Matplotlib` and closely integrated with `Pandas` data structures. It provides a high-level interface for drawing attractive and informative statistical graphics. As such it is often easier to code than pure matplotlib graphs and often results in visually more appealing graphics. Plotting options are very comprehensive. An overview of plots types and styles can be found [here](https://seaborn.pydata.org/introduction.html). In this short introduction we will briefly introduce:
# - Categorical plots (barplots, countplots, boxplots, violin plots, etc. )
# - Grid plots
# - Distibution plots
# ### Categorical plots
import seaborn as sns
# We import a new dataset, which is built into seaborn
tips = sns.load_dataset("tips")
tips.head()
# #### Barplots & Countplots
# +
# Two panels sharing the y axis: mean bill by gender, and customer counts
f, axes = plt.subplots(1, 2, sharey=True, figsize=(10, 4))
#barplots (bar height = mean total_bill per group)
sns.barplot(x="sex",y="total_bill",data=tips, ax=axes[0],palette="magma")
#countplots (bar height = number of rows per group)
sns.countplot(x="sex",data=tips, ax=axes[1],palette="OrRd")
plt.show()
# -
# #### Box and Whisker plots
#
# Boxplots graphically depict groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.
# Box plot of total bill per day
sns.boxplot(x="day",y="total_bill",data=tips,palette="mako")
plt.show()
# The `hue` statement allows for categorical splitting of the data - A useful technique
sns.boxplot(x="day",y="total_bill",data=tips,palette="rainbow", hue="sex")
plt.show()
# #### Violin plots
#
# Violin plots combine a box plot with a kernel density estimate, showing the full distribution of each group rather than only its quartiles.
sns.violinplot(x="day", y="total_bill", data=tips, palette="rainbow")
plt.show()
sns.violinplot(x="day", y="total_bill", data=tips, hue="sex",palette="magma")
plt.show()
# split=True draws the two hue levels as the two halves of one violin
sns.violinplot(x="day", y="total_bill", data=tips,hue='sex',
               split=True,palette="rainbow")
plt.show()
# ### Distribution plots
# #### Histograms
# kde=True overlays a kernel density estimate on the histogram
sns.histplot(tips["total_bill"], bins=30, kde=True)
plt.show()
# #### Jointplots (multiple distributions)
#
# Jointplots are excellent tools for plotting bivariate relationships
# kind="reg" adds a linear regression fit to the scatter
sns.jointplot(x="total_bill",y="tip",data=tips,
              kind="reg")
plt.show()
# ### <font color='green'> **Exercise**: Play around with the `kind=` parameter on a `seaborn.jointplot` and see for yourself:</font>
# +
# YOUR CODE HERE
# -
# ### Grid plots
#
# Grid plots allow for the representation of multiple relationships along rows and columns. This is a great tool for uncovering hidden relationships in your data.
# #### Pairplots
tips.info()
# pairwise scatter plots + per-variable distributions, coloured by meal time
sns.pairplot(tips, palette="magma", height=3, hue="time")
plt.show()
# ### <font color='green'> **Exercise**: Repeat some of the above `seaborn` visualizations for the iris dataset for practice. Which new relationships do you uncover? </font>
# +
# YOUR CODE HERE
# -
# ---
|
03_Workshops/DSML_WS_04_Visualization/DSML_WS_04_Visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Telco Customer Churn: Focused customer retention programs
#
# - kaggle: https://www.kaggle.com/blastchar/telco-customer-churn
# - kaggle api: `kaggle datasets download -d blastchar/telco-customer-churn`
# - credit: https://www.kaggle.com/bandiatindra/telecom-churn-prediction
#
# + [markdown] _uuid="3544313683eb7fc71db62dea38a54a3482efec26"
# # EDA and Prediction
#
# + [markdown] _uuid="db15e9d14568dbe9d4091a9efe0dd1fd946b137a"
# Churn is one of the biggest problems in the telecom industry. Research has shown that the average monthly churn rate among the top 4 wireless carriers in the US is 1.9% - 2%.
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # For creating plots
import matplotlib.ticker as mtick # For specifying the axes tick format
import matplotlib.pyplot as plt
sns.set(style="white")
# + [markdown] _uuid="72e4750b2861fe4d89197b4c73ad861544701c4f"
# **Let us read the data file in the python notebook**
#
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
telecom_cust = pd.read_csv("https://github.com/prasertcbs/basic-dataset/raw/master/Telco-Customer-Churn.csv")
# + _uuid="9dc64474af58b8114afb60f83cd1a002722ba74c"
telecom_cust.head()
# + _uuid="d814e5db6f76a4e90b8496f09d0ba340d1eae808"
telecom_cust.columns.values
# + [markdown] _uuid="16fb2729865787705742df465100419d7988837c"
# **Let's explore the data to see if there are any missing values.**
#
# + _uuid="641531c5f3131228c78e6a200e0410a161ccb2b0"
# Checking the data types of all the columns
telecom_cust.dtypes
# + _uuid="1d2c01029124a6fe73b3ec3fc4efdb66647d64e1"
# Converting Total Charges to a numerical data type.
# errors="coerce" turns non-numeric entries (e.g. blank strings) into NaN
telecom_cust.TotalCharges = pd.to_numeric(telecom_cust.TotalCharges, errors="coerce")
telecom_cust.isnull().sum()
# + [markdown] _uuid="4c997dec9dbb501333f6a1562e9092e1008df58f"
# After looking at the above output, we can say that there are 11 missing values for Total Charges. Let us remove these 11 rows from our data set
#
# + _uuid="35b6c18a5b84dd1e5fa014b49fcfce8ee43aaabe"
# Removing missing values
telecom_cust.dropna(inplace=True)
# Remove customer IDs from the data set; explicit copy so we never mutate a
# slice of telecom_cust (avoids SettingWithCopyWarning and the chained-assignment
# behaviour deprecated in pandas >= 2.1)
df2 = telecom_cust.iloc[:, 1:].copy()
# Converting the predictor variable into a binary numeric variable
# (one non-inplace replace instead of two chained inplace calls on a column view)
df2["Churn"] = df2["Churn"].replace({"Yes": 1, "No": 0})
# Let's convert all the categorical variables into dummy variables
df_dummies = pd.get_dummies(df2)
df_dummies.head()
# + _uuid="57fad0b9bcd9188193c84ae48ea589123532eac8"
# Get Correlation of "Churn" with other variables:
plt.figure(figsize=(15, 8))
# correlation of every dummy column with the (now numeric) Churn target
df_dummies.corr()["Churn"].sort_values(ascending=False).plot(kind="bar")
# + [markdown] _uuid="e4f114358ce3d568a34c1ac419623274a345812e"
# Month to month contracts, absence of online security and tech support seem to be positively correlated with churn. While, tenure, two year contracts seem to be negatively correlated with churn.
#
# Interestingly, services such as Online security, streaming TV, online backup, tech support, etc. without internet connection seem to be negatively related to churn.
#
# We will explore the patterns for the above correlations below before we delve into modelling and identifying the important variables.
#
# + [markdown] _uuid="36df8a7ca36ca8afe570cc985582ef12bc1bc7f8"
# ## Data Exploration
#
# Let us first start with exploring our data set, to better understand the patterns in the data and potentially form some hypothesis. First we will look at the distribution of individual variables and then slice and dice our data for any interesting trends.
#
# + [markdown] _uuid="3755c7c8080f09fc745f8db11d9205102de6780e"
# **A.)** **_Demographics_** - Let us first understand the gender, age range, patner and dependent status of the customers
#
# + [markdown] _uuid="e98209bca3edf4f3ea0d2bdea092b152f09f6bbb"
# 1. **Gender Distribution** - About half of the customers in our data set are male while the other half are female
#
# + _uuid="d507f49a7e96b7618b57e0812eed8847543b1c64"
colors = ["#4D3425", "#E4512B"]
# Bars show each gender's share of customers (values already in percent)
ax = (telecom_cust["gender"].value_counts() * 100.0 / len(telecom_cust)).plot(
    kind="bar", stacked=True, rot=0, color=colors
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_ylabel("% Customers")
ax.set_xlabel("Gender")
ax.set_ylabel("% Customers")
ax.set_title("Gender Distribution")
# create a list to collect the plt.patches data
totals = []
# find the values and append to list
# NOTE(review): for a vertical bar chart get_width() is the bar *width* (~0.5),
# not the value; the labels only come out right because the widths happen to
# sum to ~1.0 -- confirm/clean up.
for i in ax.patches:
    totals.append(i.get_width())
# set individual bar labels using above list
total = sum(totals)
for i in ax.patches:
    # get_width pulls left or right; get_y pushes up or down
    ax.text(
        i.get_x() + 0.15,
        i.get_height() - 3.5,
        str(round((i.get_height() / total), 1)) + "%",
        fontsize=12,
        color="white",
        weight="bold",
    )
# + [markdown] _uuid="b9cceb14a980ddcd1ac36552947f1571706e0c17"
# 2. **% Senior Citizens** - There are only 16% of the customers who are senior citizens. Thus most of our customers in the data are younger people.
#
# + _uuid="b552f24e928a6e41806ddfa10f49c331988e60e3"
# Pie chart: share of senior citizens among all customers
ax = (
    telecom_cust["SeniorCitizen"].value_counts() * 100.0 / len(telecom_cust)
).plot.pie(autopct="%.1f%%", labels=["No", "Yes"], figsize=(5, 5), fontsize=12)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_ylabel("Senior Citizens", fontsize=12)
ax.set_title("% of Senior Citizens", fontsize=12)
# + [markdown] _uuid="ae9ffa571dcf10f3cb0d8dc56e827cdf25fcf4e5"
# 3. **Partner and dependent status** - About 50% of the customers have a partner, while only 30% of the total customers have dependents.
#
# + _uuid="6127f191c43a00f8f0bc865c327ce0632e2eeb3e"
# Share of customers with dependents / with partners (melt to long form first)
df2 = pd.melt(
    telecom_cust, id_vars=["customerID"], value_vars=["Dependents", "Partner"]
)
# count Yes/No per variable, then convert counts to percentages
df3 = df2.groupby(["variable", "value"]).count().unstack()
df3 = df3 * 100 / len(telecom_cust)
colors = ["#4D3425", "#E4512B"]
ax = df3.loc[:, "customerID"].plot.bar(
    stacked=True, color=colors, figsize=(8, 6), rot=0, width=0.2
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_ylabel("% Customers", size=14)
ax.set_xlabel("")
ax.set_title("% Customers with dependents and partners", size=14)
ax.legend(loc="center", prop={"size": 14})
# annotate each stacked segment with its percentage
for p in ax.patches:
    width, height = p.get_width(), p.get_height()
    x, y = p.get_xy()
    ax.annotate(
        "{:.0f}%".format(height),
        (p.get_x() + 0.25 * width, p.get_y() + 0.4 * height),
        color="white",
        weight="bold",
        size=14,
    )
# + [markdown] _uuid="ef1d5a99fb546b501b2cb5b35d5fb437303b4c20"
# **What would be interesting is to look at the % of customers, who have partners, also have dependents. We will explore this next. **
#
# + [markdown] _uuid="56ee88a7c14911a3cd79c3b885ac738377baf7aa"
# Interestingly, among the customers who have a partner, only about half of them also have a dependent, while the other half do not have any dependents.
# Additionally, as expected, among the customers who do not have any partner, a majority (80%) of them do not have any dependents.
#
# + _uuid="f79c281d0cfab3cc8f14979d33834f8834ef9d2e"
colors = ["#4D3425", "#E4512B"]
# Dependents split within each partner status; rows normalised to 100%
partner_dependents = telecom_cust.groupby(["Partner", "Dependents"]).size().unstack()
ax = (partner_dependents.T * 100.0 / partner_dependents.T.sum()).T.plot(
    kind="bar", width=0.2, stacked=True, rot=0, figsize=(8, 6), color=colors
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.legend(loc="center", prop={"size": 14}, title="Dependents", fontsize=14)
ax.set_ylabel("% Customers", size=14)
ax.set_title(
    "% Customers with/without dependents based on whether they have a partner", size=14
)
ax.xaxis.label.set_size(14)
# Code to add the data labels on the stacked bar chart
for p in ax.patches:
    width, height = p.get_width(), p.get_height()
    x, y = p.get_xy()
    ax.annotate(
        "{:.0f}%".format(height),
        (p.get_x() + 0.25 * width, p.get_y() + 0.4 * height),
        color="white",
        weight="bold",
        size=14,
    )
# + [markdown] _uuid="3da3b87d7066955df9803b71931c5d5f7f5af1bb"
# I also looked at any differences between the % of customers with/without dependents and partners by gender. There is no difference in their distribution by gender. Additionally, there is no difference in senior citizen status by gender.
#
# + [markdown] _uuid="479b11772e5d57dbf5a812abdd07a344e876c935"
# ### B.) **Customer Account Information**: Let us now look at the tenure and contract
#
# + [markdown] _uuid="d3090f6fd85039f6c67875eadf51d59b0d3c27a8"
# **1. Tenure:** After looking at the below histogram we can see that a lot of customers have been with the telecom company for just a month, while quite a many are there for about 72 months. This could be potentially because different customers have different contracts. Thus based on the contract they are into it could be more/less easier for the customers to stay/leave the telecom company.
#
# + _uuid="2b1dca3917424690fe006b38b03b46ebf1e751d4"
# Histogram of customer tenure.
# sns.distplot was deprecated in seaborn 0.11 and removed in 0.14 -- histplot
# draws the same count histogram (no KDE by default).
ax = sns.histplot(
    telecom_cust["tenure"],
    bins=int(180 / 5),
    color="darkblue",
    edgecolor="black",
)
ax.set_ylabel("# of Customers")
ax.set_xlabel("Tenure (months)")
ax.set_title("# of Customers by their tenure")
# + [markdown] _uuid="ce5fccc67da0ea51ed1bf9a3451d9aeedfb9cd77"
# **2. Contracts:** To understand the above graph, lets first look at the # of customers by different contracts.
#
# + _uuid="b4589d9d041360eb2c1f33bafb61631fa3b1a29e"
# Number of customers per contract type
ax = telecom_cust["Contract"].value_counts().plot(kind="bar", rot=0, width=0.3)
ax.set_ylabel("# of Customers")
ax.set_title("# of Customers by Contract Type")
# + [markdown] _uuid="0acd43e2b09988ba4d5c0de03cfab1c8a7d684a0"
# As we can see from this graph most of the customers are in the month to month contract. While there are equal number of customers in the 1 year and 2 year contracts.
#
# + [markdown] _uuid="cc6cac55a5511342b33fed0feaf3b9624788fd5a"
# Below we will understand the tenure of customers based on their contract type.
#
# + _uuid="1a616c81b2d4153d3470010a90f03fa25767a7ed"
# Tenure histograms split by contract type, sharing one y axis.
# sns.distplot was deprecated in seaborn 0.11 and removed in 0.14 -- use histplot.
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, sharey=True, figsize=(20, 6))
ax = sns.histplot(
    telecom_cust[telecom_cust["Contract"] == "Month-to-month"]["tenure"],
    bins=int(180 / 5),
    color="turquoise",
    edgecolor="black",
    ax=ax1,
)
ax.set_ylabel("# of Customers")
ax.set_xlabel("Tenure (months)")
ax.set_title("Month to Month Contract")
ax = sns.histplot(
    telecom_cust[telecom_cust["Contract"] == "One year"]["tenure"],
    bins=int(180 / 5),
    color="steelblue",
    edgecolor="black",
    ax=ax2,
)
ax.set_xlabel("Tenure (months)", size=14)
ax.set_title("One Year Contract", size=14)
ax = sns.histplot(
    telecom_cust[telecom_cust["Contract"] == "Two year"]["tenure"],
    bins=int(180 / 5),
    color="darkblue",
    edgecolor="black",
    ax=ax3,
)
ax.set_xlabel("Tenure (months)")
ax.set_title("Two Year Contract")
# + [markdown] _uuid="ee2411d340eb9c03cd45cbaafd391bf60f61653c"
# Interestingly most of the monthly contracts last for 1-2 months, while the 2 year contracts tend to last for about 70 months. This shows that the customers taking a longer contract are more loyal to the company and tend to stay with it for a longer period of time.
#
# This is also what we saw in the earlier chart on correlation with the churn rate.
#
# + [markdown] _uuid="7f912f0a732097de7c35920ea25ccf14cd05cc13"
# ### C. Let us now look at the distribution of various services used by customers
#
# + _uuid="961cdb51ddd3ccb110555b359394d823c33df66b"
telecom_cust.columns.values
# + _uuid="8503e680b82a2492bfed8f29dd2d13dafb7ed661"
# Service columns to visualise, one bar chart each.
services = [
    "PhoneService",
    "MultipleLines",
    "InternetService",
    "OnlineSecurity",
    "OnlineBackup",
    "DeviceProtection",
    "TechSupport",
    "StreamingTV",
    "StreamingMovies",
]
# Fill the 3x3 grid column by column (i = 0..2 -> col 0, 3..5 -> col 1, 6..8 -> col 2);
# axes[i % 3, i // 3] replaces the original if/elif chain over i.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(15, 12))
for i, item in enumerate(services):
    ax = telecom_cust[item].value_counts().plot(
        kind="bar", ax=axes[i % 3, i // 3], rot=0
    )
    ax.set_title(item)
# + [markdown] _uuid="fec8539f6336ca456276f67b6ffd72b8ceb3790e"
# ### D.) Now let's take a quick look at the relation between monthly and total charges
#
# + [markdown] _uuid="933ff48d2ebc4c4828a2c998b6f0428767475161"
# We will observe that the total charges increases as the monthly bill for a customer increases.
#
# + _uuid="4601f238e8fe13cbb815be90718086928a54041b"
# Scatter of monthly vs. total charges per customer
telecom_cust[["MonthlyCharges", "TotalCharges"]].plot.scatter(
    x="MonthlyCharges", y="TotalCharges"
)
# + [markdown] _uuid="ad813734756bd9cf1d298aee1d4063beef47738f"
# ### E.) Finally, let's take a look at out predictor variable (Churn) and understand its interaction with other important variables as was found out in the correlation plot.
#
# + [markdown] _uuid="67dea305467069a89007dd58a5906195ea0627d0"
# 1. Lets first look at the churn rate in our data
#
# + _uuid="4bef28080b360cbad4944abb2d4f75a440c525e9"
colors = ["#4D3425", "#E4512B"]
# Churn distribution as a percentage of all customers
ax = (telecom_cust["Churn"].value_counts() * 100.0 / len(telecom_cust)).plot(
    kind="bar", stacked=True, rot=0, color=colors, figsize=(8, 6)
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.set_ylabel("% Customers", size=14)
ax.set_xlabel("Churn", size=14)
ax.set_title("Churn Rate", size=14)
# create a list to collect the plt.patches data
totals = []
# find the values and append to list
# NOTE(review): get_width() is the bar width, not the value -- the labels only
# work because the widths sum to ~1.0 (same pattern as the gender chart).
for i in ax.patches:
    totals.append(i.get_width())
# set individual bar labels using above list
total = sum(totals)
for i in ax.patches:
    # get_width pulls left or right; get_y pushes up or down
    ax.text(
        i.get_x() + 0.15,
        i.get_height() - 4.0,
        str(round((i.get_height() / total), 1)) + "%",
        fontsize=12,
        color="white",
        weight="bold",
        # size=14,
    )
# + [markdown] _uuid="432a25bf5cd664b021617844ba945fce5c6f341b"
# In our data, 74% of the customers do not churn. Clearly the data is skewed as we would expect a large majority of the customers to not churn. This is important to keep in mind for our modelling as skewness could lead to a lot of false negatives. We will see in the modelling section how to avoid skewness in the data.
#
# + [markdown] _uuid="a9a182eb5bf7832d84072519b01f4ebc741da8a9"
# 2. Lets now explore the churn rate by tenure, seniority, contract type, monthly charges and total charges to see how it varies by these variables.
#
# + [markdown] _uuid="d6be674fd79a62315fe71699fb1164f31ed1446a"
# **i.) Churn vs Tenure**: As we can see form the below plot, the customers who do not churn, they tend to stay for a longer tenure with the telecom company.
#
# + _uuid="e7643a6a6dac23d668aa512b3a922d68ed91abc4"
sns.boxplot(x=telecom_cust.Churn, y=telecom_cust.tenure)
# + [markdown] _uuid="6ef23d7c4fddda89f7ec7c9345851ccd4bcb5089"
# **ii.) Churn by Contract Type**: Similar to what we saw in the correlation plot, the customers who have a month to month contract have a very high churn rate.
#
# + _uuid="7ddbc6ac84fe48dcea91ce2a60a6103e2a10ad9f"
colors = ["#4D3425", "#E4512B"]
# Churn split within each contract type; rows normalised to 100%
contract_churn = telecom_cust.groupby(["Contract", "Churn"]).size().unstack()
ax = (contract_churn.T * 100.0 / contract_churn.T.sum()).T.plot(
    kind="bar", width=0.3, stacked=True, rot=0, figsize=(10, 6), color=colors
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.legend(loc="best", prop={"size": 14}, title="Churn")
ax.set_ylabel("% Customers", size=14)
ax.set_title("Churn by Contract Type", size=14)
# Code to add the data labels on the stacked bar chart
for p in ax.patches:
    width, height = p.get_width(), p.get_height()
    x, y = p.get_xy()
    ax.annotate(
        "{:.0f}%".format(height),
        (p.get_x() + 0.25 * width, p.get_y() + 0.4 * height),
        color="white",
        weight="bold",
        size=14,
    )
# + [markdown] _uuid="d2975c96bb5c2ea1bcdf8c718aef1f3e8226a64c"
# **iii.) Churn by Seniority**: Senior Citizens have almost double the churn rate than younger population.
#
# + _uuid="169bc4d753a4bb1368ffe02768481e35366d75e1"
colors = ["#4D3425", "#E4512B"]
# Churn split within each seniority level; rows normalised to 100%
seniority_churn = telecom_cust.groupby(["SeniorCitizen", "Churn"]).size().unstack()
ax = (seniority_churn.T * 100.0 / seniority_churn.T.sum()).T.plot(
    kind="bar", width=0.2, stacked=True, rot=0, figsize=(8, 6), color=colors
)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.legend(loc="center", prop={"size": 14}, title="Churn")
ax.set_ylabel("% Customers")
ax.set_title("Churn by Seniority Level", size=14)
# Code to add the data labels on the stacked bar chart
for p in ax.patches:
    width, height = p.get_width(), p.get_height()
    x, y = p.get_xy()
    ax.annotate(
        "{:.0f}%".format(height),
        (p.get_x() + 0.25 * width, p.get_y() + 0.4 * height),
        color="white",
        weight="bold",
        size=14,
    )
# + [markdown] _uuid="bd0851a067af76d101f72fc4eb2f95bea547eda3"
# **iv.) Churn by Monthly Charges**: Higher % of customers churn when the monthly charges are high.
#
# + _uuid="fa0b5fc6c8ee5f284c0f8515c297d6765cc2b807"
# Monthly-charges density split by churn.
# kdeplot's `shade` keyword was deprecated in seaborn 0.11 and removed in
# 0.14 -- `fill` is the replacement.
ax = sns.kdeplot(
    telecom_cust.MonthlyCharges[(telecom_cust["Churn"] == "No")],
    color="Red",
    fill=True,
)
ax = sns.kdeplot(
    telecom_cust.MonthlyCharges[(telecom_cust["Churn"] == "Yes")],
    ax=ax,
    color="Blue",
    fill=True,
)
ax.legend(["Not Churn", "Churn"], loc="upper right")
ax.set_ylabel("Density")
ax.set_xlabel("Monthly Charges")
ax.set_title("Distribution of monthly charges by churn")
# + [markdown] _uuid="f6cc05bc4205538c60a69ad00ce742118a21786d"
# **v.) Churn by Total Charges**: It seems that there is higher churn when the total charges are lower.
#
# + _uuid="73b499447ad1d100aae396cb5b7d16533f153219"
# Total-charges density split by churn.
# `shade` was deprecated in seaborn 0.11 and removed in 0.14 -- use `fill`.
ax = sns.kdeplot(
    telecom_cust.TotalCharges[(telecom_cust["Churn"] == "No")], color="Red", fill=True
)
ax = sns.kdeplot(
    telecom_cust.TotalCharges[(telecom_cust["Churn"] == "Yes")],
    ax=ax,
    color="Blue",
    fill=True,
)
ax.legend(["Not Churn", "Churn"], loc="upper right")
ax.set_ylabel("Density")
ax.set_xlabel("Total Charges")
ax.set_title("Distribution of total charges by churn")
# + [markdown] _uuid="a253e14489981fb89ac3a36849432549f442695d"
# ## After going through the above EDA we will develop some predictive models and compare them.
#
# We will develop Logistic Regression, Random Forest, SVM, ADA Boost and XG Boost
#
# + [markdown] _uuid="083086278a1073887b282a50a6399b1985224c1f"
# **1. Logistic Regression**
#
# + _uuid="7d13d788abc4ee9268338ad0539e6f8098ae9fd4"
# We will use the data frame where we had created dummy variables
y = df_dummies["Churn"].values
X = df_dummies.drop(columns=["Churn"])
# Scaling all the variables to a range of 0 to 1
from sklearn.preprocessing import MinMaxScaler
features = X.columns.values
scaler = MinMaxScaler(feature_range=(0, 1))
# fit then transform; column names are re-attached afterwards because
# scaler.transform returns a plain ndarray
scaler.fit(X)
X = pd.DataFrame(scaler.transform(X))
X.columns = features
# + [markdown] _uuid="84cd7e26d0c3e46555b53fadefd203b4adaa0da9"
# It is important to scale the variables in logistic regression so that all of them are within a range of 0 to 1. This helped me improve the accuracy from 79.7% to 80.7%. Further, you will notice below that the importance of variables is also aligned with what we are seeing in Random Forest algorithm and the EDA we conducted above.
#
# + _uuid="32367729f8ea962bb73cae8b88b6310dd811b6de"
# Create Train & Test Data: hold out 30% of the rows for evaluation.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=101
)
# + _uuid="998977bc5997ee4f9b5c45f3997523339ddc3943"
# Running logistic regression model on the scaled features.
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
result = model.fit(X_train, y_train)
# + _uuid="f7f3d7b6cad7e9754fc7480e004269000363ec29"
from sklearn import metrics
prediction_test = model.predict(X_test)
# Print the prediction accuracy on the held-out split.
print(metrics.accuracy_score(y_test, prediction_test))
# + _uuid="ea39ca61c3af331a45ff320add59e1aefab7da65"
# To get the weights of all the variables: logistic-regression
# coefficients indexed by feature name.
weights = pd.Series(model.coef_[0], index=X.columns.values)
# Ten most positive coefficients (features that raise churn odds).
# (The original wrapped the .plot(...) calls in print(), which only
# printed the Axes object's repr — dropped here.)
weights.sort_values(ascending=False)[:10].plot(kind="bar")
# + _uuid="6aef934353dce495ea49e3ba48fe9ef29e7a53f0"
# Ten most negative coefficients (features that lower churn odds).
weights.sort_values(ascending=False)[-10:].plot(kind="bar")
# + [markdown] _uuid="185b83162c70e65203bdc387d036f7d24ae8c129"
# **Observations**
#
# We can see that some variables have a negative relation to our predicted variable (Churn), while some have positive relation. Negative relation means that likeliness of churn decreases with that variable. Let us summarize some of the interesting features below:
#
# - As we saw in our EDA, having a 2 month contract reduces chances of churn. 2 month contract along with tenure have the most negative relation with Churn as predicted by logistic regressions
# - Having DSL internet service also reduces the probability of Churn
# - Lastly, total charges, monthly contracts, fibre optic internet services and seniority can lead to higher churn rates. This is interesting because although fibre optic services are faster, customers are likely to churn because of it. I think we need to explore more to better understand why this is happening.
#
# Any hypothesis on the above would be really helpful!
#
# + [markdown] _uuid="d8ee107a6c58397a1f6f073032cde2c20c76fc15"
# **2. Random Forest**
#
# + _uuid="352d2332ff650ec1e22521c46790629295bcb1c7"
from sklearn.ensemble import RandomForestClassifier
# Fresh 80/20 split for the random-forest model.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=101
)
model_rf = RandomForestClassifier(
    n_estimators=1000,
    oob_score=True,
    n_jobs=-1,
    random_state=50,
    # "auto" was deprecated and then removed in scikit-learn 1.3;
    # "sqrt" is the identical behavior for classifiers
    # (sqrt(n_features) candidate features per split).
    max_features="sqrt",
    max_leaf_nodes=30,
)
model_rf.fit(X_train, y_train)
# Make predictions and report held-out accuracy.
prediction_test = model_rf.predict(X_test)
print(metrics.accuracy_score(y_test, prediction_test))
# + _uuid="ad587d99c32b5998fae79beaf9b508c4345e8ad2"
# Plot the ten most important features by impurity-based importance.
importances = model_rf.feature_importances_
weights = pd.Series(importances, index=X.columns.values)
weights.sort_values()[-10:].plot(kind="barh")
# + [markdown] _uuid="8dc6a21dd39575a473ca7d7e8575bd192a313027"
# **Observations:**
#
# - From random forest algorithm, monthly contract, tenure and total charges are the most important predictor variables to predict churn.
# - The results from random forest are very similar to that of the logistic regression and in line to what we had expected from our EDA
#
# + [markdown] _uuid="d66067184c40bb775576854fcd1d2d00d1e2ada5"
# **3. Support Vector Machine (SVM)**
#
# + _uuid="2efb7cabe5d6c1ec276becf6d036f8eea3b8d492"
# Fresh 80/20 split for the SVM.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=99
)
# + _uuid="9bd8fcf307bdf2092e6b2e2cd3dfc12e82400e19"
from sklearn.svm import SVC
# Use a plain variable: the original wrote `model.svm = SVC(...)`,
# which bolted an attribute onto the logistic-regression object
# instead of naming a new model.
model_svm = SVC(kernel="linear")
model_svm.fit(X_train, y_train)
preds = model_svm.predict(X_test)
# Bare expression: the notebook displays the accuracy as cell output.
metrics.accuracy_score(y_test, preds)
# + _uuid="231b49ac18e47fa50c817b4ee1915af17c9d3cd2"
# Create the Confusion matrix for the SVM predictions.
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, preds))
# + [markdown] _uuid="126ddb51cd0b24313451a303d7685c6f68335823"
# With SVM I was able to increase the accuracy to up to 82%. However, we need to take a deeper look at the true positive and true negative rates, including the Area Under the Curve (AUC) for a better prediction. I will explore this soon. Stay Tuned!
#
# + _uuid="9cafbb06245f6162da782bf0aa99faa3f6f9029d"
# Churn counts by gender.
# NOTE(review): the original passed an `estimator` lambda, but
# `kind="count"` plots raw counts and does not take an estimator
# (newer seaborn raises a TypeError for it), so it is dropped here.
ax1 = sns.catplot(
    x="gender",
    kind="count",
    hue="Churn",
    data=telecom_cust,
)
# ax1.yaxis.set_major_formatter(mtick.PercentFormatter())
# + [markdown] _uuid="46dcb2e6e42d4445cb046632fc70be3ecf7b4380"
# **4. ADA Boost**
#
# + _uuid="92f12399305244a20814918747d8cc2cf69f2937"
# AdaBoost Algorithm on the same 80/20 split used for the SVM above.
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
# n_estimators = 50 (default value)
# base_estimator = DecisionTreeClassifier (default value, depth-1 stumps)
model.fit(X_train, y_train)
preds = model.predict(X_test)
# Bare expression: the notebook displays the accuracy as cell output.
metrics.accuracy_score(y_test, preds)
# + [markdown] _uuid="31346f118c08fced007e555f3a7ad5ad9605a7ff"
# **5. XG Boost**
#
# + _uuid="066ba20180ea27da8c3cea15ce50aaeac9fdd3bd"
# Gradient-boosted trees via XGBoost, with default hyper-parameters.
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(X_train, y_train)
preds = model.predict(X_test)
# Bare expression: the notebook displays the accuracy as cell output.
metrics.accuracy_score(y_test, preds)
# + [markdown] _uuid="1c83a1110098ada08a1906c3e94640041233d7ec"
# Interestingly with XG Boost I was able to increase the accuracy on test data to almost 83%. Clearly, XG Boost is a winner among all other techniques. XG Boost is a slow learning model and is based on the concept of Boosting
#
# -
# Introspection helpers: dump the XGBClassifier API docs and the
# fitted model's attribute dictionary as cell output.
help(XGBClassifier)
model.__dict__
|
ml/telecom-churn-prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# COMMAND TO INSTALL THE PANDAS PACKAGE VIA PIP,
# IN CASE IT IS NOT INSTALLED ON YOUR COMPUTER
# !pip install pandas
# Import the pandas library under the alias pd
import pandas as pd
# # PANDAS SERIES
# Assign a list of sample objects (mixed strings and ints) to `dados`
dados = (['a',2,'b',4])
# Use pandas (aliased pd) to convert the data into a Series
s = pd.Series(dados)
# Show the result of the operation and its container type
print(s)
print(type(s))
# # PANDAS DATAFRAME
# Build a key-value dictionary:
# columns [Nome, Idade] and their respective values
data = {'Nome':['Marcos','Paulo','Rita','Julia'],
        'Idade':[28,34,29,42]
}
# Call the pandas DataFrame constructor
# and bind df to the result
df = pd.DataFrame(data)
# Render the DataFrame (rich display in a notebook)
display(df)
|
DATA_PRE_PROCESSING/Python-Pandas .ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# # Combination of p-values
# The `MultipleTesting` package offers several established methods for combining independent p-values. This becomes relevant when testing a global hypothesis: As an example, we can think of a meta analysis based on a number of studies where each study gives us a single p-value pᵢ. Using a method for the combination of all p-values pᵢ, we can derive a global p-value p̃.
# ## Setting the scene
using MultipleTesting
using Gadfly
# We can combine an arbitrarily large number of p-values, but let us start with the simplest case for our exploration: The combination of two p-values p₁ and p₂.
# Grid of p-values strictly inside (0, 1); endpoints are excluded.
p_values = 0.01:0.01:0.99;
# For each pair of p₁ and p₂, we compute a global p-value p̃ using different p-value combination methods of the package.
# Combine every pair (p₁, p₂) drawn from `p_values` into a matrix of
# global p-values p̃ using the supplied combination method.
function combine_two_pvalues(p_values, combination)
    [combine([p1, p2], combination) for p1 in p_values, p2 in p_values]
end
# Looking at contour plots that show us the combined p-value p̃ depending on the two inputs p₁ and p₂, we can explore the properties and the differences of the various methods.
# Draw a contour plot of the combined p-value p̃ as a function of the
# two input p-values p₁ and p₂, rendered as a fixed-aspect 13cm SVG.
function plot_combined_pvalues_contour(pc, p_values, title)
    p = Gadfly.plot(z = pc, x = collect(p_values), y = collect(p_values),
        Geom.contour(levels = collect(0.1:0.1:1.0)),
        Guide.xticks(ticks = collect(0:0.2:1)), Guide.yticks(ticks = collect(0:0.2:1)),
        Guide.title(title),
        Guide.xlabel("p₁"), Guide.ylabel("p₂"),
        Guide.colorkey(title="p̃"),
        Coord.cartesian(fixed = true))
    # draw() returns the rendered figure, which the notebook displays.
    f = draw(SVG(13cm, 13cm), p)
    return f
end
# ## Analysing different combination methods
# ### Fisher combination
# Each section computes the pairwise combination matrix for one method
# and renders its contour plot.
pc = combine_two_pvalues(p_values, Fisher())
plot_combined_pvalues_contour(pc, p_values, "Fisher combination")
# ### Tippett combination
pc = combine_two_pvalues(p_values, Tippett())
plot_combined_pvalues_contour(pc, p_values, "Tippett combination")
# ### Simes combination
pc = combine_two_pvalues(p_values, Simes())
plot_combined_pvalues_contour(pc, p_values, "Simes Combination")
# ### Stouffer combination
pc = combine_two_pvalues(p_values, Stouffer())
plot_combined_pvalues_contour(pc, p_values, "Stouffer Combination")
# ### Logit combination
pc = combine_two_pvalues(p_values, Logit())
plot_combined_pvalues_contour(pc, p_values, "Logit Combination")
# ### Wilkinson combination
# Wilkinson(r) is parameterised by an integer rank r.
pc = combine_two_pvalues(p_values, Wilkinson(1))
plot_combined_pvalues_contour(pc, p_values, "Wilkinson combination with rank=1")
pc = combine_two_pvalues(p_values, Wilkinson(2))
plot_combined_pvalues_contour(pc, p_values, "Wilkinson combination with rank=2")
# ### Minimum of adjusted p-values combination
pc = combine_two_pvalues(p_values, Minimum(BenjaminiHochberg()))
plot_combined_pvalues_contour(pc, p_values, "Minimum of Benjamini-Hochberg adjusted p-values combination")
pc = combine_two_pvalues(p_values, Minimum(Bonferroni()))
plot_combined_pvalues_contour(pc, p_values, "Minimum of Bonferroni adjusted p-values combination")
|
docs/notebooks/p-values-combination.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://raw.githubusercontent.com/Qiskit/qiskit-tutorials/master/images/qiskit-heading.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# ## _*Let's Make a Deal*_
#
# The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.
#
# For more information about how to use the IBM Q experience (QX), consult the [tutorials](https://quantumexperience.ng.bluemix.net/qstage/#/tutorial?sectionId=c59b3710b928891a1420190148a72cce&pageIndex=0), or check out the [community](https://quantumexperience.ng.bluemix.net/qstage/#/community).
#
# ***
# ## Contributors
#
# <NAME>, Université Libre de Bruxelles
# +
# useful additional packages
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import time
from pprint import pprint
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# -
# Load stored IBM Q account credentials (needed before using real devices).
IBMQ.load_accounts()
# ## Introduction
# The Monty Hall problem, named after the original host of the television show "Let's Make a Deal" is well known. The game master asks the player to designate between three doors the one behind which a valuable prize has been hidden, such as a luxury car. Goats are hidden behind the other two doors. When the player has issued a preference, the game master opens one of the two remaining doors and one of the goats appears. The player then has the opportunity to choose the closed door remaining instead of the door chosen in first intention.
#
# Is it wise, indifferent or unwise to stick with one's first choice, whatever it may have been? Much has been written about the optimal strategy because the actual solution is counter-intuitive.
#
# This tutorial is not intended to illustrate one of the many quantum models proposed on this subject and including many variations. It simply describes a model of the original game that can be played many times by players to convince them of the validity of the optimal strategic solution. This kind of simulation was proposed using conventional hardware, like a shell game, a card deck or a programmed pseudo random number generator.
#
# The present game uses the $ |W_{3} \rangle$ creator circuit described in the tutorial "W State 1 : Multi-qubit systems" as a _*true*_$^1$ random number generator.
#
# The following state is created and measured: $ |W_{3} \rangle = \frac{1}{\sqrt{3}} \: (|1 0 0 \rangle \: + |0 1 0 \rangle\: + |0 0 1\rangle) $
#
# Each of the three qubits used represents one of the doors. The car is hidden behind the one corresponding to the qubit measured as 1 (excited) during the measurement.
#
# With the help of Hadamard gates, a second true random number generator uses the two-qubit state:
#
# $$ H^{\otimes 2}|0_{a}0_{b}\rangle=|+_{a}\rangle \:|+_{b}\rangle=\frac{|0_{a}\rangle|0_{b}\rangle+|0_{a}\rangle|1_{b}\rangle+|1_{a}\rangle|0_{b}\rangle+|1_{a}\rangle|1_{b}\rangle}{2}$$
#
# From the binary value of the measurement $c_{a} c_{b}$, the following quantity represents the result of a coin flipping:
# $c_{a} \oplus c_{b}$. This two-qubit model of true$^1$ random generator was chosen because bias is minimized on the real device.
#
# This result is used to determine which of the two doors hiding a goat is opened each time the player has chosen the door that hides the car. This phase is obviously not necessary when the player has chosen a door hiding a goat: the game master can not open the door hiding a car, nor the door chosen by the player$^2$.
#
# $^1$ If used on the simulator, it remains a pseudo random number generator.
#
# $^2$ This is a hint for finding the optimal strategy: how many times on average is the game master exempt from tossing a coin?
# ## It's time to play!
# You may have noticed that the optimal solution has not been hitherto explicitly given. Chances are you already know the answer, and what follows will only comfort you in your belief. For those who are not familiar with this problem, the suspense is preserved and for those who still doubt, this is an opportunity to review your opinion. Play the game a sufficient number of times. Even if you only rely on your intuition for each game, you can use your own success statistics to figure out what is the best strategy.
#
# You will first be asked to choose between the simulator (a good choice to start) or a real device.
# +
"Choice of the backend"
# local qasm simulator
backend = Aer.get_backend('qasm_simulator')
# The flag_qx2 must be "True" for using the ibmqx2.
# "True" is also better when using the simulator
# Use the IBM Quantum Experience
#backend = least_busy(IBMQ.backends(filters=lambda x: not x.configuration().simulator)) #
flag_qx2 = True
if backend == 'ibmqx4':
flag_qx2 = False
print("Your choice for the backend is: ", backend.name(), "flag_qx2 is: ", flag_qx2)
# -
# Here are two useful routines.
# Define an F gate (used in the W-state preparation)
def F_gate(circ,q,i,j,n,k) :
    """Apply the F gate of the W-state construction from q[i] to q[j].

    Implements a controlled rotation as ry(-theta), cz, ry(theta) on
    q[j] controlled by q[i], with theta = arccos(sqrt(1/(n-k+1))).
    A barrier is added on q[i] to keep the gate sequence in order.
    """
    theta = np.arccos(np.sqrt(1/(n-k+1)))
    circ.ry(-theta,q[j])
    circ.cz(q[i],q[j])
    circ.ry(theta,q[j])
    circ.barrier(q[i])
# Define the cxrv gate which uses reverse CNOT instead of CNOT
def cxrv(circ,q,i,j) :
    """Apply a CNOT with q[j] as control and q[i] as target by wrapping
    the reversed cx in Hadamards on both qubits.

    Useful on devices whose coupling map only supports the CNOT in the
    opposite direction (e.g. the ibmqx4 option used below).
    """
    circ.h(q[i])
    circ.h(q[j])
    circ.cx(q[j],q[i])
    circ.h(q[i])
    circ.h(q[j])
    circ.barrier(q[i],q[j])
# +
# 3-qubit W state on q[0..2] (decides which door hides the car);
# q[3..4] are put into |+> with Hadamards to serve as a coin flip.
q = QuantumRegister(5)
c = ClassicalRegister(5)
W_states = QuantumCircuit(q,c)
W_states.x(q[2]) #start is |100>
F_gate(W_states,q,2,1,3,1) # Applying F12
F_gate(W_states,q,1,0,3,2) # Applying F23
if flag_qx2 : # option ibmqx2
    W_states.cx(q[1],q[2]) # cNOT 21
    W_states.cx(q[0],q[1]) # cNOT 32
else : # option ibmqx4: coupling map needs the reversed-CNOT helper
    cxrv(W_states,q,1,2)
    cxrv(W_states,q,0,1)
# Coin tossing
W_states.h(q[3])
W_states.h(q[4])
# Measure all five qubits into the classical register.
for i in range(5) :
    W_states.measure(q[i] , c[i])
# +
"Dotted alphabet"
top_bottom = "███████████████"
blank = "█ █"
chosen = []
chosen = chosen + ["███████████████"]
chosen = chosen + ["███████████ ██"]
chosen = chosen + ["██████████ ███"]
chosen = chosen + ["█████████ ████"]
chosen = chosen + ["████████ █████"]
chosen = chosen + ["█ ████ ██████"]
chosen = chosen + ["██ ██ ███████"]
chosen = chosen + ["███ ████████"]
chosen = chosen + ["████ █████████"]
chosen = chosen + ["███████████████"]
here_left = []
here_left = here_left + ["███████████████"]
here_left = here_left + ["███████████████"]
here_left = here_left + ["███ █████████"]
here_left = here_left + ["███ █████████"]
here_left = here_left + ["███ █████████"]
here_left = here_left + ["███ █████████"]
here_left = here_left + ["███ █████████"]
here_left = here_left + ["███ ████"]
here_left = here_left + ["███████████████"]
here_left = here_left + ["███████████████"]
here_center = []
here_center = here_center + ["███████████████"]
here_center = here_center + ["███████████████"]
here_center = here_center + ["█████ ████"]
here_center = here_center + ["███ █████████"]
here_center = here_center + ["███ █████████"]
here_center = here_center + ["███ █████████"]
here_center = here_center + ["███ █████████"]
here_center = here_center + ["█████ ████"]
here_center = here_center + ["███████████████"]
here_center = here_center + ["███████████████"]
here_right = []
here_right = here_right + ["███████████████"]
here_right = here_right + ["███████████████"]
here_right = here_right + ["███ █████"]
here_right = here_right + ["███ ███ ███"]
here_right = here_right + ["███ ███ ███"]
here_right = here_right + ["███ █████"]
here_right = here_right + ["███ ██ ████"]
here_right = here_right + ["███ ███ ███"]
here_right = here_right + ["███████████████"]
here_right = here_right + ["███████████████"]
goa=["█ █","█ ( ) █","█ ( ) █","█ / O O \ █","█ )|( █","█ @ █","█ = █","█ Y █","█ █"]
car=["█ █","█ _______ █","█ / \ █","█ ° _______ ° █","█ / \ █","█ (O) ### (O) █","█ =+=====+= █","█ || || █","█ █"]
# -
"(RE)INITIATES STATISTICS"
nb_randomnb = 0
nb_left = 0
nb_center = 0
nb_right = 0
nb_switches = 0
nb_stays = 0
nb_won_switching = 0
nb_won_sticking = 0
nb_games = 0
n_won = 0
"HERE START THE GAME"
"Hiding the car and the two goats behind the three doors"
Label = ["left", "central", "right"]
shots = 1
repeat = "Y"
while repeat == "Y":
nb_of_cars = 4
while nb_of_cars != 1:
result = execute(W_states, backend=backend, shots=shots)
c5str = str(result.result().get_counts(W_states))
nb_of_cars = int(c5str[4]) + int(c5str[5]) + int(c5str[6])
#this is for checking results from the real computer:
if nb_of_cars == 0:
print("They managed to hide three goats and no car behind the doors! Restarting the hiding process...")
if nb_of_cars >= 2:
print("They managed to hide", nb_of_cars, "cars behind the doors! Restarting the hiding process...")
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(here_left[i]," ",here_center[i]," ",here_right[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
door = input("Game master: Your choice? letter l: left door, c: central door, r: right door + enter\n").upper()
picl = here_left
picc = here_center
picr = here_right
if (door == "L"):
Doorchosen = 1
nb_left = nb_left + 1
picl = chosen
else:
if (door == "C"):
Doorchosen = 2
nb_center = nb_center + 1
picc=chosen
else:
Doorchosen = 3
nb_right = nb_right + 1
picr = chosen
print('Game master: Your choice was the',Label[Doorchosen-1], "door")
"AN OPPORTUNITY TO CHANGE YOUR MIND"
c5str = str(result.result().get_counts(W_states))
randomnb = (int(c5str[2]) + int(c5str[3])) %2
if c5str[4] == "1": #car behind left door
Doorwinning = 1
if Doorchosen == 1:
Dooropen = 2 + randomnb
Doorswitch = 3 - randomnb
if Doorchosen == 2:
Dooropen = 3
Doorswitch = 1
if Doorchosen == 3:
Dooropen = 2
Doorswitch = 1
if c5str[5] == "1": #car behind central door
Doorwinning = 2
if Doorchosen == 2:
Dooropen = 1 + 2*randomnb
Doorswitch = 3 - 2*randomnb
if Doorchosen == 1:
Dooropen = 3
Doorswitch = 2
if Doorchosen == 3:
Dooropen = 1
Doorswitch = 2
if c5str[6] == "1": #car behind right door
Doorwinning = 3
if Doorchosen == 3:
Dooropen = randomnb + 1
Doorswitch = 2 - randomnb
if Doorchosen == 1:
Dooropen = 2
Doorswitch = 3
if Doorchosen == 2:
Dooropen = 1
Doorswitch = 3
if Dooropen == 1:
picl = goa
if Dooropen == 2:
picc = goa
if Dooropen == 3:
picr = goa
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(picl[i]," ",picc[i]," ",picr[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
print('I opened the', Label[Dooropen-1], 'door and you see a goat')
print('You get now an opportunity to change your choice!')
print("Do you want to switch for the ",Label[Doorswitch-1], " door?")
I_switch = input(" Answer by (y/n) + enter\n").upper()
if (I_switch == "Y"):
Doorfinal = Doorswitch
else:
Doorfinal = Doorchosen
"FINAL ANNOUNCE"
if Doorfinal == Doorwinning:
if Doorfinal == 1:
picl = car
if Doorfinal == 2:
picc = car
if Doorfinal == 3:
picr = car
endmessage = 'won the car! Congratulations!'
else:
if Doorfinal == 1:
picl = goa
if Doorfinal == 2:
picc = goa
if Doorfinal == 3:
picr = goa
endmessage = 'won a goat! Sorry!'
print(top_bottom," ",top_bottom," ",top_bottom)
for i in range(9):
print(picl[i]," ",picc[i]," ",picr[i])
print(top_bottom," ",top_bottom," ",top_bottom,"\n")
print('Game master: You opened the',Label[Doorfinal-1],'door and', endmessage)
"STATISTICS"
nb_games = nb_games + 1
if Doorfinal == Doorswitch:
nb_switches = nb_switches +1
if c5str[Doorfinal+3] == "1":
nb_won_switching = nb_won_switching + 1
else:
nb_stays = nb_stays+1
if c5str[Doorfinal+3] == "1":
nb_won_sticking = nb_won_sticking + 1
n_won = nb_won_switching + nb_won_sticking
print()
print("YOUR STATS")
print("nb of games: ", nb_games," total nb won:", n_won, " first choice: left",nb_left," center", nb_center,"right", nb_right)
print("nb sticking: ",nb_stays," nb won when sticking: ",nb_won_sticking,"nb switching:",nb_switches," nb won when switching:",nb_won_switching)
repeat = input("Another game? Answer by (y/n) + enter\n").upper()
print("Game over")
|
awards/teach_me_qiskit_2018/w_state/W State 2 - Let's Make a Deal.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NOTEBOOK_HEADER-->
# *This notebook contains material from [Controlling Natural Watersheds](https://jckantor.github.io/Controlling-Natural-Watersheds);
# content is available [on Github](https://github.com/jckantor/Controlling-Natural-Watersheds.git).*
# <!--NAVIGATION-->
# < [ENSO](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.13-ENSO.ipynb) | [Contents](toc.ipynb) | [Solar Cycle](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/B.01-Solar_Cycle.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/B.00-Projects.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/Controlling-Natural-Watersheds/master/notebooks/B.00-Projects.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# # Projects
# * Extend model downstream to include Manitou, Big Fork, and Little Fork. The goal is to incorporate downstream effects of rule curve control, and to get additional sensors in the feedback loop.
#
# * Implement optimization model for the construction of rule curves.
#
# * Develop CUSUM style approach to improve control margins 'controllability'
#
# * Run cross correlation analysis of Basswood and <NAME> Croix
#
# * Stochastic model for total inflows to Namakan and
#
#
#
# Things we learned from the study board ....
#
# 1. The rule curves could include 'adaptive characteristics', but the scope of the project would not include establishment of a full time decision making board to implement the rule curves. The dam operators will be responsible for implementing the rule curves.
#
# 2. The study board is pursuing an additional phase of analysis to understand the impact of modifying the channel in Rainy River above the dam. This will include additional bathymetry.
#
# 3. Jeff
# <!--NAVIGATION-->
# < [ENSO](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/A.13-ENSO.ipynb) | [Contents](toc.ipynb) | [Solar Cycle](http://nbviewer.jupyter.org/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/B.01-Solar_Cycle.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/Controlling-Natural-Watersheds/blob/master/notebooks/B.00-Projects.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/Controlling-Natural-Watersheds/master/notebooks/B.00-Projects.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
|
notebooks/B.00-Projects.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3
# name: python3
# ---
# + [markdown] id="view-in-github"
# <a href="https://colab.research.google.com/github/apache/beam/blob/master/examples/notebooks/documentation/transforms/python/elementwise/pardo-py.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
# + [markdown] id="view-the-docs-top"
# <table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/pardo"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table>
# + cellView="form" id="_-code"
#@title Licensed under the Apache License, Version 2.0 (the "License")
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# + [markdown] id="pardo"
# # ParDo
#
# <script type="text/javascript">
# localStorage.setItem('language', 'language-py')
# </script>
#
# <table align="left" style="margin-right:1em">
# <td>
# <a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.ParDo"><img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> Pydoc</a>
# </td>
# </table>
#
# <br/><br/><br/>
#
# A transform for generic parallel processing.
# A `ParDo` transform considers each element in the input `PCollection`,
# performs some processing function (your user code) on that element,
# and emits zero or more elements to an output `PCollection`.
#
# See more information in the
# [Beam Programming Guide](https://beam.apache.org/documentation/programming-guide/#pardo).
# + [markdown] id="setup"
# ## Setup
#
# To run a code cell, you can click the **Run cell** button at the top left of the cell,
# or select it and press **`Shift+Enter`**.
# Try modifying a code cell and re-running it to see what happens.
#
# > To learn more about Colab, see
# > [Welcome to Colaboratory!](https://colab.sandbox.google.com/notebooks/welcome.ipynb).
#
# First, let's install the `apache-beam` module.
# + id="setup-code"
# !pip install --quiet -U apache-beam
# + [markdown] id="examples"
# ## Examples
#
# In the following examples, we explore how to create custom `DoFn`s and access
# the timestamp and windowing information.
# + [markdown] id="example-1-pardo-with-a-simple-dofn"
# ### Example 1: ParDo with a simple DoFn
#
# The following example defines a simple `DoFn` class called `SplitWords`
# which stores the `delimiter` as an object field.
# The `process` method is called once per element,
# and it can yield zero or more output elements.
# + id="example-1-pardo-with-a-simple-dofn-code"
import apache_beam as beam
class SplitWords(beam.DoFn):
  """A DoFn that splits each incoming string on a configurable delimiter.

  `process` is invoked once per input element and may emit any number
  of output elements (one per delimited word here).
  """

  def __init__(self, delimiter=','):
    self.delimiter = delimiter

  def process(self, text):
    yield from text.split(self.delimiter)
# Build and run a pipeline: create two comma-joined plant strings,
# split each into words with the SplitWords DoFn, and print every
# resulting element.
with beam.Pipeline() as pipeline:
  plants = (
      pipeline
      | 'Gardening plants' >> beam.Create([
          '🍓Strawberry,🥕Carrot,🍆Eggplant',
          '🍅Tomato,🥔Potato',
      ])
      | 'Split words' >> beam.ParDo(SplitWords(','))
      | beam.Map(print))
# + [markdown] id="example-1-pardo-with-a-simple-dofn-2"
# <table align="left" style="margin-right:1em">
# <td>
# <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/elementwise/pardo.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
# </td>
# </table>
#
# <br/><br/><br/>
# + [markdown] id="example-2-pardo-with-timestamp-and-window-information"
# ### Example 2: ParDo with timestamp and window information
#
# In this example, we add new parameters to the `process` method to bind parameter values at runtime.
#
# * [`beam.DoFn.TimestampParam`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.TimestampParam)
# binds the timestamp information as an
# [`apache_beam.utils.timestamp.Timestamp`](https://beam.apache.org/releases/pydoc/current/apache_beam.utils.timestamp.html#apache_beam.utils.timestamp.Timestamp)
# object.
# * [`beam.DoFn.WindowParam`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.WindowParam)
# binds the window information as the appropriate
# [`apache_beam.transforms.window.*Window`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.window.html)
# object.
# + id="example-2-pardo-with-timestamp-and-window-information-code"
import apache_beam as beam
class AnalyzeElement(beam.DoFn):
  """DoFn that yields a human-readable report of each element's
  timestamp and window, using runtime-bound DoFn parameters."""

  def process(
      self,
      elem,
      # These defaults are placeholders that Beam replaces at runtime
      # with the element's actual timestamp and window objects.
      timestamp=beam.DoFn.TimestampParam,
      window=beam.DoFn.WindowParam):
    yield '\n'.join([
        '# timestamp',
        'type(timestamp) -> ' + repr(type(timestamp)),
        'timestamp.micros -> ' + repr(timestamp.micros),
        'timestamp.to_rfc3339() -> ' + repr(timestamp.to_rfc3339()),
        'timestamp.to_utc_datetime() -> ' + repr(timestamp.to_utc_datetime()),
        '',
        '# window',
        'type(window) -> ' + repr(type(window)),
        'window.start -> {} ({})'.format(
            window.start, window.start.to_utc_datetime()),
        'window.end -> {} ({})'.format(
            window.end, window.end.to_utc_datetime()),
        'window.max_timestamp() -> {} ({})'.format(
            window.max_timestamp(), window.max_timestamp().to_utc_datetime()),
    ])
# Run a one-element pipeline: attach a fixed event timestamp, window the
# element into fixed 30-second windows, then print the DoFn-parameter report.
with beam.Pipeline() as pipeline:
  dofn_params = (
      pipeline
      | 'Create a single test element' >> beam.Create([':)'])
      | 'Add timestamp (Spring equinox 2020)' >>
      beam.Map(lambda elem: beam.window.TimestampedValue(elem, 1584675660))
      |
      'Fixed 30sec windows' >> beam.WindowInto(beam.window.FixedWindows(30))
      | 'Analyze element' >> beam.ParDo(AnalyzeElement())
      | beam.Map(print))
# + [markdown] id="example-2-pardo-with-timestamp-and-window-information-2"
# <table align="left" style="margin-right:1em">
# <td>
# <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/elementwise/pardo.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
# </td>
# </table>
#
# <br/><br/><br/>
# + [markdown] id="example-3-pardo-with-dofn-methods"
# ### Example 3: ParDo with DoFn methods
#
# A [`DoFn`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn)
# can be customized with a number of methods that can help create more complex behaviors.
# You can customize what a worker does when it starts and shuts down with `setup` and `teardown`.
# You can also customize what to do when a
# [*bundle of elements*](https://beam.apache.org/documentation/runtime/model/#bundling-and-persistence)
# starts and finishes with `start_bundle` and `finish_bundle`.
#
# * [`DoFn.setup()`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.setup):
# Called *once per `DoFn` instance* when the `DoFn` instance is initialized.
# `setup` need not to be cached, so it could be called more than once per worker.
# This is a good place to connect to database instances, open network connections or other resources.
#
# * [`DoFn.start_bundle()`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.start_bundle):
# Called *once per bundle of elements* before calling `process` on the first element of the bundle.
# This is a good place to start keeping track of the bundle elements.
#
# * [**`DoFn.process(element, *args, **kwargs)`**](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.process):
# Called *once per element*, can *yield zero or more elements*.
# Additional `*args` or `**kwargs` can be passed through
# [`beam.ParDo()`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.ParDo).
# **[required]**
#
# * [`DoFn.finish_bundle()`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.finish_bundle):
# Called *once per bundle of elements* after calling `process` after the last element of the bundle,
# can *yield zero or more elements*. This is a good place to do batch calls on a bundle of elements,
# such as running a database query.
#
# For example, you can initialize a batch in `start_bundle`,
# add elements to the batch in `process` instead of yielding them,
# then running a batch query on those elements on `finish_bundle`, and yielding all the results.
#
# Note that yielded elements from `finish_bundle` must be of the type
# [`apache_beam.utils.windowed_value.WindowedValue`](https://github.com/apache/beam/blob/master/sdks/python/apache_beam/utils/windowed_value.py).
# You need to provide a timestamp as a unix timestamp, which you can get from the last processed element.
# You also need to provide a window, which you can get from the last processed element like in the example below.
#
# * [`DoFn.teardown()`](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.DoFn.teardown):
# Called *once (as a best effort) per `DoFn` instance* when the `DoFn` instance is shutting down.
# This is a good place to close database instances, close network connections or other resources.
#
# Note that `teardown` is called as a *best effort* and is *not guaranteed*.
# For example, if the worker crashes, `teardown` might not be called.
# + id="example-3-pardo-with-dofn-methods-code"
import apache_beam as beam
class DoFnMethods(beam.DoFn):
  """DoFn that reports every lifecycle method as it is invoked."""

  def __init__(self):
    print('__init__')
    # Fall back to the global window until process() records a real one.
    self._last_window = beam.window.GlobalWindow()

  def setup(self):
    # Called once per DoFn instance (best effort) before any processing.
    print('setup')

  def start_bundle(self):
    # Called before the first element of every bundle.
    print('start_bundle')

  def process(self, element, window=beam.DoFn.WindowParam):
    # Keep the element's window so finish_bundle() can emit into it.
    self._last_window = window
    yield f'* process: {element}'

  def finish_bundle(self):
    # Elements yielded here must be wrapped as WindowedValues explicitly.
    yield beam.utils.windowed_value.WindowedValue(
        value='* finish_bundle: 🌱🌳🌍',
        timestamp=0,
        windows=[self._last_window],
    )

  def teardown(self):
    # Called (best effort, not guaranteed) when the instance shuts down.
    print('teardown')
# Run the lifecycle demo: setup/start_bundle/teardown print directly to
# stdout, while process/finish_bundle outputs flow through beam.Map(print).
with beam.Pipeline() as pipeline:
  results = (
      pipeline
      | 'Create inputs' >> beam.Create(['🍓', '🥕', '🍆', '🍅', '🥔'])
      | 'DoFn methods' >> beam.ParDo(DoFnMethods())
      | beam.Map(print))
# + [markdown] id="example-3-pardo-with-dofn-methods-2"
# <table align="left" style="margin-right:1em">
# <td>
# <a class="button" target="_blank" href="https://github.com/apache/beam/blob/master/sdks/python/apache_beam/examples/snippets/transforms/elementwise/pardo.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" width="32px" height="32px" alt="View source code"/> View source code</a>
# </td>
# </table>
#
# <br/><br/><br/>
#
# > *Known issues:*
# >
# > * [[BEAM-7885]](https://issues.apache.org/jira/browse/BEAM-7885)
# > `DoFn.setup()` doesn't run for streaming jobs running in the `DirectRunner`.
# > * [[BEAM-7340]](https://issues.apache.org/jira/browse/BEAM-7340)
# > `DoFn.teardown()` metrics are lost.
# + [markdown] id="related-transforms"
# ## Related transforms
#
# * [Map](https://beam.apache.org/documentation/transforms/python/elementwise/map) behaves the same, but produces exactly one output for each input.
# * [FlatMap](https://beam.apache.org/documentation/transforms/python/elementwise/flatmap) behaves the same as `Map`,
# but for each input it may produce zero or more outputs.
# * [Filter](https://beam.apache.org/documentation/transforms/python/elementwise/filter) is useful if the function is just
# deciding whether to output an element or not.
#
# <table align="left" style="margin-right:1em">
# <td>
# <a class="button" target="_blank" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.core.html#apache_beam.transforms.core.ParDo"><img src="https://beam.apache.org/images/logos/sdks/python.png" width="32px" height="32px" alt="Pydoc"/> Pydoc</a>
# </td>
# </table>
#
# <br/><br/><br/>
# + [markdown] id="view-the-docs-bottom"
# <table align="left"><td><a target="_blank" href="https://beam.apache.org/documentation/transforms/python/elementwise/pardo"><img src="https://beam.apache.org/images/logos/full-color/name-bottom/beam-logo-full-color-name-bottom-100.png" width="32" height="32" />View the docs</a></td></table>
|
examples/notebooks/documentation/transforms/python/elementwise/pardo-py.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="6t25Q4DM9ZFL" executionInfo={"status": "ok", "timestamp": 1646297374778, "user_tz": -540, "elapsed": 5216, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} colab={"base_uri": "https://localhost:8080/"} outputId="c4d71029-a8c0-4c9b-e041-b68ef8c9102b"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split,cross_validate
import pandas as pd
# Load the raw steel-price dataset from the mounted Colab path.
df = pd.read_excel('/content/Test_dataset.xlsx')
print(df)
# + id="_uSV80DaIBn3"
# Index rows by date so time-based slicing and plotting work naturally.
df.set_index('Date', inplace=True)
print(df)
# Keep only the target (MA5_f) plus the candidate feature columns
# (iron ore, scrap, WTI, shipping index, Chinese inventories, etc.).
Dataset = df[['MA5_f', 'Target', '1)철광석-중국', '4)철스크랩-터키수입가', '5)WTI', '5)BSI-해운지수', '2)중국-Flat재고', '3)중국-판재+롱재고', '4)중국-석탄내수', 'Quater',
'1)열연-미국', '1)열연-뭄바이', '1)열연-북유럽', '1)열연-한국', '1)GI-상해', '1)GI-미국']]
Dataset = Dataset.dropna(axis=0) # drop rows with missing data (the leading part)
# + colab={"base_uri": "https://localhost:8080/"} id="EJTOWOU8-2VA" executionInfo={"status": "ok", "timestamp": 1646297591113, "user_tz": -540, "elapsed": 3, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="b3db1c9b-46ee-4773-8df7-cd4988325f02"
print(Dataset)
Dataset.info()
# + id="QrF3GNiTgnXD"
# df.info()
# Target column (weekly moving-average price) and candidate features.
# split_time = 420
Y_colname = ['MA5_f']
# X_remove = ['datetime', 'DateTime', 'temp_group', 'casual', 'registered']
# X_colname = [x for x in raw_fe.columns if x not in Y_colname+X_remove]
X_colname = ['Target', '1)철광석-중국', '4)철스크랩-터키수입가', '5)WTI', '5)BSI-해운지수', '2)중국-Flat재고', '3)중국-판재+롱재고', '4)중국-석탄내수', 'Quater',
'1)열연-미국', '1)열연-뭄바이', '1)열연-북유럽', '1)열연-한국', '1)GI-상해', '1)GI-미국']
# + colab={"base_uri": "https://localhost:8080/", "height": 783} id="1cwlMdYUidFf" executionInfo={"status": "ok", "timestamp": 1646297890966, "user_tz": -540, "elapsed": 6, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="37960b23-93dc-40f1-ba3c-ddb7eec990f5"
split_time = 437 # roughly 80% of the 547 samples
# 1) forecasting from the weekly average price alone - MA5_f
# Chronological split: first `split_time` rows train, remainder validate.
x_train = Dataset.MA5_f[:split_time]
x_valid = Dataset.MA5_f[split_time:]
plt.figure(figsize=(10, 6))
x_train.plot()
x_today = Dataset.Target[split_time:] # same-day (spot) price
plt.figure(figsize=(10, 6))
x_valid.plot()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 877} id="NygGIOxPKhzr" executionInfo={"status": "ok", "timestamp": 1646297903696, "user_tz": -540, "elapsed": 739, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="0e772e9b-2415-4d84-e1f9-4520e0aacb82"
# Naive forecasting, version 1: assume "this week's average price = next week's average price"
#naive_forecast = df.MA5_f[split_time - 1:-1]
# shift(1) lags the series by one step; the first entry becomes NaN.
naive_forecast = x_valid.shift(1)
plt.figure(figsize=(14, 6))
#naive_forecast.plot()
print(x_valid)
print(naive_forecast)
x_valid.plot(label='x_valid')
#plt.show()
naive_forecast.plot(label='Naive')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 81} id="FlTsnVbeNh1v" executionInfo={"status": "ok", "timestamp": 1646297911438, "user_tz": -540, "elapsed": 557, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="ad9a94a6-63db-468a-fdc3-c84aa9ebf11a"
# Score version 1 of the naive forecast (MA5 lagged by one week).
from sklearn.metrics import mean_absolute_error, mean_squared_error

def MAPE(y_test, y_pred):
    """Mean absolute percentage error, in percent."""
    return np.mean(np.abs((y_test - y_pred) / y_test)) * 100

# Skip the first element: shift(1) left it as NaN.
# Results are stored in lowercase names so the MAPE function is not
# clobbered by its own float result (re-running the cell would otherwise
# raise "'numpy.float64' object is not callable").
mae = mean_absolute_error(x_valid[1:], naive_forecast[1:])
mse = mean_squared_error(x_valid[1:], naive_forecast[1:])
mape = MAPE(x_valid[1:], naive_forecast[1:])
print("Naive Forecasting - 1차버전 : 이번주 평균가격 = 다음주 평균가격 가정시\n")
pd.options.display.float_format = '{:,.2f}'.format
display(pd.DataFrame([mae, mse, mape], index=['MAE', 'MSE', 'MAPE'], columns=['Score']).T)
# + colab={"base_uri": "https://localhost:8080/", "height": 941} id="Im2EcD4zfplL" executionInfo={"status": "ok", "timestamp": 1646297928489, "user_tz": -540, "elapsed": 762, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="c4f93699-d26a-426b-b89d-b1ef84e2da5c"
# Naive forecasting, version 2: assume today's spot price = next week's average price.
plt.figure(figsize=(14, 6))
#naive_forecast.plot()
print(x_valid)
print(x_today)
x_valid.plot(label='x_valid')
#plt.show()
x_today.plot(label='x_today')
#plot_series(x_valid.index, x_valid)
#plot_series(naive_forecast.index, naive_forecast)
plt.legend()
plt.show()
from sklearn.metrics import mean_absolute_error, mean_squared_error

def MAPE(y_test, y_pred):
    """Mean absolute percentage error, in percent."""
    return np.mean(np.abs((y_test - y_pred) / y_test)) * 100

# Lowercase result names keep the MAPE function callable on re-runs.
mae = mean_absolute_error(x_valid, x_today)
mse = mean_squared_error(x_valid, x_today)
mape = MAPE(x_valid, x_today)
# Fixed label: this cell evaluates version 2 (the original printed "1차버전").
print("Naive Forecasting - 2차버전 : 현재가격 = 1주일 후 평균가격 가정시\n")
pd.options.display.float_format = '{:,.2f}'.format
display(pd.DataFrame([mae, mse, mape], index=['MAE', 'MSE', 'MAPE'], columns=['Score']).T)
# + id="nKOMh9KojIPG"
|
2022-03-21_Steel_REG_BL_Naive_1_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# # En esta lectura aprenderemos acerca de los diferentes tipos de numeros, sus operadores y como usarlos:
#
# ### 1.) Tipos de datos en python
# ### 2.) Aritmetica basica
# ### 3.) Operadores de asignacion
# ### 4.) Jerarquia
#
# ## En python existen principalmente dos tipos de numeros, enteros (ints) y decimales (floats)
#Entero
5
#flotante
5.0
# Demonstrate print() with positional arguments and the ``end`` keyword.
parametro1 = "hola"
parametro2 = "hola2"
parametro3 = 5
# keyword-argument examples: end, sep
# Fixed NameError: the variable is ``parametro1``, not ``prametro1``.
print(parametro1, parametro2, parametro3, end="(fin)")
# Los numeros que son muy grandes como un millon, 1,000,000 suelen llevar comas, esto en python no se pueden usar pero podemos hacer uso de **( _ )** para ser mas ordenados
#un millon
1_000_000
# Las operaciones basicas en python son las siguientes
#suma
2 + 1
#resta
2 - 1
#multiplicacion
3 * 3
#division
2/5
# Para numeros complejos es de la siguiente manera:
#(parte real + parte imaginaria)
2 + 3j + 6 - 2j
# Existen otras operaciones como la raiz cuadrada :
#raiz cuadrada
4**0.5
# Las expresiones matematicas se resuelven de izquierda a derecha **(left sided binding)** respetando al jerarquia de las operaciones . Las expresiones como **$a^{n^{m}}$** son un caso especial pues se ejecutan de derecha a izquierda, primero ${n^{m}}$ y posteriormente el resultado se usa como exponente para **a**. A esta regla se le conoce como **right sided binding**
2**3**2
#3**2
#2**9
(2**3)**2
# Podemos agrupar terminos para establecer cual se ejecutara primero, ejemplo: $ (a^n)^{m} $
#a = 2, n= 3, m= 4
2**3**4
# Podemos ver como se ejecuta el orden de las operaciones
2 + 10 * 10 + 3
# Podemos usar parentesis para especificar agrupacion de terminos
(2+10) * (10+3)
# ### Operadores binarios (Bitwise operators)
#
# Son un tipo especial de operadores que nos ayudaran a hacer logica bit por bit, todos los tipos de datos al final terminan siendo solo 1's y 0's. Aunque python es un lenguaje de alto nivel tambien podemos hacer uso de algunas funciones para trabajar en algo mas cercano a lo que entiende la computadora ([operadores binarios](https://es.wikipedia.org/wiki/Operador_a_nivel_de_bits)):
#
#
# +
# Bit-shift operations (register shifts)
int_num = 8
numero_de_movimientos = 1
# Left shift (multiplication by two), bitwise left shift
# (removed a redundant duplicate ``int_num = 8`` assignment here)
corrimiento = int_num << numero_de_movimientos
print("Bitwise left shift (<<) de ",int_num,": ",corrimiento)
# Right shift (integer division by two)
corrimiento = int_num >> numero_de_movimientos
print("Bitwise right shift (>>) de ",int_num,": ",corrimiento)
# Bitwise negation
negacion = ~ int_num # flips every 1 to 0 and every 0 to 1
print("Negacion (-) de ",int_num,": ",negacion )
# The negation comes out negative because of two's complement: https://es.wikipedia.org/wiki/Complemento_a_dos
# -
# Podemos representar los numeros enteros en base 10 a base 2 de la siguiente manera:
# +
# Binary literals: the 0b prefix denotes a base-2 integer.
a = 0b00101010
b = 0b00100011
# Printing the variable shows its base-10 value; bin() gives the binary string.
print("La variable 'a' vale:",a,"y en binario se representa como:",bin(a),"\n")
print("El resto de operaciones binarias son:\n")
print(bin(a),"and",bin(b),"=",bin(a & b))
print(bin(a),"or",bin(b),"=",bin(a | b))
print(bin(a),"xor",bin(b),"=",bin(a ^ b))
# -
# ### Hexadecimales y octales
# Existen muchas bases numericas pero entre las mas utilizadas son la hexadecimal y en menor medida la octal, se pueden realizar las mismas operaciones y sus representaciones son las siguientes:
# +
#numeros hexadecimales se representan con "0x" seguidos de un numero en base 16
b10 = 15 # base 10
b16 = 0x0F
#revisamos si son identicos b10 y b16
print(b10," == ", hex(b16), "<-->",b10 == b16)
#numeros octales se representan con "0o" seguidos de un numero en base 8
b8 = 0o10
b10 = 8
print(b10," == ", oct(b8), "<-->",b10 == b8)
numero = 0x10
print(numero)
"""
0
1
2
3
4
5
6
7
8
9
A = 10
B = 11
C=12
D=13
E =14
F =15
10 =16
11 = 17
"""
# -
10%2
11%2
# ***
#
# Podemos usar python como una calculadora, siempre teniendo en cuenta la jerarquia de operaciones
# 
# Tambien podemos usar operadores de atajo como:
# Augmented assignment operators: each is shorthand for the commented form.
i=2
#i=i+1
i+=1
print(i)
#i=i-1
i-=1
print(i)
#i=i*3
i*=3
print(i)
#i=i/2
i/=2
print(i)
# ***
# ## Asignacion de variables
#
# En los casos anteriores solo se desplego la informacion de la operacion resultante
#
# Podemos asignar nombres a variables (excepto keywords) para almacenar los resultados de las operaciones, ejemplo:
#variable de nombre a que almacena el valor 5
a =5
#para poder obervar el valor basta con imprimirla en el interprete de la siguiente manera
a
# A diferencia de otros lenguajes de programacion podemos omitir la palabra reservada **var**
#podemos usar A para redefinir su propio valor
a = a + a
#sumamos dos veces el valor de a y lo guardamos en la misma variable, checamos su valor
a
# ## Los nombres que puedes usar al crear estas etiquetas (nombres de variables )deben seguir algunas reglas:
#
# ### 1. Los nombres no pueden comenzar con un número.
# ### 2. No puede haber espacios en el nombre, usa _ en su lugar.
# ### 3. No puedes usar ninguno de estos símbolos: '", <> /? | \ ()! @ # $% ^ & * ~ - +
# ### 4. Se considera una buena práctica (PEP8) que los nombres estén en minúsculas (snake_case para nombres de mas de una palabra).
#
# +
#ejemplos
my_income = 100
tax_rate = 0.1
my_taxes = my_income*tax_rate
#muestra mis taxes (impuestos)
my_taxes
# -
# ### - Aprendimos algunos de los conceptos básicos de los números en Python.
# ### - También aprendimos cómo hacer aritmética y usar Python como calculadora básica.
# ### - Luego lo terminamos aprendiendo sobre la asignación de variables en Python.
#
# ## **A continuación aprenderemos sobre Strings (cadenas)**
# ## Las cadenas se utilizan en Python para registrar información de texto, como el nombre de una persona.
#
# Los strings en Python son en realidad una * secuencia *, lo que básicamente significa que Python realiza un seguimiento
# de cada elemento de la cadena como una secuencia.
#
# Por ejemplo, Python entiende:
# ** La cadena "hola" **
# Es una secuencia de letras en un orden específico. Esto significa podremos usar la indexación para tomar letras particulares (como la primera letra, o la última letra).
#
# Esta idea de secuencia es importante en Python y la retomaremos este termino
# más adelante.
#
# ## En este documento aprenderemos sobre lo siguiente:
#
# ### 1.) Creando cadenas
# ### 2.) Impresión de cadenas
# ### 3) Indexación y corte de cadenas
# ### 4.) Propiedades de la cadena
# ### 5.) Métodos de cadena
# ### 6.) Formato de impresión
# Creando un string
'hello'# se imprime en consola
'Esto tambien es una cadena (string)'
# Tambien podemos construir cadenas usando comillas dobles
"Strings construido con comillas dobles"
# **El siguiente codigo genera un error, hay que tener cuidado de cerrar las comillas y usar un numero indicado de ellas**
' si agrego una comilla ' dentro de dos comillas '
# La manera correcta de imprimir una comilla seria usando una juego con las comillas dobles
"Esta es una manera correcta de imprimir una comilla simple -> ' <- para evitar errores "
# **El siguiente paso seria aprender como imprimir strings**
#usando la funcion print()
print("Esto de aqui es una cadena")
# **Otras formas de usar print**
print('Hello World 1')
print('Hello World 2')
print('Usando \n impresion de nueva linea')
print('\n')
print('que produjo \\n?')
# ### La funcion print acepta dos tipos de argumentos:
# 1. Posicionales
# 2. Argumentos de palabras clave
#posicionales son los valores que va a imprimir
arg1 = 5
arg2 = "cadena"
arg3 = True
print(arg1,arg2,arg3)
#argumentos de palabra clave
#sep -> define el separador entre cada argumento posicional
#end -> define el valor al final de la llamada a la funcion print
print(arg1,arg2,arg3,end="$",sep="--")
# Los argumentos posicionales <span style="color: red;">(positional arguments)</span> van antes que los argumentos de palabra clave <span style="color: green;">(keyword arguments)</span>.
# <br><br><b> En caso contrario hay un error de sintaxis
print(sep="?",arg1,arg2,arg3)
# ### En las cadenas de texto podemos agregar variables a una cadena usando ***format***
datos = "Mi nombre es {} y estoy aprendiendo {}".format("Gustavo","python")
print(datos)
# Podemos pasar variables a esta cadena
nombre="gustavo"
lenguaje="python"
datos = "Mi nombre es {} y estoy aprendiendo {}".format(nombre,lenguaje)
print(datos)
# #### Se puede modificar la posicion de los argumentos
datos = "Mi nombre es {1} y estoy aprendiendo {0}".format(nombre,lenguaje)
print(datos)
# #### Las operaciones que podemos hacer con cadenas son:
# 1. ) La concatenacion
# 2. ) La repeticion
#concatenacion
cadena1 = "Hola "
cadena2 = "Adios"
cadena_concatenada = cadena1+cadena2
print(cadena_concatenada)
#repeticion
saludo = "hola "
repeticion = 5
print(saludo*repeticion)
# ### Existen un elemento llamado indices, que nos permiten acceder de manera individual a los valores de una cadena
cad = "Esto es una cadena. Esta parte tambien"
#accediendo al primer elemento
print("El primer elemento de mi cadena es: ",cad[0])
#accediendo al ultimo elemento
print("El ultimo elemento de mi cadena es: ",cad[-1])
# ### Se puede acceder a un un subconjunto de caracteres indicando el inicio y fin del indice
# +
tupla = (2,4)
type(tupla)
tupla[1]= 0
# -
# Los metodos mas usuales para una cadena son split(), len()
longitud_cadena = len(cad)
print(longitud_cadena)
#split creara un conjunto de elementos, donde el elemento de separacion son los espacios en blanco
separando_cadena = cad.split()
print(separando_cadena)
# Para indicar otro valor de separacion basta con agregar un parametro a la funcion split()
# ### Separar por cada vocal "e"
separando_cadena = cad.split('e')
print(separando_cadena)
# Usando los indices es posible imprimir una lista en orden inverso
cadena = "Python es un lenguaje de alto nivel"
cadena[-1::-1]
#
# ***
#
# Dentro de python podemos usar indices negativos, ejemplo:
# 1. -1 Hace referencia al ultimo elemento de la cadena,
# 2. -2 Hace referencia al penultimo y asi sucesivamente.
#
# cadena[inicio: final: incremento]
#
# Podemos especificar hasta 3 indices, ejemplo:
#
cadena = "Python es un lenguaje interpretado"
#imprime la cadena desde el primer elemento hasta el ultimo haciendo saltos de 3 en 3
cadena[::3]
#Tambien hace lo mismo
cadena[0:len(cadena):3]
# ### Funcion input
x = input()
type(x)
# back slash
print('i\'m gustavo')
a = 'c'
b = "hola"
|
notebooks/.ipynb_checkpoints/Modulo_3-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #Using deep features to build an image classifier
#
# #Fire up GraphLab Create
import graphlab
# #Load a common image analysis dataset
#
# We will use a popular benchmark dataset in computer vision called CIFAR-10.
#
# (We've reduced the data to just 4 categories = {'cat','bird','automobile','dog'}.)
#
# This dataset is already split into a training set and test set.
# Load the pre-split CIFAR-10 subset (4 categories) as GraphLab SFrames.
image_train = graphlab.SFrame('image_train_data/')
image_test = graphlab.SFrame('image_test_data/')
# #Exploring the image data
graphlab.canvas.set_target('ipynb')
image_train['image'].show()
image_train.head()
# #Train a classifier on the raw image pixels
#
# We first start by training a classifier on just the raw pixels of the image.
# Baseline: logistic regression on the raw pixel values only.
raw_pixel_model = graphlab.logistic_classifier.create(image_train,target='label',
                                                     features=['image_array'])
# #Make a prediction with the simple model based on raw pixels
image_test[0:3]['image'].show()
image_test[0:3]['label']
raw_pixel_model.predict(image_test[0:3])
# The model makes wrong predictions for all three images.
# #Evaluating raw pixel model on test data
raw_pixel_model.evaluate(image_test)
# The accuracy of this model is poor, getting only about 46% accuracy.
# #Can we improve the model using deep features
#
# We only have 2005 data points, so it is not possible to train a deep neural network effectively with so little data. Instead, we will use transfer learning: using deep features trained on the full ImageNet dataset, we will train a simple model on this small dataset.
len(image_train)
# ##Computing deep features for our images
#
# The two lines below allow us to compute deep features. This computation takes a little while, so we have already computed them and saved the results as a column in the data you loaded.
#
# (Note that if you would like to compute such deep features and have a GPU on your machine, you should use the GPU enabled GraphLab Create, which will be significantly faster for this task.)
deep_learning_model = graphlab.load_model('http://s3.amazonaws.com/GraphLab-Datasets/deeplearning/imagenet_model_iter45')
image_train['deep_features'] = deep_learning_model.extract_features(image_train)
# As we can see, the column deep_features already contains the pre-computed deep features for this data.
image_train.head()
# #Given the deep features, let's train a classifier
# Transfer learning: logistic regression on ImageNet-derived deep features.
deep_features_model = graphlab.logistic_classifier.create(image_train,
                                                         features=['deep_features'],
                                                         target='label')
# #Apply the deep features model to first few images of test set
image_test[0:3]['image'].show()
deep_features_model.predict(image_test[0:3])
# The classifier with deep features gets all of these images right!
# #Compute test_data accuracy of deep_features_model
#
# As we can see, deep features provide us with significantly better accuracy (about 78%)
deep_features_model.evaluate(image_test)
|
course_materials/course_1/06_deep-learning-searching-for-images/04_deep-features-for-image-classification-ipython-notebook/01_download-the-ipython-notebook-used-in-this-lesson-to-follow-along_deep_features_for_image_classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Motion detection via 3D convolution
#
# In section 10.4.1, we discussed how to detect motion in a sequence of consecutive images via 3D convolution. This notebook contains the fully functional code for the same.
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
width = 320
height = 320
num_frames = 3
# +
def generate_img(centre_x, centre_y, radius):
    """
    Generate a white (width x height) grayscale image containing a filled
    black circle of the given radius centred at (centre_x, centre_y).

    Fix: cv2.circle takes the centre as (x, y); the original passed
    (centre_x, centre_x), silently ignoring centre_y. The demo only worked
    because it uses identical x/y centres and motion steps.
    """
    img = np.ones([width, height], dtype=np.uint8) * 255
    cv2.circle(img, (centre_x, centre_y), radius, (0, 0, 0), -1)
    return img
def tensor_to_img(x):
    """Min-max normalize a tensor into [0, 255] and return it as a uint8
    numpy array suitable for grayscale display."""
    lo, hi = x.min(), x.max()
    x_norm = ((x - lo) / (hi - lo)) * 255
    return x_norm.to(torch.uint8).numpy()
# -
# For the purpose of demonstration, let us artificially generate a sequence of images that simulates the motion of a ball.
# +
# Ball parameters: start at (150, 150) and move 30 px right/down per frame.
center_x, center_y = 150, 150
motion_x, motion_y = 30, 30
radius = 40
imgs = []
for i in range(5):
    circle_img = generate_img(center_x + i * motion_x, center_y + i * motion_y, radius)
    imgs.append(circle_img)
# -
# Let us take a look at the images
for i, img in enumerate(imgs):
    fig = plt.figure()
    fig.suptitle(f"Input T={i}")
    # Scale to [0, 1] floats for matplotlib's grayscale display.
    plt.imshow(img.astype(np.float32) / 255.0, cmap="gray")
    plt.xticks([])
    plt.yticks([])
# Let us convert the list of images to a tensor of shape B x C x T x H x W, where B is batch size, C is number of channels, T is number of time steps (a.k.a length of image sequence), H is height and W is width. In our case, B = 1 and C = 1 because we are dealing a single sequence of grayscale images. T = 5 since we are looking at 5 consecutive images. H and W are both 320.
img = torch.tensor([imgs], dtype=torch.float32) # [C, T, H, W] tensor where num channels (C) = 1
img_batch = img.unsqueeze(0) # [B, C, T, H, W] tensor where batch size (B) = 1
# Now, let us define the motion detection kernel. It is essentially two 2D smoothing kernels (with inverted weights) stacked along the T dimension, i.e.
#
# $$
# \begin{bmatrix}
# \begin{bmatrix}
# \frac{-1}{9} & \frac{-1}{9} & \frac{-1}{9}\\
# \frac{-1}{9} & \frac{-1}{9} & \frac{-1}{9}\\
# \frac{-1}{9} & \frac{-1}{9} & \frac{-1}{9}\\
# \end{bmatrix}
# \begin{bmatrix}
# \frac{1}{9} & \frac{1}{9} & \frac{1}{9}\\
# \frac{1}{9} & \frac{1}{9} & \frac{1}{9}\\
# \frac{1}{9} & \frac{1}{9} & \frac{1}{9}\\
# \end{bmatrix}
# \end{bmatrix}
# $$
#
# When there is motion, we expect pixels in the same spatial location in subsequent frames to have different values. Hence multiplying with the above kernel would result in high values (positive or negative depending on direction of motion) whenever there is motion in the input image.
# Let us define the motion detection kernel
# Build the 3D motion-detection kernel: two 3x3 averaging kernels with
# opposite signs stacked along the time axis (weights are +/-1 rather than
# +/-1/9 as in the text; that only scales the response by a constant).
smoothing_2d_kernel = torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=torch.float32).unsqueeze(0)
kernel = torch.cat([-smoothing_2d_kernel, smoothing_2d_kernel])
# Conv3D kernels need to be of shape (out_channel, in_channel, k_t, k_x, k_y). Since we are defining
# a single kernel, out_channel = in_channel = 1, so we unsqueeze to add those extra dimensions.
kernel = kernel.unsqueeze(0).unsqueeze(0)
# requires_grad=False: this is a fixed, hand-crafted filter, not a learned one.
weight = nn.Parameter(data=kernel, requires_grad=False)
print(f"Kernel shape {kernel.shape}\nKernel: {kernel}")  # shape is 1x1x2x3x3 (the old "1x1x3x3" note was stale)
# (removed a redundant duplicate assignment of ``weight`` that followed the print)
# +
# Let us convolve the motion detection kernel over the input image.
# This can be done in 2 ways.
# Using conv3d function
# Way 1: the stateless functional API with an explicit weight tensor.
out_tensor_1 = F.conv3d(img_batch, weight, padding=0)
# Using conv3d layer
# Conv3D layer
# Way 2: an nn.Conv3d module whose weight is replaced by our fixed kernel.
conv3d = nn.Conv3d(1, 1, kernel_size=[2, 3, 3], stride=1, padding=0, bias=False)
conv3d.weight = weight
with torch.no_grad():  # no autograd graph needed for a fixed-weight forward pass
    out_tensor_2 = conv3d(img_batch)
# -
# Sanity check: both APIs must produce identical outputs.
assert torch.allclose(out_tensor_1, out_tensor_2)
# In the images displayed below, gray represents regions of no motion. Black and white represent regions of motion
# Iterate over the output time steps (indexing batch 0, channel 0).
for i, img_tensor in enumerate(out_tensor_1[0][0]):
    img = tensor_to_img(img_tensor)
    fig = plt.figure()
    fig.suptitle(f"Output: T={i}")
    plt.imshow(img.astype(np.float32) / 255.0, cmap="gray")
    plt.xticks([])
    plt.yticks([])
|
python/ch10/10.4.1-3dconv-motion-detection.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # 4 prepare remote deploy
# ## prepare turtlebot3
# You can use either turtlebot3_simulator (A.) or actual turtlebot3 robot (B.).
# ### A. use "turtlebot3 simulator" on Ubuntu desktop
# #### prepare Ubuntu desktop as a turtlebot3 simulator
# 1. prepare Ubuntu 16.04 desktop
# * If you use a virtual machine on VirtualBox as Ubuntu desktop, confirm below:
# * "3D acceleration" of display is "OFF"
# * set `export LIBGL_ALWAYS_SOFTWARE=1` to your .bashrc
# 1. set `export TURTLEBOT3_MODEL=waffle` to your .bashrc
# 1. install `ros-kinetic-desktop-full` and `ros-kinetic-rqt-*` using `apt`.
# 1. create ROS workspace.
# 1. clone repositories of turtlebot3_simulator from github.
# * https://github.com/ROBOTIS-GIT/turtlebot3.git
# * https://github.com/ROBOTIS-GIT/turtlebot3_msgs.git
# * https://github.com/ROBOTIS-GIT/turtlebot3_simulations.git
# 1. make repositories using `catkin_make`.
# ### B. use actual "turtlebot3" robot
# #### prepare the actual turtlebot3 robot
# 1. prepare turtlebot3
# 1. make repositories using `catkin_make`.
# ----
# change ${CORE_ROOT} to your path of `core`.
# Resolve the two project checkouts used throughout this notebook.
# change ${CORE_ROOT} to your path of `core`.
export CORE_ROOT="${HOME}/core"
# change ${PJ_ROOT} to your path of `example-turtlebot3`.
export PJ_ROOT="${HOME}/example-turtlebot3"
cd ${PJ_ROOT};pwd
# example)
# ```
# /Users/user/example-turtlebot3
# ```
# ## load environment variables
# load from `core` (DOMAIN, FIWARE_SERVICE, MQTT credentials, ...)
source ${CORE_ROOT}/docs/environments/azure_aks/env
# load from `example-turtlebot3` (DEPLOYER_ID, DEPLOYER_TYPE, ...)
source ${PJ_ROOT}/docs/environments/azure_aks/env
# ## setup alias
# Define a cross-platform `openbrowser` alias; `expr substr $(uname -s) 1 5`
# extracts the leading "Linux" from e.g. "Linux-gnu" kernel names.
if [ "$(uname)" == 'Darwin' ]; then
alias openbrowser='open'
elif [ "$(expr substr $(uname -s) 1 5)" == 'Linux' ]; then
alias openbrowser='xdg-open'
else
echo "Your platform ($(uname -a)) is not supported."
exit 1
fi
# ----
# ## setup docker
# 1. install required packages
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo apt update
# turtlebot3@turtlebot3:~$ sudo apt upgrade -y
# turtlebot3@turtlebot3:~$ sudo apt install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
# ```
# 2. add Docker’s official GPG key:
#
# ```bash
# turtlebot3@turtlebot3:~$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# ```
# 3. set up the stable repository of Docker
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# ```
# 4. install Docker
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo apt update
# turtlebot3@turtlebot3:~$ sudo apt-get install -y docker-ce docker-ce-cli containerd.io
# ```
# 5. verify installation
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo docker version
# turtlebot3@turtlebot3:~$ sudo docker run hello-world
# ```
# ## setup minikube
# 1. setup minikube
#
# ```bash
# turtlebot3@turtlebot3:~$ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.0.0/minikube-linux-amd64 && chmod +x minikube && sudo cp minikube /usr/local/bin/ && rm minikube
# ```
# 2. verify installation
#
# ```bash
# turtlebot3@turtlebot3:~$ minikube version
# ```
# ## setup kubectl
# 1. setup kubectl
#
# ```bash
# turtlebot3@turtlebot3:~$ curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl && chmod +x kubectl && sudo cp kubectl /usr/local/bin/ && rm kubectl
# ```
# 2. verify installation
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl version --client
# ```
# ## start minikube without virtualization
# ### start minikube
# 1. delete minikube if already started
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo minikube stop
# turtlebot3@turtlebot3:~$ sudo minikube delete
# turtlebot3@turtlebot3:~$ sudo rm -rf /etc/kubernetes/
# turtlebot3@turtlebot3:~$ sudo rm -rf $HOME/.minikube/
# turtlebot3@turtlebot3:~$ rm -rf $HOME/.kube/
# ```
# 2. setup environment variables
#
# ```bash
# turtlebot3@turtlebot3:~$ export MINIKUBE_WANTUPDATENOTIFICATION=false
# turtlebot3@turtlebot3:~$ export MINIKUBE_WANTREPORTERRORPROMPT=false
# turtlebot3@turtlebot3:~$ export MINIKUBE_HOME=$HOME
# turtlebot3@turtlebot3:~$ export CHANGE_MINIKUBE_NONE_USER=true
# turtlebot3@turtlebot3:~$ export KUBECONFIG=$HOME/.kube/config
# ```
#
# ```bash
# turtlebot3@turtlebot3:~$ export CPU_CORE_NUM="1"
# turtlebot3@turtlebot3:~$ export MEMORY_MB=2048
# turtlebot3@turtlebot3:~$ export K8S_VERSION="v1.13.5"
# ```
# 3. create directories and `.kube/config`
#
# ```
# turtlebot3@turtlebot3:~$ mkdir -p $HOME/.kube $HOME/.minikube
# turtlebot3@turtlebot3:~$ touch $KUBECONFIG
# ```
# 4. start minikube without virtualization
#
# ```bash
# turtlebot3@turtlebot3:~$ sudo -E minikube start --cpus ${CPU_CORE_NUM} --memory ${MEMORY_MB} --vm-driver=none --kubernetes-version ${K8S_VERSION} --feature-gates=CoreDNS=false
# ```
# 5. verify minikube
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl version
# turtlebot3@turtlebot3:~$ kubectl get nodes
# ```
# 6. confirm that all PODs are `Running`
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl get pods --all-namespaces
# ```
# ```bash
# example)
# NAMESPACE NAME READY STATUS RESTARTS AGE
# kube-system etcd-minikube 1/1 Running 0 3m26s
# kube-system kube-addon-manager-minikube 1/1 Running 0 3m25s
# kube-system kube-apiserver-minikube 1/1 Running 0 3m17s
# kube-system kube-controller-manager-minikube 1/1 Running 0 3m15s
# kube-system kube-dns-86b8794d97-gwqkr 3/3 Running 0 4m8s
# kube-system kube-proxy-f4pvl 1/1 Running 0 4m8s
# kube-system kube-scheduler-minikube 1/1 Running 0 3m14s
# kube-system storage-provisioner 1/1 Running 0 4m6s
# ```
# ### confirm the dns settings of minikube
# 1. try to dig `www.google.com`
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl run -it --rm --restart=Never dig --image tutum/dnsutils -- dig www.google.com
# ```
# 2. if you encountered `connection timed out`, you have to setup the additional nameservers to `kube-dns`
# ### setup additional nameservers to `kube-dns`
# 1. prepare `/tmp/kube-dns-configmap.yaml` by using below command:
#
# ```bash
# turtlebot3@turtlebot3:~$ cat << __EOF__ > /tmp/kube-dns-configmap.yaml
# apiVersion: v1
# kind: ConfigMap
# metadata:
# name: kube-dns
# namespace: kube-system
# labels:
# addonmanager.kubernetes.io/mode: EnsureExists
# data:
# upstreamNameservers: |-
# ["8.8.8.8", "8.8.4.4"]
# __EOF__
# ```
# 2. apply `/tmp/kube-dns-configmap.yaml`
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl apply -f /tmp/kube-dns-configmap.yaml
# ```
# 3. delete the pod of `kube-dns` (restart `kube-dns` automatically)
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl delete pod -n kube-system $(kubectl get pods -n kube-system -l k8s-app=kube-dns -o template --template "{{(index .items 0).metadata.name}}")
# ```
# 4. confirm that `kube-dns` is started
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl get pods -n kube-system -l k8s-app=kube-dns
# NAME READY STATUS RESTARTS AGE
# kube-dns-86f4d74b45-x7m75 3/3 Running 0 2m
# ```
# 5. retry to dig `www.google.com`
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl run -it --rm --restart=Never dig --image tutum/dnsutils -- dig www.google.com
# ```
# ## register deployer service
# Read the first bearer token from the secrets file, then register an
# IoTAgent-UL "service" for the deployer device type (API key = device type,
# context broker = orion, UL2.0 resource path /iot/d).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-ServicePath: ${DEPLOYER_SERVICEPATH}" -H "Content-Type: application/json" https://api.${DOMAIN}/idas/ul20/manage/iot/services/ -X POST -d @- <<__EOS__
{
"services": [
{
"apikey": "${DEPLOYER_TYPE}",
"cbroker": "http://orion:1026",
"resource": "/iot/d",
"entity_type": "${DEPLOYER_TYPE}"
}
]
}
__EOS__
# expected)
# ```json
# {}
# ```
# ### confirm registered service
# Fetch the registered services back from the IoTAgent to confirm the POST above.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" https://api.${DOMAIN}/idas/ul20/manage/iot/services/ | jq .
# example)
# ```bash
# {
# "count": 1,
# "services": [
# {
# "_id": "5b90ba6c0c4a497125314e7c",
# "subservice": "/deployer",
# "service": "fiwaredemo",
# "apikey": "deployer",
# "resource": "/iot/d",
# "__v": 0,
# "attributes": [],
# "lazy": [],
# "commands": [],
# "entity_type": "deployer",
# "internal_attributes": [],
# "static_attributes": []
# }
# ]
# }
# ```
# ## register deployer device
# Register the deployer device itself: 8 readable attributes mirroring
# Kubernetes Deployment status counters, plus "apply"/"delete" commands,
# transported over AMQP.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-ServicePath: ${DEPLOYER_SERVICEPATH}" -H "Content-Type: application/json" https://api.${DOMAIN}/idas/ul20/manage/iot/devices/ -X POST -d @- <<__EOS__
{
"devices": [
{
"device_id": "${DEPLOYER_ID}",
"entity_name": "${DEPLOYER_ID}",
"entity_type": "${DEPLOYER_TYPE}",
"timezone": "Asia/Tokyo",
"protocol": "UL20",
"attributes": [
{
"name": "deployment",
"type": "string"
},
{
"name": "label",
"type": "string"
},
{
"name": "desired",
"type": "integer"
},
{
"name": "current",
"type": "integer"
},
{
"name": "updated",
"type": "integer"
},
{
"name": "ready",
"type": "integer"
},
{
"name": "unavailable",
"type": "integer"
},
{
"name": "available",
"type": "integer"
}
],
"commands": [
{
"name": "apply",
"type": "string"
}, {
"name": "delete",
"type": "string"
}
],
"transport": "AMQP"
}
]
}
__EOS__
# expected)
# ```json
# {}
# ```
# ### confirm registered device
# Read the device definition back from the IoTAgent to confirm registration.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" https://api.${DOMAIN}/idas/ul20/manage/iot/devices/${DEPLOYER_ID}/ | jq .
# example)
# ```bash
# {
# "device_id": "deployer_01",
# "service": "fiwaredemo",
# "service_path": "/deployer",
# "entity_name": "deployer_01",
# "entity_type": "deployer",
# "transport": "AMQP",
# "attributes": [
# {
# "object_id": "deployment",
# "name": "deployment",
# "type": "string"
# },
# {
# "object_id": "label",
# "name": "label",
# "type": "string"
# },
# {
# "object_id": "desired",
# "name": "desired",
# "type": "integer"
# },
# {
# "object_id": "current",
# "name": "current",
# "type": "integer"
# },
# {
# "object_id": "updated",
# "name": "updated",
# "type": "integer"
# },
# {
# "object_id": "ready",
# "name": "ready",
# "type": "integer"
# },
# {
# "object_id": "unavailable",
# "name": "unavailable",
# "type": "integer"
# },
# {
# "object_id": "available",
# "name": "available",
# "type": "integer"
# }
# ],
# "lazy": [],
# "commands": [
# {
# "object_id": "apply",
# "name": "apply",
# "type": "string"
# },
# {
# "object_id": "delete",
# "name": "delete",
# "type": "string"
# }
# ],
# "static_attributes": [],
# "protocol": "UL20"
# }
# ```
# Confirm that Orion now holds the NGSI entity the IoTAgent created for the device.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/${DEPLOYER_ID}/ | jq .
# example)
# ```bash
# {
# "id": "deployer_01",
# "type": "deployer",
# "TimeInstant": {
# "type": "ISO8601",
# "value": " ",
# "metadata": {}
# },
# "apply_info": {
# "type": "commandResult",
# "value": " ",
# "metadata": {}
# },
# "apply_status": {
# "type": "commandStatus",
# "value": "UNKNOWN",
# "metadata": {}
# },
# "available": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "current": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "delete_info": {
# "type": "commandResult",
# "value": " ",
# "metadata": {}
# },
# "delete_status": {
# "type": "commandStatus",
# "value": "UNKNOWN",
# "metadata": {}
# },
# "deployment": {
# "type": "string",
# "value": " ",
# "metadata": {}
# },
# "desired": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "label": {
# "type": "string",
# "value": " ",
# "metadata": {}
# },
# "ready": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "unavailable": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "updated": {
# "type": "integer",
# "value": " ",
# "metadata": {}
# },
# "apply": {
# "type": "string",
# "value": "",
# "metadata": {}
# },
# "delete": {
# "type": "string",
# "value": "",
# "metadata": {}
# }
# }
# ```
# ## deploy `deployer` to turtlebot3
# ### prepare a command to create Secret of username & password
echo "kubectl create secret generic mqtt-username-password --from-literal=mqtt_username=ros --from-literal=mqtt_password=${<PASSWORD>}"
# 1. create Secret of username & password of MQTT Broker using above command
# ### prepare a command to create ConfigMap of mqtt endpoint
echo "kubectl create configmap mqtt-config --from-literal=mqtt_use_tls=true --from-literal=mqtt_host=mqtt.${DOMAIN} --from-literal=mqtt_port=8883 --from-literal=device_type=${DEPLOYER_TYPE} --from-literal=device_id=${DEPLOYER_ID}"
# 1. create ConfigMap of host & port & topic to connect MQTT Broker using above command
# ### start `deployer` to operate k8s Resources through MQTT
# 1. prepare `/tmp/mqtt-kube-operator.yaml` use below command:
#
# ```bash
# turtlebot3@turtlebot3:~$ cat << __EOF__ > /tmp/mqtt-kube-operator.yaml
# apiVersion: v1
# kind: ServiceAccount
# metadata:
# name: mqtt-kube-operator
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
# name: mqtt-kube-operator
# namespace: default
# rules:
# - apiGroups: [""]
# resources: ["services", "configmaps", "secrets"]
# verbs: ["get", "list", "create", "update", "delete"]
# - apiGroups: ["apps"]
# resources: ["deployments"]
# verbs: ["get", "list", "create", "update", "delete"]
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
# name: mqtt-kube-operator
# namespace: default
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: Role
# name: mqtt-kube-operator
# subjects:
# - kind: ServiceAccount
# name: mqtt-kube-operator
# namespace: default
# ---
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# name: mqtt-kube-operator
# spec:
# replicas: 1
# selector:
# matchLabels:
# app: mqtt-kube-operator
# template:
# metadata:
# labels:
# app: mqtt-kube-operator
# spec:
# serviceAccountName: mqtt-kube-operator
# containers:
# - name: mqtt-kube-operator
# image: roboticbase/mqtt-kube-operator:0.2.0
# imagePullPolicy: Always
# env:
# - name: LOG_LEVEL
# value: "info"
# - name: MQTT_USERNAME
# valueFrom:
# secretKeyRef:
# name: mqtt-username-password
# key: mqtt_username
# - name: MQTT_PASSWORD
# valueFrom:
# secretKeyRef:
# name: mqtt-username-password
# key: mqtt_password
# - name: MQTT_USE_TLS
# valueFrom:
# configMapKeyRef:
# name: mqtt-config
# key: mqtt_use_tls
# - name: MQTT_HOST
# valueFrom:
# configMapKeyRef:
# name: mqtt-config
# key: mqtt_host
# - name: MQTT_PORT
# valueFrom:
# configMapKeyRef:
# name: mqtt-config
# key: mqtt_port
# - name: DEVICE_TYPE
# valueFrom:
# configMapKeyRef:
# name: mqtt-config
# key: device_type
# - name: DEVICE_ID
# valueFrom:
# configMapKeyRef:
# name: mqtt-config
# key: device_id
# - name: REPORT_INTERVAL_SEC
# value: "1"
# - name: USE_DEPLOYMENT_STATE_REPORTER
# value: "true"
# - name: REPORT_TARGET_LABEL_KEY
# value: "report"
# __EOF__
# ```
# 2. apply `/tmp/mqtt-kube-operator.yaml`
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl apply -f /tmp/mqtt-kube-operator.yaml
# ```
# 3. confirm that `mqtt-kube-operator` connect to MQTT Broker
#
# ```bash
# turtlebot3@turtlebot3:~$ kubectl logs -f $(kubectl get pods -l app=mqtt-kube-operator -o template --template "{{(index .items 0).metadata.name}}")
# ```
# ## test publishing the `apply` command of `deployer`
# ### prepare a command to subscribe all topics
echo "mosquitto_sub -h mqtt.${DOMAIN} -p 8883 --cafile ${CORE_ROOT}/secrets/DST_Root_CA_X3.pem -d -u iotagent -P ${MQTT__iotagent} -t /#"
# ### subscribe all topics
# _Outside of this notebook_
# 1. open a terminal.
# 1. run the above command displayed `prepare a command to subscribe all topics`.
# ### send a NGSI to emulate sending command
# Emulate issuing the "apply" command: PATCH the attribute on the NGSI entity;
# Orion notifies the IoTAgent, which publishes the command to the device topic.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -i -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" -H "Content-Type: application/json" https://api.${DOMAIN}/orion/v2/entities/${DEPLOYER_ID}/attrs?type=${DEPLOYER_TYPE} -X PATCH -d @-<<__EOS__
{
"apply": {
"value": "{}"
}
}
__EOS__
# example)
# ```
# HTTP/1.1 204 No Content
# content-length: 0
# fiware-correlator: 7e608a4c-7b7a-11e9-a519-ea31d91d5184
# date: Tue, 21 May 2019 03:42:44 GMT
# x-envoy-upstream-service-time: 42
# server: envoy
# ```
# ### confirm the topic
# when executing the above command, show below messages on the opened terminal.
#
# example)
# ```
# Client mosqsub|70811-MacBook-P received PUBLISH (d0, q0, r0, m0, '/deployer/deployer_01/cmd', ... (20 bytes))
# deployer_01@apply|{}
# Client mosqsub|70811-MacBook-P received PUBLISH (d0, q0, r0, m0, '/deployer/deployer_01/cmdexe', ... (50 bytes))
# deployer_01@apply|ignore format, skip this message
# ```
# ### confirm `deployer` log
# example)
#
# ```
# 2019-04-24T06:16:05.808Z INFO handlers/messageHandler.go:110 received message: deployer_01@apply|{}
# 2019-04-24T06:16:05.809Z INFO handlers/messageHandler.go:138 data: {}
# 2019-04-24T06:16:05.809Z INFO handlers/messageHandler.go:169 invalid format, skip this message: Object 'Kind' is missing in '{}'
# 2019-04-24T06:16:06.309Z INFO handlers/messageHandler.go:105 send message: deployer_01@apply|invalid format, skip this message
# ```
# ### confirm deployer entity
# Re-read the entity to see the command result (apply_status/apply_info updated).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/${DEPLOYER_ID}/ | jq .
# example)
# ```bash
# {
# "id": "deployer_01",
# "type": "deployer",
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22.00Z",
# "metadata": {}
# },
# "apply_info": {
# "type": "commandResult",
# "value": "invalid format, skip this message",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:29:28.956Z"
# }
# }
# },
# "apply_status": {
# "type": "commandStatus",
# "value": "OK",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:29:28.956Z"
# }
# }
# },
# "available": {
# "type": "integer",
# "value": "1",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "current": {
# "type": "integer",
# "value": "1",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "delete_info": {
# "type": "commandResult",
# "value": " ",
# "metadata": {}
# },
# "delete_status": {
# "type": "commandStatus",
# "value": "UNKNOWN",
# "metadata": {}
# },
# "deployment": {
# "type": "string",
# "value": "mqtt-kube-operator",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "desired": {
# "type": "integer",
# "value": "1",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "ready": {
# "type": "integer",
# "value": "1",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "unavailable": {
# "type": "integer",
# "value": "0",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "updated": {
# "type": "integer",
# "value": "1",
# "metadata": {
# "TimeInstant": {
# "type": "ISO8601",
# "value": "2018-09-29T04:30:22Z"
# }
# }
# },
# "apply": {
# "type": "string",
# "value": "",
# "metadata": {}
# },
# "delete": {
# "type": "string",
# "value": "",
# "metadata": {}
# }
# }
# ```
# ## register `cygnus-elasticsearch` as a subscriber of deployer device
# Subscribe cygnus-elasticsearch to attribute changes of any entity whose id
# starts with the deployer id, so status updates are persisted to Elasticsearch.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -i -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DEPLOYER_SERVICEPATH}" -H "Content-Type: application/json" https://api.${DOMAIN}/orion/v2/subscriptions/ -X POST -d @- <<__EOS__
{
"subject": {
"entities": [{
"idPattern": "${DEPLOYER_ID}.*",
"type": "${DEPLOYER_TYPE}"
}],
"condition": {
"attrs": ["deployment", "label", "desired", "current", "updated", "ready", "unavailable", "available"]
}
},
"notification": {
"http": {
"url": "http://cygnus-elasticsearch:5050/notify"
},
"attrs": ["deployment", "label", "desired", "current", "updated", "ready", "unavailable", "available"],
"attrsFormat": "legacy"
}
}
__EOS__
# example)
# ```
# HTTP/1.1 201 Created
# content-length: 0
# location: /v2/subscriptions/5baf010d9970a6a6642afb82
# fiware-correlator: 159fb0aa-c3a1-11e8-bf68-82ea6d7e02e9
# date: Sat, 29 Sep 2018 04:35:25 GMT
# x-envoy-upstream-service-time: 4
# server: envoy
# ```
# List subscriptions to confirm the cygnus-elasticsearch notification was created.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-ServicePath: ${DEPLOYER_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/subscriptions/ | jq .
# example)
# ```json
# [
# {
# "id": "5bb054b40af338d2b9bff606",
# "status": "active",
# "subject": {
# "entities": [
# {
# "idPattern": "deployer_01.*",
# "type": "deployer"
# }
# ],
# "condition": {
# "attrs": [
# "deployment",
# "label",
# "desired",
# "current",
# "updated",
# "ready",
# "unavailable",
# "available"
# ]
# }
# },
# "notification": {
# "timesSent": 1,
# "lastNotification": "2018-09-30T04:44:36.00Z",
# "attrs": [
# "deployment",
# "label",
# "desired",
# "current",
# "updated",
# "ready",
# "unavailable",
# "available"
# ],
# "attrsFormat": "legacy",
# "http": {
# "url": "http://cygnus-elasticsearch:5050/notify"
# }
# }
# }
# ]
# ```
# ## confirm cygnus-elasticsearch
# ### confirm basic auth username & password for Kibana
# Extract the Kibana basic-auth username and password from the secrets file.
# NOTE(review): the jq filter compares .host with == against the string
# "kibana\\..+$", which looks like a regex; this only matches if the JSON
# literally stores that pattern as the host value — confirm against
# auth-tokens.json (jq `test(...)` would be needed for pattern matching).
cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[]|select(.host == "kibana\\..+$")|.settings.basic_auths[0].username' -r
cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[]|select(.host == "kibana\\..+$")|.settings.basic_auths[0].password' -r
# ### set up kibana
# Open Kibana in the default browser (alias defined in the setup section).
openbrowser https://kibana.${DOMAIN}/
# 1. Login kibana by basic authorization using above username and password
# 2. show `Management -> Index Patterns`
# 3. set `cygnus-fiwaredemo-deployer-*` as Index Pattern, and push `Next step`
# 4. set `recvTime` as Time Filter field name, and push `Create index pattern`
#
# **Index name is like: `<<IndexPrefix of cygnus>>-<<FIWARE_SERVICE>>-<<FIWARE_SERVICEPATH>>-<<entityId>>-<<entityType>>-<<md5 hash of attributes>>-<<yyyy.mm.dd>>`**
# ### set up a datasource of grafana
openbrowser https://grafana.${DOMAIN}/login
# 1. add a new Data Source (Elasticsearch)
# * Name: `cygnus-fiwaredemo-deployer`
# * URL: `http://elasticsearch-logging:9200`
# * Access: `Server(Default)`
# * Index name: `cygnus-fiwaredemo-deployer-*`
# * Time field name: `recvTime`
# * Version: `6.0+`
#
# **Index name is like: `<<IndexPrefix of cygnus>>-<<FIWARE_SERVICE>>-<<FIWARE_SERVICEPATH>>-<<entityId>>-<<entityType>>-<<md5 hash of attributes>>-<<yyyy.mm.dd>>`**
#
# 2. import `monitoring/dashboard_turtlebot3.json`
|
docs/en-jupyter_notebook/azure_aks/04_prepare_remote_deploy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Parameter management
#
# PyBamm comes with a set of pre-defined parameters for common chemistries. These files are located inside your PyBaMM installation directory, typically something like
# ```
# ../venv/lib/pythonX.Y/site-packages/pybamm/input/parameters
# ```
# The `input/parameters` directory is organised as follows:
# ```
# input/parameters
# lithium-ion/ # chemistry
# anodes/ # Component
# graphite_Chen2020/ # parameter set
# parameters.csv
# graphite_LGM50_diffusivity_Chen2020.py
# ...
# graphite_mcmb2528_Marquis2019/
# graphite_Ecker2015/
# ...
# cathodes/
# cells/
# electrolytes/
# seis/
# separators/
# experiments/
# lead-acid/
# ...
# ```
# When setting parameter values using `pybamm.ParameterValues` and a filename, the file is searched in several locations, described by
# the variable `pybamm.PARAMETER_PATH`.
# The default is
import pybamm
# The ordered list of directories PyBaMM searches for parameter files.
pybamm.PARAMETER_PATH
# which means that parameter files will first be searched in the current directory, and then the `input/parameters` directory within the
# PyBaMM installation directory.
# ## Editing the default parameters
# It is often much easier to define new parameters by editing existing ones.
#
# We recommend not altering the default parameters provided with the PyBaMM installation. Instead, you can pull all parameter sets for
# a given chemistry into the current directory:
# + language="bash"
# if [[ ! -d "lithium-ion" ]]; then
# echo "Directory lithium-ion does not exist."
# fi
# + language="bash"
# pybamm_edit_parameter lithium-ion
# -
# The above commands will create a `lithium-ion` directory in the current directory, populated with copies of the default parameters, for editing:
# + language="bash"
# ls lithium-ion
# -
# As an example, let's create a new parameter file for the `cell` component, for the `lithium-ion` chemistry. We first create a new directory `my_new_param_set`:
# + language="bash"
# mkdir lithium-ion/cells/my_new_param_set
# -
# and write some data in a file `my_new_param_set/param_file.csv`. This is done using python in this simple example, but it can be done using the text editor of your choice to create a new file or edit an existing file.
# Create example parameter file
from pathlib import Path

# Write an example parameter file. Using pathlib's write_text guarantees the
# file handle is closed even if the write fails; the original open()/close()
# pair leaked the handle on error. mkdir(..., exist_ok=True) makes the cell
# robust if the preceding bash `mkdir` cell was skipped.
param_file = Path("lithium-ion/cells/my_new_param_set/param_file.csv")
param_file.parent.mkdir(parents=True, exist_ok=True)
param_file.write_text(
    """
Name [units],Value
a, 4
b, 5
c, 6
"""
)
# ## Adding a parameter directory
# You can add a search location by modifying the `PARAMETER_PATH` list.
# Let's add our new parameter directory to the list, in first position so that it is searched first:
# Prepend our directory so it is searched before the bundled defaults.
pybamm.PARAMETER_PATH.insert(0, "lithium-ion/cells/my_new_param_set")
pybamm.PARAMETER_PATH
# Note that relative paths added to `pybamm.PARAMETER_PATH` are considered relative to the current directory. In the above example, if you change directory, the new parameter `my_new_param_set` will not be found.
# Let's check that our new parameter file can be read:
# The bare filename resolves through PARAMETER_PATH, hitting our new entry first.
pybamm.ParameterValues("param_file.csv")
# ## Contributing a new parameter set
# Once you're happy with a particular parameter set of your creation, you may want to contribute it to the PyBaMM parameter distribution, so
# that it comes bundled with future release of PyBaMM.
# This will require you to open a Pull Request on the PyBaMM repository, a process that is described here.
#
# A prerequisite is that you add your parameter set to the default parameter directory. You could manually copy the corresponding directory to your installation directory, but the command `pybamm_add_param` can do it for you:
# + language="bash"
# # pybamm_add_parameter <dir> <chemistry> <component>
# pybamm_add_parameter lithium-ion/cells/my_new_param_set lithium-ion cells
# -
# Let's remove the `my_new_param_set` from the parameter search path to check that the parameter directory was added correctly to set of default parameters:
# Drop the local directory from the search path; the next lookup must now
# resolve against the copy installed by pybamm_add_parameter.
pybamm.PARAMETER_PATH.remove("lithium-ion/cells/my_new_param_set")
pybamm.PARAMETER_PATH
pybamm.ParameterValues("lithium-ion/cells/my_new_param_set/param_file.csv")
# If you think you made a mistake, you can always delete a specific parameter set using `pybamm_rm_param`.
#
# + language="bash"
# pybamm_rm_parameter -f lithium-ion/cells/my_new_param_set lithium-ion cells
# -
# Let's remove the local lithium-ion directory to leave this directory as we found it
# + language="bash"
# rm -rf lithium-ion
|
examples/notebooks/parameter-management.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, glob
import pandas as pd
import numpy as np
import sklearn
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Pandas display options.
# BUG FIX: the bare 'precision' and 'max_colwidth' option aliases were
# deprecated in pandas 1.0 and removed in 2.0 (they now raise OptionError);
# use the fully-qualified 'display.*' names.
pd.set_option('display.precision', 5)        # show floats with 5 digits of precision
pd.set_option('display.max_columns', None)   # never truncate the column list
pd.set_option('display.max_colwidth', None)  # never truncate cell contents
# Notebook-wide matplotlib defaults: 6x6-inch figures with slightly
# enlarged label/tick/title fonts.
plt.rcParams['figure.figsize'] = (6, 6)
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['figure.titlesize'] = 14
plt.rcParams['axes.titlesize'] = 12
# -
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_curve, auc, f1_score
from sklearn.model_selection import cross_val_predict, cross_val_score, StratifiedKFold
def plot_digit(data, **kwargs):
    """Display a flattened 28x28 MNIST digit as a binary-colormap image.

    Keyword args:
        figsize: tuple forwarded to plt.figure (default (4, 4)).
    """
    size = kwargs.get('figsize', (4, 4))
    pixels = data.reshape(28, 28)
    plt.figure(figsize=size)
    plt.imshow(pixels, cmap=plt.cm.binary, interpolation="nearest")
    plt.axis("off")
from sklearn.datasets import fetch_openml
# Download MNIST (70k 28x28 digits) from OpenML; cached locally after first run.
# NOTE(review): recent sklearn returns a DataFrame here unless as_frame=False —
# confirm downstream positional slicing still behaves as expected.
mnist = fetch_openml('mnist_784', version=1)
X, y = mnist["data"], mnist["target"]
y = y.astype(np.uint8)  # labels arrive as strings; cast to small ints
X.shape
# Standard MNIST split: first 60k for training, last 10k for testing.
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# ### Exercise 1. KNN to achieve over 97% accuracy in test set
# KNN is slow to run. Use a subset of data instead.
from collections import Counter
# Quick class-balance check on a small prefix of the labels.
Counter(y_train[:2000])
# Cap the training size so the KNN grid search below stays tractable.
obs_upto = 5000
X_train_trimmed = X_train[:obs_upto]
y_train_trimmed = y_train[:obs_upto]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# +
# Grid-search k and the vote weighting over 3-fold CV on the trimmed set.
param_grid = [{'n_neighbors': [3,5,7], 'weights': ['uniform', 'distance']}]
knn = KNeighborsClassifier(n_jobs=4)  # parallelize neighbor queries
grid_search = GridSearchCV(knn, param_grid=param_grid, cv=3, verbose=3)
grid_search.fit(X_train_trimmed, y_train_trimmed)
# -
grid_search.best_params_
grid_search.best_score_
# +
from sklearn.metrics import accuracy_score
# Evaluate the refit best estimator on the held-out test split.
y_pred = grid_search.predict(X_test)
accuracy_score(y_test, y_pred)
# -
# ### Exercise 2. Shift image as data augmentation
from scipy.ndimage.interpolation import shift, rotate
# +
def shift_image(image, move, new=0.0):
    """Translate a flattened 28x28 image by `move` = (rows, cols) pixels.

    Pixels shifted in from the border are filled with `new`.
    Returns the translated image re-flattened to length 784.
    """
    grid = image.reshape(28, 28)
    moved = shift(grid, move, cval=new)
    return moved.reshape(784)
def rotate_image(image, angle, new=0.0):
    """Rotate a flattened 28x28 image by `angle` degrees (counter-clockwise).

    Uses bilinear interpolation (order=1) and keeps the 28x28 frame
    (reshape=False); uncovered corners are filled with `new`.
    Returns the rotated image re-flattened to length 784.
    """
    grid = image.reshape(28, 28)
    turned = rotate(grid, angle, cval=new, order=1, reshape=False)
    return turned.reshape(784)
# -
# Collect the original images plus shifted/rotated variants; concatenated below.
X_train_extended = [X_train]
# +
# One-pixel translations in each of the four cardinal directions.
moves = [(1,0), (-1,0), (0,1), (0,-1)]
for move in moves:
    X_train_extended.append(np.apply_along_axis(shift_image, axis=1, arr=X_train, move=move))
# +
# Small rotations either way.
angles = [-10, 10]
for angle in angles:
    X_train_extended.append(np.apply_along_axis(rotate_image, axis=1, arr=X_train, angle=angle))
# -
# Labels repeat once per augmentation block, matching np.concatenate's order.
y_train_extended = np.tile(y_train, len(X_train_extended))
X_train_extended = np.concatenate(X_train_extended)
X_train_extended.shape
y_train_extended.shape
np.random.seed(42)
# BUG FIX: the original permuted only range(len(X_train)) (the first 60k
# indices), so fancy-indexing the 7x-larger X_train_extended selected *only
# the un-augmented original images* and silently discarded every augmented
# sample. Permute the full extended set instead so the trimmed subset below
# actually mixes originals with shifted/rotated variants.
permutation_ids = np.random.permutation(len(X_train_extended))
X_train_extended = X_train_extended[permutation_ids]
y_train_extended = y_train_extended[permutation_ids]
# Keep a small subset so the KNN refit stays tractable.
X_train_extended_trimed = X_train_extended[:obs_upto]
y_train_extended_trimed = y_train_extended[:obs_upto]
# Refit KNN with the best hyperparameters found earlier, on the augmented subset.
knn_clf = KNeighborsClassifier(**grid_search.best_params_)
knn_clf.fit(X_train_extended_trimed, y_train_extended_trimed)
y_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_pred)
# Row-normalize the confusion matrix to error *rates*, then zero the diagonal
# so the plot highlights only the misclassifications.
conf_mat = confusion_matrix(y_test, y_pred)
conf_mat_normed = conf_mat / conf_mat.sum(axis=1, keepdims=True)
np.fill_diagonal(conf_mat_normed, val=0)
plt.matshow(conf_mat_normed, cmap='Greys')
# ### 3. Tackle the Titanic dataset
# #### Load data
# Location of the Titanic CSV files (hard-coded to a local dataset checkout).
TITANIC_PATH = os.path.join("/Users/huishi/Learning_DS/handson-ml2/datasets", "titanic")
# +
#os.makedirs("/Users/huishi/Learning_DS/handson-ml2/datasets/titanic")
# -
def load_titanic_data(filename, titanic_path=TITANIC_PATH):
    """Read `<titanic_path>/<filename>` into a pandas DataFrame."""
    return pd.read_csv(os.path.join(titanic_path, filename))
# Load the Kaggle-style train/test splits.
train_data = load_titanic_data("train.csv")
test_data = load_titanic_data("test.csv")
# BUG FIX: the original line `complete_data = np.c` referenced a nonexistent
# numpy attribute (AttributeError at runtime; `np.c_` is the column-stack
# helper). The statement appears abandoned and `complete_data` is never used
# afterwards, so the broken assignment has been removed.
train_data.head()
# #### Take a look
# Basic shape/dtype/missing-value overview of the training set.
train_data.shape
train_data.info()
# Is data imbalanced?
# NOTE(review): pd.value_counts is deprecated in newer pandas; the method form
# train_data['Survived'].value_counts() is the forward-compatible spelling.
pd.value_counts(train_data['Survived'])
# Fraction of missing values per column.
train_data.isnull().sum(axis=0) / len(train_data)
# Age is null for some survivors.
pd.crosstab(train_data['Survived'], train_data['Age'].isnull())
train_data.describe()
train_data.describe(include='O')  # 'O' = object (string) columns only
def survive_rate_by_cat_feature(df, metric):
    """Survival rate and group size per level of `metric`, highest rate first.

    Returns a DataFrame indexed by the levels of `metric` with columns
    'mean' (survival rate) and 'count' (group size).
    """
    subset = df[[metric, 'Survived']]
    stats = subset.groupby(metric)['Survived'].agg(['mean', 'count'])
    return stats.sort_values('mean', ascending=False)
# Survival rate per level for each obvious categorical feature.
agg_list = []
for metric in ['Sex', 'Embarked', 'Pclass']:
    agg_list.append(survive_rate_by_cat_feature(train_data, metric))
agg_list
# Cabin deck letter distribution (leading letters of the Cabin code).
train_data.Cabin.str.extract(pat='^([a-zA-Z]*)', expand=False).dropna().value_counts()
# Age distribution split by survival: density, histogram, and boxplot views.
g = sns.FacetGrid(train_data, col='Survived')
g.map(sns.kdeplot, 'Age')
g = sns.FacetGrid(train_data, col='Survived')
g.map(plt.hist, 'Age', alpha=0.7)
sns.boxplot(y='Age', x='Survived', data=train_data, palette='Set2')
# #### Feature transformation
# Feature selection: four numeric and three categorical predictors.
num_vars = ['Age', 'SibSp', 'Parch', 'Fare']
cat_vars = ['Pclass', 'Sex', 'Embarked']
X_train, X_test = train_data[num_vars + cat_vars], test_data[num_vars + cat_vars]
y_train = train_data['Survived']
# Use only cabin class and cast Pclass from int to str.
# +
# .assign returns a new frame, avoiding SettingWithCopy warnings on the slice.
X_train = X_train.assign(
#     Cabin = X_train['Cabin'].str.extract('^([a-zA-Z]+)', expand=False),
    Pclass = X_train['Pclass'].astype('str'))
X_test = X_test.assign(
#     Cabin = X_test['Cabin'].str.extract('^([a-zA-Z]+)', expand=False),
    Pclass = X_test['Pclass'].astype('str'))
# -
# -
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder, LabelEncoder
from sklearn.compose import ColumnTransformer
# +
# Numeric features: fill missing values with the column median, then standardize.
num_pipeline = Pipeline([
    ('med_imputer', SimpleImputer(strategy='median')),
    ('std_scaler', StandardScaler())]
)
# Categorical features: fill missing with the mode, then one-hot encode.
# NOTE(review): OneHotEncoder defaults to erroring on categories unseen during
# fit — fine here since Sex/Embarked levels match across splits; confirm if reused.
cat_pipeline = Pipeline([
    ('freq_imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder())
])
preprocess_pipeline = ColumnTransformer([
    ('num', num_pipeline, num_vars),
    ('cat', cat_pipeline, cat_vars[1:]),
    ('ord', OrdinalEncoder(), ['Pclass']) # Pclass has a natural order itself
])
label_encoder = LabelEncoder() # for target variable
# -
# Fit the transformers on the training split only; reuse them on the test split.
X_train_trans = preprocess_pipeline.fit_transform(X_train)
X_test_trans = preprocess_pipeline.transform(X_test)
y_train_trans = label_encoder.fit_transform(y_train)
X_train_trans.shape
# #### Fit two models
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
# Fix: precision_score and recall_score are used later in this notebook but
# were never imported (NameError at the precision/recall cells).
from sklearn.metrics import accuracy_score, f1_score, auc, precision_recall_curve, roc_curve, roc_auc_score, precision_score, recall_score
# (Removed a stray bare `1e-4` expression left over from editing — it had no effect.)
# +
# Logistic-regression baseline: grid-search the solver tolerance with 10-fold
# cross-validation on accuracy.
log_reg = LogisticRegression(random_state=42)
param_grid_lr = [{"tol": [0.3, 1e-1, 1e-2, 1e-3]}]
grid_search_lr = GridSearchCV(log_reg, param_grid_lr, scoring='accuracy', cv=10)
grid_search_lr.fit(X_train_trans, y_train_trans)
# -
# Per-candidate CV results, best score first.
cv_results_lr = pd.DataFrame(grid_search_lr.cv_results_).sort_values('mean_test_score', ascending=False)
cv_results_lr
grid_search_lr.best_score_
# +
# Gradient boosting: grid over learning rate, ensemble size, and feature subsampling.
gbc = GradientBoostingClassifier(random_state=42)
param_grid_gbc = [{"learning_rate":[0.3, 0.1, 0.03], "n_estimators": [50,100,200], "max_features": ["log2", 0.6, None]}]
grid_search_gbc = GridSearchCV(gbc, param_grid_gbc, scoring='accuracy', cv=10)
grid_search_gbc.fit(X_train_trans, y_train_trans)
# -
cv_results_gbc = pd.DataFrame(grid_search_gbc.cv_results_).sort_values('mean_test_score', ascending=False)
cv_results_gbc.head(3)
grid_search_gbc.best_estimator_
grid_search_gbc.best_score_
# #### performance and error analysis
# Predicted probability of the positive class (Survived = 1) on the training set.
y_train_pred = grid_search_gbc.predict_proba(X_train_trans)[:,1]
# **Precision and Recall**
precisions, recalls, thresholds = precision_recall_curve(y_train_trans, y_train_pred)
# +
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall entry than
    # thresholds, so drop the final point to align the arrays.
    plt.plot(thresholds, precisions[:-1], "b--", linewidth=2, label="Precision")
    plt.plot(thresholds, recalls[:-1], "g-", linewidth=2, label="Recall")
    plt.xlabel("Threshold", fontsize=16)
    plt.legend(loc="lower left", fontsize=16)
    plt.grid(True)
    plt.axis([0, 1, 0, 1])
# when precision is 90%, what are the recall and threshold?
# np.argmax on a boolean array returns the index of the first True, i.e. the
# first point at which precision reaches 90%.
recall_90_precision = recalls[np.argmax(precisions >= 0.90)]
threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)]
plt.figure(figsize=(8, 4))                                                                  # Not shown
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# Red guide lines marking the 90%-precision operating point on both curves.
plt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], "r:")                 # Not shown
plt.plot([0.02, threshold_90_precision], [0.9, 0.9], "r:")                                  # Not shown
plt.plot([0.02, threshold_90_precision], [recall_90_precision, recall_90_precision], "r:")# Not shown
plt.plot([threshold_90_precision], [0.9], "ro")                                             # Not shown
plt.plot([threshold_90_precision], [recall_90_precision], "ro")                             # Not shown
plt.show()
# -
# Fix: the original indexed with the undefined name `precision` (NameError);
# the array returned by precision_recall_curve is `precisions`. The local
# import also fixes precision_score/recall_score never being imported.
from sklearn.metrics import precision_score, recall_score
threshold_precision_90 = thresholds[np.argmax(precisions > 0.9)]
recall_90_precision, threshold_90_precision, threshold_precision_90
# Classify as positive only above the ~90%-precision threshold.
y_train_pred_v2 = y_train_pred >= threshold_precision_90
precision_score(y_train_trans, y_train_pred_v2)
recall_score(y_train_trans, y_train_pred_v2)
# Fix: `recall` and `precision` were undefined names (NameError); the arrays
# produced by precision_recall_curve above are `recalls` and `precisions`.
plt.plot(recalls, precisions)
plt.xlabel("recall")
plt.ylabel("precision")
plt.title(f'Precision Recall Curve: Gradient Boosting Classifier\n{grid_search_gbc.best_params_}')
# Mark the 90%-precision operating point.
plt.scatter(x=recall_90_precision, y=0.9, c='r')
plt.plot([0, recall_90_precision], [0.9, 0.9], 'r--')
plt.plot([recall_90_precision, recall_90_precision], [0, 0.9], 'r--')
plt.axis([0,1,0,1])
plt.text(0.1, 0.1, s=f"AUC-PR: {auc(recalls, precisions):.3f}", fontsize=12, color='C2')
plt.grid(True)
# **ROC**
fpr, tpr, thresholds = roc_curve(y_train_trans, y_train_pred)
# +
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve along with the chance-level diagonal."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level (random classifier) diagonal
    plt.axis([0, 1, 0, 1])
    plt.grid(True)
    plt.xlabel('False Positive Rate (Fall-Out)', fontsize=16)
    plt.ylabel('True Positive Rate (Recall)', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
# Fix: `fpr_90_tpr` was used without ever being defined (NameError). It is the
# false-positive rate at the first point where the TPR reaches the recall
# achieved at 90% precision.
fpr_90_tpr = fpr[np.argmax(tpr >= recall_90_precision)]
plt.plot([fpr_90_tpr, fpr_90_tpr], [0., recall_90_precision], "r:")
plt.plot([0.0, fpr_90_tpr], [recall_90_precision, recall_90_precision], "r:")
plt.plot([fpr_90_tpr], [recall_90_precision], "ro")
# Fix: `save_fig` is a helper from the book's repository that is not defined in
# this notebook and raised NameError; save directly through matplotlib instead.
plt.savefig("roc_curve_plot.png", bbox_inches='tight', dpi=300)
plt.show()
# -
plt.plot(fpr, tpr)
plt.plot([0,1], [0,1], 'k--')
plt.axis([0,1,0,1])
plt.text(0.1, 0.7, s=f"AUC-ROC: {roc_auc_score(y_train_trans, y_train_pred):.3f}", fontsize=12, color='C2')
plt.grid(True)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
# Fix: the title was a plain string with a stray trailing "')" and a {...}
# placeholder that was never interpolated; use an f-string instead.
plt.title(f"ROC: Gradient Boosting Classifier\n{grid_search_gbc.best_params_}")
# **boxplot for cv 10 fold split test**
# Per-fold test accuracies for each model's best parameter setting
# (first row of the score-sorted CV tables).
lr_scores = cv_results_lr.filter(regex='split[0-9]+_test_score', axis=1).head(1).values.flatten()
gbc_scores = cv_results_gbc.filter(regex='split[0-9]+_test_score', axis=1).head(1).values.flatten()
plt.figure(figsize=(6, 4))
# Overlay the individual fold scores as dots on top of the box plots.
plt.plot([1]*10, lr_scores, ".")
plt.plot([2]*10, gbc_scores, ".")
plt.boxplot([lr_scores, gbc_scores], labels=("Logistic","GBC"))
plt.ylabel("Accuracy", fontsize=14)
plt.show()
|
exercises/ch03.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 5.3 Whole Slide Scoring - CNN Scores vs. CERAD-like Scores
#
# A two-sided, independent, two-sample t-test was used to test the null hypothesis that two independent samples have identical expected values. CNN-based quantification scores of WSIs from different CERAD categories were used for the test. Data were presented as box plots overlaid with dot plot. Box plot plotted interquartile range (top and bottom of the box), median (the band inside the box), and outliers (points beyond the whiskers).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import stats
from tqdm import tqdm
# -
# Input CSV: one row per whole-slide image, with CERAD-like categories,
# CNN-derived plaque counts, and tissue area.
CSV_PATH = 'data/outputs/CNNscore/CNN_vs_CERAD.csv'
# Output directory for the generated figures.
SAVE_DIR = 'data/outputs/CNNscore/'
# Column names for manual CERAD-like scores and CNN counts, in matching class order.
CERAD_score_names = ['Cored_MTG', 'Diffuse_MTG', 'CAA_MTG']
CNN_score_names = ['CNN_cored_count', 'CNN_diffuse_count', 'CNN_caa_count']
# Human-readable class labels used in axis titles (same order as above).
classes = ['cored plaque', 'diffuse plaque', 'CAA']
def get_significance_label(p):
    """Map a p-value to the conventional star notation (n.s., *, **, ***, ****)."""
    cutoffs = (0.05, 0.01, 0.001, 0.0001)
    labels = ('n.s.', '*', '**', '***')
    for cutoff, label in zip(cutoffs, labels):
        if p > cutoff:
            return label
    return '****'
def p_show(p):
    """Round *p* to just enough decimals to show its leading significant digits.

    For the first i in 2..14 with p > 10**-i, returns round(p, i + 1),
    e.g. 0.0234 -> 0.023.

    Fix: the original fell off the end of the loop and returned None for
    p <= 1e-14, which rendered as the literal text "None" on the p-value
    heatmap; such tiny p-values are now returned unchanged.
    """
    for i in range(2, 15):
        if p > 1 / 10**i:
            return round(p, i + 1)
    return p
# +
# Custom colormap for -log10(p) heatmaps. The interior anchor at 1.301/4
# corresponds to -log10(0.05) scaled into the [0, 4] colorbar range, so the
# color changes exactly at the 0.05 significance cutoff.
cdict = {'red': ((0.0, 0.0, 1.0),
                 (1.301/4, 0.7, 0.7),
                 (1.0, 0/255, 0.0)),
         'green': ((0.0, 0.0, 0.0),
                 (1.301/4, 0.7, 0.7),
                 (1.0, 100/255, 0.0)),
         'blue': ((0.0, 0.0, 0.0),
                 (1.301/4,0.7,0.7),
                 (1.0, 1.0, 0.0))
        }
colorbar = LinearSegmentedColormap('pvalue', cdict)
# Box-plot outlier style: red '+' markers, no connecting line.
flierprops = dict(marker='+', markerfacecolor='red', markersize=12, markeredgecolor='red',
                  linestyle='none')
# +
cnn_file = pd.read_csv(CSV_PATH)
# One figure pair (box/dot plot + p-value heatmap) per plaque class.
for name in [0,1,2]:
    CERAD_score_name = CERAD_score_names[name]
    CNN_score_name = CNN_score_names[name]
    data = []
    scores = [0,1,2,3]
    for score in scores:
        # Area-normalized CNN counts (x1000) for slides in this CERAD category.
        count = np.asarray(cnn_file[cnn_file[CERAD_score_name]==score][CNN_score_name]) * 1000
        count = count / np.asarray(cnn_file[cnn_file[CERAD_score_name]==score]['Area'])
        count.shape = (-1,1)
        data.append(count[~np.isnan(count)])
    # box plot and dot plot
    fig = plt.figure(figsize=(12,10))
    ax = fig.add_subplot(111)
    bp = ax.boxplot(data, flierprops=flierprops, showmeans=False)
    for i in range(4):
        plt.setp(bp['medians'][i], color=(0,0,1), lw=4)
        ax.scatter([i+1 for _ in data[i]], data[i], c='k')
    # t test - calculate p-values
    p_values = np.ones([4,4])
    max_value = max([d.max() for d in data])
    # Adjacent categories (none/sparse, sparse/moderate, moderate/frequent).
    for x in [1,2,3]:
        _, p = stats.ttest_ind(data[x-1], data[x], nan_policy='omit')
        p_values[x, x-1] = p
        # plot significance label above the pair of boxes
        x1, x2 = x+0.03, x+0.97
        y, h, col = max_value*1.1, max_value*0.03, 'k'
        text = get_significance_label(p)
        ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1.5, c=col)
        ax.text((x1+x2)*.5, y+h, text, ha='center', va='bottom', color=col, fontsize=25)
    # Categories two apart (none/moderate, sparse/frequent).
    for x in [1,2]:
        _, p = stats.ttest_ind(data[x-1], data[x+1], nan_policy='omit')
        p_values[x+1, x-1] = p
    # Extreme pair (none/frequent).
    _, p = stats.ttest_ind(data[0], data[3], nan_policy='omit')
    p_values[3, 0] = p
    ax.set_ylim([-max([d.max() for d in data])/10, max([d.max() for d in data])*1.3])
    _ = ax.set_xticklabels(['none', 'sparse', 'moderate', 'frequent'])
    ax.set_xlabel('\nCERAD-like categories ({})'.format(classes[name]), fontsize=30)
    ax.set_ylabel('CNN-based scores ({})'.format(classes[name]), fontsize=30)
    ax.xaxis.set_tick_params(labelsize=30, size=0, width=2)
    ax.yaxis.set_tick_params(labelsize=30, size=5, width=2)
    fig.savefig(SAVE_DIR+'box_plot_{}_entireset.png'.format(CNN_score_name),
                bbox_inches='tight', dpi=300)
    plt.pause(0.001)
    # plot p-values matrix
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    nlogp = -np.log10(p_values[1:, :3]) # only have 3x3 p-values
    mask = np.tri(nlogp.shape[0], k=0)
    nlogp = np.ma.array(nlogp, mask=1-mask)  # hide the upper triangle
    im = ax.imshow(nlogp, cmap=colorbar, vmin=0, vmax=4)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    cbar = plt.colorbar(im, cax=cax, ticks=[0,1.30103,2,4])
    cbar.ax.tick_params(labelsize=30, size=8, width=4)
    cbar.ax.set_yticklabels(['1','0.05','0.01','< 1e-4'])
    # Fix: colorbar label typo 'p vlaues' -> 'p values'.
    cbar.ax.text(4.2, 0.65, 'p values', rotation=90, fontsize=30)
    for i in range(3):
        for j in range(0, i+1):
            text = ax.text(j, i, p_show(p_values[i+1,j]),
                           ha="center", va="center", color=(1,1,1), fontsize=30)
    ax.set_title('t-test p-values', fontsize=30, y=1.01)
    _ = plt.xticks([0,1,2], ['none', 'sparse', 'moderate'], fontsize=30)
    _ = plt.yticks([0,1,2], ['sparse', 'moderate', 'frequent'], fontsize=30)
    ax.xaxis.set_tick_params(labelsize=30, size=0, width=2)
    ax.yaxis.set_tick_params(labelsize=30, size=0, width=2)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # NOTE(review): 'p_vaplus' in the output filename looks like a typo for
    # 'p_values'; kept as-is so any existing downstream references still resolve.
    fig.savefig(SAVE_DIR+'p_vaplus_{}_entireset.png'.format(CNN_score_name),
                bbox_inches='tight', dpi=300)
    # print statistics
    print('m, f: ', stats.ttest_ind(data[2], data[3], nan_policy='omit'))
    print('s, m: ', stats.ttest_ind(data[1], data[2], nan_policy='omit'))
    print('n, s: ', stats.ttest_ind(data[0], data[1], nan_policy='omit'))
    print('s, f: ', stats.ttest_ind(data[1], data[3], nan_policy='omit'))
    print('n, m: ', stats.ttest_ind(data[0], data[2], nan_policy='omit'))
    print('n, f: ', stats.ttest_ind(data[0], data[3], nan_policy='omit'))
|
5.3) Whole Slide Scoring - CNN Score vs. CERAD-like Scores.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### This notebook collects all of the tweets for a given team, imports them into a dataframe, adds a team classification tag, and dumps to a csv.
# +
#bring in the prerequisites
import pandas as pd
import glob
import json
import numpy as np
import pandas as pd
# -
# identify the folder where the files are -- this will be done once per team, aligning the team folder name to the column we will create
# Identify the folder of raw tweet files -- done once per team, aligning the
# team folder name to the classification column created below.
files = glob.glob('raw_tweets/pride_phillies/*')
#files = glob.glob('user/*')
len(files)
# +
# Read every tweet JSON file into a list of dicts.
dictlist = []
for file in files:
    # Fix: the original `open(file, 'r').read()` never closed the file handle;
    # a context manager closes it deterministically, and json.load parses the
    # stream directly instead of going through an intermediate string.
    with open(file, 'r') as fh:
        dictlist.append(json.load(fh))
# -
#create a dataframe from the dictionary
df = pd.DataFrame(dictlist)
df.head()
# Tag every row with the team name so per-team CSVs can be concatenated later.
df['team'] = "phillies"
df.head()
# +
#dump it to a csv so we can use it later
df.to_csv('basic_csv/phillies_tweets.csv', index=True)
# -
|
BasicDataClean.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bayesian-modelling-tutorial
# language: python
# name: bayesian-modelling-tutorial
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# ## Learning Objectives
#
# In this notebook, we're going to get some practice writing data generating processes,
# and calculating joint likelihoods between our data and model,
# using the SciPy statistics library.
# ## Simulating coin flips (again!)
#
# We're going to stick with coin flip simulations, because it's a very good "simplest complex model".
#
# Previously, we constructed coin flips where we knew the parameter $p$ (probability of heads) precisely.
# This time, though, we're going to construct a model of coin flips
# that no longer involves a fixed/known $p$,
# but instead involves a $p$ that is not precisely known.
# ### Protocol
#
# If we have a $p$ that is not precisely known, we can set it up by instantiating a probability distribution for it, rather than a fixed value.
#
# How do we decide what distribution to use?
# Primarily, the criteria that should guide us is the _support_ of the distribution,
# that is, the range of values for which the probability distribution is valid.
#
# $p$ must be a value that is bounded between 0 and 1.
# As such, the choice of probability distribution for $p$ is most intuitively the Beta distribution,
# which provides a probability distribution over the interval $[0, 1]$.
#
# Taking that value drawn from the Beta, we can pass it into the Bernoulli distribution,
# and then draw an outcome (either 1 or 0).
# In doing so, we now have the makings of a __generative model__ for our coin flip data!
# ### Generating in code
#
# Let's see the algorithmic protocol above implemented in code!
# +
from scipy import stats as sts
import numpy as np
def coin_flip_generator() -> np.ndarray:
    """Simulate one flip of a coin whose bias is itself uncertain.

    The probability of heads is drawn from a Beta(10, 10) prior, and a single
    Bernoulli outcome (0 or 1) is then drawn with that probability.
    """
    prob_heads = sts.beta(a=10, b=10).rvs(1)
    return sts.bernoulli(p=prob_heads).rvs(1)
coin_flip_generator()
# -
# ### Graph form
#
# If we visualize this model in graphical form,
# it would look something like this:
# +
from bayes_tutorial.solutions.simulation import coin_flip_pgm
coin_flip_pgm()
# -
# In this graph, each node is a random variable. For example, `result` is the random variable that models outcomes. It accepts a parameter `p`, which itself is a random variable that does not depend on anything. At the same time, `p` depends on two parameters, $\alpha$ and $\beta$, which are fixed.
#
# The graphical form expresses _conditional dependence_ between random variables, that is to say, `result`'s draws depend on the value of `p` drawn. In math symbols, we would write this joint distribution between `p` and `result` as:
#
# $$P(p, result) = P(result | p)P(p)$$
#
# The `|` tells us that `results` is conditioned on, or depends on, the value of the random variable `p`.
#
# The graphical form is a definitely a simplified view, in that we don't show the exact probability distributions by which each random variable is distributed. That is what can make reading the diagrams a bit confusing at first, though with practice, things get much easier over time.
# ## Prior Information
#
# The astute eyes amongst you will notice
# that the Beta distribution has parameters of its own,
# so how do we instantiate that?
# Well, one thing we can do is bring in some _prior information_ to the problem.
#
# Is our mental model of this coin that it behaves like billions of other coins in circulation,
# in that it will generate outcomes with basically equal probability?
# Turns out, the Beta distribution can assign credibility in this highly opinionated fashion!
# And by doing so, we are injecting _prior information_
# by instantiating a Beta _prior distribution_.
# +
from ipywidgets import FloatSlider, interact, Checkbox
import matplotlib.pyplot as plt
import numpy as np
# Interactive sliders for the Beta prior's two shape parameters.
alpha = FloatSlider(value=2, min=1.0, max=100, step=1, description=r'$\alpha$')
beta = FloatSlider(value=2, min=1.0, max=100, step=1, description=r'$\beta$')
equal = Checkbox(value=False, description=r"set $\beta$ to be equal to $\alpha$")
@interact(alpha=alpha, beta=beta, equal=equal)
def visualize_beta_distribution(alpha, beta, equal):
    """Plot the Beta(alpha, beta) pdf over [0, 1]; if *equal*, force beta = alpha."""
    if equal:
        beta = alpha
    dist = sts.beta(a=alpha, b=beta)
    xs = np.linspace(0, 1, 100)
    ys = dist.pdf(xs)
    plt.xlabel("Support")
    plt.ylabel("Likelihood")
    plt.plot(xs, ys)
    plt.title(fr"$\alpha$={alpha}, $\beta$={beta}")
# -
# As you play around with the slider, notice how when you increase the $\alpha$ and $\beta$ sliders,
# the width of the probability distribution decreases,
# while the height of the maximum value increases,
# thus reflecting greater _certainty_ in what values for $p$ get drawn.
# Using this _prior distribution_ on $p$, we can express what we think is reasonable
# given _prior knowledge_ of our system.
# ### Justifying priors
#
# Some of you, at this point, might be wondering - is there an algorithmic protocol for justifying our priors too?
# Can we somehow "just pass our priors into a machine and have it tell us if we're right or wrong"?
#
# It's a great wish, but remains just that: wishful thinking.
# Just like the "Aye Eye Drug", one for which a disease is plugged in,
# and the target and molecule are spat out.
# (I also find it to not be an inspiring goal,
# as the fun of discovery is removed.)
#
# Rather, as with all modelling exercises,
# I advocate for human debate about the model.
# After all, humans are the ones taking action based on, and being affected by, the modelling exercise.
# There are a few questions we can ask to help us decide:
#
# - Are the prior assumptions something a _reasonable_ person would make?
# - Is there evidence that lie outside of our problem that can help us justify these priors?
# - Is there a _practical_ difference between two different priors?
# - In the limit of infinite data, do various priors converge? (We will see later how this convergence can happen.)
# ## Exercises
#
# It's time for some exercises to practice what we've learnt!
# ### Exercise: Control prior distribution
#
# In this first exercise, I would like you to modify the `coin_flip_generator` function
# such that it allows a user to control what the prior distribution on $p$ should look like
# before returning outcomes drawn from the Bernoulli.
#
# Be sure to check that the values of `alpha` and `beta` are valid values, i.e. floats greater than zero.
# +
from bayes_tutorial.solutions.simulation import coin_flip_generator_v2
# Your answer below:
# def coin_flip_generator_v2(alpha: float, beta: float) -> np.ndarray:
# pass
# -
# ### Exercise: Simulate data
#
# Now, simulate data generated from your new coin flip generator.
# +
from typing import List
from bayes_tutorial.solutions.simulation import generate_many_coin_flips
# Your answer below:
# def generate_many_coin_flips(n_draws: int, alpha: float, beta: float) -> List[int]:
# pass
generate_many_coin_flips(50, alpha=5, beta=1)
# -
# With that written, we now have a "data generating" function!
# ## Joint likelihood
#
# Remember back in the first notebook how we wrote about evaluating the joint likelihood of multiple coin flip data
# against an assumed Bernoulli model?
#
# We wrote a function that looked something like the following:
#
# ```python
# from scipy import stats as sts
# from typing import List
#
# def likelihood(data: List[int]):
# c = sts.bernoulli(p=0.5)
# #         return np.prod(c.pmf(data))  # (np.product was removed in NumPy 2.0)
# ```
#
# Now, if $p$ is something that is not precisely known,
# then any "guesses" of $p$ will have to be subject to the Likelihood principle too,
# which means that we need to jointly evaluate the likelihood of $p$ and our data.
#
# Let's see that in code:
# +
def coin_flip_joint_likelihood(data: List[int], p: float) -> float:
    """Joint likelihood P(data, p) = P(data | p) * P(p) under a Beta(10, 10) prior.

    :param data: list of 0/1 coin-flip outcomes.
    :param p: candidate probability of heads, evaluated against the prior.
    """
    p_like = sts.beta(a=10, b=10).pdf(p)  # prior density of this guess of `p`
    data_like = sts.bernoulli(p=p).pmf(data)  # per-flip likelihoods
    # Fix: np.product was deprecated in NumPy 1.25 and removed in NumPy 2.0;
    # np.prod is the supported spelling with identical behavior.
    return np.prod(data_like) * np.prod(p_like)
coin_flip_joint_likelihood([1, 1, 0, 1], 0.3)
# -
# ## Joint _log_-likelihood
#
# Because we are dealing with decimal numbers,
# when multiplying them together,
# we might end up with underflow issues.
# As such, we often take the log of the likelihood.
#
# ### Exercise: Implementing joint _log_-likelihood
#
# Doing this means we can use summations on our likelihood calculations,
# rather than products.
#
# Because of the rules of logarithms, what originally was:
#
# $$P(D|p)P(p)$$
#
# becomes:
#
# $$\log(P(D|p)) + \log(P(p))$$
#
# Also, if you think about the joint distribution of data,
# $P(D)$ is actually $P(D_1, D_2, ..., D_n)$ for $n$ data points,
# but because each is independent from one another, the joint distribution of $P(D)$ factorizes out to $P(D_1)P(D_2)...P(D_n)$. Taking the log then allows us to sum up the log of PMFs!
# +
from bayes_tutorial.solutions.simulation import coin_flip_joint_loglike
# Your answer below:
# def coin_flip_joint_loglike(data: List[int], p: float) -> float:
# pass
coin_flip_joint_loglike([1, 1, 0, 1], 0.3)
# -
# ### Exercise: Confirm equality
#
# Now confirm that the joint log-likelihood is of the same value as the log of the joint likelihood,
# subject to machine precision error.
np.log(coin_flip_joint_likelihood([1, 1, 0, 1], 0.3))
# ## Key Idea: Statistical Stories
#
# Before we can go into probabilistic programming,
# one has to know the skill of "telling statistical stories".
#
# In telling statistical stories, we are using probability distributions
# to represent the pieces of our problem that are difficult to precisely know.
# It is because they are difficult to precisely know
# that we use random variables, distributed by some probability distribution,
# as the modelling tool of choice.
# ### Stories of probability distributions
#
# One skill that is necessary in knowing how to choose
# what probability distribution to associate with a random variable
# is to learn their "distribution stories".
#
# Here's an example, taken from [Justin Bois' excellent resource][jsbois],
# for the Bernoulli distribution:
#
# > A Bernoulli trial is an experiment that has two outcomes that can be encoded as success ($y=1$) or failure ($y=0$). The result $y$ of a Bernoulli trial is Bernoulli distributed.
#
# [jsbois]: http://bois.caltech.edu/dist_stories/t3b_probability_stories.html
# ### Workflow
#
# A generally usable workflow for telling statistical stories
# is to work backwards from the data.
# Using our evergreen coin flip example, if we start with coin flip-like data,
# and have a hunch that our data are never going to be anything other than 0s and 1s,
# then we might use a Bernoulli to model the data.
# And then as we saw above, if we realize that we can't be precisely sure
# of the value $p$, then we model it using a Beta distribution.
# In many cases, knowing the distribution of $p$ is useful.
#
# One might ask, then, how about the parameters of the Beta distribution?
# Do we have to give _them_ distributions too?
#
# The answer is "usually not", as we consider them "nuisance" parameters:
# parameters that we need to have, but can't take action on even if we know something about them.
# ## Exercises
#
# To help you get familiar with this skill,
# I've designed a number of exercises below that will help you get some practice.
# Be sure to reference the [distribution stories][jsbois]
# for any probability distributions mentioned in here.
#
# [jsbois]: http://bois.caltech.edu/dist_stories/t3b_probability_stories.html
#
# As you embark on the exercises, always remember:
#
# 
# ### Exercise: Simulate the number of car crashes per week at Brigham circle
#
# Brigham circle is a place in Boston near the Longwood Medical Area, and is notorious for car crashes. (I made the car crashes piece up.)
#
# Write down a statistical simulation that generates counts of car crashes per week at Brigham circle.
#
# Some hints that may help:
#
# - Count data are normally distributed by [Poisson][poisson] or [Negative Binomial][negbinom] distributions.
# - If you use the Poisson distribution, then its key parameter, the "rate" parameter, is a positive real number (positive floats). The [exponential distribution][expon] is a good choice.
# - If you use the negative binomial distribution, remember that it takes in one integer and one float parameter.
# - The official answer uses the Poisson distribution, and follows the following graphical form.
#
# [expon]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html
# [poisson]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html#scipy.stats.poisson
# [negbinom]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.nbinom.html#scipy.stats.nbinom
# +
from bayes_tutorial.solutions.simulation import car_crash_pgm
car_crash_pgm()
# -
def car_crash_generator():
    """Draw one simulated "crashes per week" count.

    An (imprecisely known) weekly rate is drawn from an exponential
    distribution and fed into a Poisson draw.
    NOTE(review): `sts.expon(0.5)` sets the *loc* (shift) parameter to 0.5,
    not the scale — possibly `scale=0.5` was intended; kept as-is.
    """
    weekly_rate = sts.expon(0.5).rvs()
    return sts.poisson(mu=weekly_rate).rvs()
# Now, simulate 10 draws from the generator.
[car_crash_generator() for _ in range(10)]
# ### Exercise: Joint log-likelihood function for observed car crashes
#
# Now, write down the joint likelihood function for observed car crashes and its key parameters.
# +
from bayes_tutorial.solutions.simulation import car_crash_loglike
# Uncomment the block below and fill in your answer.
# def car_crash_loglike(rate: float, crashes: List[int]) -> float:
# """Evaluate likelihood of per-week car crash data points."""
#
# your answer goes here
#
# return rate_like + crashes_like
# -
# ### Exercise: Evaluate joint log-likelihood of data and parameter guesses
#
# Now that you have a log likelihood function that was constructed from your priors,
# evaluate guesses of car crash rates against the following data.
#
# To best visualize this, make a plot of log likelihood on the y-axis against rate on the x-axis.
# +
from bayes_tutorial.solutions.simulation import car_crash_data, car_crash_loglike_plot
import matplotlib.pyplot as plt
data = car_crash_data()
# Comment out the next line before filling in your answer
car_crash_loglike_plot();
# Your answer goes below:
# -
# ### Bonus exercise
#
# As a bonus exercise, add a few more data points.
# ### Exercise: Simulate the heights of men in North and South Korea
#
# It is well-known that there is a height difference between adult men in North and South Korea,
# due to differences in nutrition (direct cause) resulting from government (mis-)management.
#
# Write two functions that simulates the data generating process for observed human male height in North and South Korea.
# Assume that South Korean men are somewhere in the vicinity of 180 cm on average,
# while North Korean men are somewhere in the vicinity of 165 cm on average,
# but that this is not precisely known.
#
# Some guides to help:
#
# - Name the two functions `s_korea_generator()` and `n_korea_generator()`.
# - For height, a [Gaussian distribution][gaussian] is a _good enough_ model, even though strictly speaking it is positive-bound.
# - We should operate in the centimeter scale, as this scale places us in the hundreds range, which makes things easier to reason about.
# - Because the spread of heights might not be precisely known, we can model this uncertainty by placing an [exponential distribution][expon] over it, because scale parameters are positive-only distributed.
# - Assume that the mean height and the variance of the height distribution cannot be precisely known, which means you have to place a probability distribution over those parameters too.
#
# [gaussian]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html#scipy.stats.norm
# [expon]: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html#scipy.stats.expon
#
# The graphical form would look like this:
# +
from bayes_tutorial.solutions.simulation import korea_pgm
korea_pgm()
# +
from bayes_tutorial.solutions.simulation import s_korea_generator, n_korea_generator
# Your answer goes here.
# def s_korea_generator():
# pass
# def n_korea_generator():
# pass
# -
s_korea_generator()
n_korea_generator()
# You might notice that the two are of the same structure, so you can probably merge them into one function:
# +
def korea_height_generator(mean_loc: float, mean_scale: float, scale_scale: float) -> float:
    """Draw one simulated adult-male height (cm).

    The population mean is drawn from Normal(mean_loc, mean_scale), the
    population spread from Exponential(scale=scale_scale), and a single
    height is then drawn from the resulting Normal.
    """
    mu = sts.norm(loc=mean_loc, scale=mean_scale).rvs()
    sigma = sts.expon(scale=scale_scale).rvs()
    return sts.norm(loc=mu, scale=sigma).rvs()
n_korea_height = korea_height_generator(mean_loc=165, mean_scale=3, scale_scale=1)
s_korea_height = korea_height_generator(mean_loc=180, mean_scale=3, scale_scale=1)
n_korea_height, s_korea_height
# -
# ### Exercise: Joint log-likelihood of heights
#
# Similar to the exercise above, calculate the joint log-likelihood of heights with possible values of mean and scale evaluated against the prior distributions stated.
#
# To be a bit more precise, create one log-likelihood function for South Korean heights and one for North Korean heights, and then one for their combined joint likelihood.
# +
from bayes_tutorial.solutions.simulation import s_korea_height_loglike, n_korea_height_loglike, joint_height_loglike
# Your answer for South Korean log likelihoods here
# def s_korea_height_loglike(mean: float, scale: float, heights: List[int]) -> float:
# pass
# Your answer for North Korean log likelihoods here
# def n_korea_height_loglike(mean: float, scale: float, heights: List[int]) -> float:
# pass
# Your answer for the combined joint likelihood of South and North Korean heights
# def joint_height_loglike(s_mean: float, s_scale: float, n_mean: float, n_scale: float, s_heights: List[int], n_heights: List[int]) -> float:
# pass
# -
# ### Exercise: Evaluate log-likelihood of true parameter guesses
#
# Now that you've got a log likelihood function written down,
# evaluate some guesses as to what the best "mean" and "scale" values are,
# given the data
# and the priors that you specified in your log likelihood.
from bayes_tutorial.solutions.simulation import s_korea_height_data, n_korea_height_data
s_korea_heights = s_korea_height_data()
n_korea_heights = n_korea_height_data()
# +
# Sliders for manually exploring guesses of the four height-model parameters.
s_mean = FloatSlider(min=150, max=190, value=155, step=1)
s_scale = FloatSlider(min=0.1, max=10, value=2, step=0.1)
n_mean = FloatSlider(min=150, max=190, value=155, step=1)
n_scale = FloatSlider(min=0.1, max=10, value=2, step=0.1)
@interact(s_mean=s_mean, s_scale=s_scale, n_mean=n_mean, n_scale=n_scale)
def evaluate_joint_likelihood(s_mean: float, s_scale: float, n_mean: float, n_scale: float) -> float:
    """Joint log-likelihood of the observed heights under these parameter guesses."""
    return joint_height_loglike(s_mean, s_scale, n_mean, n_scale, s_korea_heights, n_korea_heights)
# -
# ## Visualizing the full uncertainty
#
# Exciting stuff ahead! Notice how it's super troublesome to manually slide sliders all over the place.
# Well, we're going to attempt to solve that by using Monte Carlo simulation!
# +
# Firstly, draw numbers uniformly in the regime of 130-210 for heights, and 1-6 for scales.
def draw():
    """One random parameter guess: (s_mean, s_scale, n_mean, n_scale).

    scipy's uniform takes (loc, width), so the means are Uniform over
    [130, 130+80] = [130, 210] and the scales over [1, 1+5] = [1, 6].
    """
    south_mean, north_mean = sts.uniform(130, 80).rvs(2)
    south_scale, north_scale = sts.uniform(1, 5).rvs(2)
    return (south_mean, south_scale, north_mean, north_scale)
# Then, set up 2000 draws
params = np.array([draw() for _ in range(2000)])
# -
# Now, we evaluate the log-likelihood.
# Evaluate the joint log-likelihood at every sampled parameter set.
loglikes = np.array([evaluate_joint_likelihood(*param_set) for param_set in params])
# +
import pandas as pd
# Tabulate the Monte Carlo draws next to their log-likelihoods:
# one row per parameter set, with the columns named below.
param_df = pd.DataFrame(params)
loglike_df = pd.DataFrame(loglikes)
plotting_df = pd.concat([param_df, loglike_df], axis=1)
plotting_df.columns = ["s_mean", "s_scale", "n_mean", "n_scale", "loglike"]
plotting_df.head()
# +
# Pairwise hexbin panels: each panel colors one parameter pair by the
# log-likelihood of the draws, revealing the plausible parameter region.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
# NOTE(review): these reassignments shadow the FloatSlider objects of the
# same names defined earlier in the notebook.
s_mean = params[:, 0]
s_scale = params[:, 1]
n_mean = params[:, 2]
n_scale = params[:, 3]
alpha=1
axes[0, 0].hexbin(s_mean, s_scale, C=loglikes, alpha=alpha)
axes[0, 0].set_xlabel("South Korea Mean")
axes[0, 0].set_ylabel("South Korea Scale")
axes[0, 1].hexbin(s_mean, n_mean, C=loglikes, alpha=alpha)
axes[0, 1].set_xlabel("South Korea Mean")
axes[0, 1].set_ylabel("North Korea Mean")
axes[0, 2].hexbin(s_mean, n_scale, C=loglikes, alpha=alpha)
axes[0, 2].set_xlabel("South Korea Mean")
axes[0, 2].set_ylabel("North Korea Scale")
axes[1, 0].hexbin(s_scale, n_mean, C=loglikes, alpha=alpha)
axes[1, 0].set_xlabel("South Korea Scale")
axes[1, 0].set_ylabel("North Korea Mean")
axes[1, 1].hexbin(s_scale, n_scale, C=loglikes, alpha=alpha)
axes[1, 1].set_xlabel("South Korea Scale")
axes[1, 1].set_ylabel("North Korea Scale")
axes[1, 2].hexbin(n_mean, n_scale, C=loglikes, alpha=alpha)
axes[1, 2].set_xlabel("North Korea Mean")
axes[1, 2].set_ylabel("North Korea Scale")
plt.tight_layout()
# ### Exercise: What are _plausible_ values?
#
# Given the chart that you see above,
# what are the plausible values of the mean and scale parameters?
# ## Inference: Figuring out plausible values
#
# Now that you've seen how to use the `scipy.stats` module to write
# data-generating stories and simulate data,
# in the next notebook, we are going to use PyMC3
# to help us with the inferential protocol,
# i.e. inferring the most credible values of key model parameters, given the data.
# Hop over to the next chapter to learn about the Inference Button (tm)!
# ## Solutions
#
# Here are the solutions to the chapter.
# +
from bayes_tutorial.solutions import simulation
# simulation??
|
bayesian-stats-modelling-tutorial/docs/basics/simulation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
print(tf.__version__)
# +
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, LeakyReLU, Dropout, BatchNormalization,Flatten,Conv2D,Reshape,MaxPool2D, Conv2DTranspose,BatchNormalization
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.applications import VGG16
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys, os
import random
import cv2
# Root folder of the face dataset: one subdirectory per identity.
pathname="../input/vggface-using-tripletloss/celebs/celebs"
dirList=os.listdir(pathname)
ln=len(dirList)
# + jupyter={"source_hidden": true}
# Load in the data
#mnist = tf.keras.datasets.mnist
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
# map inputs to (-1, +1) for better training
#x_train, x_test = x_train / 255.0 * 2 - 1, x_test / 255.0 * 2 - 1
#print("x_train.shape:", x_train.shape)
#print("x_test.shape:", x_test.shape)
# +
#x_train=np.reshape(x_train,(60000,28,28,1))
#x_test=np.reshape(x_test,(10000,28,28,1))
# -
# Get the generator model
def build_generator():
    """Build the generator: upsamples 28x28x512 feature maps to a 224x224x3 image.

    Conv2DTranspose stages (512 -> 256 -> 128 -> 64 -> 3 filters) with
    stride-2 upsampling between stages, BatchNorm between blocks, and
    LeakyReLU activations throughout.
    """
    generator=Sequential()
    # NOTE(review): input_shape on the second layer is redundant (Keras only
    # uses it on the first layer) but harmless.
    generator.add(Conv2DTranspose(512, (4, 4),input_shape=(28,28,512), padding="same",activation=LeakyReLU()))
    generator.add(Conv2DTranspose(512, (4, 4),input_shape=(28,28,512), padding="same",activation=LeakyReLU()))
    #generator.add(BatchNormalization())
    #generator.add(Conv2DTranspose(512, (6, 6), strides=2, padding="same", activation=LeakyReLU()))
    #generator.add(Conv2DTranspose(512, (6, 6), padding="same", activation=LeakyReLU()))
    generator.add(BatchNormalization())
    # 28 -> 56 (strides=2 doubles the spatial size)
    generator.add(Conv2DTranspose(256, (8, 8), strides=2, padding="same", activation=LeakyReLU()))
    generator.add(Conv2DTranspose(256, (8, 8), padding="same", activation=LeakyReLU()))
    generator.add(BatchNormalization())
    # 56 -> 112
    generator.add(Conv2DTranspose(128, (10, 10), strides=2, padding="same", activation=LeakyReLU()))
    generator.add(Conv2DTranspose(128, (10, 10), padding="same", activation=LeakyReLU()))
    generator.add(BatchNormalization())
    # 112 -> 224
    generator.add(Conv2DTranspose(64, (12, 12), strides=2, padding="same", activation=LeakyReLU()))
    generator.add(Conv2DTranspose(filters=64, kernel_size=(12,12), padding="same", activation=LeakyReLU()))
    generator.add(Conv2DTranspose(filters=64, kernel_size=(12,12), padding="same", activation=LeakyReLU()))
    generator.add(BatchNormalization())
    # Final projection down to 3 RGB channels.
    generator.add(Conv2DTranspose(filters=3,kernel_size=(12, 12), padding="same",activation=LeakyReLU()))
    generator.add(Conv2DTranspose(filters=3,kernel_size=(12, 12), padding="same",activation=LeakyReLU()))
    generator.add(Conv2DTranspose(filters=3,kernel_size=(12, 12), padding="same",activation=LeakyReLU()))
    #generator.summary()
    return generator
# +
# NOTE(review): despite its name, "decoder" is the *encoder* half of the
# autoencoder: three 2x2 max-pools downsample 224 -> 112 -> 56 -> 28 while
# the filter count grows 3 -> 64 -> 128 -> 256 -> 512.
decoder = Sequential()
decoder.add(Conv2D(filters=3,kernel_size=(12,12),input_shape=(224,224,3),padding="same", activation=LeakyReLU()))
decoder.add(Conv2D(filters=3,kernel_size=(12,12),padding="same", activation=LeakyReLU()))
decoder.add(BatchNormalization())
decoder.add(Conv2D(filters=64,kernel_size=(12,12),padding="same", activation=LeakyReLU()))
decoder.add(Conv2D(filters=64,kernel_size=(12,12),padding="same", activation=LeakyReLU()))
decoder.add(BatchNormalization())
decoder.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
decoder.add(Conv2D(filters=128, kernel_size=(10,10), padding="same", activation=LeakyReLU()))
decoder.add(Conv2D(filters=128, kernel_size=(10,10), padding="same", activation=LeakyReLU()))
decoder.add(BatchNormalization())
decoder.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
decoder.add(Conv2D(filters=256, kernel_size=(8,8), padding="same", activation=LeakyReLU()))
decoder.add(Conv2D(filters=256, kernel_size=(8,8), padding="same", activation=LeakyReLU()))
decoder.add(BatchNormalization())
decoder.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
decoder.add(Conv2D(filters=512, kernel_size=(6,6), padding="same", activation=LeakyReLU()))
decoder.add(Conv2D(filters=512, kernel_size=(6,6), padding="same", activation=LeakyReLU()))
#decoder.add(BatchNormalization())
#decoder.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
#decoder.add(Conv2D(filters=512, kernel_size=(4,4), padding="same", activation=LeakyReLU()))
#decoder.add(Conv2D(filters=512, kernel_size=(4,4), padding="same", activation=LeakyReLU()))
decoder.add(BatchNormalization())
decoder.summary()
# -
# Discriminator: VGG16 convolutional base (no classifier top) followed by a
# single sigmoid unit producing a real/fake probability.
discriminator=Sequential()
discriminator.add(VGG16(include_top=False,input_shape=(224,224,3)))
discriminator.add(Flatten())
discriminator.add(Dense(1,activation="sigmoid"))
discriminator.summary()
# +
# Compile both models in preparation for training
# Build and compile the discriminator
discriminator.compile(
    loss='binary_crossentropy',
    optimizer=Adam(0.0002, 0.5),
    metrics=['accuracy'])
# Build and compile the combined model
generator = build_generator()
# Create an input to represent noise sample from latent space
#z = Input(shape=(latent_dim,))
# Pass noise through generator to get an image
#img = generator(z)
# Freeze the discriminator inside the combined model so that only the
# autoencoder (encoder + generator) weights are updated via fake_pred.
discriminator.trainable = False
# autoencoder: encoder ("decoder" above) -> generator, image to image.
autoencoder=Sequential()
autoencoder.add(decoder)
autoencoder.add(generator)
print("Here")
autoencoder.summary()
# The true output is fake, but we label them real!
fake_pred = Sequential()
fake_pred.add(autoencoder)
fake_pred.add(discriminator)
fake_pred.summary()
# Create the combined model object
# Compile the combined model
fake_pred.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5))
# +
# Train the GAN
# Config
batch_size = 64
batch=64  # NOTE(review): appears unused in this notebook -- training uses batch_size.
epochs = 30000
sample_period = 200 # every `sample_period` steps generate and save some data
# Create batch labels to use when calling train_on_batch
ones = np.ones(batch_size)
zeros = np.zeros(batch_size)
# Store the losses
d_losses = []
g_losses = []
# Create a folder to store generated images
if not os.path.exists('gan_images'):
  os.makedirs('gan_images')
# -
def randImagePicker(batch):
  """Return `batch` random face images as a (batch, 224, 224, 3) array.

  For each sample: pick a random identity directory, pick a random photo
  inside it, load it with OpenCV and resize to 224x224 (nearest neighbor).
  Relies on the module-level `pathname`, `dirList` and `ln`.
  """
  images = []
  for _ in range(batch):
    person_idx = random.randint(0, ln - 1)
    person_dir = pathname + "/" + dirList[person_idx]
    photos = os.listdir(person_dir)
    chosen = photos[random.randint(0, len(photos) - 1)]
    face = cv2.imread(person_dir + "/" + chosen)
    face = cv2.resize(face, (224, 224), interpolation=cv2.INTER_NEAREST)
    images.append(face)
  return np.array(images)
# Save a grid of autoencoder reconstructions of random real faces so training
# progress can be inspected visually.
def sample_images(epoch):
  """Render a 5x5 grid of reconstructed faces and save it as '<epoch>.png'."""
  rows, cols = 5, 5
  originals = randImagePicker(rows * cols)
  reconstructions = autoencoder.predict(originals)
  fig, axs = plt.subplots(rows, cols)
  # axs.flat walks the grid row-major, matching the reconstruction order.
  for idx, ax in enumerate(axs.flat):
    ax.imshow(reconstructions[idx])
    ax.axis('off')
  fig.savefig("%d.png" % epoch)
  plt.show()
  plt.close()
# +
# Main training loop: one discriminator update followed by two
# generator (autoencoder) updates per epoch.
for epoch in range(epochs):
  # --- Discriminator step --------------------------------------------
  # Sample a batch of real faces and build their reconstructions.
  real_imgs=randImagePicker(batch_size)
  fake_imgs=autoencoder.predict(real_imgs)
  # train_on_batch returns (loss, accuracy); reals are labelled 1,
  # reconstructions 0.
  d_loss_real, d_acc_real = discriminator.train_on_batch(real_imgs, ones)
  d_loss_fake, d_acc_fake = discriminator.train_on_batch(fake_imgs, zeros)
  d_loss = 0.5 * (d_loss_real + d_loss_fake)
  d_acc  = 0.5 * (d_acc_real + d_acc_fake)
  # --- Generator step ------------------------------------------------
  # The reconstructions are fake, but we label them real so gradients push
  # the autoencoder towards fooling the (frozen) discriminator.
  g_loss = fake_pred.train_on_batch(real_imgs, ones)
  real_imgs=randImagePicker(batch_size)
  g_loss = fake_pred.train_on_batch(real_imgs, ones)
  # Bookkeeping for the loss curves plotted after training.
  d_losses.append(d_loss)
  g_losses.append(g_loss)
  if epoch % 100 == 0:
    print(f"epoch: {epoch+1}/{epochs}, d_loss: {d_loss:.2f}, \
      d_acc: {d_acc:.2f}, g_loss: {g_loss:.2f}")
  if epoch % sample_period == 0:
    sample_images(epoch)
# -
# Plot the generator/discriminator loss curves for the whole run.
plt.plot(g_losses, label='g_losses')
plt.plot(d_losses, label='d_losses')
plt.legend()
# Always save what you did -- forgot a few times!
# FIX(review): paths were inconsistent ('models/decoder' vs 'model/generator'
# and 'model/discriminator'); unified on 'models/' for all three.
decoder.save('models/decoder')
generator.save('models/generator')
discriminator.save('models/discriminator')
|
AFG V3 (GAN)/Artificial-face-gan.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Creating custom colormaps in matplotlib using cmap_builder
#
# In this example, we will show how to use the `cmap_builder` package to create
# complex colormaps and colorbars in matplotlib.
#
# This tutorial covers the following topics:
#
# ### Table of contents
#
# - [How to use cmap_builder](#How-to-use-cmap_builder?)
# - [Create non-uniform discrete colormaps](#Create-non-uniform-discrete-colormaps)
# - [Create non-uniform discrete colormaps with equally spaced color segments](#Create-non-uniform-discrete-colormaps-with-equally-spaced-color-segments)
# - [Create linearly varying and non-uniform colormaps](#Create-linearly-varying-and-non-uniform-colormaps)
# - [Why use uniform spacing in continuous colorbars?](#Why-use-uniform-spacing-in-continuous-colorbars?)
# - [Create descriptive colormaps](#Create-descriptive-colormaps)
#
# But, first, let us begin plotting all the named colors available in matplotlib.
# A helper function is included `cmap_builder.utils.plot_colortable` to quickly make this plot.
# + tags=[]
# %matplotlib inline
from cmap_builder.utils import plot_colortable
plot_colortable()
# -
# ## How to use cmap_builder?
#
# Custom colormaps can be built using the `cmap_builder.build_cmap()` function.
#
# Let's see the function's help:
# +
from cmap_builder import build_cmap
help(build_cmap)
# -
# Hence, in a nutshell, the first step to creating colormaps is specifying our colormap definition
# ```python
# cmap_def = [
# (x0, color_0, [next_color_0]) # next_color_0 ignored if provided.
# (x1, color_1, [next_color_1])
# ...
# (xi, color_i, [next_color_i])
# ..
# (xn, color_n, [next_color_n]) # next_color_n is ignored if provided.
# ]
# ```
#
# Since this type of definition is very versatile, we will use it to create all sorts of colormaps.
#
# ## Create non-uniform discrete colormaps
#
# Now, let's start building a simple colormap that has 5 discrete colors segments *of different sizes*.
#
# We start first by defining the colormap as a (value, color) sequence, indicating the color
# at each data value. This is one of the colormap definitions supported by the `cmap_builder`, but not the only one! (we will see other types of definitions later on).
#
# Although the matplotlib's colormaps map values in the (0,1) interval to colors,
# the `cmap_builder` colormap definition supports using any units and intervals.
# The colormap normalization is done internally by the `build_cmap` function.
# Defining the colormaps in the same data units allows one to easily create colormaps that fit particular datasets.
#
# Without further ado, let's create the colormap.
# First, a helper function to visualize the colormap
def show_cmap(cmap, norm, xticks):
    """Render a horizontal color strip for `cmap` over `norm`'s data range.

    Args:
        cmap: the matplotlib colormap to display.
        norm: normalization whose vmin/vmax define the data range shown.
        xticks: data values to mark on the x axis.
    """
    import numpy as np  # FIX(review): was imported twice in the original
    from matplotlib import pyplot as plt
    import matplotlib_inline.backend_inline

    matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
    plt.rc("font", family="serif", size=12)

    # 700-sample gradient spanning the norm's range, stacked into 2 rows so
    # pcolormesh can draw it as a strip.
    gradient_1d = np.linspace(norm.vmin, norm.vmax, 700)
    gradient = np.vstack((gradient_1d, gradient_1d))
    figh = 1
    fig, ax = plt.subplots(nrows=1, figsize=(6.4, figh), dpi=300)
    fig.patch.set_facecolor("white")  # Add white background to figure
    fig.subplots_adjust(top=1 - 0.35 / figh, bottom=0.15 / figh, left=0.2, right=0.99)
    ax.set_title(cmap.name, fontsize=14)
    X, Y = np.meshgrid(gradient_1d, [0, 1])
    ax.pcolormesh(X, Y, gradient, cmap=cmap, norm=norm, rasterized=True)
    ax.set_yticks([])
    ax.set_xticks(xticks)
# Let's first specify the colormap definition.
# Any named color supported by matplotlib can be used to define the colormap.
# **IMPORTANT** (r,g,b) values or hex colors are not supported yet.
# +
from cmap_builder import build_cmap

# Definition in *data* units; build_cmap normalizes internally.
cmap_def = [
    # (value, color)
    (0, "red"),
    (2, "blue"),
    (4, "green"),
    (8, "yellow"),
    (9, "purple"),
    (10, "purple"),
    # The last repeated color is used to indicate the
    # end of the discrete colormapping.
]
# Using the definition above, with `discrete=True`, we have a colormap that
# maps the following values to colors:
# [0-2) -> red
# [2-4) -> blue
# [4-8) -> green
# [8-9) -> yellow
# [9-10) -> purple
my_cmap, my_ticks, my_norm = build_cmap(
    "non_uniform_discrete_cmap", # Name of the colormap
    cmap_def,
    discrete=True, # Return a discrete colormap.
    N=700, # color palette quantization levels.
)
show_cmap(my_cmap, my_norm, my_ticks)
# -
# The `build_cmap` function returns three objects:
# - cmap: The colormap
# - ticks: The data values corresponding to color segments definitions of the colormap.
# - norm: The norm used to normalize the data into the [0,1] interval used by the color plotting functions.
#
# Let's now use this colormap to create a simple plot.
# Helper function to quickly make plots.
# We will use the same function for all the examples.
def make_plot(
    cmap,
    ticks,
    norm,
    title=None,
    clabel="My colorbar",
    xlabel="",
    ylabel="",
    return_axes=False,
):
    """Plot a 2D Gaussian bump (scaled to [0, 10]) with `cmap` and a colorbar.

    Args:
        cmap: colormap for the pcolormesh.
        ticks: tick positions for the colorbar.
        norm: data normalization used by the colormap.
        title, clabel, xlabel, ylabel: plot labels.
        return_axes: if True, return (ax, colorbar) instead of showing.
    """
    # Make plots look pretty in the jupyter lab
    import matplotlib.ticker as ticker
    import matplotlib_inline.backend_inline
    import numpy as np
    from matplotlib import pyplot as plt
    matplotlib_inline.backend_inline.set_matplotlib_formats("svg")
    plt.rc("font", family="serif", size=12)
    # Synthetic data: exp(-x^2 - y^2) on [-2, 2]^2, scaled to [0, 10].
    N = 1000
    X, Y = np.mgrid[-2 : 2 : complex(0, N), -2 : 2 : complex(0, N)]
    Z1 = np.exp(-(X ** 2) - Y ** 2)
    Z = Z1 * 10
    fig = plt.figure(figsize=(5, 4), dpi=300)
    fig.patch.set_facecolor("white")  # Add white background to figure
    ax = plt.gca()
    pcm = ax.pcolormesh(X, Y, Z, cmap=cmap, shading="auto", norm=norm, rasterized=True)
    cb = fig.colorbar(pcm, ticks=ticks)
    cb.set_label(clabel, labelpad=-2)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.set_title(title, pad=12)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_aspect("equal")
    # Dashed crosshair through the origin.
    ax.axhline(0, linewidth=1.0, color="white", linestyle="--")
    ax.axvline(0, linewidth=1.0, color="white", linestyle="--")
    if return_axes:
        return ax, cb
    else:
        plt.show()
make_plot(my_cmap, my_ticks, norm=my_norm, title="Non-uniform discrete colormap")
# ## Create non-uniform discrete colormaps with equally spaced color segments
#
# Intentionally, we defined uneven color segments in the colormap.
# That does not look very nice in the colorbar.
#
# To have equally spaced elements in the colorbar, we can recreate the colormap with the "uniform=True" option.
# +
# Same definition as before, but uniform=True gives every color segment
# equal width in the colorbar regardless of its data extent.
my_cmap, my_ticks, my_norm = build_cmap(
    "non_uniform_discrete_cmap", # Name of the colormap
    cmap_def,
    uniform=True, # Uniform spacing for each color segment
    discrete=True, # Return a discrete colormap
    N=700, # color palette quantization levels.
)
make_plot(
    my_cmap,
    my_ticks,
    norm=my_norm,
    title="Non-uniform discrete colormap\n(uniform spacing)",
)
# -
# ## Create linearly varying and non-uniform colormaps
#
# Now, let's use the colormap builder to create the same colormaps as before but using a continuous color transition.
# We use the same colormap definition as before, but this time we pass `discrete=False`.
# +
cmap_def = [
    # (value, color)
    (0, "red"),
    (2, "blue"),
    (4, "green"),
    (8, "yellow"),
    (9, "purple"),
    (10, "indigo"),
    # The final entry's color is the color reached at the top of the range.
]
# Using the definition above, with `discrete=False`, we have a colormap that
# maps the following values to colors:
# [0-2) -> varies from red to blue
# [2-4) -> varies from blue to green
# [4-8) -> varies from green to yellow
# [8-9) -> varies from yellow to purple
# [9-10) -> varies from purple to indigo
my_cmap, my_ticks, my_norm = build_cmap(
    "non_uniform_continuous_cmap", # Name of the colormap
    cmap_def,
    uniform=True, # Uniform spacing for each color segment
    discrete=False, # Return a continuous (linearly interpolated) colormap
    N=700, # color palette quantization levels.
)
show_cmap(my_cmap, my_norm, my_ticks)
# -
make_plot(
    my_cmap,
    my_ticks,
    norm=my_norm,
    title="Non-uniform continous colormap\n(uniform spacing)",
)
# ## Why use uniform spacing in continuous colorbars?
#
# Using uniform spacing for the colormap segments gives additional control to
# the level of details of the plots.
# To better explain this point, let's give a hypothetical meaning to the plots that we showed before.
# Let's say we want to plot the "happiness" of a hummingbird as a function of the distance to the flower.
# Then, the x- and the y-axes denote the distance to the flower in meters, and the value of the function we plot is a measure of happiness. The happiness can be interpreted as the hummingbird being:
#
# - \[0,2): Desperate!
# - \[2, 4\]: Worried.
# - \[4-8\]: A little worried.
# - \[8-9\]: Happy.
# - \[9-10\]: Extremely happy
#
#
# Then, each of the above intervals has a particular (totally made up) meaning.
# Although any colormap (and their colorbar) can display the hummingbird mood as the function of distance, the boundaries where there is a transition of "happiness" are not clear. We can, of course, add contours to clearly denote the boundaries.
#
# However, in the next section, we will see an alternative way to quickly see the type of mood of the little bird using more descriptive colormaps.
# ## Create descriptive colormaps
#
# The previous examples showed how the `cmap_builder` library can create simple colormaps.
# Now, let's make an awesome and very descriptive colorbar.
#
# For that, we will create a colormap with the following properties:
#
# - Each different mood category is denoted by a different color.
# - Within each category, the values vary from light to dark colors according to the mood intensity.
#
# Therefore, let's build a colormap like this:
#
# - \[0,2): Desperate! Varying from dark to light red.
# - \[2, 4\]: Worried. Varying from light to dark orange.
# - \[4-8\]: A little worried. Varying from light to dark blue.
# - \[8-9\]: Happy. Varying from light to dark purple.
# - \[9-10\]: Extremely happy. Varying from light to dark green.
#
# Note that the mood intensity increases with decreasing values of our happiness measure.
#
# So, let's begin by specifying the colormap definition.
cmap_def = [
    # (value, color, [next_color]) -- the optional third element gives the
    # color that *starts the next segment*, producing a hard transition at
    # that value (see the definition format described above).
    (0, "red_dark"),
    (2, "red_light", "orange_light"),
    (4, "orange_dark", "blue_light"),
    (8, "blue_dark", "purple_light"),
    (9, "purple_dark", "green_light"),
    (10, "green_dark"),
    # The final entry's color closes the colormap at the top of the range.
]
# Did you note that we used named colors with the "\_light" and "\_dark" suffixes?
#
# These are called "color modifiers" and it is one of the features included in the `cmap_builder` library.
# Internally, the color names are parsed by the `cmap_builder.utils.rgb_from_name()` function and supports any of the following color modifiers:
#
# The color name supports **at most one** of the following color modifiers suffixes:
#
# - "\_l#": HLS lightness (L) modifier (0 to 100 range).
# For example, "yellow\_l75" changes the lightness to 0.75.
# - "\_s#": HSV saturation (S) modifier (0 to 100 range).
# For example, "yellow\_s75" changes the saturation to 0.75.
# - "\_v#": HSV value (V, or brightness) modifier (0 to 100 range).
# For example, "yellow\_v75" changes the brightness (value) to 0.75.
# - "\_light": Make color lighter. Same as the "\_l10" modifier.
# - "\_dark": Make color darker. Same as the "\_l80" modifier.
#
# Now, let's build the colormap using the definition specified before.
# +
# Build the descriptive mood colormap (colormap name kept exactly as
# written; it is referenced as a string elsewhere).
my_cmap, my_ticks, my_norm = build_cmap(
    "hummingbird_happyness", # Name of the colormap
    cmap_def,
    uniform=True,
    N=700, # color palette quantization levels.
)
show_cmap(my_cmap, my_norm, my_ticks)
# + tags=[]
# Plot with the mood colormap; the colorbar keeps numeric ticks here.
make_plot(
    my_cmap,
    my_ticks,
    norm=my_norm,
    title="Hummingbird happyness",
    xlabel="x distance [m]",
    ylabel="y distance[m]",
    clabel="Hummingbird mood",
)
# -
# We can go one step further and replace the tick labels in the colorbar with different moods.
# + tags=["nbsphinx-thumbnail"]
# Same plot, but replace the colorbar's numeric ticks with mood labels
# placed at the midpoint of each color segment.
ax, cbar = make_plot(
    my_cmap,
    my_ticks,
    norm=my_norm,
    title="Hummingbird happyness",
    xlabel="x distance [m]",
    ylabel="y distance[m]",
    clabel="",
    return_axes=True,
)
# Midpoints of the segments [0,2], [2,4], [4,8], [8,9], [9,10].
mid_point_ticks = [1, 3, 6, 8.5, 9.5]
mood_labels = ["Desperate!", "Worried", "A little worried", "Happy", "Extremely happy"]
_ = cbar.ax.set_yticks(mid_point_ticks)
_ = cbar.ax.set_yticklabels(mood_labels)
# -
# The previous plot describes the evolution of the little bird's mood as it approaches a flower.
# We can see, for example, where the mood transition takes place and how the intensity of the mood changes with distance.
# # The end
|
examples/1-Create_a_custom_colormap.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Refer360
# language: python
# name: refer360test
# ---
# +
import numpy as np
import json
from pprint import pprint
from collections import defaultdict
import operator
import matplotlib.pyplot as plt; plt.rcdefaults()
import seaborn as sns
import sys
import cv2
from scipy.spatial import Delaunay
import os
from tqdm import tqdm
sys.path.append('../src')
from panoramic_camera import PanoramicCamera as camera
from utils import rad2degree
# -
def imshow(img):
  """JPEG-encode `img` into an IPython Image object.

  NOTE(review): the display() call is commented out, so this currently
  produces no visible output -- confirm whether that is intentional.
  """
  import cv2
  import IPython
  success, encoded = cv2.imencode('.jpg', img)
  image_obj = IPython.display.Image(data=encoded)
  # IPython.display.display(image_obj)
def generate_grid(full_w=4552,
                  full_h=2276,
                  degree=15):
  """Build a wrap-around grid graph of viewpoints on an equirectangular pano.

  Nodes are placed every `degree` degrees over latitudes [-75, 75) and
  longitudes [0, 360). Nodes closer than one grid diagonal are connected;
  nodes in the left margin are duplicated at x + full_w so that edges wrap
  across the 360-degree seam.

  Returns:
      node_dict: id -> node metadata dict (lng/lat, pixel x/y, neighbor ids;
          neighbor lists may contain duplicates -- callers dedupe with set()).
      canvas: (full_h, full_w, 3) uint8 image with edges and node markers.
  """
  left_w = int(full_w * (degree/360)+1)  # width of the seam-duplicated margin
  dx = full_w * (degree/360)   # horizontal grid spacing, pixels
  dy = full_h * (degree/180)   # vertical grid spacing, pixels
  # Connect anything within one grid diagonal (plus 10px slack).
  DISTANCE = (dx ** 2 + dy ** 2) ** 0.5 + 10
  print('left_w',left_w)
  print('dx',dx)
  print('dy',dy)
  print('distance',DISTANCE)
  font = cv2.FONT_HERSHEY_SIMPLEX  # kept for the commented-out putText below
  size = 10
  objects = []
  nodes = []
  # Random start viewpoint. Kept even though unused below: it consumes the
  # numpy RNG exactly as before, preserving reproducibility; the commented
  # lines would register it as a node.
  slng, slat = rad2degree(np.random.uniform(
      0, 6), np.random.uniform(1, 1.5), adjust=True)
  sx = int(full_w * ((slng + 180)/360.0))
  sy = int(full_h - full_h *
           ((slat + 90)/180.0))
  # objects.append((slat, slng, -1, sx, sy, [sx-1, sy-1, sx+1, sy+1]))
  # nodes.append([sx, sy])
  for lat in range(-75, 75, degree):
    for lng in range(0, 360, degree):
      gt_x = int(full_w * ((lng)/360.0))
      gt_y = int(full_h - full_h * ((lat + 90)/180.0))
      #canvas[gt_y-size:gt_y+size,gt_x-size:gt_x+size] = 1.0
      objects.append((lng, lat, 2, gt_x, gt_y, []))
      nodes.append([gt_x, gt_y])
  canvas = np.zeros((full_h, full_w, 3), dtype='uint8')
  clr = (255, 0, 0)
  node_dict = dict()
  for kk, o in enumerate(objects):
    o_type, ox, oy = o[2], o[3], o[4]
    o_label = '<START>'
    if o_type > 0:
      o_label = ''
    #cv2.putText(canvas, o_label, (ox+size, oy+size), font, 3, clr, 5)
    n = {
        'id': kk,
        'lng': o[0],
        'lat': o[1],
        'obj_label': o_label,
        'obj_id': o_type,
        'x': o[3],
        'y': o[4],
        'boxes': o[5],
        'neighbors': []
    }
    node_dict[kk] = n
  color = (125, 125, 125)
  n_nodes = len(nodes)
  order2nid = {i: i for i in range(n_nodes)}
  # Duplicate left-margin nodes one full width to the right so the distance
  # test connects neighbors across the 360-degree seam.
  # FIX(review): copy the list instead of aliasing it -- the original
  # appended to `nodes` while iterating over it. The result is identical
  # because every wrapped node has x >= full_w > left_w.
  idx = n_nodes
  new_nodes = list(nodes)
  for ii, n in enumerate(nodes):
    if n[0] < left_w:
      order2nid[idx] = ii
      new_nodes.append((n[0]+full_w, n[1]))
      idx += 1
  for ii, s1 in enumerate(new_nodes):
    for jj, s2 in enumerate(new_nodes):
      if ii == jj:
        continue
      d = ((s1[0]-s2[0])**2 + (s1[1]-s2[1])**2)**0.5
      if d <= DISTANCE:
        n0 = order2nid[ii]
        n1 = order2nid[jj]
        # Both directions are appended here and again when (jj, ii) is
        # visited, so neighbor lists hold duplicates; callers use set().
        node_dict[n0]['neighbors'] += [n1]
        node_dict[n1]['neighbors'] += [n0]
        cv2.line(canvas, (s1[0], s1[1]),
                 (s2[0], s2[1]), color, 3, 8)
  # Draw a red square marker at every node position.
  for kk, o in enumerate(objects):
    o_type, ox, oy = o[2], o[3], o[4]
    canvas[oy-size:oy+size, ox-size:ox+size, 0] = 255.
    canvas[oy-size:oy+size, ox-size:ox+size, 1:] = 0
  return node_dict, canvas
# +
# Build the 20-degree grid and render its canvas.
node_dict, canvas = generate_grid(degree = 20)
fig, ax = plt.subplots(figsize=(16,8))
sns.set_style("white")
sns.set_style({'font.family': 'sans-serif',
               'font.sans-serif': 'Ubuntu',
               'font_scale' : 5})
imgplot = plt.imshow(canvas)
# -
# Histogram of node degrees; set() dedupes the repeated neighbor entries.
ncount = defaultdict(int)
for n in node_dict:
  neighbors = len(set(node_dict[n]['neighbors']))
  ncount[neighbors]+=1
print(ncount)
|
notebooks/VisualizeGrid.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Map function in python
def even_odd(num):
    """Return True if `num` is even, False if it is odd.

    Simplified: the comparison already yields the boolean, so the
    if/else returning True/False was redundant.
    """
    return num % 2 == 0
even_odd(5)
def even_odd1(num):
    """Return a sentence stating whether `num` is Even or Odd."""
    if num % 2 != 0:
        return " The number {} is Odd".format(num)
    return " The number {} is Even".format(num)
even_odd1(1236)
lst = [1,2,3,4,5,6,7,8,9,123,12343,235,5432]
map(even_odd1,lst)
# +
# map() is lazy: it returns an iterator and computes nothing until consumed
# +
# wrapping the map object in list() forces evaluation and materializes results
# -
list(map(even_odd1, lst))
list(map(even_odd, lst))
def even_odd2(num):
    """Return a sentence stating whether the given number is Even or Odd."""
    parity = "Even" if num % 2 == 0 else "Odd"
    return "The given number {} is {}".format(num, parity)
lst=[2,34,53,234,53,4553,2345,567,778,76554,1234]
even_odd2(156)
list(map(even_odd2,lst))
# ### Filter function in python
def even(i):
    """Return True for even `i`, False otherwise.

    FIX(review): the original returned None (falsy) for odd numbers; an
    explicit bool is clearer and behaves identically under filter().
    """
    return i % 2 == 0
lst=[1,2,3,4,5,6,7,8,9]
filter(even,lst)
list(filter(even,lst))
# +
# filter() keeps only the elements for which the predicate returns a truthy
# value -- here, the even numbers
# -
list(filter(lambda num: num%2==0, lst))
list(map(lambda num: num%2==0, lst))
# +
# unlike filter, map returns the predicate's result (True/False) for every element
# -
|
pypractise12.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from kafka import KafkaConsumer, KafkaProducer
# Kafka broker address used by both the producer and the consumer.
servers = '192.168.99.100:9092'
producer = KafkaProducer(bootstrap_servers=servers)
producer.config
# Publish 100 identical key/value messages to the 'test' topic.
for i in range(100):
    producer.send('test', key=b'foo', value=b'bar')
producer.close()
# NOTE(review): the consumer subscribes to 'feed_items', not 'test', so it
# will not receive the messages produced above -- confirm the topic is intended.
consumer = KafkaConsumer('feed_items', bootstrap_servers=servers)
# Blocks indefinitely, printing each record as it arrives.
for msg in consumer:
    print(msg)
|
notebooks/kafka-consumer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
# Each row of the CSV holds one basket as a comma-separated product string;
# split it into a list of lists of items.
df = pd.read_csv("grocery_data.csv")
data = list(df["products"].apply(lambda x:x.split(',')))
data
# +
# One-hot encode the transactions: one column per product, 1 if present.
te = TransactionEncoder()
te_data = te.fit(data).transform(data).astype("int")
df = pd.DataFrame(te_data,columns=te.columns_)
df
# df.to_csv("transformed_data.csv", encoding='utf-8', index=False)
# +
# Frequent itemsets with support >= 1%, labelled by product names.
apriori_data = apriori(df,min_support=0.01,use_colnames=True)
# NOTE(review): sort_values is not in-place and its result is discarded here;
# assign it back (or pass inplace=True) if the sorted order is wanted.
apriori_data.sort_values(by="support",ascending=False)
apriori_data['length'] = apriori_data['itemsets'].apply(lambda x:len(x))
apriori_data
# -
# Item pairs (length 2) with at least 5% support.
apriori_data[(apriori_data['length']==2) & (apriori_data['support']>=0.05)]
|
machine_learning/Associative Mining/Association.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# | | final valid | average |
# | -------- | ------------------ | ------------------ |
# | baseline | 0.905679012345679 | 0.5134979423868313 |
# | ewc | 0.9202469135802469 | 0.5199725651577504 |
|
playground.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 100 pandas puzzles
#
# Inspired by [100 Numpy exerises](https://github.com/rougier/numpy-100), here are 100* short puzzles for testing your knowledge of [pandas'](http://pandas.pydata.org/) power.
#
# Since pandas is a large library with many different specialist features and functions, these exercises focus mainly on the fundamentals of manipulating data (indexing, grouping, aggregating, cleaning), making use of the core DataFrame and Series objects.
#
# Many of the exercises here are straightforward in that the solutions require no more than a few lines of code (in pandas or NumPy... don't go using pure Python or Cython!). Choosing the right methods and following best practices is the underlying goal.
#
# The exercises are loosely divided into sections. Each section has a difficulty rating; these ratings are subjective, of course, but should be seen as a rough guide as to how inventive the required solution is.
#
# If you're just starting out with pandas and you are looking for some other resources, the official documentation is very extensive. In particular, some good places to get a broader overview of pandas are...
#
# - [10 minutes to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)
# - [pandas basics](http://pandas.pydata.org/pandas-docs/stable/basics.html)
# - [tutorials](http://pandas.pydata.org/pandas-docs/stable/tutorials.html)
# - [cookbook and idioms](http://pandas.pydata.org/pandas-docs/stable/cookbook.html#cookbook)
#
# Enjoy the puzzles!
#
# \* *the list of exercises is not yet complete! Pull requests or suggestions for additional exercises, corrections and improvements are welcomed.*
# ## Importing pandas
#
# ### Getting started and checking your pandas setup
#
# Difficulty: *easy*
#
# **1.** Import pandas under the alias `pd`.
import pandas as pd
# **2.** Print the version of pandas that has been imported.
pd.__version__
# **3.** Print out all the *version* information of the libraries that are required by the pandas library.
pd.show_versions()
# ## DataFrame basics
#
# ### A few of the fundamental routines for selecting, sorting, adding and aggregating data in DataFrames
#
# Difficulty: *easy*
#
# Note: remember to import numpy using:
# ```python
# import numpy as np
# ```
#
# Consider the following Python dictionary `data` and Python list `labels`:
#
# ``` python
# data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
# 'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
# 'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
# 'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
#
# labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# ```
# (This is just some meaningless data I made up with the theme of animals and trips to a vet.)
#
# **4.** Create a DataFrame `df` from this dictionary `data` which has the index `labels`.
# +
import numpy as np
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(data=data, index=labels)
# -
# **5.** Display a summary of the basic information about this DataFrame and its data (*hint: there is a single method that can be called on the DataFrame*).
df.info()
# **6.** Return the first 3 rows of the DataFrame `df`.
df.head(3)
# **7.** Select just the 'animal' and 'age' columns from the DataFrame `df`.
df[['animal','age']]
# **8.** Select the data in rows `[3, 4, 8]` *and* in columns `['animal', 'age']`.
df.loc[df.index[[3,4,8]], ['animal', 'age']]
# **9.** Select only the rows where the number of visits is greater than 3.
df[df['visits'] > 3]  # the question asks about 'visits', not 'age'
# **10.** Select the rows where the age is missing, i.e. it is `NaN`.
df[df['age'].isnull()]
# **11.** Select the rows where the animal is a cat *and* the age is less than 3.
# & binds tighter than <, so each comparison needs its own parentheses;
# otherwise this evaluates as ((animal == 'cat') & age) < 3.
df[(df['animal'] == 'cat') & (df['age'] < 3)]
# **12.** Select the rows where the age is between 2 and 4 (inclusive).
df[(df['age'] >= 2) & (df['age'] <= 4)]
# **13.** Change the age in row 'f' to 1.5.
# Target the single cell at row 'f' directly; Series.replace would change
# *every* row whose age equals the old value, not just row 'f'.
df.loc['f', 'age'] = 1.5
df['age']
# **14.** Calculate the sum of all visits in `df` (i.e. find the total number of visits).
df['visits'].sum()
# **15.** Calculate the mean age for each different animal in `df`.
df.groupby('animal')['age'].mean()
# **16.** Append a new row 'k' to `df` with your choice of values for each column. Then delete that row to return the original DataFrame.
# Values must follow the column order: animal, age, visits, priority.
# (The original list put 6 into 'animal' and 'frog' into 'age'.)
df.loc['k'] = ['frog', 5.5, 2, 'no']
df = df.drop('k')
# **17.** Count the number of each type of animal in `df`.
df['animal'].value_counts()
# **18.** Sort `df` first by the values in the 'age' in *descending* order, then by the value in the 'visits' column in *ascending* order (so row `i` should be first, and row `d` should be last).
df.sort_values(by=['age', 'visits'], ascending=[False, True])
df
# **19.** The 'priority' column contains the values 'yes' and 'no'. Replace this column with a column of boolean values: 'yes' should be `True` and 'no' should be `False`.
df = df.replace(['yes', 'no'], [True, False])
df
# **20.** In the 'animal' column, change the 'snake' entries to 'python'.
df = df.replace('snake', 'python')
df
# **21.** For each animal type and each number of visits, find the mean age. In other words, each row is an animal, each column is a number of visits and the values are the mean ages (*hint: use a pivot table*).
# Pass the string 'mean': supplying np.mean as aggfunc is deprecated in recent pandas.
table = pd.pivot_table(df, index='animal', columns='visits', values='age', aggfunc='mean')
table
# ## DataFrames: beyond the basics
#
# ### Slightly trickier: you may need to combine two or more methods to get the right answer
#
# Difficulty: *medium*
#
# The previous section was a tour through some basic but essential DataFrame operations. Below are some ways that you might need to cut your data, but for which there is no single "out of the box" method.
# **22.** You have a DataFrame `df` with a column 'A' of integers. For example:
# ```python
# df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})
# ```
#
# How do you filter out rows which contain the same integer as the row immediately above?
#
# You should be left with a column containing the following values:
#
# ```python
# 1, 2, 3, 4, 5, 6, 7
# ```
df = pd.DataFrame({'A': [1, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7]})
# Compare each value with the one directly above and keep rows where they differ.
# drop_duplicates would also remove *non-adjacent* repeats, which is not what
# the question asks (it only happens to give the same answer for this data).
df.loc[df['A'].shift() != df['A']]
# **23.** Given a DataFrame of numeric values, say
# ```python
# df = pd.DataFrame(np.random.random(size=(5, 3))) # a 5x3 frame of float values
# ```
#
# how do you subtract the row mean from each element in the row?
df = pd.DataFrame(np.random.random(size=(5, 3)))
df.sub(df.mean(axis=1), axis='index')
# **24.** Suppose you have DataFrame with 10 columns of real numbers, for example:
#
# ```python
# df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))
# ```
# Which column of numbers has the smallest sum? Return that column's label.
df = pd.DataFrame(np.random.random(size=(5, 10)), columns=list('abcdefghij'))
# idxmin returns the label of the column with the SMALLEST sum directly.
# (Sorting descending and taking head(1) returned the LARGEST sum's column.)
df.sum().idxmin()
# **25.** How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)? As input, use a DataFrame of zeros and ones with 10 rows and 3 columns.
#
# ```python
# df = pd.DataFrame(np.random.randint(0, 2, size=(10, 3)))
# ```
df = pd.DataFrame(np.random.randint(0, 2, size=(10, 3)))
# duplicated() returns a boolean Series of length len(df), so len() of it is
# always the row count. Drop every row that appears more than once, then count.
len(df.drop_duplicates(keep=False))
# The next three puzzles are slightly harder.
#
#
# **26.** In the cell below, you have a DataFrame `df` that consists of 10 columns of floating-point numbers. Exactly 5 entries in each row are NaN values.
#
# For each row of the DataFrame, find the *column* which contains the *third* NaN value.
#
# You should return a Series of column labels: `e, c, d, h, d`
# +
nan = np.nan
data = [[0.04, nan, nan, 0.25, nan, 0.43, 0.71, 0.51, nan, nan],
[ nan, nan, nan, 0.04, 0.76, nan, nan, 0.67, 0.76, 0.16],
[ nan, nan, 0.5 , nan, 0.31, 0.4 , nan, nan, 0.24, 0.01],
[0.49, nan, nan, 0.62, 0.73, 0.26, 0.85, nan, nan, nan],
[ nan, nan, 0.41, nan, 0.05, nan, 0.61, nan, 0.48, 0.68]]
columns = list('abcdefghij')
df = pd.DataFrame(data, columns=columns)
(df.isnull().cumsum(axis=1) == 3).idxmax(1)
# -
# **27.** A DataFrame has a column of groups 'grps' and and column of integer values 'vals':
#
# ```python
# df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
# 'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})
# ```
# For each *group*, find the sum of the three greatest values. You should end up with the answer as follows:
# ```
# grps
# a 409
# b 156
# c 345
# ```
df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
                   'vals': [12,345,3,1,45,14,4,52,54,23,235,21,57,3,87]})
# Series.sum(level=...) was removed in pandas 2.0; aggregate per group instead.
df.groupby('grps')['vals'].apply(lambda s: s.nlargest(3).sum())
# **28.** The DataFrame `df` constructed below has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive).
#
# For each group of 10 consecutive integers in 'A' (i.e. `(0, 10]`, `(10, 20]`, ...), calculate the sum of the corresponding values in column 'B'.
#
# The answer should be a Series as follows:
#
# ```
# A
# (0, 10] 635
# (10, 20] 360
# (20, 30] 315
# (30, 40] 306
# (40, 50] 750
# (50, 60] 284
# (60, 70] 424
# (70, 80] 526
# (80, 90] 835
# (90, 100] 852
# ```
df = pd.DataFrame(np.random.RandomState(8765).randint(1, 101, size=(100, 2)), columns = ["A", "B"])
df.groupby(pd.cut(x=df['A'], bins=np.arange(start=0, stop=101, step=10)))['B'].sum()
# ## DataFrames: harder problems
#
# ### These might require a bit of thinking outside the box...
#
# ...but all are solvable using just the usual pandas/NumPy methods (and so avoid using explicit `for` loops).
#
# Difficulty: *hard*
# **29.** Consider a DataFrame `df` where there is an integer column 'X':
# ```python
# df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})
# ```
# For each value, count the difference back to the previous zero (or the start of the Series, whichever is closer). These values should therefore be
#
# ```
# [1, 2, 0, 1, 2, 3, 4, 0, 1, 2]
# ```
#
# Make this a new column 'Y'.
# + jupyter={"outputs_hidden": true}
# -
# **30.** Consider the DataFrame constructed below which contains rows and columns of numerical data.
#
# Create a list of the column-row index locations of the 3 largest values in this DataFrame. In this case, the answer should be:
# ```
# [(5, 7), (6, 4), (2, 5)]
# ```
# + jupyter={"outputs_hidden": true}
df = pd.DataFrame(np.random.RandomState(30).randint(1, 101, size=(8, 8)))
# -
# **31.** You are given the DataFrame below with a column of group IDs, 'grps', and a column of corresponding integer values, 'vals'.
#
# ```python
# df = pd.DataFrame({"vals": np.random.RandomState(31).randint(-30, 30, size=15),
# "grps": np.random.RandomState(31).choice(["A", "B"], 15)})
# ```
#
# Create a new column 'patched_vals' which contains the same values as 'vals', but with any negative values replaced by the group mean:
#
# ```
# vals grps patched_vals
# 0 -12 A 13.6
# 1 -7 B 28.0
# 2 -14 A 13.6
# 3 4 A 4.0
# 4 -7 A 13.6
# 5 28 B 28.0
# 6 -2 A 13.6
# 7 -1 A 13.6
# 8 8 A 8.0
# 9 -2 B 28.0
# 10 28 A 28.0
# 11 12 A 12.0
# 12 16 A 16.0
# 13 -24 A 13.6
# 14 -12 A 13.6
# ```
# + jupyter={"outputs_hidden": true}
# -
# **32.** Implement a rolling mean over groups with window size 3, which ignores NaN value. For example consider the following DataFrame:
#
# ```python
# >>> df = pd.DataFrame({'group': list('aabbabbbabab'),
# 'value': [1, 2, 3, np.nan, 2, 3, np.nan, 1, 7, 3, np.nan, 8]})
# >>> df
# group value
# 0 a 1.0
# 1 a 2.0
# 2 b 3.0
# 3 b NaN
# 4 a 2.0
# 5 b 3.0
# 6 b NaN
# 7 b 1.0
# 8 a 7.0
# 9 b 3.0
# 10 a NaN
# 11 b 8.0
# ```
# The goal is to compute the Series:
#
# ```
# 0 1.000000
# 1 1.500000
# 2 3.000000
# 3 3.000000
# 4 1.666667
# 5 3.000000
# 6 3.000000
# 7 2.000000
# 8 3.666667
# 9 2.000000
# 10 4.500000
# 11 4.000000
# ```
# E.g. the first window of size three for group 'b' has values 3.0, NaN and 3.0 and occurs at row index 5. Instead of being NaN the value in the new column at this row index should be 3.0 (just the two non-NaN values are used to compute the mean (3+3)/2)
# + jupyter={"outputs_hidden": true}
# -
# ## Series and DatetimeIndex
#
# ### Exercises for creating and manipulating Series with datetime data
#
# Difficulty: *easy/medium*
#
# pandas is fantastic for working with dates and times. These puzzles explore some of this functionality.
#
# **33.** Create a DatetimeIndex that contains each business day of 2015 and use it to index a Series of random numbers. Let's call this Series `s`.
dti = pd.date_range(start='1/1/2015', end='12/31/2015', freq='B')
s = pd.Series(np.random.rand(len(dti)), index=dti)
s
# **34.** Find the sum of the values in `s` for every Wednesday.
s[s.index.weekday ==2].sum()
# **35.** For each calendar month in `s`, find the mean of values.
s.resample(rule='M').mean()
# **36.** For each group of four consecutive calendar months in `s`, find the date on which the highest value occurred.
s.groupby(pd.Grouper(freq='4M')).idxmax()
# **37.** Create a DateTimeIndex consisting of the third Thursday in each month for the years 2015 and 2016.
dti = pd.date_range(start='1/1/2015', end='12/31/2016', freq='WOM-3THU')
dti
# ## Cleaning Data
#
# ### Making a DataFrame easier to work with
#
# Difficulty: *easy/medium*
#
# It happens all the time: someone gives you data containing malformed strings, Python lists and missing data. How do you tidy it up so you can get on with the analysis?
#
# Take this monstrosity as the DataFrame to use in the following puzzles:
#
# ```python
# df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
# 'Budapest_PaRis', 'Brussels_londOn'],
# 'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
# 'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
# 'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
# '12. Air France', '"Swiss Air"']})
# ```
# Formatted, it looks like this:
#
# ```
# From_To FlightNumber RecentDelays Airline
# 0 LoNDon_paris 10045.0 [23, 47] KLM(!)
# 1 MAdrid_miLAN NaN [] <Air France> (12)
# 2 londON_StockhOlm 10065.0 [24, 43, 87] (British Airways. )
# 3 Budapest_PaRis NaN [13] 12. Air France
# 4 Brussels_londOn 10085.0 [67, 32] "Swiss Air"
# ```
#
#
# (It's some flight data I made up; it's not meant to be accurate in any way.)
#
# **38.** Some values in the the **FlightNumber** column are missing (they are `NaN`). These numbers are meant to increase by 10 with each row so 10055 and 10075 need to be put in place. Modify `df` to fill in these missing numbers and make the column an integer column (instead of a float column).
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
'Budapest_PaRis', 'Brussels_londOn'],
'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
'12. Air France', '"Swiss Air"']})
df['FlightNumber'] = df['FlightNumber'].interpolate().astype(dtype=int)
df
# **39.** The **From\_To** column would be better as two separate columns! Split each string on the underscore delimiter `_` to give a new temporary DataFrame called 'temp' with the correct values. Assign the correct column names 'From' and 'To' to this temporary DataFrame.
temp = df['From_To'].str.split('_', expand=True)
temp.columns = ['From', 'To']
temp
# **40.** Notice how the capitalisation of the city names is all mixed up in this temporary DataFrame 'temp'. Standardise the strings so that only the first letter is uppercase (e.g. "londON" should become "London".)
temp['From'] = temp['From'].str.capitalize()
temp['To'] = temp['To'].str.capitalize()
temp
# **41.** Delete the **From_To** column from `df` and attach the temporary DataFrame 'temp' from the previous questions.
df = df.drop(labels='From_To', axis=1)
df = df.join(temp)
df
# **42**. In the **Airline** column, you can see some extra punctuation and symbols have appeared around the airline names. Pull out just the airline name. E.g. `'(British Airways. )'` should become `'British Airways'`.
# Extract the alphabetic name (letters plus internal spaces) in a single pass,
# then strip the surrounding whitespace. The chained str.replace version left
# stray spaces (e.g. 'Air France 12' -> 'Air France ') and relied on implicit
# regex interpretation, which is deprecated without regex=True in newer pandas.
df['Airline'] = df['Airline'].str.extract(r'([a-zA-Z\s]+)', expand=False).str.strip()
df
# **43**. In the RecentDelays column, the values have been entered into the DataFrame as a list. We would like each first value in its own column, each second value in its own column, and so on. If there isn't an Nth value, the value should be NaN.
#
# Expand the Series of lists into a DataFrame named `delays`, rename the columns `delay_1`, `delay_2`, etc. and replace the unwanted RecentDelays column in `df` with `delays`.
# Expand each list into its own set of columns; missing entries become NaN.
delays = pd.DataFrame(df['RecentDelays'].to_list())
# Derive the names from the actual width instead of hard-coding three columns,
# so the cell keeps working if the longest delay list changes length.
delays.columns = ['delay_{}'.format(n + 1) for n in range(delays.shape[1])]
delays
df = df.drop(labels='RecentDelays', axis=1)
df = df.join(delays)
df
# The DataFrame should look much better now.
# ```
# FlightNumber Airline From To delay_1 delay_2 delay_3
# 0 10045 KLM London Paris 23.0 47.0 NaN
# 1 10055 Air France Madrid Milan NaN NaN NaN
# 2 10065 British Airways London Stockholm 24.0 43.0 87.0
# 3 10075 Air France Budapest Paris 13.0 NaN NaN
# 4 10085 Swiss Air Brussels London 67.0 32.0 NaN
# ```
# ## Using MultiIndexes
#
# ### Go beyond flat DataFrames with additional index levels
#
# Difficulty: *medium*
#
# Previous exercises have seen us analysing data from DataFrames equipped with a single index level. However, pandas also gives you the possibility of indexing your data using *multiple* levels. This is very much like adding new dimensions to a Series or a DataFrame. For example, a Series is 1D, but by using a MultiIndex with 2 levels we gain much of the same functionality as a 2D DataFrame.
#
# The set of puzzles below explores how you might use multiple index levels to enhance data analysis.
#
# To warm up, we'll look make a Series with two index levels.
# **44**. Given the lists `letters = ['A', 'B', 'C']` and `numbers = list(range(10))`, construct a MultiIndex object from the product of the two lists. Use it to index a Series of random numbers. Call this Series `s`.
letters = ['A', 'B', 'C']
numbers = list(range(10))
index = pd.MultiIndex.from_product(iterables=[letters, numbers], names=['letter', 'number'])
s = pd.Series(np.random.rand(30), index=index)
s
# **45.** Check the index of `s` is lexicographically sorted (this is a necessary property for indexing to work correctly with a MultiIndex).
# Index.is_lexsorted() was deprecated and removed in pandas 2.0;
# monotonicity of the index is the equivalent check.
s.index.is_monotonic_increasing
# **46**. Select the labels `1`, `3` and `6` from the second level of the MultiIndexed Series.
# slice(None) keeps every first-level label instead of hard-coding ['A', 'B', 'C'],
# so the selection still works if the letters change.
s.loc[slice(None), [1, 3, 6]]
# **47**. Slice the Series `s`; slice up to label 'B' for the first level and from label 5 onwards for the second level.
s.loc[slice('A','B'), slice(5,None)]
# **48**. Sum the values in `s` for each label in the first level (you should have Series giving you a total for labels A, B and C).
# Series.sum(level=...) was removed in pandas 2.0; group on the index level instead.
s.groupby(level=0).sum()
# **49**. Suppose that `sum()` (and other methods) did not accept a `level` keyword argument. How else could you perform the equivalent of `s.sum(level=1)`?
s.groupby(level=1).sum()
# **50**. Exchange the levels of the MultiIndex so we have an index of the form (letters, numbers). Is this new Series properly lexsorted? If not, sort it.
s_swapped = s.swaplevel(0, 1)
# is_lexsorted() was removed in pandas 2.0; check monotonicity instead.
if not s_swapped.index.is_monotonic_increasing:
    s_swapped = s_swapped.sort_index()
s_swapped
# ## Minesweeper
#
# ### Generate the numbers for safe squares in a Minesweeper grid
#
# Difficulty: *medium* to *hard*
#
# If you've ever used an older version of Windows, there's a good chance you've played with Minesweeper:
# - https://en.wikipedia.org/wiki/Minesweeper_(video_game)
#
#
# If you're not familiar with the game, imagine a grid of squares: some of these squares conceal a mine. If you click on a mine, you lose instantly. If you click on a safe square, you reveal a number telling you how many mines are found in the squares that are immediately adjacent. The aim of the game is to uncover all squares in the grid that do not contain a mine.
#
# In this section, we'll make a DataFrame that contains the necessary data for a game of Minesweeper: coordinates of the squares, whether the square contains a mine and the number of mines found on adjacent squares.
# **51**. Let's suppose we're playing Minesweeper on a 5 by 4 grid, i.e.
# ```
# X = 5
# Y = 4
# ```
# To begin, generate a DataFrame `df` with two columns, `'x'` and `'y'` containing every coordinate for this grid. That is, the DataFrame should start:
# ```
# x y
# 0 0 0
# 1 0 1
# 2 0 2
# ```
# + jupyter={"outputs_hidden": true}
# -
# **52**. For this DataFrame `df`, create a new column of zeros (safe) and ones (mine). The probability of a mine occuring at each location should be 0.4.
# + jupyter={"outputs_hidden": true}
# -
# **53**. Now create a new column for this DataFrame called `'adjacent'`. This column should contain the number of mines found on adjacent squares in the grid.
#
# (E.g. for the first row, which is the entry for the coordinate `(0, 0)`, count how many mines are found on the coordinates `(0, 1)`, `(1, 0)` and `(1, 1)`.)
# + jupyter={"outputs_hidden": true}
# -
# **54**. For rows of the DataFrame that contain a mine, set the value in the `'adjacent'` column to NaN.
# + jupyter={"outputs_hidden": true}
# -
# **55**. Finally, convert the DataFrame to grid of the adjacent mine counts: columns are the `x` coordinate, rows are the `y` coordinate.
# + jupyter={"outputs_hidden": true}
# -
# ## Plotting
#
# ### Visualize trends and patterns in data
#
# Difficulty: *medium*
#
# To really get a good understanding of the data contained in your DataFrame, it is often essential to create plots: if you're lucky, trends and anomalies will jump right out at you. This functionality is baked into pandas and the puzzles below explore some of what's possible with the library.
#
# **56.** Pandas is highly integrated with the plotting library matplotlib, and makes plotting DataFrames very user-friendly! Plotting in a notebook environment usually makes use of the following boilerplate:
#
# ```python
# import matplotlib.pyplot as plt
# # %matplotlib inline
# plt.style.use('ggplot')
# ```
#
# matplotlib is the plotting library which pandas' plotting functionality is built upon, and it is usually aliased to ```plt```.
#
# ```%matplotlib inline``` tells the notebook to show plots inline, instead of creating them in a separate window.
#
# ```plt.style.use('ggplot')``` is a style theme that most people find agreeable, based upon the styling of R's ggplot package.
#
# For starters, make a scatter plot of this random data, but use black X's instead of the default markers.
#
# ```df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})```
#
# Consult the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html) if you get stuck!
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('ggplot')
df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})
df.plot(x='xs', y='ys',kind="scatter", marker='x', color='black')
# **57.** Columns in your DataFrame can also be used to modify colors and sizes. Bill has been keeping track of his performance at work over time, as well as how good he was feeling that day, and whether he had a cup of coffee in the morning. Make a plot which incorporates all four features of this DataFrame.
#
# (Hint: If you're having trouble seeing the plot, try multiplying the Series which you choose to represent size by 10 or more)
#
# *The chart doesn't have to be pretty: this isn't a course in data viz!*
#
# ```
# df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
# "hours_in" :[1,9,6,5,3,9,2,9,1,7,4,2,2],
# "happiness" :[2,1,3,2,3,1,2,3,1,2,2,1,3],
# "caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
# ```
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
"hours_in" :[1,9,6,5,3,9,2,9,1,7,4,2,2],
"happiness" :[2,1,3,2,3,1,2,3,1,2,2,1,3],
"caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
df.plot()
df.plot.scatter("hours_in", "productivity", s = df.happiness * 30, c = df.caffienated)
# **58.** What if we want to plot multiple things? Pandas allows you to pass in a matplotlib *Axis* object for plots, and plots will also return an Axis object.
#
# Make a bar plot of monthly revenue with a line plot of monthly advertising spending (numbers in millions)
#
# ```
# df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
# "advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
# "month":range(12)
# })
# ```
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
"advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
"month":range(12)
})
bar = df.plot(kind='bar', x='month', y='revenue', color='green')
df.plot(x="month", y="advertising", kind='line', secondary_y = True, ax = bar)
bar.set_xlim((-1,12)) # this is in solutions - what does it do?
# Now we're finally ready to create a candlestick chart, which is a very common tool used to analyze stock price data. A candlestick chart shows the opening, closing, highest, and lowest price for a stock during a time window. The color of the "candle" (the thick part of the bar) is green if the stock closed above its opening price, or red if below.
#
# 
#
# This was initially designed to be a pandas plotting challenge, but it just so happens that this type of plot is just not feasible using pandas' methods. If you are unfamiliar with matplotlib, we have provided a function that will plot the chart for you so long as you can use pandas to get the data into the correct format.
#
# Your first step should be to get the data in the correct format using pandas' time-series grouping function. We would like each candle to represent an hour's worth of data. You can write your own aggregation function which returns the open/high/low/close, but pandas has a built-in which also does this.
# The below cell contains helper functions. Call ```day_stock_data()``` to generate a DataFrame containing the prices a hypothetical stock sold for, and the time the sale occurred. Call ```plot_candlestick(df)``` on your properly aggregated and formatted stock data to print the candlestick chart.
# + jupyter={"outputs_hidden": true}
import numpy as np
def float_to_time(x):
    """Convert a fractional hour (e.g. 9.5) to an 'H:MM:SS' string."""
    hours = int(x)
    minutes = int(x % 1 * 60)
    seconds = int(x * 60 % 1 * 60)
    return f"{hours}:{minutes:02d}:{seconds:02d}"
def day_stock_data():
    """Simulate one trading day of tick data as a random walk.

    Returns a DataFrame with two columns:
      - 'time':  datetime of each simulated sale
      - 'price': sale price, starting at 100 and drifting by up to +/-0.1% per tick
    """
    #NYSE is open from 9:30 to 4:00
    time = 9.5
    price = 100
    results = [(float_to_time(time), price)]
    while time < 16:
        # Gaps between sales are exponential; 0.001 hours mean (~3.6 seconds).
        elapsed = np.random.exponential(.001)
        time += elapsed
        # Discard any tick that would land after the 16:00 close.
        if time > 16:
            break
        # Each sale multiplies the price by a uniform factor in [0.999, 1.001].
        price_diff = np.random.uniform(.999, 1.001)
        price *= price_diff
        results.append((float_to_time(time), price))
    df = pd.DataFrame(results, columns = ['time','price'])
    df.time = pd.to_datetime(df.time)
    return df
#Don't read me unless you get stuck!
#Don't read me unless you get stuck!
def plot_candlestick(agg):
    """
    agg is a DataFrame which has a DatetimeIndex and five columns: ["open","high","low","close","color"]
    """
    fig, ax = plt.subplots()
    for time in agg.index:
        # Thin black line spanning the hour's full high-low range (the "wick").
        ax.plot([time.hour] * 2, agg.loc[time, ["high","low"]].values, color = "black")
        # Thick coloured line spanning open-close (the candle "body");
        # 'color' is expected to be green/red depending on close vs open.
        ax.plot([time.hour] * 2, agg.loc[time, ["open","close"]].values, color = agg.loc[time, "color"], linewidth = 10)
    ax.set_xlim((8,16))
    ax.set_ylabel("Price")
    ax.set_xlabel("Hour")
    ax.set_title("OHLC of Stock Value During Trading Day")
    plt.show()
# -
# **59.** Generate a day's worth of random stock data, and aggregate / reformat it so that it has hourly summaries of the opening, highest, lowest, and closing prices
# + jupyter={"outputs_hidden": true}
# -
# **60.** Now that you have your properly-formatted data, try to plot it yourself as a candlestick chart. Use the ```plot_candlestick(df)``` function above, or matplotlib's [```plot``` documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.plot.html) if you get stuck.
# + jupyter={"outputs_hidden": true}
# -
# *More exercises to follow soon...*
|
100-pandas-puzzles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
single_tuple_error = (0)  # parentheses alone do NOT make a tuple; this is just the int 0
print(single_tuple_error)
print(type(single_tuple_error))
single_tuple = (0, )  # the trailing comma is what creates a one-element tuple
print(single_tuple)
print(type(single_tuple))
# +
# print((0, 1, 2) + (3))
# TypeError: can only concatenate tuple (not "int") to tuple
# -
print((0, 1, 2) + (3, ))
t = 0, 1, 2
print(t)
print(type(t))
t_ = 0,
print(t_)
print(type(t_))
empty_tuple = ()
print(empty_tuple)
print(type(empty_tuple))
# +
# empty_tuple_error =
# SyntaxError: invalid syntax
# +
# empty_tuple_error = ,
# SyntaxError: invalid syntax
# +
# empty_tuple_error = (,)
# SyntaxError: invalid syntax
# -
empty_tuple = tuple()
print(empty_tuple)
print(type(empty_tuple))
def example(a, b):
    """Print each positional argument together with its type."""
    for value in (a, b):
        print(value, type(value))
example(0, 1)
# +
# example((0, 1))
# TypeError: example() missing 1 required positional argument: 'b'
# -
example((0, 1), 2)
example(*(0, 1))
|
notebook/tuple_single_empty.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mlcpuv1]
# language: python
# name: conda-env-mlcpuv1-py
# ---
# <font color=gray>Oracle Cloud Infrastructure Data Science Sample Notebook
#
# Copyright (c) 2021 Oracle, Inc. All rights reserved. <br>
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
# </font>
# # Deploying a PyTorch Model with Model Deployment
#
# In this tutorial we are going to prepare and save a pytorch model artifact using ADS, we are going to publish a conda environment, and deploy the model as an HTTP endpoint.
# ## Pre-requisites to Running this Notebook
# * We recommend that you run this notebook in a notebook session using the **Data Science Conda Environment "General Machine Learning for CPU (v1.0)"**
# * You need access to the public internet
# * **You need to upgrade the current version of the OCI Python SDK** (`oci`)
# * You need to install the `transformers` library
# !pip install --upgrade oci
# !pip install transformers
# +
import oci
import ads
import json
import logging
import os
import tempfile
import warnings
from os import path
import numpy as np
import pandas as pd
import time
from ads.common.model_export_util import prepare_generic_model
from transformers import AutoTokenizer, AutoModelForSequenceClassification
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)
warnings.filterwarnings('ignore')
ads.set_documentation_mode(False)
# -
# Here we download a pre-trained bert model:
# Download pretrained model
pretrained_model_name = "lannelin/bert-imdb-1hidden"
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name)
# Since we installed `transformers` in our conda environment, let's first publish the environment before saving the model to the catalog. We will need the same environment (with `transformers`) for model deployment.
#
# You can publish an environment by first initializing `odsc conda` with the namespace of your tenancy and the object storage bucket name where you want to store the conda environment. Then execute the `odsc conda publish` command in the terminal to copy the environment in the bucket. This command can take a few minutes to complete:
# !odsc conda init -b <your-bucket-name> -n <your-tenancy-namespace> # replace with your values.
# !odsc conda publish -s mlcpuv1 # change this value if you are running this notebook in a different conda environment.
# Also make sure that you write a policy allowing model deployment to read objects in your bucket:
#
# ```
# Allow any-user to read objects in compartment <your-compartment-name>
# where ALL { request.principal.type='datasciencemodeldeployment',
# target.bucket.name=<your-bucket-name> }
# ```
# Once we are done publishing the environment, we need to provide a reference of its path on object storage. The path of a published conda environment should be passed to the parameter `inference_conda_env`.
#
# If you don't know how to find the path of your environment on object storage, simply go back to the "Environment Explorer" tool in the notebook session. Click on "Published Environments". The path is written on each environment card (`Object Storage URI`)
# +
# Specify the inference conda environment.
inference_conda_env = "<your-conda-env-object-storage-path>" # replace with your value.
# Prepare the model artifact template
path_to_model_artifacts = "pytorch_artifacts"
model_artifact = prepare_generic_model(path_to_model_artifacts,
function_artifacts=False,
force_overwrite=True,
data_science_env=False,
inference_conda_env=inference_conda_env)
model.save_pretrained(path_to_model_artifacts)
tokenizer.save_pretrained(path_to_model_artifacts)
# List the template files
print("Model Artifact Path: {}\n\nModel Artifact Files:".format(
path_to_model_artifacts))
for file in os.listdir(path_to_model_artifacts):
if path.isdir(path.join(path_to_model_artifacts, file)):
for file2 in os.listdir(path.join(path_to_model_artifacts, file)):
print(path.join(file, file2))
else:
print(file)
# +
# %%capture
# score.py content for the model deployment: must define load_model() and
# predict() per the scoring contract. Written verbatim into the artifact.
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# source; in particular the `outputs` loop is assumed to sit at function level
# (it would fail with a NameError if the tokenizer file is absent) -- confirm
# against the original notebook.
score = '''
import json
import os
from functools import lru_cache
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "pytorch_model.bin"
tokenize_name = 'vocab'
@lru_cache(maxsize=10)
def load_model(model_file_name=model_name):
    """
    Loads model from the serialized format
    Returns
    -------
    model: a model instance on which predict API can be invoked
    """
    model_dir = os.path.dirname(os.path.realpath(__file__))
    contents = os.listdir(model_dir)
    if model_file_name in contents:
        model = AutoModelForSequenceClassification.from_pretrained(model_dir)
        return model
    else:
        raise Exception('{0} is not found in model directory {1}'.format(model_file_name, model_dir))
def predict(data, model=load_model()):
    """
    Returns prediction given the model and data to predict
    Parameters
    ----------
    model: Model instance returned by load_model API
    data: Data format as expected by the predict API of the core estimator. For eg. in case of sckit models it could be numpy array/List of list/Panda DataFrame
    Returns
    -------
    predictions: Output from scoring server
    Format: {'prediction':output from model.predict method}
    """
    tokenizer_dir = os.path.dirname(os.path.realpath(__file__))
    contents = os.listdir(tokenizer_dir)
    LABELS = ["negative", "positive"]
    if tokenize_name + '.json' in contents:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
    outputs = []
    for text in data:
        inputs = tokenizer.encode_plus(text, return_tensors='pt')
        output = model(**inputs)[0].squeeze().detach().numpy()
        outputs.append(LABELS[(output.argmax())])
    return {'prediction': outputs}
'''
with open(path.join(path_to_model_artifacts, "score.py"), 'w') as f:
    print(f.write(score))  # write() returns (and this prints) the character count
# +
# Project/compartment OCIDs come from the notebook session environment.
project_id = os.environ['PROJECT_OCID']
compartment_id = os.environ['NB_SESSION_COMPARTMENT_OCID']
# Save the artifact to the model catalog.
# NOTE(review): timeout=6000 -- presumably seconds; confirm against the ADS docs.
mc_model = model_artifact.save(
    project_id=project_id, compartment_id=compartment_id,
    display_name="pytorch_model (Model Deployment Test)",
    description="A sample bert pretrained model",
    ignore_pending_changes=True, timeout=6000)
# -
# Print published model information
mc_model
# ## Deploying the model with Model Deployment
#
# We are ready to deploy `mc_model`. We are using the user principal (config+key) method of authentication. Alternatively you can use resource principal.
# Getting OCI config information
oci_config = oci.config.from_file("~/.oci/config", "DEFAULT")
# Setting up DataScience instance
data_science = oci.data_science.DataScienceClient(oci_config)
# Setting up data science composite client to unlock wait_for_state operations
data_science_composite = oci.data_science.DataScienceClientCompositeOperations(data_science)
# The model deployment configuration object:
# Preparing model deployment data
model_deployment_details = {
    "displayName": "Pytorch model test",
    "projectId": mc_model.project_id,
    "compartmentId": mc_model.compartment_id,
    "modelDeploymentConfigurationDetails": {
        "deploymentType": "SINGLE_MODEL",
        "modelConfigurationDetails": {
            # The catalog model saved above.
            "modelId": mc_model.id,
            # VM shape used for each serving instance.
            "instanceConfiguration": {
                "instanceShapeName": "VM.Standard2.4"
            },
            # Fixed pool of two instances behind the load balancer.
            "scalingPolicy": {
                "policyType": "FIXED_SIZE",
                "instanceCount": 2
            },
            "bandwidthMbps": 10
        }
    },
    # No OCI Logging configuration attached to this deployment.
    "categoryLogDetails": None
}
# We are now ready to deploy. This takes a few minutes to complete.
# +
# %%time
# Create the deployment and block until it reaches a terminal state.
model_deployment = data_science_composite.create_model_deployment_and_wait_for_state(model_deployment_details,
                                                                                     wait_for_states=["SUCCEEDED",
                                                                                                      "FAILED"])
# -
# This cell extracts from the `model_deployment` object a series of useful diagnostics about the creation of the model deployment resource:
# +
print("Grabbing the model deployment ocid...")
# The response payload is an oci model object; round-trip through its string
# form to get a plain dict.
model_deployment_data = json.loads(str(model_deployment.data))
model_deployment_id = model_deployment_data['resources'][0]['identifier']
print(f"Model deployment ocid: {model_deployment_id}")
print("Checking for the correct response status code...")
if model_deployment.status == 200:
    print(f"Work request status code returned: {model_deployment.status}")
    print("Checking for non-empty response data...")
    if model_deployment.data:
        print(f"Data returned: {model_deployment.data}")
        print("Grabbing the model deployment work request status...")
        work_request_status = model_deployment_data['status']
        print("Checking for the correct work request status...")
        if work_request_status == "SUCCEEDED":
            print(f"Work request status returned: {work_request_status}")
        else:
            # Deployment work request failed: surface its error list and the
            # opc-request-id for an Oracle support ticket.
            print(
                f"Work request returned an incorrect status of: {work_request_status}")
            print(
                f"Work requests error: {data_science.list_work_request_errors(model_deployment.data.id).data}")
            print(
                f"opc-request-id: {model_deployment.headers['opc-request-id']}")
    else:
        print("Failed to grab model deployment data.")
        print(f"opc-request-id: {model_deployment.headers['opc-request-id']}")
else:
    print(
        f"Model deployment returned an incorrect status of: { model_deployment.status}")
    print(f"opc-request-id: {model_deployment.headers['opc-request-id']}")
|
model_catalog_examples/notebook_examples/pytorch_pretrained.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
np.set_printoptions(precision=2)
# **1**.
#
# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
#
# Find the largest palindrome made from the product of two 3-digit numbers.
#
# (Source: Project Euler Problem 4)
# Project Euler #4: largest palindromic product of two 3-digit numbers.
# Fix: the original outer loop ran down to 2 (range(999, 1, -1)), which also
# admitted products with 1- and 2-digit factors; both factors must be 100-999.
# Also compute the product once instead of three times per iteration.
res = []
for i in range(999, 99, -1):
    for j in range(999, 99, -1):
        n = i * j
        if str(n) == str(n)[::-1]:
            res.append(n)
max(res)
# **2**.
#
# Construct the following matrix
#
# ```python
# array([[18, 13, 10, 9, 10, 13, 18],
# [13, 8, 5, 4, 5, 8, 13],
# [10, 5, 2, 1, 2, 5, 10],
# [ 9, 4, 1, 0, 1, 4, 9],
# [10, 5, 2, 1, 2, 5, 10],
# [13, 8, 5, 4, 5, 8, 13],
# [18, 13, 10, 9, 10, 13, 18]])
# ```
# Each entry is the squared Euclidean distance of cell (i, j) from the centre (3, 3).
np.fromfunction(lambda i,j: (i-3)**2 + (j-3)**2, [7,7]).astype('int')
# **3**.
#
# Using numpy and string manipulation, create a DNA string with 150 bases that have the following probabilities: (A, 0.5), (C, 0.2), (T, 0.2), (G, 0.1). Find the most common triplet of bases among sliding windows of this DNA string. For example, a sliding window of size 3 for GATTACA would be GAT, ATT, TTA, TAC, ...
# Sample 150 bases i.i.d. with the given per-base probabilities.
dna = np.random.choice(list('ACTG'), size=150, p=[0.5,0.2,0.2,0.1])
# Column-stack the three offsets of the sequence to build every overlapping
# window of length 3, then join each row back into a string.
triplets = list(map(lambda x: ''.join(x), np.c_[dna[:-2], dna[1:-1], dna[2:]]))
from collections import Counter
Counter(triplets).most_common(1)
# **4**.
#
# - Using only base Python and numpy, create a pairwise distance matrix of the two sets of *row* vectors given
# - Scale the distances so that each *row* has mean zero and standard deviation 1
#
# Use Euclidean distance.
# Fixed seed so the random integer matrices are reproducible.
np.random.seed(123)
xs = np.random.randint(1,10, (10, 5))
ys = np.random.randint(1,10, (8, 5))
xs
ys
# +
# Pairwise Euclidean distance matrix: m[i, j] = ||xs[i] - ys[j]||.
nx, ny = xs.shape[0], ys.shape[0]
m = np.zeros([nx, ny])
for i, xrow in enumerate(xs):
    for j, yrow in enumerate(ys):
        diff = xrow - yrow
        m[i, j] = np.sqrt(diff @ diff)
# -
m
# Row-standardize: subtract each row's mean and divide by its standard deviation.
(m - m.mean(axis=1)[:, None])/m.std(axis=1)[:, None]
|
notebooks/solutions/S03_Exercises_Solutiion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zBJLbaPM3XC9"
import pandas as pd
# HR dataset loaded as a DataFrame ("file" is kept as the name for compatibility).
file= pd.read_csv("HRDataset_v14W.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 834} id="Wu3C16aV4CaD" outputId="dc81aa59-0345-4be9-c9c6-fb84cf702b7d"
# All employees holding the "Production Technician I" position.
file.loc[file["Position"]=="Production Technician I"]
# + colab={"base_uri": "https://localhost:8080/"} id="Z6EJ6HsG4RcC" outputId="fdf4cb46-8d99-43c6-9316-a6c28061f533"
# Per-column non-null counts for that subset (i.e. how many such employees).
file.loc[file["Position"]=="Production Technician I"].count()
# + colab={"base_uri": "https://localhost:8080/"} id="STblvbcj4Vvw" outputId="2e9807c5-8b8b-4500-9399-2086c6fd0562"
# Count of male employees.
file.loc[file["Gender"]=="Male"].count()
# + colab={"base_uri": "https://localhost:8080/", "height": 730} id="fywdQBk-4fLY" outputId="8f14d774-548d-473e-c8f8-ae7563990c35"
# Employees who are both active and married.
file.loc[(file["EmploymentStatus"]=="Active") & (file["MaritalDesc"]=="Married") ]
# + colab={"base_uri": "https://localhost:8080/", "height": 748} id="AppH4QqB6PfZ" outputId="36eb6fa5-925a-4628-ee67-d2ecda0f5c73"
file
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="V-pkqqv96ynp" outputId="5e1edf8e-331e-402d-fd6d-1127d515a49f"
# Employees who voluntarily terminated and are divorced.
file.loc[(file["EmploymentStatus"]=="Voluntarily Terminated") & (file["MaritalDesc"]=="Divorced") ]
# + colab={"base_uri": "https://localhost:8080/"} id="VhmW9z9t7zFg" outputId="6b156f82-8c01-492d-e4ff-e858d5e391cb"
# Names of active employees earning under 60000.
# (A vectorized boolean mask would be faster than this row loop, but equivalent.)
for k in file.index:
    if (file.loc[k, "EmploymentStatus" ] == "Active"):
        if (file.loc[k, "Salary" ] < 60000 ):
            print(file.loc[k, "Employee_Name"])
# + colab={"base_uri": "https://localhost:8080/"} id="9fO7s-NG9eqQ" outputId="2db9ae4d-b7bc-456e-d107-fb5e44cdb042"
# "Production Technician II" employees whose engagement score equals the maximum.
# NOTE(review): the max is taken over ALL positions, not just Technician II --
# confirm this matches the intended question.
for k in file.index:
    if (file.loc[k, "Position" ] == "Production Technician II"):
        if (file.loc[k, "EngagementSurvey" ] == file["EngagementSurvey"].max() ):
            print(file.loc[k, "Employee_Name"])
# + id="fy2u6Kh-_q9I"
# Alias only: df and file refer to the same DataFrame (no copy is made).
df=file
# + colab={"base_uri": "https://localhost:8080/"} id="Te0TYnD5BFTV" outputId="30d584f6-233b-47fc-e57a-02b9f6b1312f"
# Position(s) of the highest-paid employee(s).
for k in df.index :
    if (df.loc[k,'Salary']== df['Salary'].max()) :
        print(df.loc[k,'Position'])
# + colab={"base_uri": "https://localhost:8080/"} id="CnjybaWoB43a" outputId="e588190d-aeb6-4a53-d2a9-b7f296ff69cb"
# Positions of every employee below the top salary.
for k in df.index :
    if (df.loc[k,'Salary']< df['Salary'].max()) :
        print(df.loc[k,'Position'])
# + colab={"base_uri": "https://localhost:8080/"} id="cHDXxLl_Fy6J" outputId="666ac1b7-be7b-4d46-c8e4-041525841199"
# Names of employees earning below the mean salary.
for k in df.index :
    if (df.loc[k,'Salary'] < df['Salary'].mean()) :
        print(df.loc[k,'Employee_Name'])
# + colab={"base_uri": "https://localhost:8080/"} id="_5B0n_NwGA3K" outputId="af9d0a0e-3fc3-44ed-ba79-3934d7f7e31c"
# Below-mean earners who also report low satisfaction (EmpSatisfaction < 3).
for k in df.index :
    if (df.loc[k,'Salary'] < df['Salary'].mean()) :
        if (df.loc[k,"EmpSatisfaction"]<3) :
            print(df.loc[k,'Employee_Name'])
|
analytics,02_02_2022.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
# Avila dataset: 10 numeric features per sample plus a class label column.
names = ['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10','target']
data_train = pd.read_csv('avila/avila-tr.txt', header = None, names = names)
data_test = pd.read_csv('avila/avila-ts.txt', header = None, names = names)
# Encode string class labels as integer codes for the sparse categorical loss.
data_train.target = data_train.target.astype('category').cat.codes
data_test.target = data_test.target.astype('category').cat.codes
data_train.head()
data_test.head()
# Sanity checks: missing values and dataset shapes.
data_train.isnull().sum()
data_test.isnull().sum()
data_train.shape,data_test.shape
x = data_train.drop('target',axis=1)
y = data_train['target']
from sklearn.model_selection import train_test_split
# NOTE(review): this random split is immediately overwritten below by the
# dataset's own train/test files, so it is effectively dead code.
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.25, random_state = 42)
x_train = data_train.drop('target',axis=1)
y_train = data_train['target']
x_test = data_test.drop('target', axis = 1)
y_test = data_test['target']
x_train.shape,y_train.shape,x_test.shape,y_test.shape
# Small fully-connected classifier: 10 inputs -> 3x Dense(16, ReLU) -> 12-way softmax.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(16,input_dim = 10, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(16, activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(16,activation = tf.nn.relu))
model.add(tf.keras.layers.Dense(12, activation = tf.nn.softmax))
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy',
              metrics = ['accuracy'])
from keras.callbacks import ModelCheckpoint
ckpt_model = 'avila.best.hdf5'
# Keep only the weights with the best validation accuracy.
# NOTE(review): with metrics=['accuracy'], modern Keras/TF2 report history keys
# 'accuracy'/'val_accuracy', not 'acc'/'val_acc'; monitor='val_acc' and the
# history['acc'] plots below assume an older Keras -- confirm the installed version.
checkpoint = ModelCheckpoint(ckpt_model,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
callbacks_list = [checkpoint]
history = model.fit(x_train,
                    y_train,
                    validation_data=(x_test, y_test),
                    epochs=100,
                    batch_size=10,
                    callbacks=callbacks_list,
                    verbose=0)
# Model accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'])
plt.show()
# Model loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'])
plt.show()
# print final accuracy
scores = model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
|
Labs/Lab2/Romil/.ipynb_checkpoints/avila_ann-checkpoint.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.5
# language: julia
# name: julia-1.0
# ---
# # Linear Algebra in Julia
# > Based on work by <NAME> (MIT) (http://www.econ.ku.dk/phdstudent/noack/)
# > with edits from <NAME>
#
# ## Contents
# - [Basic linear algebra operations](#Basic-linear-algebra-operations)
# - [Multiplication](#Multiplication)
# - [Transposition](#Transposition)
# - [Transposed multiplication](#Transposed-multiplication)
# - [Solving linear systems](#Solving-linear-systems)
# - [Special matrix structures](#Special-matrix-structures)
# - [Large-scale problems](#Large-scale-problems)
# ## Basic linear algebra operations
# Define a random matrix
A = rand(1:4,3,3)
# Define a vector of ones
x = fill(1.0, (3))
# Note that $A$ has type `Array{Int64,2}` while $x$ has type `Array{Float64,1}`.
#
# Julia defines the aliases `Vector{Type}` for `Array{Type,1}` and `Matrix{Type}` for `Array{Type,2}`.
#
# Many basic linear algebra operations work the same as in other languages.
#
# ### Multiplication
b = A*x
# ### Transposition
# As in other languages, `A'` is the conjugate transpose of `A`
A'
# We can also obtain the transpose with the `transpose` function
transpose(A)
# ### Transposed multiplication
# In some cases Julia allows the `*` sign to be omitted
A'A
# ### Solving linear systems
# The linear system $Ax=b$ with a square matrix $A$ can be solved with the left-division operator (function) `\`
A\b
# ## Special matrix structures
#
# Matrix structure is very important in linear algebra.
# Working with even moderately large linear systems shows *how* important it is.
#
# The standard linear algebra package `LinearAlgebra` provides structured matrices:
using LinearAlgebra
n = 1000
A = randn(n,n);
# Julia can infer special matrix structure, for instance detecting symmetry
Asym = A + A'
issymmetric(Asym)
# But sometimes floating point error gets in the way
Asym_noisy = copy(Asym)
Asym_noisy[1,2] += 5eps()
issymmetric(Asym_noisy)
# Fortunately we can create structured matrices explicitly with functions such as
# `Diagonal`, `Triangular`, `Symmetric`, `Hermitian`, `Tridiagonal` and `SymTridiagonal`
Asym_explicit = Symmetric(Asym_noisy);
# Let's time how long Julia takes to compute the eigenvalues of `Asym`, `Asym_noisy` and `Asym_explicit`
@time eigvals(Asym);
@time eigvals(Asym_noisy);
@time eigvals(Asym_explicit);
#
# In this example, wrapping `Asym_noisy` in `Symmetric()` made the computation about 5x more efficient
# ### Large-scale problems
# Storing tridiagonal matrices with the `Tridiagonal` and `SymTridiagonal` types
# makes it possible to work with very large tridiagonal problems.
#
# The following problem could not be solved on a personal computer
# if the matrix were stored as a dense `Matrix`.
n = 1_000_000;
A = SymTridiagonal(randn(n), randn(n-1));
@time eigmax(A)
|
zh-cn/intro-to-julia-ZH/简短版/02.Julia中的线性代数.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <style>div.container { width: 100% }</style>
# <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="../assets/holoviz-logo-unstacked.svg" />
# <div style="float:right; vertical-align:text-bottom;"><h2>Tutorial 6. Interlinked Plots</h2></div>
# Using hvPlot allows you to generate a number of different types of plot
# quickly from a standard API by building [HoloViews](https://holoviews.org) objects, as discussed in the previous
# notebook. These objects are rendered with Bokeh which offers a number of
# standard ways to interact with your plot, such as panning and zooming
# tools.
#
# Many other modes of interactivity are possible when building an
# exploratory visualization (such as a dashboard) and these forms of
# interactivity cannot be achieved using hvPlot alone.
#
# In this notebook, we will drop down to the HoloViews level of
# representation to build a visualization directly that consists of linked plots that
# update when you interactivity select a particular earthquake with the
# mouse. The goal is to show how more sophisticated forms of interactivity can be built when needed, in a way that's fully compatible with all the examples shown in earlier sections.
# First let us load our initial imports:
import numpy as np
import pandas as pd
import dask.dataframe as dd
import hvplot.pandas # noqa
import datashader.geo
from holoviews.element import tiles
# And clean the data before filtering (for magnitude `>7`) and projecting to Web Mercator as before:
# +
# Load and repartition the raw earthquake data as a dask dataframe.
df = dd.read_parquet('../data/earthquakes.parq').repartition(npartitions=4)
cleaned_df = df.copy()
# Treat non-positive magnitudes as missing rather than dropping the rows.
cleaned_df['mag'] = df.mag.where(df.mag > 0)
cleaned_reindexed_df = cleaned_df.set_index(cleaned_df.time)
# Persist the cleaned frame in memory so later per-selection filters are fast.
cleaned_reindexed_df = cleaned_reindexed_df.persist()
# Severe quakes (magnitude >= 7) materialized as an in-memory pandas DataFrame.
most_severe = cleaned_reindexed_df[cleaned_reindexed_df.mag >= 7].compute()
# Project lon/lat to Web Mercator metres so the points overlay on map tiles.
x, y = datashader.geo.lnglat_to_meters(most_severe.longitude, most_severe.latitude)
most_severe_projected = most_severe.join([pd.DataFrame({'easting': x}), pd.DataFrame({'northing': y})])
# -
# Towards the end of the previous notebook we generated a scatter plot of earthquakes
# across the earth that had a magnitude `>7` that was projected using
# datashader and overlaid on top of a map tile source:
# Points element of the projected severe quakes, colored by magnitude.
high_mag_quakes = most_severe_projected.hvplot.points(x='easting', y='northing', c='mag',
                                                      title='Earthquakes with magnitude >= 7')
# Rename the tile-source dimensions so they align with the projected point columns.
esri = tiles.ESRI().redim(x='easting', y='northing')
esri * high_mag_quakes
# And saw how this object is a HoloViews `Points` object:
print(high_mag_quakes)
# This object is an example of a HoloViews *Element* which is an object that can display itself. These elements are *thin* wrappers around your data and the raw input data is always available on the `.data` attribute. For instance, we can look at the `head` of the `most_severe_projected` `DataFrame` as follows:
high_mag_quakes.data.head()
# We will now learn a little more about `HoloViews` elements, including how to build them up from scratch so that we can control every aspect of them.
#
# ### An Introduction to HoloViews Elements
#
# HoloViews elements are the atomic, visualizable components that can be
# rendered by a plotting library such as Bokeh. We don't actually need to use
# hvPlot to create these element objects: we can create them directly by
# importing HoloViews (and loading the extension if we have not loaded
# hvPlot):
import holoviews as hv
hv.extension("bokeh") # Optional here as we have already loaded hvplot.pandas
# Now we can create our own example of a `Points` element. In the next
# cell we plot 100 points with normal (independent) distributions in the
# `x` and `y` directions:
xs = np.random.randn(100)
ys = np.random.randn(100)
hv.Points((xs, ys))
# Note that the axis labels are 'x' and 'y', the default *dimensions* for
# this element type. We can use a different set of dimensions along the x- and y-axis (say
# 'weight' and 'height') and we can also associate additional `fitness` information with each point if we wish:
xs = np.random.randn(100)
ys = np.random.randn(100)
fitness = np.random.randn(100)
height_v_weight = hv.Points((xs, ys, fitness), ['weight', 'height'], 'fitness')
height_v_weight
# Now we can look at the printed representation of this object:
print(height_v_weight)
# Here the printed representation shows the *key dimensions* that we specified in square brackets as `[weight,height]` and the additional *value dimension* `fitness` in parentheses as `(fitness)`. The *key dimensions* map to the axes and the *value dimensions* can be visually represented by other visual attributes as we shall see shortly.
#
# For more information an HoloViews dimensions, see this [user guide](http://holoviews.org/user_guide/Annotating_Data.html).
# #### Exercise
#
# Visit the [HoloViews reference gallery](http://holoviews.org/reference/index.html) and browse
# the available set of elements. Pick an element type and try running
# one of the self-contained examples in the following cell.
# ### Setting Visual Options
#
# The two `Points` elements above look quite different from the one
# returned by hvplot showing the earthquake positions. This is because
# hvplot makes use of the HoloViews *options system* to customize the
# visual representation of these element objects.
#
# Let us color the `height_v_weight` scatter by the fitness value and use a larger
# point size:
height_v_weight.opts(color='fitness', size=8, colorbar=True, aspect='square')
# #### Exercise
#
# Copy the line above into the next cell and try changing the points to
# 'blue' or 'green' or another dimension of the data such as 'height' or 'weight'.
#
# Are the results what you expect?
# ### The `help` system
#
# You can learn more about the `.opts` method and the HoloViews options
# system in the [corresponding user
# guide](http://holoviews.org/user_guide/Applying_Customizations.html). To
# easily learn about the available options from inside a notebook, you can
# use `hv.help` and inspect the 'Style Options'.
# +
# Commented as there is a lot of help output!
# hv.help(hv.Scatter)
# -
# At this point, we can have some insight to the sort of HoloViews object
# hvPlot is building behind the scenes for our earthquake example:
esri * hv.Points(most_severe_projected, ['easting', 'northing'], 'mag').opts(color='mag', size=8, aspect='equal')
# #### Exercise
#
# Try using `hv.help` to inspect the options available for different element types such as the `Points` element used above. Copy the line above into the cell below and pick a `Points` option that makes sense to you and try using it in the `.opts` method.
# <details><summary>Hint</summary><br>
#
# If you can't decide on an option to pick, a good choice is `marker`. For instance, try:
#
# * `marker='+'`
# * `marker='d'`.
#
# HoloViews uses [matplotlib's conventions](https://matplotlib.org/3.1.0/api/markers_api.html) for specifying the various marker types. Try finding out which ones are support by Bokeh.
#
# </details>
# ### Custom interactivity for Elements
#
# When rasterization of the population density data via hvplot was
# introduced in the last notebook, we saw that the HoloViews object
# returned was not an element but a *`DynamicMap`*.
#
# A `DynamicMap` enables custom interactivity beyond the Bokeh defaults by
# dynamically generating elements that get displayed and updated as the
# plot is interacted with.
#
# There is a counterpart to the `DynamicMap` that does not require a live
# Python server to be running called the `HoloMap`. The `HoloMap`
# container will not be covered in the tutorial but you can learn more
# about them in the [containers user
# guide](http://holoviews.org/user_guide/Dimensioned_Containers.html).
#
# Now let us build a very simple `DynamicMap` that is driven by a *linked
# stream* (specifically a `PointerXY` stream) that represents the position
# of the cursor over the plot:
# +
from holoviews import streams
pointer = streams.PointerXY(x=0, y=0) # x=0 and y=0 are the initialized values
def crosshair(x, y):
    """Overlay a unit ellipse at the origin with crosshair lines at the cursor."""
    ellipse = hv.Ellipse(0, 0, 1)
    horizontal = hv.HLine(y)
    vertical = hv.VLine(x)
    return ellipse * horizontal * vertical
hv.DynamicMap(crosshair, streams=[pointer])
# -
# Try moving your mouse over the plot and you should see the crosshair
# follow your mouse position.
#
# The core concepts here are:
#
# * The plot shows an overlay built with the `*` operator introduced in
# the previous notebook.
# * There is a callback that returns this overlay that is built according
# to the supplied `x` and `y` arguments. A DynamicMap always contains a
# callback that returns a HoloViews object such as an `Element` or
# `Overlay`
# * These `x` and `y` arguments are supplied by the `PointerXY` stream
# that reflect the position of the mouse on the plot.
# #### Exercise
#
# Look up the `Ellipse`, `HLine`, and `VLine` elements in the
# [HoloViews reference guide](http://holoviews.org/reference/index.html) and see
# if the definitions of these elements align with your initial intuitions.
#
# #### Exercise (additional)
#
# If you have time, try running one of the examples in the
# 'Streams' section of the [HoloViews reference guide](http://holoviews.org/reference/index.html) in the cell below. All the examples in the reference guide should be relatively short and self-contained.
# ### Selecting a particular earthquake with the mouse
#
# Now we only need two more concepts before we can set up the appropriate
# mechanism to select a particular earthquake on the hvPlot-generated
# Scatter plot we started with.
#
# First, we can attach a stream to an existing HoloViews element such as
# the earthquake distribution generated with hvplot:
selection_stream = streams.Selection1D(source=high_mag_quakes)
# Next we need to enable the 'tap' tool on our Scatter to instruct Bokeh
# to enable the desired selection mechanism in the browser.
high_mag_quakes.opts(tools=['tap'])
# The Bokeh default alpha of points which are unselected is going to be too low when we overlay these points on a tile source. We can use the HoloViews options system to pick a better default as follows:
hv.opts.defaults(hv.opts.Points(nonselection_alpha=0.4))
# The tap tool is in the toolbar with the icon showing the concentric
# circles and plus symbol. If you enable this tool, you should be able to pick individual earthquakes above by tapping on them.
#
# Now we can make a DynamicMap that uses the stream we defined to show the index of the earthquake selected via the `hv.Text` element:
# +
def labelled_callback(index):
    """Label the first currently selected earthquake with its index and place name."""
    if len(index) == 0:
        return hv.Text(x=0, y=0, text='')
    chosen = index[0]  # only the first selection gets a label
    quake = most_severe_projected.iloc[chosen]
    label = '%d : %s' % (chosen, quake.place)
    return hv.Text(x=quake.easting, y=quake.northing, text=label).opts(color='white')
labeller = hv.DynamicMap(labelled_callback, streams=[selection_stream])
# -
# This labeller receives the index argument from the Selection1D stream
# which corresponds to the row of the original dataframe (`most_severe`)
# that was selected. This lets us present the index and place value using
# `hv.Text` which we then position at the corresponding latitude and
# longitude to label the chosen earthquake.
#
# Finally, we overlay this labeller `DynamicMap` over the original
# plot. Now by using the tap tool you can see the index number of an
# earthquake followed by the assigned place name:
(esri * high_mag_quakes * labeller).opts(hv.opts.Points(tools=['tap', 'hover']))
# #### Exercise
#
# Pick an earthquake point above and using the displayed index, display the corresponding row of the `most_severe` dataframe using the `.iloc` method in the following cell.
# ### Building a linked earthquake visualizer
#
# Now we will build a visualization that achieves the following:
#
# * The user can select an earthquake with magnitude `>7` using the tap
# tool in the manner illustrated in the last section.
#
# * In addition to the existing label, we will add concentric circles to further highlight the
# selected earthquake location.
#
# * *All* earthquakes within 0.5 degrees of latitude and longitude of the
# selected earthquake (~50km) will then be used to supply data for two linked
# plots:
#
# 1. A histogram showing the distribution of magnitudes in the selected area.
# 2. A timeseries scatter plot showing the magnitudes of earthquakes over time in the selected area.
# The first step is to generate a concentric-circle marker using a similar approach to the `labeller` above. We can write a function that uses `Ellipse` to mark a particular earthquake and pass it to a `DynamicMap`:
# +
def mark_earthquake(index):
    """Draw two concentric translucent white ellipses around the first selected quake."""
    if len(index) == 0:
        return hv.Overlay([])
    quake = most_severe_projected.iloc[index[0]]  # first selection only
    inner = hv.Ellipse(quake.easting, quake.northing, 1.5e6).opts(color='white', alpha=0.5)
    outer = hv.Ellipse(quake.easting, quake.northing, 3e6).opts(color='white', alpha=0.5)
    return inner * outer
quake_marker = hv.DynamicMap(mark_earthquake, streams=[selection_stream])
# -
# Now we can test this component by building an overlay of the `ESRI` tile source, the `>=7` magnitude points and `quake_marker`:
esri* high_mag_quakes.opts(tools=['tap']) * quake_marker
# Note that you may need to zoom in to your selected earthquake to see the
# localized, lower magnitude earthquakes around it.
# ### Filtering earthquakes by location
#
# We wish to analyse the earthquakes that occur around a particular latitude and longitude. To do this we will define a function that given a latitude and longitude, returns the rows of a suitable dataframe that corresponding to earthquakes within 0.5 degrees of that position:
def earthquakes_around_point(df, lat, lon, degrees_dist=0.5):
    """Rows of *df* within degrees_dist/2 of (lat, lon) in both coordinates, computed eagerly."""
    half_dist = degrees_dist / 2.0
    near_lat = (df['latitude'] - lat).abs() < half_dist
    near_lon = (df['longitude'] - lon).abs() < half_dist
    return df[near_lat & near_lon].compute()
# As it can be slow to filter our dataframes in this way, we can define the following function that can cache the result of filtering `cleaned_reindexed_df` (containing all earthquakes) based on an index pulled from the `most_severe` dataframe:
def index_to_selection(indices, cache={}):
    """Return all earthquakes near the selected severe quake, memoized by index.

    NOTE: the mutable default ``cache={}`` is deliberate -- a crude cross-call
    memo shared by the linked plots (and it grows without bound). Reads the
    module-level ``most_severe`` and ``cleaned_reindexed_df`` frames.
    """
    if not indices:
        # Empty selection: an empty frame with the same columns.
        return most_severe.iloc[[]]
    index = indices[0]  # Pick only the first one if multiple are selected
    if index in cache: return cache[index]
    row = most_severe.iloc[index]
    selected_df = earthquakes_around_point(cleaned_reindexed_df, row.latitude, row.longitude)
    cache[index] = selected_df
    return selected_df
# The caching will be useful as we know both of our planned linked plots (i.e the histogram and scatter over time) make use of the same earthquake selection once a particular index is supplied from a user selection. This particular caching strategy is rather awkward (and leaks memory!) but it simple and will serve for the current example. A better approach to caching will be presented in the [Advanced Dashboards](./08_Advanced_Dashboards.ipynb) section of the tutorial.
# #### Exercise
#
# Test the `index_to_selection` function above for the index you picked in the previous exercise. Note that the stream supplied a *list* of indices and that the function above only uses the first value given in that list. Do the selected rows look correct?:
# #### Exercise
#
# Convince yourself that the selected earthquakes are within 0.5$^o$ distance of each other in both latitude and longitude.
# <details><summary>Hint</summary><br>
#
# For a given `chosen` index, you can see the distance difference using the following code:
#
# ```python
# chosen = 235
# delta_long = index_to_selection([chosen]).longitude.max() - index_to_selection([chosen]).longitude.min()
# delta_lat = index_to_selection([chosen]).latitude.max() - index_to_selection([chosen]).latitude.min()
# print("Difference in longitude: %s" % delta_long)
# print("Difference in latitude: %s" % delta_lat)
# ```
#
# </details>
# ### Linked plots
#
# So far we have overlayed the display updates on top of the existing
# spatial distribution of earthquakes. However, there is no requirement
# that the data is overlaid and we might want to simply attach an entirely
# new, derived plot that dynamically updates to the side.
#
# Using the same principles as we have already seen, we can define a
# `DynamicMap` that returns `Histogram` distributions of earthquake
# magnitude:
# +
def histogram_callback(index):
    """Histogram of magnitudes for all quakes within half a degree of the selection."""
    nearby = index_to_selection(index)
    return nearby.hvplot.hist(
        y='mag', bin_range=(0,10), bins=20, color='red',
        title='Distribution of all magnitudes within half a degree of selection')
histogram = hv.DynamicMap(histogram_callback, streams=[selection_stream])
# -
# The only real difference in the approach here is that we can still use
# `.hvplot` to generate our elements instead of declaring the HoloViews
# elements explicitly. In this example, `.hvplot.hist` is used.
# The exact same principles can be used to build the scatter callback and `temporal_distribution` `DynamicMap`:
# +
def scatter_callback(index):
    """Magnitude-over-time scatter for all quakes within half a degree of the selection."""
    nearby = index_to_selection(index)
    # Title string reproduced exactly (including the trailing space).
    return nearby.hvplot.scatter(
        'time', 'mag', color='green',
        title='Temporal distribution of all magnitudes within half a degree of selection ')
temporal_distribution = hv.DynamicMap(scatter_callback, streams=[selection_stream])
# -
# Lastly, let us define a `DynamicMap` that draws a `VLine` to mark the time at which the selected earthquake occurs so we can see which tremors may have been aftershocks immediately after that major earthquake occurred:
# +
def vline_callback(index):
    """Draw a vertical marker at the time of the selected earthquake.

    With no selection an invisible line is returned so the overlay still
    has an element to render.
    """
    if index:
        quake = most_severe.iloc[index[0]]
        return hv.VLine(quake.time).opts(line_width=2, color='black')
    return hv.VLine(0).opts(alpha=0)

temporal_vline = hv.DynamicMap(vline_callback, streams=[selection_stream])
# -
# We now have all the pieces we need to build an interactive, linked visualization of earthquake data.
# #### Exercise
#
# Test the `histogram_callback` and `scatter_callback` callback functions by supplying your chosen index, remembering that these functions require a list argument in the following cell.
# ### Putting it together
# Now we can combine the components we have already built as follows to create a dynamically updating plot together with an associated, linked histogram:
# Overlay the tile basemap, the tappable quake points, the text label and the
# ring marker; stack the linked histogram and the time scatter (with its
# vertical time marker) beneath them in a single column.
((esri * high_mag_quakes.opts(tools=['tap']) * labeller * quake_marker)
+ histogram + temporal_distribution * temporal_vline).cols(1)
# We now have a custom interactive visualization that builds on the output of `hvplot` by making use of the underlying HoloViews objects that it generates.
# ## Conclusion
#
# When exploring data it can be convenient to use the `.plot` API to quickly visualize a particular dataset. By calling `.plot` to generate different plots over the course of a session, it is possible to gradually build up a mental model of how a particular dataset is structured. While this works well for simple datasets, it can be more efficient to build a linked visualization with support for direct user interaction as a tool for more rapidly gaining insight.
#
# In the workflow presented here, building such custom interaction is relatively quick and easy and does not involve throwing away prior code used to generate simpler plots. In the spirit of 'short cuts not dead ends', we can use the HoloViews output of `hvplot` that we used in our initial exploration to build rich visualizations with custom interaction to explore our data at a deeper level.
#
# These interactive visualizations not only allow for custom interactions beyond the scope of `hvplot` alone, but they can display visual annotations not offered by the `.plot` API. In particular, we can overlay our data on top of tile sources, generate interactive textual annotations, draw shapes such a circles, mark horizontal and vertical marker lines and much more. Using HoloViews you can build visualizations that allow you to directly interact with your data in a useful and intuitive manner.
#
# In this notebook, the earthquakes plotted were either filtered early on by magnitude (`>=7`) or dynamically to analyse only the earthquakes within a small geographic distance. This allowed us to use Bokeh directly without any special handling and without having to worry about the performance issues that would occur if we were to try to render the whole dataset at once.
#
# In the next section we will see how such large datasets can be visualized directly using Datashader.
|
examples/tutorial/06_Interlinked_Plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import plotly.offline as pyo
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
init_notebook_mode(connected=True)
cf.go_offline()
# Load the FIFA 20 player dataset.
df = pd.read_csv('players_20.csv')
# ###### Showing all the columns
pd.options.display.max_columns = None
display(df.head())
# ###### Separating out the X and Y - Goalkeeper Skill Prediction
# Keep only goalkeepers; their 'potential' is the regression target.
goalkeeper = df[df['team_position'] == 'GK']
goalkeeper.head(2)
# Feature matrix: age plus goalkeeping / movement / power / mentality ratings.
X = goalkeeper[['age', 'gk_diving', 'gk_handling', 'gk_kicking', 'gk_reflexes', 'gk_speed', 'gk_positioning', 'movement_acceleration', 'movement_sprint_speed', 'movement_agility', 'movement_reactions', 'movement_balance', 'power_shot_power', 'power_jumping', 'power_strength', 'mentality_aggression', 'mentality_interceptions', 'mentality_positioning', 'mentality_vision', 'mentality_composure']]
y = goalkeeper['potential']
# ###### Train Test Split
# 50/50 split with a fixed seed so the results are reproducible.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state = 1)
X_train.head(2)
X_test.head(2)
y_test.head(2)
y_train.head(2)
# ## Regression Models
# ### 1. Multiple Linear Regression Model
# #### Fitting the Model
# Fit an ordinary least-squares model on the training half.
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, y_train)
# ###### Prediction from the Model
# Passing in a dataset the model has never seen before = X_test
predictions = lm.predict(X_test)
# ###### Comparing with y_test and predictions
# +
# Converting y_test into an Array since predictions is an array as well
y_test1 = np.array(y_test)
np.set_printoptions(precision=2) #Limiting to 2 decimal places
#print(np.concatenate((predictions.reshape(len(predictions),1), y_test1.reshape(len(y_test1),1)),1))
# -
# ##### Putting the Predictions into the Dataset
X_test.head(2)
y_test.head(2)
y_test1
predictions
# +
# Adding the Actual Potential into the X_test dataset
# NOTE(review): assigning columns to a train_test_split slice may raise a
# pandas SettingWithCopyWarning — consider X_test = X_test.copy() first.
# -
X_test['Potential Skill'] = y_test1
# +
# Adding the Predicted skill into the dataset
# -
X_test['Predicted Skill'] = predictions
X_test.head(3)
# ###### Mapping the Predicted Skill into the main dataset
## Merging on index: X_test kept the original row index, so an index join
## attaches predictions back to the full player table.
df_merged = df.merge(X_test, how='outer', left_index=True, right_index=True)
# Dropping rows with NaN for Predicted Skill (players not in the test split)
df_merged = df_merged.dropna(subset=['Predicted Skill'])
# Show name, label and prediction side by side
df_merged.loc[:, ['short_name', 'potential', 'Potential Skill', 'Predicted Skill']]
# ###### Comparing Predictions with Y_test
# Scatter of actual vs predicted potential; points near the diagonal indicate good fit
plt.scatter(y_test,predictions)
# ###### Model Evaluation
print(lm.intercept_)
lm.coef_
X.columns
# One coefficient per feature, indexed by feature name.
coef_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
coef_df
# Interpreting the coefficients:
#
# - Holding all other features fixed, a 1 unit increase in **Age** is associated with a **decrease of 0.659151** in overall skill.
# - Holding all other features fixed, a 1 unit increase in **gk_diving** is associated with an **increase of 0.234021** overall skill.
#
# **And so on**
# ###### Metrics
# Standard regression error metrics on the held-out half.
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions))) #The lower the better
# ###### Calculating the R2
# Coefficient of determination: fraction of target variance explained (1.0 is perfect).
from sklearn.metrics import r2_score
r2_score(y_test, predictions)
|
1Multiple Linear Regression - Predicting the Potential.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# We are using changed data (which we removed students who applied for 'Bachelor')
# Load the cleaned applicant dataset (Bachelor applicants already removed).
df = pd.read_json('../Data2.json')
df.sort_index(inplace=True)
df.head(5)
# Distribution of domestic (Iran) paper counts.
df['papersIRAN'].value_counts().sort_index()
df[df['papersIRAN']>15]['apDegree'].value_counts()
# Distribution of international paper counts.
df['papersGLOB'].value_counts().sort_index()
df[df['papersGLOB']>10]['apDegree'].value_counts()
# # Don't Have Any Paper
# Applicants with zero papers anywhere, broken down by applied degree.
na=df[(df['papersGLOB']==0) & (df['papersIRAN']==0)]['apDegree'].value_counts()
na
# Pie chart: share of Masters applicants with vs without papers.
pd.Series((na.Masters,df[df.apDegree=='Masters'].count()[0]-na.Masters),index=['No Paper','With Papers']).plot(autopct='%1.1f%%',kind='pie')
plt.title('Masters Paper')
plt.show()
# Same breakdown for Doctorate applicants.
pd.Series((na.Doctorate,df[df.apDegree=='Doctorate'].count()[0]-na.Doctorate),index=['No Paper','With Papers']).plot(autopct='%1.1f%%',kind='pie')
plt.title('Doctorate Paper')
plt.show()
# Summary statistics for total paper count per applicant.
(df.papersGLOB+df.papersIRAN).describe()
|
03_DataPreprocessing/02_PAPERS/PapersMissingData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: '531_36'
# language: python
# name: '531_36'
# ---
# + [markdown] render=true
# # Use logical constraints with decision optimization
#
# This tutorial includes everything you need to set up decision optimization engines, build a mathematical programming model, leveraging logical constraints.
#
#
# When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.
#
# >This notebook is part of **[Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)**
# >
# >It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Watson Studio Cloud](https://www.ibm.com/cloud/watson-studio/>) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all>)
# and you can start using Watson Studio Cloud right away).
#
#
# Table of contents:
#
# - [Describe the business problem](#Describe-the-business-problem:--Games-Scheduling-in-the-National-Football-League)
# * [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
# * [Use decision optimization](#Use-decision-optimization)
# * [Step 1: Import the library](#Step-1:-Import-the-library)
# * [Step 2: Learn about constraint truth values](#Step-2:-Learn-about-constraint-truth-values)
# * [Step 3: Learn about equivalence constraints](#Step-3:-Learn-about-equivalence-constraints)
# * [Summary](#Summary)
# ****
# -
# Logical constraints let you use the _truth value_ of constraints inside the model. The truth value of a constraint
# is a binary variable equal to 1 when the constraint is satisfied, and equal to 0 when not. Adding a constraint to a model ensures that it is always satisfied.
# With logical constraints, one can use the truth value of a constraint _inside_ the model, allowing to choose dynamically whether a constraint is to be satisfied (or not).
# + [markdown] render=true
# ## How decision optimization can help
#
# * Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
#
# * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
#
# * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
# <br/>
#
# <u>With prescriptive analytics, you can:</u>
#
# * Automate the complex decisions and trade-offs to better manage your limited resources.
# * Take advantage of a future opportunity or mitigate a future risk.
# * Proactively update recommendations based on changing events.
# * Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
#
#
# -
# ## Use decision optimization
# ### Step 1: Import the library
#
# Run the following code to import Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier.
import sys
# Verify the DOcplex modeling library is importable before continuing.
try:
    import docplex.mp
except ImportError:
    # Catch only a failed import: a bare `except:` would also swallow
    # KeyboardInterrupt and SystemExit.
    raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
# A restart of the kernel might be needed.
# + [markdown] render=true
# ### Step 2: Learn about constraint truth values
#
# Any discrete linear constraint can be associated to a binary variable that holds the truth value of the constraint.
# But first, let's explain what a discrete constraint is
# -
# #### Discrete linear constraint
#
# A discrete linear constraint is built from discrete coefficients and discrete variables, that is variables with type `integer` or `binary`.
#
# For example, assuming x and y are integer variables:
#
# - `2x+3y == 1` is discrete
# - `x+y = 3.14` is not (because of 3.14)
# - `1.1 x + 2.2 y <= 3` is not because of the non-integer coefficients 1.1 and 2.2
# #### The truth value of an added constraint is always 1
#
# The truth value of a linear constraint is accessed by the `status_var` property. This property returns a binary which can be used anywhere a variable can. However, the value of the truth value variable and the constraint are linked, both ways:
#
# - a constraint is satisfied if and only if its truth value variable equals 1
# - a constraint is _not_ satisfied if and only if its truth value variable equals 0.
#
# In the following small model, we show that the truth value of a constraint which has been added to a model is always equal to 1.
# +
# Toy model: a constraint that has been added to a model always has truth value 1.
from docplex.mp.model import Model

m1 = Model()
x = m1.integer_var(name='ix')
y = m1.integer_var(name='iy')
ct = m1.add(x + y <= 3)
# access the truth value of a linear constraint
ct_truth = ct.status_var
m1.maximize(x+y)
assert m1.solve()
print('the truth value of [{0!s}] is {1}'.format(ct, ct_truth.solution_value))
# -
# #### The truth value of a constraint not added to a model is free
#
# A constraint that is not added to a model, has no effect. Its truth value is free: it can be either 1 or 0.
#
# In the following example, both `x` and `y` are set to their upper bound, so that the constraint is not satisfied; hence the truth value is 0.
# Toy model: a constraint NOT added to the model leaves its truth value free.
# The optimum drives both variables to their bound, violating ct, so its
# truth value solves to 0.
m2 = Model(name='logical2')
x = m2.integer_var(name='ix', ub=4)
y = m2.integer_var(name='iy', ub=4)
ct = (x + y <= 3)
ct_truth = ct.status_var # not m2.add() here!
m2.maximize(x+y)
assert m2.solve()
m2.print_solution()
print('the truth value of [{0!s}] is {1}'.format(ct, ct_truth.solution_value))
# #### Using constraint truth values in modeling
#
# We have learned about the truth value variable of linear constraints, but there's more.
# Linear constraints can be freely used in _expressions_: Docplex will then substitute the constraint's truth value
# variable in the expression.
#
# Let's experiment again with a toy model: in this model,
# we want to express that when `x ==3` is false, then `y ==4` must also be false.
# To express this, it suffices to say that the truth value of `y == 4` is less than or equal
# to the truth value of `x ==3`. When `x==3` is false, its truth value is 0, hence the truth value of `y==4` is also zero, and `y` cannot be equal to 4.
#
# However, as shown in the model below, it is not necessary to use the `status_var` property: using
# the constraints in a comparison expression works fine.
#
# As we maximize y, y has value 4 in the optimal solution (it is the upper bound), and consequently the constraint `ct_y4` is satisfied. From the inequality between truth values,
# it follows that the truth value of `ct_x2` equals 1 and x is equal to 2.
#
# Using the constraints in the inequality has silently converted each constraint into its truth value.
# Implication via truth values: truth(y==4) <= truth(x==2) means
# "if y equals 4 then x must equal 2".
m3 = Model(name='logical3')
x = m3.integer_var(name='ix', ub=4)
y = m3.integer_var(name='iy', ub=4)
ct_x2 = (x == 2)
ct_y4 = (y == 4)
# use constraints in comparison; each is silently converted to its truth value
m3.add( ct_y4 <= ct_x2 )
m3.maximize(y)
assert m3.solve()
# expected solution x==2, and y==4.
m3.print_solution()
# Constraint truth values can be used with arithmetic operators, just as variables can. In the next model, we express a (slightly) more complex constraint:
#
# - either x is equal to 3, _or_ both y and z are equal to 5
#
# Let's see how we can express this easily with truth values:
# Arithmetic over truth values: 2*truth(x==3) + truth(y==5) + truth(z==5) == 2
# holds either when x==3 (left term alone reaches 2) or when both y==5 and z==5.
m31 = Model(name='logical31')
x = m31.integer_var(name='ix', ub=4)
y = m31.integer_var(name='iy', ub=10)
z = m31.integer_var(name='iz', ub=10)
ct_x2 = (x == 3)
ct_y5 = (y == 5)
ct_z5 = (z == 5)
#either ct_x2 is true or -both- ct_y5 and ct_z5 must be true
m31.add( 2 * ct_x2 + (ct_y5 + ct_z5) == 2)
# force x to be less than 2: it cannot be equal to 3!
m31.add(x <= 2)
# maximize sum of x,y,z
m31.maximize(x+y+z)
assert m31.solve()
# the expected solution is: x=2, y=5, z=5
assert m31.objective_value == 12
m31.print_solution()
# As we have seen, constraints can be used in expressions. This includes the `Model.sum()` and `Model.dot()` aggregation methods.
#
# In the next model, we define ten variables, one of which must be equal to 3 (we don't care which one, for now). As we maximize the sum of all `xs` variables, all will end up equal to their upper bound, except for one.
# Aggregating truth values: exactly one of the ten x==3 constraints must hold.
m4 = Model(name='logical4')
xs = m4.integer_var_list(10, ub=100)
cts = [xi==3 for xi in xs]
m4.add( m4.sum(cts) == 1)
m4.maximize(m4.sum(xs))
assert m4.solve()
m4.print_solution()
# As we can see, all variables but one are set to their upper bound of 100. We cannot predict which variable will be set to 3.
# However, let's imagine that we prefer variable with a lower index to be set to 3, how can we express this preference?
#
# The answer is to use an additional expression to the objective, using a scalar product of constraint truth values
# dot() weights the k-th truth value by k+1, so satisfying x==3 at a low
# index costs less in the subtracted preference term.
preference = m4.dot(cts, (k+1 for k in range(len(xs))))
# we prefer lower indices for satisfying the x==3 constraint
# so the final objective is a maximize of sum of xs -minus- the preference
m4.maximize(m4.sum(xs) - preference)
assert m4.solve()
m4.print_solution()
# As expected, the `x` variable set to 3 now is the first one.
# #### Using truth values to negate a constraint
#
# Truth values can be used to negate a complex constraint, by forcing its truth value to be equal to 0.
#
# In the next model, we illustrate how an equality constraint can be negated by forcing its truth value to zero. This negation forbids y to be equal to 4, as it would be without this negation.
# Finally, the objective is 7 instead of 8.
# +
# Negating a constraint: forcing its truth value to 0 forbids y + x >= 7.
m5 = Model(name='logical5')
x = m5.integer_var(name='ix', ub=4)
y = m5.integer_var(name='iy', ub=4)
# this is the inequality constraint we want to negate
ct_xy7 = (y + x >= 7)
# forcing truth value to zero means the constraint is not satisfied.
# note how we use a constraint in an expression
negation = m5.add( ct_xy7 == 0)
# maximize x+y should push both variables to 4, but the negation forbids x+y >= 7
m5.maximize(x + y)
assert m5.solve()
m5.print_solution()
# expecting 6 as objective, not 8
assert m5.objective_value == 6
# now remove the negation
m5.remove_constraint(negation)
# and solve again
assert m5.solve()
# the objective is 8 as expected: both x and y are equal to 4
assert m5.objective_value == 8
m5.print_solution()
# -
# #### Summary
#
# We have seen that linear constraints have an associated binary variable, its _truth value_, whose value is linked to whether or not the constraint is satisfied.
#
# second, linear constraints can be freely mixed with variables in expression to express _meta-constraints_ that is, constraints
# about constraints. As an example, we have shown how to use truth values to negate constraints.
# #### Note: the `!=` (not_equals) operator
#
# Since version 2.9, Docplex provides a 'not_equal' operator, between discrete expressions. Of course, this is implemented using truth values, but the operator provides a convenient way to express this constraint.
# Demonstrate the != (not_equals) operator between discrete expressions.
m6 = Model(name='logical6')
x = m6.integer_var(name='ix', ub=4)
y = m6.integer_var(name='iy', ub=4)
# x must be strictly below y
m6.add(x +1 <= y)
m6.add(x != 3)
m6.add(y != 4)
# cap the total so the maximization cannot simply push both to their bound
m6.add(x+y <= 7)
# maximize x+y should yield both variables to 4,
# but here: x < y, y cannot be 4 thus x cannot be 3 either so we get x=2, y=3
m6.maximize(x + y)
assert m6.solve()
m6.print_solution()
# expecting 5 as objective, not 8
assert m6.objective_value == 5
# ### Step 3: Learn about equivalence constraints
#
# As we have seen, using a constraint in expressions automatically generates a truth value variable, whose value is linked to the status of the constraint.
#
# However, in some cases, it can be useful to relate the status of a constraint to an _existing_ binary variable. This is the purpose of equivalence constraints.
#
# An equivalence constraint relates an existing binary variable to the status of a discrete linear constraints, in both directions. The syntax is:
#
# `Model.add_equivalence(bvar, linear_ct, active_value, name)`
#
# - `bvar` is the existing binary variable
# - `linear-ct` is a discrete linear constraint
# - `active_value` can take values 1 or 0 (the default is 1)
# - `name` is an optional string to name the equivalence.
#
# If the binary variable `bvar` equals 1, then the constraint is satisfied. Conversely, if the constraint is satisfied, the binary variable is set to 1.
# Equivalence constraints: bl[k] == 1 exactly when il[k]==5, and exactly when
# jl[k]==7; maximizing sum(bl) therefore forces every pair to (5, 7).
m7 = Model(name='logical7')
size = 7
il = m7.integer_var_list(size, name='i', ub=10)
jl = m7.integer_var_list(size, name='j', ub=10)
bl = m7.binary_var_list(size, name='b')
for k in range(size):
    # for each k, relate bl_k to il_k==5 *and* jl_k == 7
    m7.add_equivalence(bl[k], il[k] == 5)
    m7.add_equivalence(bl[k], jl[k] == 7)
# now maximize sum of bs
m7.maximize(m7.sum(bl))
assert m7.solve()
m7.print_solution()
# ### Step 4: Learn about indicator constraints
#
# The equivalence constraint described in the previous section links the value of an existing binary variable to the satisfaction of a linear constraint. In certain cases, it is sufficient to link from an existing binary variable to the constraint, but not the other way. This is what _indicator_ constraints do.
#
# The syntax is very similar to equivalence:
#
# `Model.add_indicator(bvar, linear_ct, active_value=1, name=None)`
#
# - `bvar` is the existing binary variable
# - `linear-ct` is a discrete linear constraint
# - `active_value` can take values 1 or 0 (the default is 1)
# - `name` is an optional string to name the indicator.
#
# The indicator constraint works as follows: if the binary variable is set to 1, the constraint is satisfied; if the binary variable is set to 0, anything can happen.
#
# One noteworthy difference between indicators and equivalences is that, for indicators, the linear constraint need not be discrete.
# In the following small model, we first solve without the indicator: both b and x are set to their upper bound, and the final objective is 200.
#
# Then we add an indicator stating that when b equals 1, then x must be less than 3.14; the resulting objective is 103.14, as b is set to 1, which triggers the `x <= 3.14` constraint.
#
# Note that the right-hand side constraint is _not_ discrete (because of 3.14).
# +
# Indicator constraint: when b == 1, enforce x <= 3.14 (one-way link only).
# Note the right-hand constraint need not be discrete.
m8 = Model(name='logical8')
x = m8.continuous_var(name='x', ub=100)
b = m8.binary_var(name='b')
m8.maximize(100*b +x)
assert m8.solve()
# without the indicator both b and x hit their upper bounds
assert m8.objective_value == 200
m8.print_solution()
ind_pi = m8.add_indicator(b, x <= 3.14)
assert m8.solve()
# b stays at 1 (worth 100), which caps x at 3.14 -> objective 103.14
assert m8.objective_value <= 104
m8.print_solution()
# -
# ### Step 5: Learn about if-then
#
# In this section we explore the `Model.add_if_then` construct which links the truth value of two constraints:
# `Model.add_if_then(if_ct, then_ct)` ensures that, when constraint `if_ct` is satisfied, then `then_ct` is also satisfied.
# When `if_ct` is not satisfied, `then_ct` is free to be satisfied or not.
#
# The syntax is:
#
# `Model.add_if_then(if_ct, then_ct, negate=False)`
#
# - `if_ct` is a discrete linear constraint
# - `then_ct` is any linear constraint (not necessarily discrete),
# - `negate` is an optional flag to reverse the logic, that is satisfy `then_ct` if `if_ct` is not (more on this later)
#
# As for indicators, the `then_ct` need not be discrete.
#
# `Model.add_if_then(if_ct, then_ct)` is roughly equivalent to `Model.add_indicator(if_ct.status_var, then_ct)`.
# +
# if-then: whenever y+z >= 10 holds, x <= 3.14 must hold as well.
m9 = Model(name='logical9')
x = m9.continuous_var(name='x', ub=100)
y = m9.integer_var(name='iy', ub = 11)
z = m9.integer_var(name='iz', ub = 13)
m9.add_if_then(y+z >= 10, x <= 3.14)
# y and z are pushed to their ub (weight 100 each), so x is down to 3.14
m9.maximize(x + 100*(y + z))
m9.solve()
m9.print_solution()
# -
# In this second variant, the objective coefficient for `(y+z)` is 2 instead of 100, so `x` dominates the objective and reaches its upper bound, while (y+z) must be less than 9, which is what we observe.
# +
# with only weight 2 on (y + z), x dominates: x reaches its upper bound (100)
# and y+z must stay <= 9 to avoid triggering the if-then -> objective 118
m9.maximize(x + 2 *(y + z))
m9.solve()
m9.print_solution()
assert abs(m9.objective_value - 118) <= 1e-2
# -
# ## Summary
#
# We have seen that linear constraints have an associated binary variable, its _truth value_, whose value is linked to whether or not the constraint is satisfied.
#
# second, linear constraints can be freely mixed with variables in expression to express _meta-constraints_ that is, constraints
# about constraints. As an example, we have shown how to use truth values to negate constraints.
#
# In addition, we have learned to use equivalence, indicator and if_then constraints.
#
#
# You learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to formulate a Mathematical Programming model with logical constraints.
# + [markdown] render=true
# #### References
# * [Decision Optimization CPLEX Modeling for Python documentation](http://ibmdecisionoptimization.github.io/docplex-doc/)
# * [Decision Optimization on Cloud](https://developer.ibm.com/docloud/)
# * Need help with DOcplex or to report a bug? Please go [here](https://stackoverflow.com/questions/tagged/docplex)
# * Contact us at <EMAIL>"
#
# -
# Copyright © 2017-2019 IBM. Sample Materials.
|
examples/mp/jupyter/logical_cts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the Pittsburgh neighborhood census dataset.
df = pd.read_csv("data.csv")
df
# ## Let's get a sense of what our dataframe looks like
df.columns
# Working population as a share of (population + working population).
# NOTE(review): this assumes 'Population (2010)' excludes the working
# population; if it already includes workers, the denominator double-counts
# them — verify against the data dictionary.
df["Total Working Population Percentage"] = df["Total Working Pop. (Age 16+) (2010)"] / (df["Population (2010)"] + df["Total Working Pop. (Age 16+) (2010)"])
df
# ### In the above cell, we obtain the ratio of the working population to the total population in each area. Let's see who has the highest....
df.sort_values(by="Total Working Population Percentage", ascending=False, inplace=True)
df
# ### Let's evaluate each area in Pittsburgh based on how "athletic" their working population is.
# ### Let's award 10 points to those who commute to work via Taxi/Carpool/Vanpool/Other,
# ### 20 points to those who commute to work via Motorcycle, 40 points via Bicycle, and 60 points via walking
# ###
# ### Let's see the most "athletic" regions in Pittsburgh....
# +
# BUG FIX: the original version assigned only the first term to
# "Athletic Score" — the continuation lines beginning with "+" were parsed
# as separate, no-op expression statements, so every commute mode except
# "Work at Home" was silently ignored. Parenthesizing the sum makes all
# the weighted terms count.
df["Athletic Score"] = (
    10 * df["Work at Home (2010)"]
    + 10 * df["Commute to Work: Other (2010)"]
    + 10 * df["Commute to Work: Taxi (2010)"]
    + 10 * df["Commute to Work: Carpool/Vanpool (2010)"]
    + 10 * df["Commute to Work: Public Transportation (2010)"]
    + 20 * df["Commute to Work: Motorcycle (2010)"]
    + 40 * df["Commute to Work: Bicycle (2010)"]
    + 60 * df["Commute to Work: Walk (2010)"]
)
# Rank neighborhoods by the score, highest first.
df.sort_values(by="Athletic Score", inplace=True, ascending=False)
# df = df[["Neighborhood", "Athletic Score"]]
df.dropna(inplace=True)
df[["Neighborhood", "Athletic Score"]]
# -
# ### Here's a quick summary of the "Athletic Score" statistic throughout Pittsburgh
# Box plot summarizing the Athletic Score distribution across neighborhoods.
df["Athletic Score"].plot.box()
# ### Looks like there's a clear winner
import geopandas
# %matplotlib inline
import matplotlib.pyplot as plt
df[["Neighborhood", "Athletic Score"]]
# ### Fairywood blows everyone else out of the waters.... It isn't particularly close either.
# +
# # import dataset
# steps = pd.read_csv("steps.csv")
# # filter to important info
# num_steps = steps.groupby("neighborhood").sum()['number_of_steps']
# num_steps.sort_values(ascending=False)
# +
# # do the merge
# steps_map = neighborhoods.merge(num_steps, how='left', left_on='hood', right_on='neighborhood')
# # look at the head to confirm it merged correctly
# steps_map[['hood','number_of_steps','geometry']].head()
# -
# Load the neighborhood boundary shapefile for mapping.
neighborhoods = geopandas.read_file("../data/Neighborhoods/Neighborhoods_.shp")
neighborhoods.plot()
# +
# steps_map.plot(column='number_of_steps')
# -
# Left-join the scores onto the shapes so unmatched districts stay (as NaN).
result = neighborhoods.merge(df, how='left', left_on='hood', right_on='Neighborhood')
result[["Neighborhood", "Athletic Score"]]
result.plot(column='Athletic Score', # set the data to be used for coloring
            cmap='OrRd', # choose a color palette
            edgecolor="white", # outline the districts in white
            legend=True, # show the legend
            # FIX: label the legend with the plotted column; the original said
            # "Number of Steps", left over from the commented-out steps example
            legend_kwds={'label': "Athletic Score"},
            figsize=(15, 10), # set the size
            missing_kwds={"color": "lightgrey"} # set districts with no data to gray
           )
|
Sid/sid (1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing the Libraries
# +
# Importing required libraries
import re
import string
import nltk
import graphviz
import pydot
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import transformers
import bert
import tensorflow as tf
import tensorflow_hub as hub
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup, TFBertForSequenceClassification
from nltk.stem import WordNetLemmatizer
from gensim.models import fasttext
from gensim.models.fasttext import FastText
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.utils.vis_utils import plot_model
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# -
# # Disaster Tweets Analysis
#
# Classification of the tweets whether the tweet is about a disaster that happened or whether it is just normal tweet.
# ## Loading the Dataset
# Loading the Dataset
# Load the labeled training tweets and the unlabeled test tweets.
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
train_data.head(), test_data.head()
# ## Exploring the Data
# Function to explore the types of data and missing values in the dataset
def data_explore(data, train=1):
print('Number of inputs in data: ' + str(len(data)))
print('Number of missing value in each column: \n', data.isnull().sum())
if train:
data['target'].value_counts().plot(kind='bar', title='Target Distribution')
keyword_percent = ((data['keyword'].isnull().sum()) / len(data)) * 100
location_percent = float(((data['location'].isnull().sum()) / len(data)) * 100)
print('Unique number of Keywords: ', str(data['keyword'].nunique()))
print('Unique number of Locations: ', str(data['location'].nunique()))
print('Percentage of missing values in Keyword Column: ', str(keyword_percent))
print('Percentage of missing values in Location Column: ', str(location_percent))
# Training set (train=1 also plots the target distribution)
data_explore(train_data)
# Test set (train=0: no 'target' column to plot)
data_explore(test_data, 0)
# ## Plotting the data distribution
# Top 10 most frequent locations in the training set
sns.barplot(y=train_data['location'].value_counts()[:10].index,
            x=train_data['location'].value_counts()[:10],
            orient='h').set_title('Location Distribution for train')
# Top 10 most frequent locations in the test set
sns.barplot(y=test_data['location'].value_counts()[:10].index,
            x=test_data['location'].value_counts()[:10],
            orient='h').set_title('Location Distribution for test')
# ## Preprocessing the text data for One Hot Encoding
# Preprocessing text into input format
def preprocess(text):
    """Clean one raw tweet for one-hot / embedding input.

    Steps: strip punctuation, tokenize, lowercase, drop English
    stopwords, lemmatize, then remove URLs, HTML tags, emoji and digits.

    Parameters
    ----------
    text : str
        Raw tweet text.

    Returns
    -------
    str
        Space-joined cleaned tokens.
    """
    # Removing punctuation from the text.
    no_punct = ''.join(ch for ch in text if ch not in string.punctuation)
    # Tokenizing the text.
    tokens = nltk.word_tokenize(no_punct)
    # Bug fix: lowercase BEFORE the stopword filter. The NLTK stopword
    # list is all lowercase, so filtering first kept capitalized
    # stopwords such as 'The' in the output.
    tokens = [word.lower() for word in tokens]
    # Removing the stopwords (set lookup is O(1) per token).
    stop_words = set(nltk.corpus.stopwords.words('english'))
    stop_tokens = [word for word in tokens if word not in stop_words]
    # Applying lemmatization to the tokens.
    lemmatizer = WordNetLemmatizer()
    lemmatized_output = ' '.join(lemmatizer.lemmatize(w) for w in stop_tokens)
    # Removing URLs (only at line starts, as in the original) and HTML tags.
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', lemmatized_output, flags=re.MULTILINE)
    text = re.sub('<.*?>+', '', text)
    # Removing emoji from the common Unicode emoji blocks.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags
                               "]+", flags=re.UNICODE)
    text = emoji_pattern.sub(r'', text)
    # Removing digits.
    text = ''.join(ch for ch in text if not ch.isdigit())
    return text
train_data['preprocess_text'] = train_data['text'].apply(lambda x: preprocess(x))
test_data['preprocess_text'] = test_data['text'].apply(lambda x: preprocess(x))
# Preprocessed training data
train_data.head()
# Preprocessed test data
test_data.head()
# Converting the each input into one hot representation
representation = [one_hot(sentence, 100000) for sentence in train_data['preprocess_text']]
len(representation)
# Padding the data and converting all the inputs into same length
embedded_matrix = pad_sequences(representation, padding='pre', maxlen=15)
embedded_matrix
# Train validation split of the training data for validation
X_train, X_test, y_train, y_test = train_test_split(embedded_matrix, train_data['target'], test_size=0.2, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# ## Using Bi-Directional LSTM model for prediction
# +
# Sequential model with embedding layer and BiLSTM Layer for one hot embedding
lstm_model_1 = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim=100000,
output_dim=64,
mask_zero=True),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.3, recurrent_dropout=0.2)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
lstm_model_1.compile(loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
lstm_model_1.summary()
# -
history = lstm_model_1.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=10)
# Performance metrics for LSTM model
y_pred = lstm_model_1.predict(X_test)
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
report_lstm_1 = classification_report(y_test, y_pred, target_names=['Class 0', 'Class 1'])
print(report_lstm_1)
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Negative Class', 'Positive Class'])
disp.plot(values_format='d')
# ## Preprocessing the data using Fasttext
# Preprocessing the text for fasttext
train_data['preprocess_text'] = train_data['text'].apply(lambda x: nltk.word_tokenize(preprocess(x)))
train_data.head()
# ## Training the Fasttext model
# +
# Training the Fasttext Model using the dataset
embedding_size = 120
window_size = 50
min_word = 5
down_sampling = 1e-2
fast_model = FastText(sentences=train_data['preprocess_text'],
vector_size=embedding_size,
window=window_size,
min_count=min_word,
sample=down_sampling,
sg=1,
epochs=300)
fast_model.save('fast_text.model')
# -
# Fasttext vector representation for the word
print(fast_model.wv['ablaze'], fast_model.wv['ablaze'].shape)
# Loading the Fasttext model
ld_fs_model = FastText.load('fast_text.model')
print('Fasttext Model is loaded')
# finding the similar words using fasttext
ld_fs_model.wv.most_similar('disaster', topn=10)
# Plotting the fasttext model
def tsne_plot(for_word, w2v_model):
    """Plot a 2-D t-SNE projection of `for_word` and its 10 nearest neighbours.

    The query word is drawn in red, its `most_similar` neighbours in
    green.  Vectors are first reduced to 10 PCA components, then
    embedded in 2-D with t-SNE and annotated on a seaborn scatter plot.

    Parameters
    ----------
    for_word : str
        Word to visualise; must be representable by the model.
    w2v_model : gensim word2vec-style model (e.g. FastText)
        Trained model exposing keyed vectors via `.wv`.
    """
    # Dimensionality of the trained fastText vectors.
    dim_size = w2v_model.wv.vectors.shape[1]
    arrays = np.empty((0, dim_size), dtype='f')
    word_labels = [for_word]
    color_list = ['red']
    # adds the vector of the query word
    arrays = np.append(arrays, w2v_model.wv.__getitem__([for_word]), axis=0)
    # gets list of most similar words
    sim_words = w2v_model.wv.most_similar(for_word, topn=10)
    # adds the vector for each of the closest words to the array
    for wrd_score in sim_words:
        wrd_vector = w2v_model.wv.__getitem__([wrd_score[0]])
        word_labels.append(wrd_score[0])
        color_list.append('green')
        arrays = np.append(arrays, wrd_vector, axis=0)
    # fit PCA (10 components over the 11 stacked vectors) before t-SNE
    model_pca = PCA(n_components = 10).fit_transform(arrays)
    # Finds 2d coordinates with t-SNE; perplexity must stay below the
    # number of samples (11 here), hence 15 is borderline — NOTE(review):
    # newer scikit-learn versions reject perplexity >= n_samples.
    np.set_printoptions(suppress=True)
    Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(model_pca)
    # Sets everything up to plot
    df_plot = pd.DataFrame({'x': [x for x in Y[:, 0]],
                            'y': [y for y in Y[:, 1]],
                            'words_name': word_labels,
                            'words_color': color_list})
    # plot dots with color and position
    plot_dot = sns.regplot(data=df_plot,
                           x="x",
                           y="y",
                           fit_reg=False,
                           marker="o",
                           scatter_kws={'s': 40,
                                        'facecolors': df_plot['words_color']
                                        }
                           )
    # Adds annotations with color one by one with a loop
    for line in range(0, df_plot.shape[0]):
        plot_dot.text(df_plot["x"][line],
                      df_plot['y'][line],
                      ' ' + df_plot["words_name"][line].title(),
                      horizontalalignment='left',
                      verticalalignment='bottom', size='medium',
                      color=df_plot['words_color'][line],
                      weight='normal'
                      ).set_size(15)
    # Pad the axis limits so annotations near the edges are not clipped.
    plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
    plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
    plt.title('t-SNE visualization for word "{}'.format(for_word.title()) +'"')
tsne_plot('disaster', ld_fs_model)
# Embedding the word using the fasttext index for each word
def embedding_matrix(sentence):
    """Map a tokenized sentence to fastText vocabulary indices.

    Tokens missing from the fastText vocabulary map to index 0.
    Uses the module-level loaded model `ld_fs_model`.

    Parameters
    ----------
    sentence : list of str
        Tokens of one preprocessed tweet.

    Returns
    -------
    numpy.ndarray
        1-D float array of vocabulary indices, one entry per token.
    """
    # Renamed local (was `embedding_matrix`) so it no longer shadows the
    # function's own name.
    indices = np.zeros(len(sentence))
    for i, word in enumerate(sentence):
        try:
            indices[i] = ld_fs_model.wv.key_to_index[word]
        except KeyError:
            # Only a missing vocabulary entry should fall back to 0;
            # the original bare `except:` also hid unrelated errors.
            indices[i] = 0
    return indices
X = train_data['preprocess_text'].apply(lambda x: embedding_matrix(x))
X.shape
# Padding the input sequence to get same length for all the inputs
embedded_matrix_X = pad_sequences(X, padding='pre', maxlen=50, dtype='float64')
embedded_matrix_X
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(embedded_matrix_X, train_data['target'], test_size=0.2, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# +
# Sequential model with embedding layer and BiLSTM Layer for fasttext embedding
lstm_model_2 = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim=100000,
output_dim=64,
mask_zero=True),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout=0.3, recurrent_dropout=0.2)),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
lstm_model_2.compile(loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(5e-5),
metrics=['accuracy'])
lstm_model_2.summary()
# -
history = lstm_model_2.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=7, batch_size=60)
# Performance metrics for LSTM model
y_pred = lstm_model_2.predict(X_test)
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
report_lstm_2 = classification_report(y_test, y_pred, target_names=['Class 0', 'Class 1'])
print(report_lstm_2)
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Negative Class', 'Positive Class'])
disp.plot(values_format='d')
# ## Using BERT Pre-Trained model for preprocessing
# Downloading the BERT Model for tokenization
BertTokenizer = bert.bert_tokenization.FullTokenizer
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4",
trainable=False)
vocabulary_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = BertTokenizer(vocabulary_file, to_lower_case)
# Loading the Dataset
train_data['preprocess_text'] = train_data['text'].apply(lambda x: preprocess(x))
# Converting the tokens into token ids
def tokenize_reviews(text_reviews):
return tokenizer.convert_tokens_to_ids(tokenizer.wordpiece_tokenizer.tokenize(text_reviews))
tokens = tokenizer.convert_tokens_to_ids('disaster')
tokens
X = train_data['preprocess_text'].apply(lambda x: tokenize_reviews(x))
len(X)
# Padding the input sequence to get same length for all the inputs
embedded_matrix_bert = pad_sequences(X, padding='pre', maxlen=35, dtype='float64')
embedded_matrix_bert, len(tokenizer.vocab)
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(embedded_matrix_bert, train_data['target'], test_size=0.2, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# +
# Sequential model with embedding layer, Conv1D layer and Bi-LSTM layer for BERT embedding
cnn_model = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim=30522,
output_dim=128,
mask_zero=True),
tf.keras.layers.Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.MaxPooling1D(pool_size=2),
tf.keras.layers.Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.MaxPooling1D(pool_size=2),
tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.MaxPooling1D(pool_size=2),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, dropout=0.3, recurrent_dropout=0.2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
cnn_model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(5e-5),
metrics=['accuracy'])
cnn_model.summary()
# -
history = cnn_model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=4, batch_size=10)
# Performance metrics for Conv1D and LSTM model
y_pred = cnn_model.predict(X_test)
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
cnn_report = classification_report(y_test, y_pred, target_names=['Class 0', 'Class 1'])
print(cnn_report)
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Negative Class', 'Positive Class'])
disp.plot(values_format='d')
# ## BERT Model for Training
train_data = pd.read_csv('train.csv')
train_data.head()
# ### Preprocessing text for BERT
# Preprocessing text into input format
def preprocess(text):
    """Build a single BERT input string from one row of the tweets DataFrame.

    NOTE(review): despite its name, `text` here is a whole DataFrame row
    (this overload is applied with ``axis=1``).  Columns are addressed
    positionally: text[1] is presumably 'keyword', text[2] 'location'
    and text[3] the tweet 'text' — confirm against the CSV column order,
    since positional access breaks if columns are reordered.

    Returns
    -------
    str
        Tweet tokens plus keyword/location, with URLs, HTML, emoji and
        digits removed.
    """
    # Removing Punctuation from the tweet text column (positional index 3)
    punct = [words for words in text[3] if words not in string.punctuation ]
    words = ''.join(punct)
    # Tokenizing the text
    tokens = nltk.word_tokenize(words)
    # Append keyword and location so BERT sees them as extra context;
    # str() guards against NaN values in those columns.
    tokens.append(str(text[1]))
    tokens.append(str(text[2]))
    # Removing emojis, emails, numbers
    lemmatized_output = ' '.join(tokens)
    # URL regex only matches at line starts (MULTILINE + ^ anchor).
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', lemmatized_output, flags=re.MULTILINE)
    text = re.sub('<.*?>+', '', text)
    # Emoji ranges: emoticons, pictographs, transport symbols, flags.
    regrex_pattern = re.compile(pattern = "["
                               u"\U0001F600-\U0001F64F"
                               u"\U0001F300-\U0001F5FF"
                               u"\U0001F680-\U0001F6FF"
                               u"\U0001F1E0-\U0001F1FF"
                               "]+", flags = re.UNICODE)
    text = regrex_pattern.sub(r'',text)
    # Strip all digits.
    text = ''.join([i for i in text if not i.isdigit()])
    return text
train_data['preprocess_text'] = train_data.apply(lambda x: preprocess(x), axis=1)
train_data.head()
# Train Test split
X_train, X_test, y_train, y_test = train_test_split(train_data['preprocess_text'], train_data['target'], test_size=0.2, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# +
import tensorflow_text
max_seq_length = 128
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string) # Creating a input layer
preprocessor = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3") # Downloading BERT model for embedding
encoder_inputs = preprocessor(text_input)
encoder = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4",
trainable=True) # Downloading the BERT model for training
outputs = encoder(encoder_inputs)
pooled_output = outputs["pooled_output"] # [batch_size, 768].
sequence_output = outputs["sequence_output"] # [batch_size, seq_length, 768].
# -
# Adding Dense Layer and output layer for classification
dense_1 = tf.keras.layers.Dense(64, activation='relu')(pooled_output)
output = tf.keras.layers.Dense(1, activation='sigmoid', name='Output')(dense_1)
# Compiling the BERT Model
bert_model = tf.keras.Model(text_input, output)
bert_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.BinaryAccuracy()])
bert_model.summary()
# Training the BERT model
epochs = 2
history = bert_model.fit(X_train, y_train,
validation_data=(X_test, y_test),
epochs=epochs,
verbose=1)
# +
import os
# Saving and loading the model
bert_model.save('./bert')
bert_model = tf.keras.models.load_model('./bert')
bert_model.summary()
# -
# Performance metrics for BERT model
y_pred = bert_model.predict(X_test)
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
bert_report = classification_report(y_test, y_pred, target_names=['Class 0', 'Class 1'])
print(bert_report)
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Negative Class', 'Positive Class'])
disp.plot(values_format='d')
|
DisasterTweets.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
a <- c(1,2,3,4)
typeof(a)
a <- c(1,2,3,4,"hello")
typeof(a)
#slicing
A <- seq(1,15,by=2)
A
A[1:5]
A[c(2,5,8)] # Notice what it does — c() here combines the three indices
A[c(2,5,8)]=NA # See what happens
A
mat_y <- matrix(1:20, nrow=5,ncol=4)
mat_A <- matrix(seq(1:20),nrow=4,ncol=5,byrow=T,dimnames=list(c("row1","row2","row3","row4"),c("col1","col2","col3","col4",
"col5")))
# byrow=T...in next command i'll check whats default..if not specified
mat_A
mat_A <- matrix(seq(1:20),nrow=4,ncol=5,dimnames=list(c("row1","row2","row3","row4"),c("col1","col2","col3","col4",
"col5")))
mat_A
# so by default it's column wise
mat_A[,4]; mat_A[1:2,3:4]
mat_A[1:3,]
# Now see what happens using 'rep'
rep(mat_A,3)
d <- c(1,2,3,4)
e <- c("red", "white", "red", NA)
f <- c(TRUE,TRUE,TRUE,FALSE)
mydata <- data.frame(d,e,f)
names(mydata) <- c("ID","Color","Passed") # variable names
mydata
data() # opens document of data sets
# Creating data frame
dd <- data.frame(mat_A)
class(mat_A)
typeof(dd)
data <- airquality
str(data)
d.frame1 <- data.frame(CustomerId = c(1:6), Product = c(rep("TV", 3),
rep("Radio", 3)))
d.frame2 <- data.frame(CustomerId = c(2, 4, 6), State = c(rep("Goa", 2),
rep("Delhi", 1)))
# Inner join
merge(d.frame1,d.frame2)
#Outer join
merge(x = d.frame1, y = d.frame2, by = "CustomerId", all = TRUE)
# Left outer
merge(x = d.frame1, y = d.frame2, by = "CustomerId", all.x = TRUE)
# Right outer
merge(x = d.frame1, y = d.frame2, by = "CustomerId", all.y =TRUE)
# Cross join
merge(x = d.frame1, y = d.frame2, by = NULL)
# Conditionals:
x <- 1
y <- 1
(x ==1) & (y == 1)
s <- 1:6
(s > 2 ) & (s < 5)
(s > 2) && (s <5)
# this is nice...one & vs double &
x <- 1
y <- 1
(x ==1) | (y == 1)
s <- 1:6
(s > 2 ) | (s < 5)
(s > 2) || (s <5)
x <- 5
# ex.1 of-else
if (x < 5) {print("x is less than 5")} else {"x is greater or equal to 5"}
# ex.2 of if-else
a = c(5,7,2,9)
ifelse(a %% 2 == 0,"even","odd")
# ex.3 of if-else
data <- data.frame(a=c(0,0,2,3),b=c(0,5,0,8))
transform(data,mulm=ifelse(a> 0 & b>0,log(a*b), NA))
# +
# apply:Returns a vector or array or list of values obtained by applying a function to margins of an array or matrix.
# Syntax : apply(x,margin,function)
Data <- mtcars
Col.mean <- apply(Data,2,mean)
Row.mean <- apply(Data,1,mean)
# -
check_length <- apply(Data, 2, function(x)length(x[x>20])) # counts the values greater than 20 in each column
check_length
# SAPPLY: For vector and list modification / transformation. It applies function over list and vector and returns vector
Data <-mtcars
sapply(1:3, function(x) x^2)
sapply(Data,mean)
# Bug fix: c(a=1:10, b=11:20) flattens everything into ONE named numeric
# vector (elements a1..a10, b1..b10), so sapply(l, mean) returned each
# element unchanged instead of the per-group means. A list keeps `a`
# and `b` as separate vectors, giving means 5.5 and 15.5.
l = list(a = 1:10, b = 11:20) # mean of values using sapply
sapply(l, mean)
lapply(l, mean)
data("mtcars")
mydata <- mtcars
# Bug fix: the notebook never loads dplyr, so an unqualified filter()
# resolves to stats::filter() (a time-series filter) and errors on a
# data frame. subset() is the base-R equivalent of the intended call.
subset(mydata, cyl > 4 & gear > 4)
# +
#Simple pie chart
slices <- c(25, 24, 22, 16,13 )
lbls <- c("wheat", "rice", "chickpea", "soyabean", "pea")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # add % to labels
pie(slices,labels = lbls, col=rainbow(length(lbls)),main="Crops production – 2016")
# +
#3D Pie chart
library(plotrix)
pie3D(slices,labels=lbls,explode=0.1,main="Crop production – 2016")
# -
# Bar plot of flower counts per species.
# Bug fix: `iris_data` is never defined anywhere in this notebook; the
# built-in `iris` data set is what is being plotted.
barplot(table(iris$Species))
#Stacked bar plots
counts <- table(mtcars$vs, mtcars$gear)
barplot(counts, main="Car Distribution by Gears and VS", xlab="Number of Gears",
col=c("darkblue","red"), legend = rownames(counts))
#Grouped bar plots
counts <- table(mtcars$vs, mtcars$gear)
barplot(counts, main="Car Distribution by Gears and VS",
xlab="Number of Gears", col=c("darkblue","red"),
legend = rownames(counts),beside=T)
#boxplot
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data", xlab="Number of Cylinders",ylab="Miles Per Gallon")
# Notched box plot (notch=TRUE) of tooth length by supplement*dose.
# Typo fix in the user-facing axis label: "Suppliment" -> "Supplement".
boxplot(len~supp*dose, data=ToothGrowth, notch=TRUE, col=(c("gold","darkgreen")),
        main="Tooth Growth", xlab="Supplement and Dose")
# Same plot without notches for comparison (notch=FALSE).
boxplot(len~supp*dose, data=ToothGrowth, notch=FALSE, col=(c("gold","darkgreen")),
        main="Tooth Growth", xlab="Supplement and Dose")
#Outlier Detection
outlier_values <- boxplot.stats(mtcars$wt)$out
boxplot(mtcars$wt)
mtext(paste("Outliers: ", paste(outlier_values, collapse=", ")), cex=0.6)
# Histogram
# Frequency distribution of quantitative variable
Sepal.L <- iris$Sepal.Length
hist(Sepal.L)
table(Sepal.L)
hist(Sepal.L,breaks=c(4:8))
# Line charts
x <- c(1:5)
y <- x * 2 # create some data
par(pch=23, col="green4") # plotting symbol and color
par(mfrow=c(2,4)) # all plots on one page
opts = c("p","l","o","b","c","s","S","h")
for(i in 1:length(opts)){
heading = paste("type=",opts[i])
plot(x, y, type="n", main=heading)
lines(x, y, type=opts[i])
}
# Scatter plot of weight vs mileage with a fitted regression line.
# Bug fix: `wt` and `mpg` are not in scope because mtcars is never
# attach()ed at this point — reference the columns explicitly.
plot(mtcars$wt, mtcars$mpg, main="Scatterplot ", xlab="Car Weight ", ylab="Miles Per Gallon ", pch=22)
abline(lm(mpg~wt, data=mtcars), col="red") # regression line (y~x)
# Scatterplot 3D
library(scatterplot3d)
attach(iris)
scatterplot3d(Sepal.Length,Sepal.Width,Petal.Length, main="3D Scatterplot")
library(car)
# Bug fix: `scatterplot.matrix` was removed from the car package; the
# current name of the function is `scatterplotMatrix`.
scatterplotMatrix(~mpg+disp+drat+wt|cyl, data=mtcars,
                  main="Three Cylinder Options")
pairs(~mpg+disp+drat+wt,data=mtcars, main="Scatterplot Matrix")
abline(lm(mpg~wt), col="red") # regression line (y~x)
# 2D scatter plot
plot(wt, mpg, main="Scatterplot ", xlab="Car Weight ", ylab="Miles Per Gallon ",
pch=22)
|
DataVisualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Segmentierung mit Stardist
# Dieses Notebook ist dem 2D-Beispiel https://github.com/mpicbg-csbd/stardist/tree/master/examples/2D der GitHub Implementierung entnommen.
#
# Nach dem Training können wir nun das Model auf das Test Set anwenden und evaluieren.
# +
from __future__ import print_function, unicode_literals, absolute_import, division
import sys
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from glob import glob
from tifffile import imread
from csbdeep.utils import Path, normalize
from csbdeep.io import save_tiff_imagej_compatible
from stardist import random_label_cmap, _draw_polygons, export_imagej_rois
from stardist.models import StarDist2D
#np.random.seed(6)
lbl_cmap = random_label_cmap()
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=3
# -
# # Data
#
# We assume that data has already been downloaded in via notebook [1_data.ipynb](1_data.ipynb).
# We now load images from the sub-folder `test` that have not been used during training.
# +
X = sorted(glob('/extdata/readonly/f-prak-v15/e-coli-swarming/test/input/*.tif'))
X = list(map(imread,X))
n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
axis_norm = (0,1) # normalize channels independently
# axis_norm = (0,1,2) # normalize channels jointly
if n_channel > 1:
print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
# -
fig, ax = plt.subplots(1, 2, figsize=(9.5, 5))
for i,(a,x) in enumerate(zip(ax.flat, X)):
a.imshow(x if x.ndim==2 else x[...,0], cmap='gray')
a.set_title(i)
[a.axis('off') for a in ax.flat]
plt.tight_layout()
None;
# # Load trained model
#
# If you trained your own StarDist model (and optimized its thresholds) via notebook [2_training.ipynb](2_training.ipynb), then please set `demo_model = False` below.
model = StarDist2D(None, name='mystardist-1', basedir='models')
#model.load_weights("weights_last.h5")
None;
# ## Prediction
#
# Make sure to normalize the input image beforehand or supply a `normalizer` to the prediction function.
#
# Calling `model.predict_instances` will
# - predict object probabilities and star-convex polygon distances (see `model.predict` if you want those)
# - perform non-maximum suppression (with overlap threshold `nms_thresh`) for polygons above object probability threshold `prob_thresh`.
# - render all remaining polygon instances in a label image
# - return the label instances image and also the details (coordinates, etc.) of all remaining polygons
img = normalize(X[0], 1, 99.8, axis=axis_norm)
labels, details = model.predict_instances(img)
plt.figure(figsize=(8,8))
plt.imshow(img if img.ndim==2 else img[...,0], clim=(0,1), cmap='gray')
plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
plt.axis('off');
# +
# save_tiff_imagej_compatible('example_image.tif', img, axes='YX')
# save_tiff_imagej_compatible('example_labels.tif', labels, axes='YX')
# export_imagej_rois('example_rois.zip', details['coord'])
# -
# # Example results
def example(model, i, show_dist=True):
    """Show StarDist predictions for test image `i` side by side.

    Left panel: input image with the predicted star-convex polygons
    overlaid; right panel: input image with the predicted label mask.

    Parameters
    ----------
    model : StarDist2D
        Trained model used for `predict_instances`.
    i : int
        Index into the module-level list `X` of test images.
    show_dist : bool
        Whether `_draw_polygons` also draws the radial distances.
    """
    # Percentile-normalize (1%..99.8%) the same way as during training.
    img = normalize(X[i], 1, 99.8, axis=axis_norm)
    labels, details = model.predict_instances(img)
    plt.figure(figsize=(13,10))
    # Multi-channel images are shown via their first channel only.
    img_show = img if img.ndim==2 else img[...,0]
    coord, points, prob = details['coord'], details['points'], details['prob']
    plt.subplot(121); plt.imshow(img_show, cmap='gray'); plt.axis('off')
    # Remember axis limits so the polygon overlay does not rescale them.
    a = plt.axis()
    _draw_polygons(coord, points, prob, show_dist=show_dist)
    plt.axis(a)
    plt.subplot(122); plt.imshow(img_show, cmap='gray'); plt.axis('off')
    plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)
    plt.tight_layout()
    plt.show()
example(model, 0)
example(model, 1)
example(model, 1, False)
|
04_stardist/3_prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="lbFmQdsZs5eW"
# ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated
# ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position.
# ATTENTION: Please use the provided epoch values when training.
# Import all the necessary files!
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from os import getcwd
# + colab={} colab_type="code" id="1xJZ5glPPCRz"
path_inception = f"{getcwd()}/../tmp2/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
# Import the inception model
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an instance of the inception model from the local pre-trained weights
local_weights_file = path_inception
pre_trained_model = InceptionV3(
input_shape = (150, 150, 3),
include_top = False,
weights = None
)
pre_trained_model.load_weights(local_weights_file)
# Make all the layers in the pre-trained model non-trainable
for layer in pre_trained_model.layers:
layer.trainable = False
# Print the model summary
pre_trained_model.summary()
# Expected Output is extremely large, but should end with:
#batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0]
#__________________________________________________________________________________________________
#activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0]
#__________________________________________________________________________________________________
#mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_275[0][0]
# activation_276[0][0]
#__________________________________________________________________________________________________
#concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0]
# activation_280[0][0]
#__________________________________________________________________________________________________
#activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0]
#__________________________________________________________________________________________________
#mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0]
# mixed9_1[0][0]
# concatenate_5[0][0]
# activation_281[0][0]
#==================================================================================================
#Total params: 21,802,784
#Trainable params: 0
#Non-trainable params: 21,802,784
# + colab={} colab_type="code" id="CFsUlwdfs_wg"
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
# Expected Output:
# ('last layer output shape: ', (None, 7, 7, 768))
# + colab={} colab_type="code" id="-bsWZWp5oMq9"
# Define a Callback class that stops training once accuracy reaches 97.0%
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that stops training once accuracy exceeds 97%."""

    def on_epoch_end(self, epoch, logs=None):
        """Check the epoch's training accuracy and stop if above 0.97.

        Parameters
        ----------
        epoch : int
            Index of the finished epoch (supplied by Keras).
        logs : dict or None
            Metric results for the epoch; may be None or missing the
            'accuracy' key, so guard before comparing — the original
            `logs.get('accuracy') > 0.97` raised TypeError on None.
            (Also avoids the mutable `logs={}` default argument.)
        """
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.97:
            print("\nReached 97.0% accuracy so cancelling training!")
            self.model.stop_training = True
# + colab={} colab_type="code" id="BMXb913pbvFg"
from tensorflow.keras.optimizers import RMSprop

# Build the classification head on top of the frozen Inception features.
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for binary (horse vs human) classification
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(pre_trained_model.input, x)

# `lr` is deprecated in tf.keras optimizers; the argument is
# `learning_rate` (same value, no behavior change).
model.compile(optimizer = RMSprop(learning_rate=0.0001),
              loss = 'binary_crossentropy',
              metrics = ['accuracy'])

model.summary()
# Expected output will be large. Last few lines should be:
# mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_248[0][0]
# activation_251[0][0]
# activation_256[0][0]
# activation_257[0][0]
# __________________________________________________________________________________________________
# flatten_4 (Flatten) (None, 37632) 0 mixed7[0][0]
# __________________________________________________________________________________________________
# dense_8 (Dense) (None, 1024) 38536192 flatten_4[0][0]
# __________________________________________________________________________________________________
# dropout_4 (Dropout) (None, 1024) 0 dense_8[0][0]
# __________________________________________________________________________________________________
# dense_9 (Dense) (None, 1) 1025 dropout_4[0][0]
# ==================================================================================================
# Total params: 47,512,481
# Trainable params: 38,537,217
# Non-trainable params: 8,975,264
# + colab={} colab_type="code" id="HrnL_IQ8knWA"
# Get the Horse or Human dataset
path_horse_or_human = f"{getcwd()}/../tmp2/horse-or-human.zip"
# Get the Horse or Human Validation dataset
path_validation_horse_or_human = f"{getcwd()}/../tmp2/validation-horse-or-human.zip"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
import shutil
shutil.rmtree('/tmp')
local_zip = path_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/training')
zip_ref.close()
local_zip = path_validation_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation')
zip_ref.close()
# + colab={} colab_type="code" id="y9okX7_ovskI"
# Define our example directories and files
train_dir = '/tmp/training'
validation_dir = '/tmp/validation'
train_horses_dir = os.path.join(train_dir, 'horses')
train_humans_dir = os.path.join(train_dir, 'humans')
validation_horses_dir = os.path.join(validation_dir, 'horses')
validation_humans_dir = os.path.join(validation_dir, 'humans')
train_horses_fnames = os.listdir(train_horses_dir)
train_humans_fnames =os.listdir(train_humans_dir)
validation_horses_fnames = os.listdir(validation_horses_dir)
validation_humans_fnames = os.listdir(validation_humans_dir)
print(len(train_horses_fnames))
print(len(train_humans_fnames))
print(len(validation_horses_fnames))
print(len(validation_humans_fnames))
# Expected Output:
# 500
# 527
# 128
# 128
# + colab={} colab_type="code" id="O4s8HckqGlnb"
# Add our data-augmentation parameters to ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255.,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator( rescale = 1.0/255. )
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size = 10,
class_mode = 'binary',
target_size = (150, 150))
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150))
# Expected Output:
# Found 1027 images belonging to 2 classes.
# Found 256 images belonging to 2 classes.
# + colab={} colab_type="code" id="Blhq2MAUeyGA"
# Run this and see how many epochs it should take before the callback
# fires, and stops training at 97% accuracy
callbacks = myCallback()
history = model.fit_generator(
train_generator,
validation_data = validation_generator,
steps_per_epoch = 100,
epochs = 3,
validation_steps = 50,
verbose = 2,
callbacks=[callbacks])
# + colab={} colab_type="code" id="C2Fp6Se9rKuL"
# %matplotlib inline
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# -
# # Submission Instructions
# +
# Now click the 'Submit Assignment' button above.
# -
# # When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
# + language="javascript"
# <!-- Save the notebook -->
# IPython.notebook.save_checkpoint();
# + language="javascript"
# IPython.notebook.session.delete();
# window.onbeforeunload = null
# setTimeout(function() { window.close(); }, 1000);
|
Exercise_3_Horses_vs_humans_using_Transfer_Learning_Question-FINAL.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jalammar/jalammar.github.io/blob/master/notebooks/cv/01_image_basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TsRc6kUMhjXo" colab_type="code" colab={}
from matplotlib import pyplot as plt
import numpy as np
# + [markdown] id="K8ozdrg8jin8" colab_type="text"
# ## Black Image
# + id="7oVfS0_ChjXt" colab_type="code" colab={}
# A 10x10 array of zeros: every pixel at intensity 0 (black).
black = np.zeros([10,10])
# + id="p8ZQ4V_riT1P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="66b53d3b-1891-437d-a3f3-1b0eeed30605"
black
# + id="uLOXbKAhhjXy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="ca3af530-8cdd-412d-f958-a230db928d43"
# vmin/vmax pin the gray colormap to the full 0-255 intensity range so the
# zeros render as true black instead of being autoscaled.
plt.imshow(np.zeros([10,10]), cmap="gray", vmin=0, vmax=255)
# + [markdown] id="y54jrmPFjggl" colab_type="text"
# ## White Image
# + id="FcUA6p4khjYA" colab_type="code" colab={}
# np.full fills the whole array with one value; 255 is maximum intensity (white).
white = np.full((10,10), 255)
# + id="tUYJvBLkhjYC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="1a86d807-b5ed-4190-dfd5-5af2171dee59"
white
# + id="LP3nOhWNhjYF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5c8dff4c-0536-48e7-b335-aadcd1484949"
white.shape
# + id="6tIyfxX0hjYJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="858fcd54-57e6-4344-c657-0a587827a885"
plt.imshow(white, cmap="gray", vmin=0, vmax=255)
# + [markdown] id="vMxctTkMjdcB" colab_type="text"
# ## Gray Image
# + id="x0UkCvRwhjYM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="2dec6fd6-bfda-49f9-b0af-f62a46023f27"
# 170 sits between 0 (black) and 255 (white), giving a light gray.
gray = np.full((10,10), 170)
gray
# + id="TmdodhwDhjYP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="265d66a1-5abc-44a7-c13d-bf55bfcb6859"
plt.imshow(gray, cmap="gray", vmin=0, vmax=255)
# + [markdown] id="EwkNJOvHhjYR" colab_type="text"
# ## Addressing Pixels
# + id="zD6sfCFVhjYS" colab_type="code" colab={}
# Indexing is [row, column]: darken the top-left pixel.
gray[0,0] = 0
# + id="kNuUKyCWhjYV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="4f77bfda-0b27-48ea-e37d-c4682abcaed8"
gray
# + id="hn8_yl4whjYX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="fb8cb1d7-4068-4848-c00f-674526294c5f"
plt.imshow(gray, cmap="gray", vmin=0, vmax=255)
# + [markdown] id="Ijnc-UZxhjYb" colab_type="text"
# ## Addressing Ranges
# + id="PN_ZQp9yhjYc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="b90be76e-2d02-46a1-d43e-5500d090439d"
gray
# + id="ed0vz0ZXhjYf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="c1f9551f-6c8d-47e7-d701-d8f03afc1f63"
# Slice assignment blackens rows 0-7 of columns 0-1 (end index is exclusive).
gray[0:8,0:2] = 0
gray
# + id="OrQux2J0hjYh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="77f9af00-94cf-40ef-bdd5-a693054632f6"
plt.imshow(gray, cmap="gray", vmin=0, vmax=255)
# + [markdown] id="HngU9xQFhjYk" colab_type="text"
# ## Colors
# + id="FCYCBWR2hjYl" colab_type="code" colab={}
# A third axis holds the red/green/blue channels; all zeros is black again.
rgb = np.zeros((10,10,3))
# + id="7Fx2HSvZhjYn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="a94b2b30-38d1-46d7-f864-f24ebac8b710"
# NOTE(review): for RGB (3-channel) input imshow ignores vmin/vmax and expects
# floats in [0, 1]; out-of-range values are clipped -- confirm this is the
# intended display behavior.
plt.imshow(rgb, vmin=0, vmax=255)
# + id="C_oy8KD7hjYt" colab_type="code" colab={}
# Channel index 2 is blue: max out blue everywhere.
rgb[:,:,2] = 255
# + id="IuKkpr0fhjYw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="243c7e6a-07ea-46b5-d9f5-453fd4238157"
plt.imshow(rgb, vmin=0, vmax=255)
# + id="J1MUANGthjY1" colab_type="code" colab={}
# Add red (channel 0) to the top-left pixel only.
rgb[0,0,0] = 170
# + id="dzu2qf17hjY8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="73232b57-a194-4994-a006-5e4394b14c58"
plt.imshow(rgb, vmin=0, vmax=255)
# + id="7sDQ4j46hjY_" colab_type="code" colab={}
|
notebooks/cv/01_image_basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Zz99CIhTWFdc"
# # Code Signal Exploring the Waters
# + [markdown] id="b8bG0ym-WFM3"
# ## Alternating Sums (14)
# Several people are standing in a row and need to be divided into two teams. The first person goes into team 1, the second goes into team 2, the third goes into team 1 again, the fourth into team 2, and so on.
#
# You are given an array of positive integers - the weights of the people. Return an array of two integers, where the first element is the total weight of team 1, and the second element is the total weight of team 2 after the division is complete.
#
# ### Example
#
# For a = [50, 60, 60, 45, 70], the output should be
# solution(a) = [180, 105].
# + id="lfxSAewuWCob"
def alternatingSums(a):
    """Split the row of people into two alternating teams and total the weights.

    Person 0, 2, 4, ... goes to team 1 and person 1, 3, 5, ... to team 2.

    Args:
        a: list of positive integers -- the weights, in row order.

    Returns:
        A list [team1_total, team2_total]. The problem statement requires an
        array of two integers; the original implementation returned a tuple.
    """
    # Even indices form team 1, odd indices team 2; extended slicing replaces
    # the original's index bookkeeping and intermediate lists.
    return [sum(a[::2]), sum(a[1::2])]
# + [markdown] id="1kRzkIjaWMig"
# ## Add Border (15)
# Given a rectangular matrix of characters, add a border of asterisks(*) to it.
#
# Example
#
# For
#
#
#
# ```
# picture = ["abc",
# "ded"]
# ```
#
#
#
# the output should be
#
#
#
# ```
# solution(picture) = ["*****",
# "*abc*",
# "*ded*",
# "*****"]
# ```
#
#
#
# + id="ElMmFAT7WM6w"
def addBorder(picture):
    """Frame the rectangular picture with a one-character asterisk border.

    Args:
        picture: list of equal-length strings (the rows).

    Returns:
        New list of strings: a full '*' row, each original row wrapped in
        '*'...'*', and a closing full '*' row.
    """
    edge = "*" * (len(picture[0]) + 2)
    framed_rows = ["*" + row + "*" for row in picture]
    return [edge] + framed_rows + [edge]
# + [markdown] id="HuWXl8tzWORk"
# ## Are Similar? (16)
# Two arrays are called similar if one can be obtained from another by swapping at most one pair of elements in one of the arrays.
#
# Given two arrays a and b, check whether they are similar.
#
# ### Example
#
# For a = [1, 2, 3] and b = [1, 2, 3], the output should be
# solution(a, b) = true.
#
# The arrays are equal, no need to swap any elements.
#
# For a = [1, 2, 3] and b = [2, 1, 3], the output should be
# solution(a, b) = true.
#
# We can obtain b from a by swapping 2 and 1 in b.
#
# For a = [1, 2, 2] and b = [2, 1, 1], the output should be
# solution(a, b) = false.
#
# Any swap of any two elements either in a or in b won't make a and b equal.
# + id="VYkOx-hDWOoi"
def areSimilar(a, b):
    """Return True when b can be obtained from a by at most one swap.

    Walks the arrays in lockstep recording each position where they disagree.
    The arrays are similar when there are no disagreements, or exactly two
    disagreements that mirror each other (so a single swap fixes both).
    """
    mismatches = []
    for i, left in enumerate(a):
        if left not in b:
            # A value of `a` absent from `b`: no swap can reconcile them.
            return False
        if left != b[i]:
            mismatches.append([left, b[i]])
            if len(mismatches) > 2:
                # Three or more disagreements require more than one swap.
                return False
    if not mismatches:
        return True
    return len(mismatches) == 2 and mismatches[0] == mismatches[1][::-1]
# + [markdown] id="tNlWw0SaWPu9"
# ## Array Change (17)
# You are given an array of integers. On each move you are allowed to increase exactly one of its element by one. Find the minimal number of moves required to obtain a strictly increasing sequence from the input.
#
# ### Example
#
# For inputArray = [1, 1, 1], the output should be
# solution(inputArray) = 3.
# + id="7-cKtEKWWQWN"
def arrayChange(inputArray):
    """Return the minimum number of +1 moves that make the array strictly increasing.

    Scans left to right, remembering the (possibly raised) value the previous
    element ended up at; whenever the current element is not strictly larger,
    it is raised just above that value and the increments are accumulated.
    """
    moves = 0
    floor = None  # final value of the previous element, None before the first
    for value in inputArray:
        if floor is not None and value <= floor:
            moves += floor - value + 1
            value = floor + 1
        floor = value
    return moves
# + [markdown] id="Xl-NTXKEWRQr"
# ## Palindrome Rearranging (18)
# Given a string, find out if its characters can be rearranged to form a palindrome.
#
# ### Example
#
# For inputString = "aabb", the output should be
# solution(inputString) = true.
#
# We can rearrange "aabb" to make "abba", which is a palindrome.
# + id="6B_0XnuIWR2m"
def palindromeRearranging(inputString):
    """Return True when the characters of inputString can be rearranged into a palindrome.

    A multiset of characters forms a palindrome exactly when at most one
    distinct character occurs an odd number of times (the potential middle
    character).
    """
    odd_characters = {c for c in inputString if inputString.count(c) % 2 == 1}
    return len(odd_characters) <= 1
|
CodeSignal/Arcade/Intro/04_Exploring_the_Waters.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="title_ID"></a>
# # JWST Pipeline Validation Notebook: MIRI view outlier_detection
#
# <span style="color:red"> **Instruments Affected**</span>: e.g., FGS, MIRI, NIRCam, NIRISS
#
# ### Table of Contents
#
#
# <div style="text-align: left">
#
# <br> [Introduction](#intro)
# <br> [JWST CalWG Algorithm](#algorithm)
# <br> [Defining Terms](#terms)
# <br> [Test Description](#description)
# <br> [Data Description](#data_descr)
# <br> [Imports](#imports)
# <br> [Loading the Data](#data_load)
# <br> [Run the Pipeline](#pipeline)
# <br> [Perform Tests or Visualization](#testing)
# <br> [About This Notebook](#about)
# <br>
#
# </div>
# <a id="intro"></a>
# # Introduction
#
# This notebook allows visual inspection of a set of dithered images that are combined as part of calwebb_image3. The notebook will take a set of eight simulated images, starting with the uncal format files that are the output of MIRISim. These images will have a randomized location for about 50 point sources and no extended sources.
#
# These data files will be processed through calwebb_detector1, calwebb_image2 and calwebb_image3 and the output *i2d data file will be inspected. Past versions of the pipeline had the background subtracted twice, so the background levels were strongly negative, and other versions have had the background pixels wrongly flagged by outlier_detection. This notebook is simply a visual inspection to be sure there is nothing obviously wrong with the output. While this notebook will look at the output of calwebb_image3 as a whole, the main task is to find where outlier_detection is incorrectly flagging pixels.
#
# > Pipeline documentation: https://jwst-pipeline.readthedocs.io/en/latest/jwst/outlier_detection/main.html
#
# > Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/outlier_detection
#
# [Top of Page](#title_ID)
# <a id="algorithm"></a>
# # JWST CalWG Algorithm
#
# The outlier detection algorithm is defined in the confluence page listed here. It is used to reject outlier pixels found when comparing multiple dithered images together. Images are compared in overlapping regions and any pixel that seems to be an outlier (based on iterative sigma clipping) can be flagged and rejected from any combined set or mosaic of images.
#
# > https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Outlier+Detection
#
#
# [Top of Page](#title_ID)
# <a id="terms"></a>
# # Defining Terms
#
# Here are some of the terms that will be used in this notebook.
#
# JWST: James Webb Space Telescope
#
# MIRI: Mid-Infrared Instrument
#
# MIRISim: MIRI data simulator
#
#
# [Top of Page](#title_ID)
# <a id="description"></a>
# # Test Description
#
# This test takes a set of simulated images (at different dithered positions) and processes them through all three stages of the Imager pipeline: calwebb_detector1, calwebb_image2, and calwebb_image3. The tests being run here look at the output of the full image3 pipeline (the combined i2d data), check that the source_catalog output catalog marks the locations of our point sources, and check the number of pixels that are flagged in the outlier_detection step to be sure that the step is not flagging too many pixels.
#
# [Top of Page](#title_ID)
# <a id="data_descr"></a>
# # Data Description
#
# The set of data used in this particular test were created with the MIRI Data Simulator (MIRISim). The simulator created eight imaging mode files, two exposures each at four different dither positions, using the specified filter. There are approximately 50 point sources scattered through the images.
#
#
# [Top of Page](#title_ID)
# <a id="tempdir"></a>
# # Set up Temporary Directory
# The following cell sets up a temporary directory (using python's `tempfile.TemporaryDirectory()`), and changes the script's active directory into that directory (using python's `os.chdir()`). This is so that, when the notebook is run through, it will download files to (and create output files in) the temporary directory rather than in the notebook's directory. This makes cleanup significantly easier (since all output files are deleted when the notebook is shut down), and also means that different notebooks in the same directory won't interfere with each other when run by the automated webpage generation process.
#
# If you want the notebook to generate output in the notebook's directory, simply don't run this cell.
#
# If you have a file (or files) that are kept in the notebook's directory, and that the notebook needs to use while running, you can copy that file into the directory (the code to do so is present below, but commented out).
#
# [Top of Page](#title_ID)
# +
# Create a temporary directory to hold notebook output, and change the working directory to that directory.
from tempfile import TemporaryDirectory
import os
import shutil

# Keep a reference to the TemporaryDirectory object: the directory (and all
# pipeline output written into it) is removed when this object is cleaned up
# or the kernel shuts down.
data_dir = TemporaryDirectory()

# If you have files that are in the notebook's directory, but that the notebook will need to use while
# running, copy them into the temporary directory here.
#
# files = ['name_of_file']
# for file_name in files:
#     shutil.copy(file_name, os.path.join(data_dir.name, file_name))

# All relative paths used below now resolve inside the temporary directory.
os.chdir(data_dir.name)
print(data_dir.name)
# -
# <a id="imports"></a>
# # Imports
# List the package imports and why they are relevant to this notebook.
#
#
# * astropy.io for opening fits files
# * astropy visualization for viewing images
# * jwst pipeline to get the pipeline stages being tested
# * jwst.datamodels for building model for JWST Pipeline
# * ci_watson tools to retrieve data from artifactory
# * matplotlib.pyplot.plt to generate plots
#
#
# [Top of Page](#title_ID)
# +
from astropy.io import fits
from astropy.visualization import LogStretch, PercentileInterval, ManualInterval, LinearStretch
from astropy import table
from astropy.visualization import (MinMaxInterval, SqrtStretch,
ImageNormalize)
from ci_watson.artifactory_helpers import get_bigdata
import glob
from jwst.pipeline import Detector1Pipeline, Image2Pipeline, Image3Pipeline
from jwst import associations
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from jwst.associations import asn_from_list
from jwst.datamodels import RampModel, ImageModel, dqflags
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import os
# -
# <a id="data_load"></a>
# # Loading the Data
#
# ### Data for internal use: Artifactory method
# Artifactory should be used for data that is for internal use only.
#
# The MIRISim data and any needed reference files to use with this simulated data are stored in artifactory.
#
# [Top of Page](#title_ID)
# +
# Download the eight simulated images (four dither sequences x two exposures
# each) from artifactory. The original cell repeated the same get_bigdata()
# call eight times with only the sequence/exposure numbers changing; a loop
# keeps the naming pattern and the download logic in one place. (The
# individual input_fileN variables it defined were only used to build the
# input_files list consumed below.)
input_files = []
for seq in range(1, 5):      # dither sequence 1-4
    for exp in range(1, 3):  # exposure 1-2 at each dither position
        fname = 'starfield_50star4ptdither_seq{}_MIRIMAGE_F1130Wexp{}.fits'.format(seq, exp)
        print('Downloading input file', fname)
        input_files.append(get_bigdata('jwst_validation_notebooks',
                                       'validation_data',
                                       'outlier_detection',
                                       'outlier_detection_miri_test',
                                       fname))

# This readnoise file is needed for use with simulated data which has higher
# readnoise than actual data.
readnoise = get_bigdata('jwst_validation_notebooks',
                        'validation_data',
                        'jump',
                        'jump_miri_test',
                        'jwst_mirisim_readnoise.fits')

print("Finished Downloads")
# -
# <a id="pipeline"></a>
# # Run the Steps or Pipeline
#
# The sections below will run the data through multiple pipeline steps.
#
# [Top of Page](#title_ID)
# +
# Run the calwebb_detector1 pipeline
# set up pipeline parameters
rej_thresh=10.0 # rejection threshold for jump step

print('There are ', len(input_files), ' images.')

# set up pipeline parameters for input
pipe1 = Detector1Pipeline()
pipe1.jump.rejection_threshold = rej_thresh
# Override the readnoise reference file for both jump and ramp_fit: the
# simulator-matched file downloaded above is used because MIRISim data has
# higher readnoise than actual data.
pipe1.jump.override_readnoise = readnoise
pipe1.ramp_fit.override_readnoise = readnoise
pipe1.refpix.skip = True # needs update to simulator for this to work properly with simulated data

slopelist = []

# loop over list of files
for file in input_files:
    # set up output file name
    # NOTE(review): this assumes the path contains exactly one '.'; a dotted
    # directory name or a double extension would raise ValueError here.
    base, remainder = file.split('.')
    outname = base
    pipe1.jump.output_file = outname+'.fits'
    #pipe1.ramp_fit.output_file = outname+'.fits'
    pipe1.output_file = outname+'.fits'
    # Run pipeline on each file; the returned datamodel is collected for
    # calwebb_image2 below.
    rampfile = pipe1.run(file)
    slopelist.append(rampfile)
    # Close the input files
    #file.close()

print('Detector 1 steps completed on all files.')
print(slopelist)
# +
# Run Calwebb_image2 on output files from detector1

print('There are ', len(slopelist), ' images.')

# create an object for the pipeline
pipe2 = Image2Pipeline()

callist = []

# cycle through files
for rampfile in slopelist:
    filename = rampfile.meta.filename
    # Set pipeline parameters
    pipe2.save_results = True
    # NOTE(review): filename presumably already carries its own '.fits'
    # extension, so this yields names like '<stem>.fits_cal.fits'; the later
    # glob pattern 'starfield*_cal.fits' still matches them -- confirm this
    # naming is intended.
    pipe2.output_file = filename +'_cal.fits'
    pipe2.resample.save_results = True
    pipe2.suffix = None
    calfile = pipe2.run(rampfile)
    callist.append(calfile)

print(callist)
# -
# ### Create association table of data output from calwebb_image2.
#
# The calwebb_image3 pipeline takes in an association table of a set of images which should be combined. The association table can also be used to specify a background image to be subtracted or a source catalog to be used within the pipeline (sourcecat is not typically used with MIRI).
# +
# use asn_from_list to create association table
import glob
calfiles = glob.glob('starfield*_cal.fits')
# Level-3 association: all calibrated exposures are combined into a single
# product by calwebb_image3.
asn = asn_from_list.asn_from_list(calfiles, rule=DMS_Level3_Base, product_name='starfield_50star4ptdither_combined.fits')

# use this if you need to add non'science' exposure types
#asn['products'][0]['members'][1]['exptype'] = 'background'
#asn['products'][0]['members'][2]['exptype'] = 'sourcecat'

# dump association table to a .json file for use in image3
# asn.dump() returns (filename, serialized_table); element [1] is the text.
with open('starfield_50star4ptdither_asnfile.json', 'w') as fp:
    fp.write(asn.dump()[1])

print(asn)
# -
# ### Run calwebb_image3 on the association table, setting any specific parameters.
# +
# use association table created in previous step with calwebb_image3

# set any specific parameters
# tweakreg parameters to allow data to run
fwhm=3.762 # Gaussian kernel FWHM of objects expected, default=2.5
minobj=5 # minimum number of objects needed to match positions for a good fit, default=15
snr= 250 # signal to noise threshold, default=5
sigma= 3 # clipping limit, in sigma units, used when performing fit, default=3
fit_geom='shift' # type of affine transformation to be considered when fitting catalogs, default='general'
use2dhist=False # boolean indicating whether to use 2D histogram to find initial offset, default=True

pipe3=Image3Pipeline()

# tweakreg: align the dithered exposures onto a common frame
pipe3.tweakreg.kernel_fwhm = fwhm
pipe3.tweakreg.snr_threshold = snr
pipe3.tweakreg.minobj = minobj
pipe3.tweakreg.sigma = sigma
pipe3.tweakreg.fitgeometry = fit_geom
pipe3.tweakreg.use2dhist = use2dhist

# source_catalog: detect point sources in the combined image
pipe3.source_catalog.kernel_fwhm = fwhm
pipe3.source_catalog.snr_threshold = snr

# Save the products of every intermediate step so the outlier_detection
# output (the *_crf files) can be inspected below.
pipe3.skymatch.save_results = True
pipe3.outlier_detection.save_results = True
pipe3.resample.save_results = True
pipe3.source_catalog.save_results = True
pipe3.save_results = True

# run Image3
pipe3.run('starfield_50star4ptdither_asnfile.json')

print('Image 3 pipeline finished.')
# -
# <a id="testing"></a>
# # Perform Tests or Visualization
#
# View the image and check number of pixels being flagged as outliers.
#
# [Top of Page](#title_ID)
# +
# read in i2d file
im_i2d = ImageModel('starfield_50star4ptdither_combined_i2d.fits')

# Stretch/interval helpers; only `norm` (linear min-max) is actually used in
# the imshow call below -- viz1/viz2 are left over from the commented-out
# display alternative.
viz1 = LogStretch()
viz2 = LogStretch() + ManualInterval(-5,5)
norm = ImageNormalize(im_i2d.data, interval=MinMaxInterval(),
                      stretch=LinearStretch())

plt.figure(figsize=(15,15))
#plt.imshow(viz2(im_i2d.data),cmap='gray')
# Clip the display to a narrow range around the background level so faint
# structure is visible.
plt.imshow(im_i2d.data,origin='lower',norm=norm,vmin=-5, vmax=4)
plt.colorbar()
# -
# ### Image examination
# The image output should have the Four Quadrant phase masks on the left of the image masked out (values of 0). The image area should be smooth in the background regions with multiple point sources bright against the background. Passing criteria for the Lyot mask region are still being determined.
# ## Check output of source catalog against image
# Read the source_catalog output (ecsv) and report how many sources it found.
photfile = 'starfield_50star4ptdither_combined_cat.ecsv'
data = table.Table.read(photfile, format='ascii', comment='#')
print(len(data),' sources detected')
# +
# read in ecsv photom file
# NOTE(review): these imports repeat ones already made at the top of the
# notebook; kept so the cell can also be run standalone.
from astropy.visualization import LogStretch, PercentileInterval, ManualInterval
from astropy import table
from matplotlib.colors import LogNorm
from astropy.visualization import (MinMaxInterval, SqrtStretch,
                                   ImageNormalize)

# Sqrt stretch this time, chosen to bring out the point sources.
viz1 = LogStretch()
viz2 = LogStretch() + ManualInterval(-12,100)
norm = ImageNormalize(im_i2d.data, interval=MinMaxInterval(),
                      stretch=SqrtStretch())
plt.figure(figsize=(15,15))
#plt.imshow(viz2(im_i2d.data),cmap='gray')
plt.imshow(im_i2d.data,origin='lower',norm=norm,vmin=-12, vmax=100)
plt.colorbar()
# Overplot the catalog positions; the red dots should land on the point
# sources if the catalog is correct.
plt.scatter(data['xcentroid'], data['ycentroid'],lw=1, s=10,color='red')
# -
# ### Check over source catalog match
# If the red dots marking sources found in image above are centered on the point sources, the test passes.
# ### Check DQ Flagging of outlier detection
# Read in the individual crf files which are output from outlier_detection and check the dq extension to see how many pixels out of each image are flagged as outliers.
# +
# Read in each crf file output from outlier_detection and report the
# percentage of pixels flagged as outliers.
outlierfiles = glob.glob('starfield*_crf.fits')

flag_thresh = 1.0  # Percentage above which user should be notified of high percentage of flagged pixels

for crffile in outlierfiles:
    # Renamed from `file` to avoid shadowing the builtin (and the detector1
    # loop variable of the same name).
    crf_model = ImageModel(crffile)
    nx = crf_model.meta.subarray.xsize
    ny = crf_model.meta.subarray.ysize
    filename = crf_model.meta.filename
    print(filename)
    numpix = nx * ny

    # Boolean masks of the pixels carrying each DQ bit.
    outlierarray = (crf_model.dq & dqflags.pixel['OUTLIER'] > 0)
    badarray = (crf_model.dq & dqflags.pixel['DO_NOT_USE'] > 0)

    # Test that all pixels flagged with OUTLIER are also flagged DO_NOT_USE.
    # The original assertion compared the scalar reductions
    # `outlierarray.all() == badarray.all()`, which passes vacuously whenever
    # each mask merely contains at least one unflagged pixel; the element-wise
    # check below verifies the actual implication.
    assert np.all(badarray[outlierarray])

    # Count number of pixels flagged as OUTLIER
    jumpcount = outlierarray.sum()
    print('There are ', jumpcount, ' pixels flagged as outliers.')
    #print('Value of dq flag for pixel 784, 672: ', crf_model.dq[672, 784])

    percentflagged = (jumpcount / numpix) * 100.

    print('The percentage of pixels flagged is ', percentflagged)
    if percentflagged > flag_thresh:
        print('This percentage is higher than it should be. Review data through outlier step')
    print('\n')
# -
# The output above should show percentages below some chosen threshold. If the percentage of pixels flagged as outliers are above the set threshold, there will be error messages printed. If no error messages are printed, this test is presumed to pass.
# <a id="about_ID"></a>
# ## About this Notebook
# **Author:** <NAME>, Senior Staff Scientist, MIRI Branch
# <br>**Updated On:** 02/17/2021
# [Top of Page](#title_ID)
# <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
|
jwst_validation_notebooks/outlier_detection/jwst_outlier_detection_miri_test/jwst_outlier_detection_view_miri_test.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pandasworkshop
# language: python
# name: pandasworkshop
# ---
# # Exercise 03-01 | Gross domestic product
#
# The task: Find and load data on GDP.
#
#
# 1. Load Pandas
import pandas as pd
# 2. Query a search engine
#
# Now we know about the "datapackage" format, let's see whether there exists a prepared dataset in this format (which would help us loading it).
#
# * [Duck Duck Go](https://duckduckgo.com/?q=gdp+datapackage)
# * [Google Search](https://www.google.com/search?q=gdp+datapackage)
#
# And indeed, we find the site: https://github.com/datasets/gdp as first or among the top results.
#
# 3. Install "pandas-datapackage-reader" to access the datapackage format.
# !pip install pandas-datapackage-reader
# 4. Import the pandas datapackage adapter and load the JSON file from the GDP dataset repository.
import pandas_datapackage_reader as pdr
# read_datapackage follows the datapackage.json descriptor and loads the
# referenced resource(s) into a DataFrame (requires network access).
df = pdr.read_datapackage("https://raw.githubusercontent.com/datasets/gdp/master/datapackage.json")
# 5. Confirm the successful load of the data, by using head, tail and info.
df.head()
df.tail()
# info() prints column dtypes and non-null counts.
df.info()
|
Chapter03/Exercise03.01/Exercise03.01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Load the labelled phishing dataset.
# NOTE(review): absolute local path -- this only runs on the original
# author's machine; consider a path relative to the repository root.
dataset = pd.read_csv(r"C:\Users\LENOVO 2LIN\Visual Studio Code\git-project-folder\Detecting-Phishing-Websites-With-ML\data\phishing.csv")
# Features are every column but the last; the label is the last column.
x = dataset.iloc[ : , :-1].values
# NOTE(review): `-1:` keeps y two-dimensional (n, 1); sklearn estimators
# expect a 1-D label vector and will emit a DataConversionWarning here --
# consider .ravel().
y = dataset.iloc[:, -1:].values
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.25, random_state = 85 )
# Exhaustive grid search (5-fold CV, all cores) over forest size, feature
# sampling and split criterion.
from sklearn.model_selection import GridSearchCV
parameters = [{'n_estimators': [100, 700],'max_features': ['sqrt', 'log2'],'criterion' :['gini','entropy']}]
grid_search = GridSearchCV(RandomForestClassifier(), parameters,cv =5, n_jobs= -1)
grid_search.fit(x_train, y_train)
print("Best Accurancy =" +str( grid_search.best_score_))
print("best parameters =" + str(grid_search.best_params_))
# Final model, fit with the chosen hyperparameters.
classifier = RandomForestClassifier(n_estimators = 100, criterion = "gini", max_features = 'log2', random_state = 4)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
x_test
# Confusion matrix on the held-out test split.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# Sanity check: classify one hand-prepared 30-feature vector.
# NOTE(review): presumably 1/-1/0 encode legitimate/phishing/suspicious
# indicators -- confirm against the dataset documentation.
prepared_df = [1,-1,1,1,1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,1,1,0,1,1,1,1,-1,-1,0,-1,1,0,1]
p = np.array(prepared_df).reshape(-1, 1).T
print(p)
y_pred = classifier.predict(p)
print(y_pred)
# Make the project's feature-extraction module importable for
# predict_website below.
import sys
sys.path.insert(0, r'C:\Users\LENOVO 2LIN\Visual Studio Code\git-project-folder\Detecting-Phishing-Websites-With-ML\Feature-Extraction')
import feature_extraction
def predict_website(url, classifier):
    """Classify a URL with the trained model.

    Extracts the 30-element feature vector for *url* via the project's
    feature_extraction module, shapes it into a single-sample 2-D array and
    returns classifier.predict() on it (an array holding one label).
    """
    target = '{}'.format(url)
    feature_vector = feature_extraction.features(target)
    sample = np.array(feature_vector).reshape(-1, 1).T
    return classifier.predict(sample)
predict_website("https://www.youtube.com/",classifier)
import pickle
# Persist the trained classifier.
with open("model.bin", 'wb') as f_out:
    pickle.dump(classifier, f_out)
# NOTE(review): redundant -- the with-block above already closed the file.
f_out.close()
# +
##loading the model from the saved file
with open('model.bin', 'rb') as f_in:
    model = pickle.load(f_in)

# NOTE(review): this call still passes `classifier`, not the reloaded
# `model`, so the pickle round-trip is not actually exercised here.
predict_website("https://stackoverflow.com/questions/4415259/convert-regular-python-string-to-raw-string",classifier)
# -
# Second copy of the model, for the web app.
# NOTE(review): leaves the file handle open; prefer a with-block as above.
pickle.dump(classifier, open('model.pkl','wb'))
|
Model/final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/timothyolano/Linear-Algebra-58019/blob/main/Python_Exercise1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GFqBgagvBOmS"
# # Matrix and it's operation
# + colab={"base_uri": "https://localhost:8080/"} id="EN-r5fbxBaKG" outputId="52b6786f-8c92-4fd3-f781-e69465f88631"
import numpy as np

# Two 2x2 integer matrices.
a = np.array(([-5, 0], [4,1]))
b = np.array(([6, -3], [2, 3]))

print("a= ")
print(a)
print("b= ")
print(b)

# Element-wise matrix addition and subtraction.
print("A+B: ")
print(a+b)
print("B-A: ")
print(b-a)
print("A-B: ")
print(a-b)
|
Python_Exercise1.ipynb
|