code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
LICENSE MIT
2020
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphs.
I'm currently cleaning the code, please ask me if something is not clear enough.
The charts are exported to 'images/charts/france'.
Data is downloaded to / imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
import pandas as pd
import plotly.graph_objects as go
import france_data_management as data
from datetime import datetime
from datetime import timedelta
import plotly
import math
import os
# Load every national dataset once; all per-département frames below are derived from these.
df, df_confirmed, dates, df_new, df_tests, df_deconf, df_sursaud, df_incid, df_tests_viros = data.import_data()
# +
#df = df.groupby(["dep", "jour"]).first().reset_index()
# +
# Aggregate hospital, incidence (all ages combined: cl_age90 == 0) and
# new-admission data per day and per département.
df_departements = df.groupby(["jour", "departmentName"]).sum().reset_index()
df_incid_departements = df_incid[df_incid["cl_age90"]==0].groupby(["jour", "departmentName"]).sum().reset_index()
df_new_departements = df_new.groupby(["jour", "departmentName"]).sum().reset_index()
# Ordered, de-duplicated lists of département names / dates
# (dict.fromkeys keeps first-seen order, unlike set()).
departements = list(dict.fromkeys(list(df_departements['departmentName'].values)))
dates_incid = list(dict.fromkeys(list(df_incid['jour'].values)))
# Upper bound for chart x axes: the day after the most recent data point.
last_day_plot = (datetime.strptime(max(dates), '%Y-%m-%d') + timedelta(days=1)).strftime("%Y-%m-%d")
departements_nb = list(dict.fromkeys(list(df_tests_viros['dep'].values)))
# +
# ICU bed capacity per département, merged onto the hospital data
# (adds the LITS columns used by saturation_rea_journ).
lits_reas = pd.read_csv('data/france/lits_rea.csv', sep=",")
df_departements_lits = df_departements.merge(lits_reas, left_on="departmentName", right_on="nom_dpt")
# -
def cas_journ(departement):
    """Chart daily positive Covid19 cases for one département.

    Plots the centered 7-day rolling mean as a filled line, the raw daily
    values as faint markers, and annotates the latest defined smoothed point.

    Args:
        departement: name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/cas_journ_<departement>.jpeg``.
    Uses module globals ``df_incid_departements``, ``dates_incid``, ``dates``
    and ``last_day_plot``.
    """
    df_incid_dep = df_incid_departements[df_incid_departements["departmentName"] == departement]
    # Centered 7-day mean of positive tests ("P"); with center=True the last 3
    # values are NaN, hence index -4 below for "latest defined point".
    df_incid_dep_rolling = df_incid_dep["P"].rolling(window=7, center=True).mean()
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "cas_journ_"+departement, [0, df_incid_dep["P"].max()]
    title = "<b>Cas positifs</b> au Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Smoothed curve, filled down to zero.
    fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"],
        y = df_incid_dep_rolling,
        name = "Nouveaux décès hosp.",  # NOTE(review): label looks copy-pasted; hidden anyway (showlegend=False)
        marker_color='rgb(8, 115, 191)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(8, 115, 191, 0.3)",
        showlegend=False
    ))
    # Dot on the most recent defined smoothed value.
    fig.add_trace(go.Scatter(
        x = [dates_incid[-4]],
        y = [df_incid_dep_rolling.values[-4]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    # Raw daily values as faint markers behind the curve.
    fig.add_trace(go.Scatter(
        x = df_incid_dep["jour"],
        y = df_incid_dep["P"],
        name = "",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Arrowed annotation pointing at the latest smoothed value.
    fig['layout']['annotations'] += (dict(
        x = dates_incid[-4], y = df_incid_dep_rolling.values[-4], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_incid_dep_rolling.values[-4], "cas quotidiens<br></b>en moyenne du {} au {}.".format(datetime.strptime(dates_incid[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates_incid[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(8, 115, 191)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-40,
        ay=-70,
        arrowcolor="rgb(8, 115, 191)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
def hosp_journ(departement):
    """Chart the number of people currently hospitalised for Covid19 in one
    département and export it as a JPEG.

    Args:
        departement: name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/hosp_journ_<departement>.jpeg``.
    Uses module globals ``df_departements``, ``dates`` and ``last_day_plot``.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    #df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig = ["2020-03-29", last_day_plot], "hosp_journ_"+departement
    title = "Personnes <b>hospitalisées</b> pour Covid19 - <b>" + departement +"</b>"
    fig = go.Figure()
    # Filled line: daily hospital occupancy.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = df_dep["hosp"],
        name = "Nouveaux décès hosp.",  # NOTE(review): label looks copy-pasted; hidden anyway (showlegend=False)
        marker_color='rgb(209, 102, 21)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Dot on the latest value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_dep["hosp"].values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(209, 102, 21)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Arrowed annotation pointing at the latest value.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_dep["hosp"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_dep["hosp"].values[-1], "personnes<br>hospitalisées</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(209, 102, 21)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-50,
        ay=-90,
        arrowcolor="rgb(209, 102, 21)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
# +
def hosp_comparaison_vagues(departement):
    """Compare the first and second Covid19 hospitalisation waves for one
    département with a stacked bar chart: orange bars up to the first-wave
    peak, red for the part exceeding it, plus a dashed reference line at the
    first-wave peak. Exports the figure as a JPEG.

    Args:
        departement: name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/hosp_comp_vagues_<departement>.jpeg``.
    Uses module globals ``df_departements``, ``dates`` and ``last_day_plot``.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    #df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig = ["2020-03-29", last_day_plot], "hosp_comp_vagues_"+departement
    title = ""#"<b>Personnes hospitalisées</b> pour Covid19 - " + departement
    fig = go.Figure()
    # Peak of the first wave (before Aug 2020) and of the second (after Sep
    # 2020), with the first date each peak was reached.
    premiere_vague = df_dep[ df_dep["jour"] < "2020-08"]["hosp"].max()
    premiere_vague_date = df_dep[ df_dep["hosp"] == premiere_vague]["jour"].min()
    deuxieme_vague = df_dep[ df_dep["jour"] > "2020-09"]["hosp"].max()
    deuxieme_vague_date = df_dep[ (df_dep["hosp"] == deuxieme_vague) & (df_dep["jour"] > "2020-09")]["jour"].min()
    # Second-wave annotation is red when it tops the first wave, green otherwise.
    color_deuxieme_vague = "green"
    if deuxieme_vague > premiere_vague:
        color_deuxieme_vague = "red"
    hosp_values = df_dep["hosp"].values
    # Part of each daily bar exceeding the first-wave peak (drawn in red on top).
    trace_to_add = [max(0, hosp - premiere_vague) for hosp in hosp_values]
    #deuxieme_vague += [df_dep[ df_dep["jour"] > "2020-09"]["hosp"].max()]
    # NOTE(review): `color` is computed but never used below.
    color = ["red" if hosp > premiere_vague else "rgb(209, 102, 21)" for hosp in df_dep["hosp"].values]
    # Base bars, capped at the first-wave peak.
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = df_dep["hosp"].values - trace_to_add,
        name = "Nouveaux décès hosp.",
        marker_color="orange",
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Excess above the first-wave peak, stacked on top in red.
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = trace_to_add,
        name = "Nouveaux décès hosp.",
        marker_color="red",
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Horizontal dashed reference line at the first-wave peak; the oversized
    # x span is clipped by the axis range set below.
    fig.add_shape(
        type="line",
        x0="2000-01-01",
        y0=premiere_vague,
        x1="2030-01-01",
        y1=premiere_vague,
        opacity=1,
        #fillcolor="orange",
        line=dict(
            dash="dash",
            color="black",
            width=1,
        )
    )
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18), range=["2020-03-15", last_day_plot])
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        paper_bgcolor='rgba(255,255,255,1)',
        plot_bgcolor='rgba(255,255,255,1)',
        bargap=0,
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='stack',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=-0.08,
                xref='paper',
                yref='paper',
                text="Date : {}. Source : Santé publique France. Auteur : <NAME> - covidtracker.fr - nombre d'hospitalisations".format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
            ),
        ]
    )
    # Two arrowed annotations: second-wave peak (coloured by comparison
    # outcome) and first-wave peak (black).
    fig['layout']['annotations'] += (dict(
        x = deuxieme_vague_date, y = deuxieme_vague, # annotation point
        xref='x1',
        yref='y1',
        text="Deuxième vague",
        xshift=-5,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color=color_deuxieme_vague,
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-150,
        ay=-50,
        arrowcolor=color_deuxieme_vague,
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),dict(
        x = premiere_vague_date, y = premiere_vague, # annotation point
        xref='x1',
        yref='y1',
        text="Première vague",
        xshift=0,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="black",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=0,
        ay=-50,
        arrowcolor="black",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ))
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=1000, height=700)
    print("> " + name_fig)
#hosp_comparaison_vagues("Savoie")
# +
def rea_journ(departement):
    """Chart the number of Covid19 patients in intensive care for one
    département and export it as a JPEG.

    Args:
        departement: name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/rea_journ_<departement>.jpeg``.
    Uses module globals ``df_departements``, ``dates`` and ``last_day_plot``.
    """
    df_dep = df_departements[df_departements["departmentName"] == departement]
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig = ["2020-03-29", last_day_plot], "rea_journ_" + departement
    title = "Personnes en <b>réanimation</b> pour Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Filled line: ICU occupancy. NOTE(review): x is the global `dates`, which
    # assumes df_dep has exactly one row per date, in the same order — confirm.
    fig.add_trace(go.Scatter(
        x = dates,
        y = df_dep["rea"],
        name = "Nouveaux décès hosp.",  # NOTE(review): label looks copy-pasted; hidden anyway (showlegend=False)
        marker_color='rgb(201, 4, 4)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(201, 4, 4,0.3)",
        showlegend=False
    ))
    # Dot on the latest value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_dep["rea"].values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='rgb(201, 4, 4)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=10,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Arrowed annotation pointing at the latest value.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_dep["rea"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_dep["rea"].values[-1], "personnes<br>en réanimation</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(201, 4, 4)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-50,
        ay=-90,
        arrowcolor="rgb(201, 4, 4)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
#rea_journ("Isère")
# +
def dc_journ(departement):
    """Chart daily hospital Covid19 deaths for one département: trailing 7-day
    mean as a filled line, raw daily values as faint markers. Exports a JPEG.

    Args:
        departement: name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/dc_journ_<departement>.jpeg``.
    Uses module globals ``df_new_departements``, ``dates`` and ``last_day_plot``.
    """
    df_dep = df_new_departements[df_new_departements["departmentName"] == departement]
    # Trailing (right-aligned) 7-day mean: the last value is defined, so the
    # annotation below can use index -1 (unlike the centered mean in cas_journ).
    dc_new_rolling = df_dep["incid_dc"].rolling(window=7).mean()
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "dc_journ_"+departement, [0, df_dep["incid_dc"].max()]
    title = "Décès <b>hospitaliers quotidiens</b> du Covid19 - <b>" + departement + "</b>"
    fig = go.Figure()
    # Smoothed curve, filled down to zero.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = dc_new_rolling,
        name = "Nouveaux décès hosp.",
        marker_color='black',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(0,0,0,0.3)",
        showlegend=False
    ))
    # Dot on the latest smoothed value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [dc_new_rolling.values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    #
    # Raw daily values as faint markers behind the curve.
    fig.add_trace(go.Scatter(
        x = df_dep["jour"],
        y = df_dep["incid_dc"],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1.07,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Arrowed annotation pointing at the latest smoothed value (truncated to an int).
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = dc_new_rolling.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % math.trunc(round(dc_new_rolling.values[-1], 2)), "décès quotidiens</b><br>en moyenne<br>du {} au {}.".format(datetime.strptime(dates[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="black",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-50,
        ay=-90,
        arrowcolor="black",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
#dc_journ("Paris")
# +
def saturation_rea_journ(dep):
    """Chart the share of ICU beds occupied by Covid19 patients in one
    département, with bars coloured by load level. Exports a JPEG.

    Args:
        dep: département name matching the ``departmentName`` column.

    Side effects: writes
    ``images/charts/france/departements_dashboards/saturation_rea_journ_<dep>.jpeg``.
    Uses module globals ``df_departements_lits``, ``dates`` and ``last_day_plot``.
    """
    df_dep = df_departements_lits[df_departements_lits["departmentName"] == dep]
    # Occupancy in % of ICU beds (LITS_y comes from the lits_rea.csv merge).
    df_saturation = 100 * df_dep["rea"] / df_dep["LITS_y"]
    # NOTE(review): range_x is computed but never applied to the x axis.
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "saturation_rea_journ_"+dep, [0, df_saturation.max()]
    title = "<b>Occupation des réa.</b> par les patients Covid19 - " + dep
    fig = go.Figure()
    # Per-day colour scale: < 40 % green, 40-80 % orange, > 80 % red.
    colors_sat = ["green" if val < 40 else "red" if val > 80 else "orange" for val in df_saturation.values]
    fig.add_trace(go.Bar(
        x = df_dep["jour"],
        y = df_saturation,
        name = "Saturation",
        marker_color=colors_sat,
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(8, 115, 191, 0.3)",
        showlegend=False
    ))
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                text='Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
            ),
        ]
    )
    # Arrowed annotation on the latest occupancy value, coloured like its bar.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_saturation.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_saturation.values[-1], " %</b> des lits de réa. occupés par<br>des patients Covid19 le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color=colors_sat[-1],
            size=20
        ),
        opacity=1,
        ax=-70,
        ay=-70,
        arrowcolor=colors_sat[-1],
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image("images/charts/france/departements_dashboards/{}.jpeg".format(name_fig), scale=1.5, width=750, height=500)
    print("> " + name_fig)
# +
import cv2  # OpenCV, used only here to tile the four charts into one dashboard
for dep in departements:
    # Render the six per-département charts.
    cas_journ(dep)
    hosp_journ(dep)
    rea_journ(dep)
    dc_journ(dep)
    hosp_comparaison_vagues(dep)
    saturation_rea_journ(dep)
    # Assemble a 2x2 dashboard: cases / hospitalisations on top, ICU / deaths below.
    im1 = cv2.imread('images/charts/france/departements_dashboards/cas_journ_{}.jpeg'.format(dep))
    im2 = cv2.imread('images/charts/france/departements_dashboards/hosp_journ_{}.jpeg'.format(dep))
    im3 = cv2.imread('images/charts/france/departements_dashboards/rea_journ_{}.jpeg'.format(dep))
    im4 = cv2.imread('images/charts/france/departements_dashboards/dc_journ_{}.jpeg'.format(dep))
    im_haut = cv2.hconcat([im1, im2])
    #cv2.imwrite('images/charts/france/tests_combinaison.jpeg', im_h)
    im_bas = cv2.hconcat([im3, im4])
    im_totale = cv2.vconcat([im_haut, im_bas])
    cv2.imwrite('images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg'.format(dep), im_totale)
    # Remove the individual tiles once combined.
    # NOTE(review): the hosp_journ tile is deliberately kept (removal commented
    # out below) — presumably published separately; confirm.
    os.remove('images/charts/france/departements_dashboards/cas_journ_{}.jpeg'.format(dep))
    #os.remove('images/charts/france/departements_dashboards/hosp_journ_{}.jpeg'.format(dep))
    os.remove('images/charts/france/departements_dashboards/rea_journ_{}.jpeg'.format(dep))
    os.remove('images/charts/france/departements_dashboards/dc_journ_{}.jpeg'.format(dep))
# -
# -
# NOTE(review): this re-renders saturation charts already produced in the loop
# above; possibly kept so they can be refreshed alone — confirm.
for dep in departements:
    saturation_rea_journ(dep)
"""for idx,dep in enumerate(departements):
numero_dep = df[df["departmentName"] == dep]["dep"].values[-1]
heading = "<!-- wp:heading --><h2 id=\"{}\">{}</h2><!-- /wp:heading -->\n".format(dep, dep + " (" + numero_dep + ")")
string = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/dashboard_jour_{}.jpeg\" width=\"75%\"> </a></p>\n".format(dep, dep)
string2 = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/heatmaps_deps/heatmap_taux_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/heatmaps_deps/heatmap_taux_{}.jpeg\" width=\"60%\"> </a></p>\n".format(numero_dep, numero_dep)
string_saturation = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/saturation_rea_journ_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/departements_dashboards/saturation_rea_journ_{}.jpeg\" width=\"60%\"> </a></p>\n".format(dep, dep)
space = "<!-- wp:spacer {\"height\":50} --><div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer\"></div><!-- /wp:spacer -->"
retourmenu="<a href=\"#Menu\">Retour au menu</a>"
print(space+retourmenu+heading+string+string2+string_saturation)
"""
"""#print("<!-- wp:buttons --><div class=\"wp-block-buttons\">\n")
output = ""
for dep in departements:
numero_dep = df[df["departmentName"] == dep]["dep"].values[-1]
output+= "<a href=\"#{}\">{} ({})</a> • ".format(dep, dep, numero_dep)
#print(output[:-2])
"""
#print("<!-- /wp:buttons -->")
| covid19_departements_dashboards.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 第一周 NMA 课程总结
#
# 本周的课程涉及到:
#
# - 计算神经模型的概念与基本类型
# - 模型拟合的基本方法
# - 最小二乘法
# - 极大似然法
# - 线性模型与广义线性模型的实现
# - ICA降维的实现与tSNE方法
# 构建基本运行环境
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import scipy
import pandas as pd
import ipywidgets as widgets
import os
# ## 1. 计算神经模型的概念与基本类型
#
# ### 模型的定义
#
# 是对现实世界的抽象, 具有简洁和能够控制的优点
#
# ### 计算模型研究的基本逻辑
#
# 观测现象 → 提出模型假设 → 构建模型 → 执行模型做出预期 → 实验验证
#
# ### 模型研究问题的不同水平
#
# - 人类行为
# - 中枢神经
# - 神经系统不同模块
# - 神经元网络
# - 神经元
# - 受体
# - 分子
#
# ### 不同类型的模型
#
# 研究当中使用的模型分为三类:
# - What: 描述研究数据
# - How: 描述计算机制
# - Why: 证实制定模型的计算机制为什么是最优的
#
# 举例: 神经元电发放时间间隔模型
# - What: 对数函数可以准确描述电发放Interval的分布
# - How: LIF 法则能够准确建立神经元的生理机制与Interval的对数分布模式之间的联系
# - Why: 使用香农信息熵来证明 Interval 的对数分布模式能够在有限资源的情况下传递尽可能多的信息
# ## 2. 计算模型建模简单实践
#
# 本章我们尝试完整执行一个计算模型研究的基本过程. 一个完整的计算模型研究流程如下:
#
# ### 定义研究问题
#
# 1. 找到一个实验**现象**并找到相关的待回答的**问题**
# 2. 了解这个问题当前的研究**进展**
# 3. 定义问题的基本**切入点**
# 4. 通过探索性分析, 使用数学工具构建**假设**
#
# ### 模型构建
#
# 1. 选择合适的建模**工具 (数学模型)**
# 2. 规划模型构建
# 3. 完成模型构建
#
#
# ### 模型测试
#
# 1. 模型是否能够**表征**认知过程
# 2. 评估模型的**准确性** (计算模型不关心效率)
#
# ### 模型发布
#
# 1. 在平台发布模型
# +
# Fetch the practice data: download once via the notebook shell command, then
# load the cached .npz from disk.
fname="W1D2_data.npz"
if not os.path.exists(fname):
    # !wget https://osf.io/c5xyf/download -O $fname
filez = np.load(file=fname, allow_pickle=True)
judgments = filez['judgments']
opticflow = filez['opticflow']
vestibular = filez['vestibular']
# -
# ### 2.1 探索数据以确定研究问题, 并构建研究假设
#
# **现象**: 我们发现在我们从窗口能看到对面的火车时, 火车刚开始启动时, 或当对面当火车开始启动时, 看着窗外的乘车者会产生是外界环境在动而非自己在动的错觉. 我们想知道这种错觉的产生机制.
#
# **实验**:我们假设有两辆火车, 被试坐在其中的一辆火车上. 当其中一辆火车从速度为 0 到 1/m 的过程当中, 我们请被试拨动自己手上的两个代表火车运动速度的滑块, 并记录其移动速度, 从而判断被试的感知是否准确. 每个条件下进行 100 次实验.
#
# 接下来我们使用程序来描述这个现象 (What 模型)
#
# #### 2.1.1 探索数据
#
# judgment 数据为实验当中的数据
# Wrap the raw ndarray in a DataFrame for a quick look at the first rows.
judgments_table = pd.DataFrame(judgments)
print(judgments_table.head())
print('\nThe dim of judgments is ' + str(judgments.shape))
# 这个矩阵为 5 列 200 行, 表示做了 200 次观测, 其中收集到的数据依次为 `[实验条件, 被试是否移动, 外界环境(对面的车)是否移动, 被试判断自己的移动速度, 被试判断对面的移动速度]`
# Scatter perceived world motion vs perceived self motion, coloured by
# experimental condition (column 0).
# NOTE(review): positional data args to sb.scatterplot are deprecated in
# newer seaborn releases (prefer x=..., y=... keywords).
dist_judgment = sb.scatterplot(judgments[:, 3], judgments[:, 4], hue = judgments[:, 0])
dist_judgment.set(xlabel = 'perceived world motion', ylabel = 'perceived self motion')
# 从图中我们可以看出两种实验条件下的各个观测, 显然被试并不能准确区分两种移动的情况.
#
# 我们进而通过阅读文献了解这个领域的**进展**, 得知个体对速度判断来源于两个感知器官的信息, 包括视觉刺激和前庭的感应, 而前庭主要感受到的是加速度信息.
#
# 我们可以先模拟数据来看一下两种感知信息的特征.
#
# 在物理模型中, 加速度常用 gamma 分布来进行描述. 在有了加速度后, 我们可以使用其来计算速度.
# +
from scipy.stats import gamma
# Acceleration profile modelled as a gamma pdf; velocity is its cumulative
# integral, approximated by a Riemann sum with step dt.
a = gamma.pdf(np.arange(0, 10, 0.001), 2.5, 0)
dt = 0.001
v = np.cumsum(a * dt)
# -
# Plot the simulated acceleration and the integrated speed.
time = np.arange(0, 10, 0.001)
plt.plot(time, a, label='acceleration')
plt.plot(time, v, label='speed')
plt.xlabel('time')
plt.ylabel('motion')
plt.legend()
plt.show()
# 理想情况下通过比较视觉刺激和前庭刺激, 我们可以准确判断出个体自身是否在移动. 但是显然因为头会不停运动, 导致前庭不能感知到任何信号就立刻判断身体正在移动, 而头动和神经系统的传输成为了准确感知加速度的噪声. 从而我们产生一个**假设**, 前庭对加速度感知的信号传入后, 脑存在一个感知的阈值. 只有前庭感受的加速度信息超过了该阈值, 脑才能判断自身正在移动, 而系统噪声造成的误差会导致有的时候加速度超过了这个阈值, 有时则没有, 从而导致判断的失误.
#
# 那么是否存在这个噪声? 我们的实验数据当中记录了被试的视觉信息和前庭信息, 我们可以使用这些数据来去了解一下.
# +
# Visual (optic-flow) signals: trials 0-99 are "world moves",
# trials 100-199 are "self moves".
# Fix: the original slices [0:99] / [100:199] dropped the last trial of each
# condition (Python slice ends are exclusive); each condition has 100 trials.
worldMove_opt = opticflow[0:100, :]
selfMove_opt = opticflow[100:200, :]
# Vestibular signals, same trial layout.
worldMove_vesti = vestibular[0:100, :]
selfMove_vesti = vestibular[100:200, :]
# +
# Plot every optic-flow trial: left panel world-motion trials, right panel
# self-motion trials.
print("Opt")
time = np.arange(0, 10000, 100)  # sample times, hoisted out of both loops
plt.subplot(1, 2, 1)
# Fix: iterate over every row instead of the hard-coded range(0, 99), which
# skipped the last trial when a condition holds 100 of them.
for trial in worldMove_opt:
    plt.plot(time, trial, label="World Move")
plt.xlabel("Time")
plt.ylabel("Signal")
plt.subplot(1, 2, 2)
for trial in selfMove_opt:
    plt.plot(time, trial, label="Self Move")  # fix copy-pasted "World Move" label
plt.xlabel("Time")
plt.ylabel("Signal")
# +
# Plot every vestibular trial: left panel world-motion trials, right panel
# self-motion trials.
print("Vestibular")
time = np.arange(0, 10000, 100)  # sample times, hoisted out of both loops
plt.subplot(1, 2, 1)
# Fix: iterate over every row instead of the hard-coded range(0, 99), which
# skipped the last trial when a condition holds 100 of them.
for trial in worldMove_vesti:
    plt.plot(time, trial, label="World Move")
plt.xlabel("Time")
plt.ylabel("Signal")
plt.subplot(1, 2, 2)
for trial in selfMove_vesti:
    plt.plot(time, trial, label="Self Move")  # fix copy-pasted "World Move" label
plt.xlabel("Time")
plt.ylabel("Signal")
# -
# 我们可以轻易看出, 由于噪声的影响, 两种信号对两种场景均没有区分度, 尤其是对于加速度这种对我们有价值的信息. 当然, 我们在真实世界当中还是能够区分自己移动的信号的, 那势必意味着我们的身体当中对于加速度信息的噪声有去噪的能力. 为了验证我们的假设, 我们可以做一个滤波器来看看查看去噪后的数据是否能够反映出真实的加速度特征.
#
# 最简单的去噪器是平均窗口滤波器, 我们尝试来实现它.
# +
def moving_window(input_signal, window):
    """
    Trailing moving-average filter.

    The signal is left-padded with ``window - 1`` zeros so the output has the
    same length as the input; each output sample is the mean of the current
    sample and the ``window - 1`` samples preceding it.

    args:
        input_signal : 1-D numpy array of raw samples.
        window : number of samples averaged per output point.
    outputs:
        filted_signal : list of window-averaged samples, same length as input.
    """
    padded = np.concatenate([np.zeros(window - 1), input_signal])
    return [padded[pos:pos + window].mean() for pos in range(len(input_signal))]
# Demo: smooth one vestibular trial with a 15-sample window and plot it.
filted_signal = moving_window(vestibular[1,:], 15)
time = np.arange(0, 10000, 100)
plt.plot(time, filted_signal)
# -
# 我们发现比起刚才嘈杂的信号, 这个信号的趋势清晰了很多, 而这确实有助于判断身体是否发生了移动.
#
# 回到我们刚才的问题上, 我们还认为存在一个阈值来使得身体可以检测是否发生了移动. 假设我们设定一个阈值, 使得信号中检测到这个阈值就会发放我们身体移动了的信号.
# +
def movement_detector(filted_signal, thresholds):
    """Return 1 when the filtered signal ever exceeds *thresholds*, else 0.

    Models the hypothesised detection threshold: the brain reports self-motion
    only when the (denoised) vestibular signal peaks above the threshold.
    """
    peak = max(filted_signal)
    return 1 if peak > thresholds else 0
movement_detector(filtered_signal, 0.3)
# -
# 这样我们可以看一下在特定阈值特定窗口下, 这个模型根据被试的前庭信息有多大的可能探测出身体确实在移动:
# +
def detection_ratio(input_signal_matrix, window, thresholds):
    """Fraction of trials in which self-motion is detected.

    Each row of *input_signal_matrix* is one trial's raw signal: it is
    denoised with ``moving_window`` and then thresholded with
    ``movement_detector``; the mean of the 0/1 outcomes is returned.
    """
    outcomes = []
    for trial in input_signal_matrix:
        smoothed = moving_window(trial, window)
        outcomes.append(movement_detector(smoothed, thresholds))
    return np.mean(outcomes)
detection_ratio(vestibular, 15, 0.4)
# -
# ## 3. 模型估计
#
# 当我们构建了一个模型之后, 我们常常需要找到模型当中的最优参数. 当我们选用一个模型的时候, 我们相当于是选择了模型的形状 (能够解决的问题), 但是通过调整模型的参数, 我们才能提高模型所描述的变量和数据的一致性.
#
# 因此模型的参数估计的目标, 是提高模型的解释度.
#
# ### 3.1 均方误差 (Mean Squared Error, MSE)
#
# 你可能已经熟知最小二乘法作为线性回归模型的参数优化方法, 不过我们在这里可以重新回顾一下.
#
# 参数优化的目标是致力于提高模型的解释度, 因此我们需要使用一个数学公式来构建参数与解释度之间的关系, 并通过最优化 (optimization) 来得到最高的解释度. 这个数学公式我们通常称为目标函数. 而均方误差是我们在解决参数优化时最常用的目标函数. 它的公式形式是
#
# $$
# \min _{\theta} \frac{1}{N} \sum_{n=1}^{N}\left(y_{n}-\theta x_{n}\right)^{2}
# $$
#
# 也就是说, 我们需要找到使得估计值与观测值的方差和最小的参数 $\theta$ .
#
# 我们接下来构建一个服从 $y = 1.2x$ 线性关系, 并叠加 `[0,1)` 区间均匀噪声的数据, 并对它进行估计, 看我们是否可以准确估计出来这些参数.
# +
# Generate the simulated variables: y = theta * x plus additive noise.
# Note: np.random.rand draws uniform noise in [0, 1).
theta = 1.2
n_sample = 30
x = 10 * np.random.rand(n_sample)
noise = np.random.rand(n_sample)
y = theta * x + noise
sb.scatterplot(x, y)
# -
# 参数估计的过程:
#
# 1. 构建目标函数
# 2. 对目标函数进行最优化
# +
def mse(x, y, theta_hat):
    """Mean squared error of the linear model ``y = theta_hat * x``.

    Args:
        x: observed inputs (sequence or array).
        y: observed outputs, same length as x.
        theta_hat: candidate slope.

    Returns:
        Mean of the squared residuals ``y - theta_hat * x``.
    """
    # Vectorized residuals replace the original element-by-element loop.
    residuals = np.asarray(y) - theta_hat * np.asarray(x)
    return np.mean(np.square(residuals))
# Evaluate the MSE for a few candidate slopes.
theta_hats = [0.75, 1.0, 1.5]
for theta_hat in theta_hats:
    print(f"theta_hat of {theta_hat} has an MSE of {mse(x, y, theta_hat):.2f}")
# -
# 我们已经构建出了 MSE 和 theta 之间的关系. 进一步我们希望能找到 theta 值的最优解. 因为我们的模拟数据中, 取的 theta 为 1.2 , 我们可以先看看把 最优值和附近的范围 `x = [-2, 4]` 先画出来看看.
# +
# Sweep theta over [-2, 4] and plot MSE(theta); the minimum should sit near
# the true slope 1.2.
theta_grid = np.linspace(-2, 4)
mse_hat_grid = [mse(x, y, theta_hat) for theta_hat in theta_grid]
sb.scatterplot(theta_grid, mse_hat_grid)
# -
| Lessons Summary/W1Summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from doctest import run_docstring_examples
from datetime import datetime
# # Day 1: Report Repair
#
# Tipp: diese Aufgabe kann u.A. mit zwei bzw. drei verschachtelten For-Schleifen gelöst werden.
#
# Quelle: https://adventofcode.com/2020/day/1
#
# ## Part One
#
# After saving Christmas five years in a row, you've decided to take a vacation at a nice resort on a tropical island. Surely, Christmas will go on without you.
#
# The tropical island has its own currency and is entirely cash-only. The gold coins used there have a little picture of a starfish; the locals just call them stars. None of the currency exchanges seem to have heard of them, but somehow, you'll need to find fifty of these coins by the time you arrive so you can pay the deposit on your room.
#
# To save your vacation, you need to get all fifty stars by December 25th.
#
# Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
#
# Before you leave, the Elves in accounting just need you to fix your expense report (your puzzle input); apparently, something isn't quite adding up.
#
# Specifically, they need you to find the two entries that sum to 2020 and then multiply those two numbers together.
#
# For example, suppose your expense report contained the following:
#
# 1721
# 979
# 366
# 299
# 675
# 1456
#
# In this list, the two entries that sum to 2020 are 1721 and 299. Multiplying them together produces 1721 * 299 = 514579, so the correct answer is 514579.
#
# Of course, your expense report is much larger. Find the two entries that sum to 2020; what do you get if you multiply them together?
#
# Your puzzle answer was 1020084.
def report_repair_p1(expenses, zielsumme=2020):
    """
    Find the two numbers in *expenses* that sum to *zielsumme* and return
    their product; return None when no such pair exists.
    Tests:
    1721 * 299 = 514579
    >>> print(report_repair_p1([1721, 979, 366, 299, 675, 1456], 2020))
    514579
    """
    # Two nested loops over all unordered pairs; the inner slice starts after
    # the outer index so each pair is examined exactly once.
    for i, a in enumerate(expenses):
        for b in expenses[i + 1:]:
            if a + b == zielsumme:
                return a * b
    return None
run_docstring_examples(report_repair_p1, locals())
# ## Part Two
#
# The Elves in accounting are thankful for your help; one of them even offers you a starfish coin they had left over from a past vacation. They offer you a second one if you can find three numbers in your expense report that meet the same criteria.
#
# Using the above example again, the three entries that sum to 2020 are 979, 366, and 675. Multiplying them together produces the answer, 241861950.
#
# In your expense report, what is the product of the three entries that sum to 2020?
#
# Your puzzle answer was 295086480.
def report_repair_p2(expenses, zielsumme=2020):
    """
    Find the three numbers in *expenses* that sum to *zielsumme* and return
    their product; return None when no such triple exists.
    Tests:
    979 + 366 + 675 = 2020
    979 * 366 * 675 = 241861950
    >>> print(report_repair_p2([1721, 979, 366, 299, 675, 1456], 2020))
    241861950
    """
    # Three nested loops over all unordered triples; each inner slice starts
    # after the previous index so every triple is examined exactly once.
    for i, a in enumerate(expenses):
        for j, b in enumerate(expenses[i + 1:], start=i + 1):
            for c in expenses[j + 1:]:
                if a + b + c == zielsumme:
                    return a * b * c
    return None
run_docstring_examples(report_repair_p2, locals())
# ## Testdaten laden
# Load the puzzle input: one integer per line.
with open('../inputs/2020_01.csv') as f:
    input_data = [int(line.rstrip()) for line in f]
# ## Lösungen testen
# + code_folding=[0]
def check_solution(fun, input_data, solution):
    """Run *fun* on *input_data*, print result and runtime, and assert the
    result equals *solution*."""
    started = datetime.now()
    outcome = fun(input_data)
    elapsed = datetime.now() - started
    print("Ergebnis: {} Rechenzeit: {}".format(outcome, elapsed))
    assert outcome == solution
# -
# Verify both parts against the known puzzle answers.
check_solution(report_repair_p1, input_data, 1020084)
check_solution(report_repair_p2, input_data, 295086480)
| exercises/adventofcode.com/problems/2020_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## ASSIGNMENT 3
#
# In this assignment you will learn to use iteration structures (for loop, while loop) and play around with different data structures (lists and dictionaries)
# ### Multiplication tables
# This will be your first introduction in using the for-loop iteration structure. Most of you will remember multiplication tables from primary school, where you had to rehearse them endlessly. For instance, the multiplication table of 4 looks like:
#
# 1 x 4 = 4 <br/>2 x 4 = 8 <br/> 3 x 4 = 12 <br/>4 x 4 = 16 <br/>and so on. <br/><br/>In the upcoming exercises you are going to print such multiplication tables using for-loops.
# #### Question 1
# Print the multiplication table of 2 (stop at 10 x 2). You can use range() in combination with a for-loop to easily do this.
# * With range() you generate the list of multiplicators (e.g. the numbers you are going to multiply 2 with) and then you traverse this list with a for loop and multiply each number that it gives you with 2.
# * Think about which values you want to use as the arguments for range, if you want to start with 1 x 2 and stop at 10 x 2.
#
# You should print the full multiplication and the result.
# +
#answer question 1
# -
# #### Question 2
# Nest the for-loop you wrote for question 1 in a second for-loop to print all the tables. The outer for-loop will generate all the primary table numbers and the inner for-loop will generate all the multiplicators. In pseudocode the structure looks like this:
# ```python
# for each number from 1 to 10:
# for each multiplicator from 1 to 10:
# print <multiplication and result>
# ```
# +
#answer question 2
# -
# #### Question 3
# Print out the message “The table of <table no>” before a new table is printed. The output should thus look like this:<br/><br/>The table of 1:<br/>1 x 1 = 1<br/>2 x 1 = 2<br/>3 x 1 = 3<br/>…<br/>10 x 1 = 10<br/>The table of 2:<br/>1 x 2 = 2<br/>2 x 2 = 4<br/>3 x 2 = 6<br/>…<br/>10 x 2 = 20<br/>The table of 3:<br/>1 x 3 = 3<br/>2 x 3 = 6<br/>and so on.
# +
#answer question 3
# -
# ### Range()
# Range() is a useful function that's already built-in in python. range() comes in a couple of forms. Try the different forms of range() in the cell below, and compare the outcomes.
print(list(range(10)))  # one argument: stop -> 0..9
print(list(range(5,10)))  # two arguments: start, stop -> 5..9
print(list(range(10,5))) # ! start > stop with the default step of 1 -> empty list
print(list(range(0,30,3)))  # three arguments: start, stop, step -> multiples of 3 below 30
# #### To think about
# You know that range() is a function, and that functions can have (default) arguments. Describe what the three arguments that can be passed to range() do, and what their default values are -- i.e. what python does if you leave them out.
# ### Data structures
# In the lecture, I gave the following example in which I stored various dictionaries in one list:
#
# ```python
# student1 = {"name":"Olivia","age":18}
# student2 = {"name":"Regina","age":20}
# students = [student1, student2] # create list containing these dicts
# students[0]["name"] # list item 1, retrieve and print name Olivia
# Olivia
# ```
#
# Graphically, the structure I created there would look like this:<br/><img style="float: left;" src="listofdicts.png" width="200"><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>Alternatively, it would also be possible to just create one dictionary, and populate it with a list of values for each person, which would look like this:<br/>
# <img style="float: left;" src="dictoflists.png" width="200"><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>So in other words, now you have 1 dictionary with the fields name and age, and both these fields contain a list with corresponding values.
# #### Question 4
# Write the code that would create the above structure in Python. Afterwards, you should for instance be able to retrieve the name of the first student with:
# ```python
# students["name"][0]
# ```
# +
#answer question 4
# -
# #### Question 5
# How can you retrieve the age of the second person from this structure referenced by students which you just created? Specify the code to do this
#
#
#
# +
#answer question 5
# -
# #### Question 6
# Write the code to add the following three persons to your structure referenced by students:
#
#
# |Name|Age|
# |:- |:-:|
# |Karl|22 |
# |Priscilla|19|
# |Boris |20|
#
# Check whether the structure now contains 5 persons
#
# +
#answer question 6
# -
# #### Question 7
# Use indices to retrieve the age of each person and use this data to calculate the average age of all 5 persons and then print this average age. You should get 19.8 as the answer
# +
#answer 7
# -
# #### Question 8
# Python offers useful built-in functions that does a lot of the hard work for you. Two of these are:<br/>
#
# sum(<list>) – calculate the sum of all numbers in a list <br/>len(<list>) – give the number of elements in the list
#
# ```python
# l = [2,4,6] total = sum(l) # ‘total’ will get the value 12
# items = len(l) # ‘items’ will get the value 3
# avg = total/items # avg will result to 4
# ```
# Use these two functions to calculate the average age, instead of retrieving every age by using indexing like you did in question 7. If you can, do this without assigning the result of sum and len to a variable (e.g. use sum and len directly in the equation you use to calculate the average). Also print the average age you just calculated.
# +
#answer question 8
| assignments/week2/Assignment3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# coding=UTF-8
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader, Dataset
import torchvision.models as models
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
# %matplotlib inline
class ResNet(nn.Module):
    """ResNet-50 backbone with its final fully-connected layer replaced by a
    4-way classification head."""
    def __init__(self, model):
        super(ResNet, self).__init__()
        self.resnet_layer = nn.Sequential(*list(model.children())[:-1])# drop the last layer (the fc layer) of the pretrained resnet model
        self.Linear_layer = nn.Linear(2048, 4)# classification layer: 2048-dim pooled features -> 4 classes
    def forward(self, x):
        x = self.resnet_layer(x)
        x = x.view(x.size(0), -1)  # flatten pooled features to (batch, 2048)
        x = self.Linear_layer(x)
        return x
class FaceAPI(object):
    """Inference wrapper around the fine-tuned ResNet head-pose classifier."""
    def __init__(self, model_path):
        resnet = models.resnet50(pretrained=True)
        self.model = ResNet(resnet)# wrap the backbone, then load the previously trained weights
        self.model.load_state_dict(torch.load (model_path, map_location='cpu'))
        self.model.eval()# inference only, no training: eval() puts the network into evaluation mode,
        # otherwise layers such as dropout would stay active during prediction, which we do not want
        # (eval() is provided by nn.Module; calling it tells the model it is predicting, not training)
        self.label_dict = {0: 'left', 1: 'right', 2: 'up', 3: 'straight'}
        # the order in which methods are defined does not matter, since nothing is called at definition time
    def predict(self, image):# run one prediction on a BGR frame
        image = self._preprocess(image)
        output = self.model(image).argmax(dim =1).numpy()[0]
        return self.label_dict[output], np.transpose(image.numpy()[0],(1,2,0))# the second return value lets the caller verify the frame was converted to grayscale
    def _preprocess(self, image):# prepare a BGR frame for the network
        image = cv2.cvtColor(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)  # grayscale, replicated back to 3 channels
        image = cv2.resize(image, (224,224))
        image = torch.tensor(np.transpose(image, (2, 0, 1)),dtype = torch.float32).view(1, 3, 224, 224)
        return image
tmp = FaceAPI("D:\\workshop\\test1\\resnet50_face.pt")
# +
cap=cv2.VideoCapture(0)
while True:
    # read one frame from the camera
    sucess,img=cap.read()
    text = tmp.predict(img)[0]
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, text, (200,100), font, 2, (0,255,0), 3)
    cv2.imshow("img",img)
    # keep the display window refreshing
    k=cv2.waitKey(1)
    if k == 27:
        # exit the capture loop with the Esc key
        cv2.destroyAllWindows()
        break
    elif k==ord("s"):
        # press 's' to save the current frame, then exit
        cv2.imwrite("image2.jpg",img)
        cv2.destroyAllWindows()
        break
# release the camera
cap.release()
# -
| resnet_computer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ROFaYRSBFRhs"
# This file is now set for all 62 upstream gauges, end-to-end and available to run directly.
#
# I run a test with several epochs, and the results are in the folder as well.
# + [markdown] id="L2eEDiiEsQ3T"
# The first several lines make the codes connected to my personal Google's drive.
#
#
# + id="rt3oqBGQsFHx"
from google.colab import drive
drive.mount('/content/drive')
# + id="UI7fJfFEsJKq"
# %cd '/content/drive/My Drive/Benchmark'
# + id="HpfzP3SUseuS" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3da15645-283b-449a-fd71-5d2caed4977d"
# This example is developed using tensorflow 1.15.
# %tensorflow_version 1.x
# + id="rEDiMkviso6B"
import numpy as np
import pandas as pd
def sequence_data_genearter():
    """Build supervised-learning sequence datasets for all stations.

    For every station: read its discharge (Q) and precipitation (PCP) CSVs,
    combine them with the shared ET series, reframe the result into
    (hours_history + hours_forecast) windows, append the station's static
    watershed features, and split chronologically into train/valid/test.

    Relies on the module-level globals ``hours_history`` and
    ``hours_forecast`` for the window sizes.

    Returns:
        tuple of np.ndarray: (train, valid, test) feature matrices.

    Full set of 125 station IDs in IFIS, kept for reference:
    519,521,522,523,525,526,527,532,534,535,536,537,538,539,541,542,
    543,546,549,551,552,553,554,556,557,562,564,565,568,569,572,574,
    590,595,599,601,604,605,607,616,617,621,624,626,631,640,641,642,
    644,648,653,655,656,659,661,662,663,665,668,669,670,671,540,544,
    563,566,570,571,573,575,579,585,586,587,588,589,591,592,593,594,
    596,597,598,600,602,603,606,608,609,610,611,612,1688,613,614,615,
    618,619,620,622,625,627,628,629,630,632,634,635,636,637,638,639,
    643,645,646,649,654,657,658,660,664,666,667,673,1609
    """
    # IN THIS TEST, my model can work on the following 62 watersheds.
    station_list = [553,542,522,536,569,543,538,568,574,535,671,539,546,617,534,653,554,552,551,616,527,607,525,565,644,661,562,656,665,642,641,526,556,599,659,670,621,668,648,662,601,624,541,532,557,669,523,537,521,590,549,572,640,631,519,595,605,655,663,564,604,626]
    # convert series to supervised learning sequence data
    def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
        """Reframe a multivariate series into lagged input / forecast output columns."""
        n_vars = 1 if type(data) is list else data.shape[1]
        df = pd.DataFrame(data)
        cols, names = list(), list()
        # input sequence (t-n, ... t-1)
        for i in range(n_in, 0, -1):
            cols.append(df.shift(i))
            names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
        # forecast sequence (t, t+1, ... t+n)
        for i in range(0, n_out):
            cols.append(df.shift(-i))
            if i == 0:
                names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
            else:
                names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
        # put it all together
        agg = pd.concat(cols, axis=1)
        agg.columns = names
        # drop rows with NaN values
        if dropnan:
            agg.dropna(inplace=True)
        return agg
    stations_train = pd.DataFrame()
    stations_valid = pd.DataFrame()
    stations_test = pd.DataFrame()
    phy = pd.read_csv('./input/Features_Watershed.csv') # slope, travel time, area, and soil type data
    ET = pd.read_csv('./input/ET_Iowa.csv') # pretreated min-max scaled, hourly ET data
    for station_id in station_list:
        wq = pd.read_csv('./input/USGS/'+str(station_id)+'_wq.csv') # watershed discharge Q
        pcp = pd.read_csv('./input/PCP/'+str(station_id)+'_pcp_SMA6.csv') # hourly precipitation amount
        for datatype in ['train','valid','test']:
            if datatype == 'train':
                wq_station = wq[:35064] # the first 4 years as training
                pcp_station = pcp[:35064]
                ET_station = ET[:35064]
            elif datatype == 'valid':
                wq_station = wq[35064:52608] # the middle 2 years as validation
                pcp_station = pcp[35064:52608]
                ET_station = ET[35064:52608]
            elif datatype == 'test':
                wq_station = wq[52608:] # the last 1 year as test, the final evaluation
                pcp_station = pcp[52608:]
                ET_station = ET[52608:]
            dataset = pd.concat([wq_station, pcp_station, ET_station], axis=1) # combine the feature of discharge, rainfall, and ET.
            dataset = dataset.iloc[:,[3,4,1]] # feature shape = [?, 3], '?' is used to represent the instances as in TensorFlow
            dataset = dataset.fillna(-9999).values # fill NAs using negative values
            dataset = dataset.astype('float32') # convert to float
            # reframe the data as a supervised learning task
            reframed = series_to_supervised(dataset, hours_history, hours_forecast) # feature shape = [?, 576], (72+120)*3=576
            # spell out the axis keyword (positional axis in DataFrame.any is deprecated)
            reframed_nanlist = reframed[(reframed < -1).any(axis=1)].index.tolist() # find rows with any negative value (allowance of 1)
            reframed = reframed.drop(reframed_nanlist) # remove rows with negative value
            # combining physical data into reframed data
            phy_station = phy.loc[phy.ifc_id==station_id] # feature shape = [1, 16], the station name and 15 feature values
            # use np.repeat directly: the pd.np alias was deprecated and removed in pandas >= 2.0
            phy_station2 = pd.DataFrame(np.repeat(phy_station.values,reframed.shape[0],axis=0),columns=phy_station.columns) # feature shape = [?, 16]
            reframed = pd.concat([reframed.reset_index(drop=True), phy_station2.reset_index(drop=True)], axis=1, ignore_index=True) # feature shape = [?, 592]
            if datatype == 'train':
                stations_train = pd.concat([stations_train, reframed], axis=0)
            elif datatype == 'valid':
                stations_valid = pd.concat([stations_valid, reframed], axis=0)
            elif datatype == 'test':
                stations_test = pd.concat([stations_test, reframed], axis=0)
    return stations_train.values, stations_valid.values, stations_test.values
def min_max_normalization(dataset):
    """Min-max scale the precipitation and discharge columns of *dataset* in place.

    The scaling constants must come from the training+validation input. For
    this generalized 62-watershed model, the maximum hourly rainfall over
    that period across all watersheds is 899.9 and the maximum hourly
    discharge is 48775. A differently trained model needs different values
    here, and matching de-normalization constants in the post-processing.

    Uses the module-level globals ``hours_history`` and ``hours_forecast``.
    """
    pcp_max, pcp_min = 899.9092407226562, 0.0
    q_max, q_min = 48775.0, 0.0
    n_steps = hours_history + hours_forecast
    # Starting from column 0, every 3rd feature is PCP; from column 2, every 3rd is Q.
    pcp_cols = list(range(0, dataset.shape[1], 3))[:n_steps]
    q_cols = list(range(2, dataset.shape[1], 3))[:n_steps]
    dataset[:, pcp_cols] = (dataset[:, pcp_cols] - pcp_min) / (pcp_max - pcp_min)
    dataset[:, q_cols] = (dataset[:, q_cols] - q_min) / (q_max - q_min)
    return dataset
def X_y_split(train, valid, test):
    """Split flat feature matrices into model inputs X1/X2/X3, target y and station id.

    Uses the module-level globals ``hours_history`` and ``hours_forecast``.
    Only the training matrix is shuffled, and it is shuffled in place.
    The final 16 columns are [station id, 15 static watershed features].
    """
    # randomize the training data
    np.random.shuffle(train)
    # for the history 72 hours, we know PCP, ET, and Q, which is X1. shape = [?, 72, 3]
    # for the future 120 hours, we know PCP and ET, which is X2. shape = [?, 120, 2]
    # for each watershed, we know physical features, which is X3. shape = [?, 1, 15]
    # for the future 120 hours, we want to get Q, which is y. shape = [?, 120]
    # My method is generating two GRU layers, one for history and one for future.
    # Column layout repeats (PCP, ET, Q) per timestep, so stride-3 ranges pick one variable.
    PCP_history = list(range(0, train.shape[1], 3))[:hours_history]
    PCP_forecast = list(range(0, train.shape[1], 3))[hours_history:hours_history+hours_forecast]
    ET_history = list(range(1, train.shape[1], 3))[:hours_history]
    ET_forecast = list(range(1, train.shape[1], 3))[hours_history:hours_history+hours_forecast]
    Q_history = list(range(2, train.shape[1], 3))[:hours_history]
    Q_forecast = list(range(2, train.shape[1], 3))[hours_history:hours_history+hours_forecast]
    X1_list = PCP_history + ET_history + Q_history
    X1_list.sort()
    X2_list = PCP_forecast + ET_forecast
    X2_list.sort()
    # split the data into X1, X2, X3 and y. (-15: are the physical features, -16:-15 the station id)
    train_X1, train_X2, train_X3, train_y, train_id = train[:,X1_list], train[:,X2_list], train[:,-15:], train[:,Q_forecast], train[:,-16:-15]
    valid_X1, valid_X2, valid_X3, valid_y, valid_id = valid[:,X1_list], valid[:,X2_list], valid[:,-15:], valid[:,Q_forecast], valid[:,-16:-15]
    test_X1, test_X2, test_X3, test_y, test_id = test[:,X1_list], test[:,X2_list], test[:,-15:], test[:,Q_forecast], test[:,-16:-15]
    # reshape X1 and X2 into 3D [samples, timesteps, features]
    train_X1 = train_X1.reshape(train_X1.shape[0], hours_history, 3)
    valid_X1 = valid_X1.reshape(valid_X1.shape[0], hours_history, 3)
    test_X1 = test_X1.reshape(test_X1.shape[0], hours_history, 3)
    train_X2 = train_X2.reshape(train_X2.shape[0], hours_forecast, 2)
    valid_X2 = valid_X2.reshape(valid_X2.shape[0], hours_forecast, 2)
    test_X2 = test_X2.reshape(test_X2.shape[0], hours_forecast, 2)
    # expand X3 to 3D, shape from [?, 15] to [?, 1, 15].
    train_X3, valid_X3, test_X3 = np.expand_dims(train_X3, axis=1), np.expand_dims(valid_X3, axis=1), np.expand_dims(test_X3, axis=1)
    return [train_X1, train_X2, train_X3, train_y, train_id, valid_X1, valid_X2, valid_X3, valid_y, valid_id, test_X1, test_X2, test_X3, test_y, test_id]
# + id="ep2mLBGOtbBT" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="58d62595-5fda-453d-84ac-9d0c8582b46a"
# parameters: window sizes used throughout the data pipeline and the model
hours_forecast = 120 # 120 hours forecast is the current goal.
hours_history = 72 # 72 hours of history is enough for predicting the future.
train, valid, test = sequence_data_genearter()
train = min_max_normalization(train)
valid = min_max_normalization(valid)
test = min_max_normalization(test)
train_X1, train_X2, train_X3, train_y, train_id, valid_X1, valid_X2, valid_X3, valid_y, valid_id, test_X1, test_X2, test_X3, test_y, test_id = X_y_split(train, valid, test)
del train, valid, test  # free the large intermediate matrices
# + [markdown] id="8cWsIgxvUjAY"
# Design of Model
# + id="kQVssbP9tiVJ"
import pandas as pd
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Dropout, CuDNNGRU, Flatten, TimeDistributed, Lambda, concatenate
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
def NRM_generalized_basic():
    """Build the encoder-decoder GRU network (TF1 Keras functional API).

    Encoder: hours_history steps of (PCP, ET, Q); decoder: hours_forecast
    steps of (PCP, ET) concatenated with the repeated static watershed
    features. The final hidden state of each encoder GRU initializes the
    matching decoder GRU. Reads the module-level train_X* arrays only for
    their shapes.
    """
    # design network
    dim_dense=[128, 64, 64, 32, 32]  # widths of the TimeDistributed dense head
    drop=0.2  # dropout rate between the dense layers
    phy_input = Input(shape=(train_X3.shape[1],train_X3.shape[2]))
    # zeroed copy so the encoder sees a placeholder of the right shape instead of the physical data
    phy_input_zeros = Lambda(lambda x: x * 0)(phy_input)
    encoder_phy = Lambda(lambda x: K.repeat_elements(x, hours_history, axis=1))(phy_input_zeros) # simple encoder without physical data
    decoder_phy = Lambda(lambda x: K.repeat_elements(x, hours_forecast, axis=1))(phy_input) # physical data is necessary in decoder phase
    encoder_input = Input(shape=(train_X1.shape[1],train_X1.shape[2]))
    encoder_input_phy = concatenate([encoder_input,encoder_phy],axis=-1)
    # five stacked encoder GRUs, each returning its final hidden state as well
    encoder_rnn1 = CuDNNGRU(32, return_state=True, return_sequences=True)
    encoder_output1, encoder_hc1 = encoder_rnn1(encoder_input_phy)
    encoder_rnn2 = CuDNNGRU(32, return_state=True, return_sequences=True)
    encoder_output2, encoder_hc2 = encoder_rnn2(encoder_output1)
    encoder_rnn3 = CuDNNGRU(32, return_state=True, return_sequences=True)
    encoder_output3, encoder_hc3 = encoder_rnn3(encoder_output2)
    encoder_rnn4 = CuDNNGRU(32, return_state=True, return_sequences=True)
    encoder_output4, encoder_hc4 = encoder_rnn4(encoder_output3)
    encoder_rnn5 = CuDNNGRU(32, return_state=True)
    encoder_output5, encoder_hc5 = encoder_rnn5(encoder_output4)
    decoder_input = Input(shape=(train_X2.shape[1],train_X2.shape[2]))
    decoder_input_phy = concatenate([decoder_input,decoder_phy],axis=-1)
    # matching decoder stack; each layer is seeded with the corresponding encoder state below
    decoder_rnn1 = CuDNNGRU(32, return_sequences=True)
    decoder_rnn2 = CuDNNGRU(32, return_sequences=True)
    decoder_rnn3 = CuDNNGRU(32, return_sequences=True)
    decoder_rnn4 = CuDNNGRU(32, return_sequences=True)
    decoder_rnn5 = CuDNNGRU(32, return_sequences=True)
    x = decoder_rnn1(decoder_input_phy, initial_state=encoder_hc1)
    x = decoder_rnn2(x, initial_state=encoder_hc2)
    x = decoder_rnn3(x, initial_state=encoder_hc3)
    x = decoder_rnn4(x, initial_state=encoder_hc4)
    x = decoder_rnn5(x, initial_state=encoder_hc5)
    # per-timestep dense head with dropout, ending in one non-negative output per hour
    for dim in dim_dense:
        x = TimeDistributed(Dense(dim, activation='relu'))(x)
        x = TimeDistributed(Dropout(drop))(x)
    main_out = TimeDistributed(Dense(1, activation='relu'))(x)
    main_out = Flatten()(main_out)
    model = Model(inputs=[encoder_input, decoder_input, phy_input], outputs=main_out)
    model.summary()
    return model
# + [markdown] id="YYTZGoWcv4KB"
# Other settings of the model.
#
# + id="5INnZHszuesQ" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="20d5972c-e95c-4f31-d159-e4e8f31b3017"
model = NRM_generalized_basic()
testname = './NRM_generalized_basic'
# Training callbacks: LR decay on plateau, early stopping, best-weights checkpointing.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                              cooldown=200, min_lr=1e-8)
earlystoping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                             verbose=1, mode='auto')
checkpoint = ModelCheckpoint(testname+'.h5', monitor='val_loss', verbose=1,
                             save_best_only=True, save_weights_only=True,
                             mode='min')
# Optimizer: RMSprop with a small fixed initial learning rate.
RMSprop = keras.optimizers.RMSprop(lr=0.00003)
# Customized NSE-style loss function; plain MSE would work as a default method too.
def nseloss(y_true, y_pred):
    """Mean over forecast horizons of (sum of squared errors / observed variance)."""
    squared_error = K.sum((y_pred - y_true) ** 2, axis=0)
    observed_variance = K.sum((y_true - K.mean(y_true)) ** 2, axis=0)
    return K.mean(squared_error / observed_variance)
model.compile(optimizer=RMSprop, loss=nseloss)
# + id="7iZ0wbeHIEuG" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="80a62f63-a016-4296-e902-3ee1f5443d4d"
# Train for up to 100 epochs (batch size 64); early stopping and the checkpoint
# callback keep the best validation-loss weights on disk.
history = model.fit([train_X1, train_X2, train_X3], train_y,
                    epochs=100, batch_size=64,
                    validation_data=([valid_X1, valid_X2, valid_X3], valid_y),
                    callbacks=[reduce_lr, earlystoping, checkpoint], verbose=1)
# Save the per-epoch training/validation loss curve into a local CSV file.
loss_train = pd.DataFrame({'TrainLoss': history.history['loss']})
loss_valid = pd.DataFrame({'TestLoss': history.history['val_loss']})
loss_epoches = pd.concat([loss_train, loss_valid], axis=1)
loss_name = testname + '-loss.csv'
loss_epoches.to_csv(loss_name, index=True)
# + [markdown] id="a4eZJ4YWwkDK"
# Evaluation
#
# I am using four statistics now. NSE, KGE, bias, and r (np.corrcoef).
# The most popular one is NSE, a traditional one. The second popular metric is KGE, a new metric proposed in 2009.
#
# + id="4E7iCOEQwiYc"
def nse(y_true, y_pred):
    """Nash-Sutcliffe efficiency: 1 is a perfect match, lower is worse."""
    residual_ss = np.sum((y_pred - y_true) ** 2)
    variance_ss = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1 - residual_ss / variance_ss
def kge(y_true, y_pred):
    """Kling-Gupta efficiency (Gupta et al., 2009): 1 is a perfect match."""
    corr = np.corrcoef(y_true, y_pred)[1, 0]
    var_ratio = np.std(y_pred) / np.std(y_true)
    mean_ratio = np.mean(y_pred) / np.mean(y_true)
    euclid_dist = np.sqrt((corr - 1) ** 2 + (var_ratio - 1) ** 2 + (mean_ratio - 1) ** 2)
    return 1 - euclid_dist
def bias(y_true, y_pred):
    """Relative volume bias: 0 when total predicted equals total observed."""
    totals_ratio = np.sum(y_pred) / np.sum(y_true)
    return totals_ratio - 1
# Reload the best (checkpointed) weights before evaluating on the test set.
model.load_weights('./NRM_generalized_basic.h5')
Q_predict = model.predict([test_X1, test_X2, test_X3])
# Post-processing: the outputs are 0-1 min-max scaled, so multiply by the
# training-period Qmax (48775.0) to recover real streamflow rates in cfs.
# The unit could be converted from cfs to cms here if needed.
# For details of the scaling constants, see function min_max_normalization().
Q_predict_cfs = Q_predict*48775.0
test_y_cfs = test_y*48775.0
# + id="JEq1R6rwy2s7"
# Per-station, per-lead-time evaluation: compute NSE, r, bias and KGE for each
# of the hours_forecast horizons and write one CSV per station.
station_list = [553,542,522,536,569,543,538,568,574,535,671,539,546,617,534,653,554,552,551,616,527,607,525,565,644,661,562,656,665,642,641,526,556,599,659,670,621,668,648,662,601,624,541,532,557,669,523,537,521,590,549,572,640,631,519,595,605,655,663,564,604,626]
for station_id in station_list:
    # locate the index of the station's rows in the (unshuffled) test set
    station_idx = np.argwhere(test_id.flatten() == station_id).flatten()
    Q_predict_station = Q_predict_cfs[station_idx]
    Q_true_station = test_y_cfs[station_idx]
    # Save files for later analysis, optional
    # np.savetxt(str(station_id)+'_time_series_true.csv',Q_true_station, delimiter=',')
    # np.savetxt(str(station_id)+'_time_series_pred.csv',Q_predict_station, delimiter=',')
    NSE_test_eachHour = []
    r_test_eachHour = []
    bias_test_eachHour = []
    KGE_test_eachHour = []
    # one metric value per forecast horizon (column x = lead time x+1 hours)
    for x in range(hours_forecast):
        valuePred_test=Q_predict_station[:,x]
        valueTrue_test=Q_true_station[:,x]
        NSE_test_eachHour.append(nse(valueTrue_test,valuePred_test))
        r_test_eachHour.append(np.corrcoef(valueTrue_test,valuePred_test)[0][1])
        bias_test_eachHour.append(bias(valueTrue_test,valuePred_test))
        KGE_test_eachHour.append(kge(valueTrue_test,valuePred_test))
    NSE_test_eachHour=pd.DataFrame(NSE_test_eachHour)
    NSE_test_eachHour.columns = ['NSE']
    r_test_eachHour=pd.DataFrame(r_test_eachHour)
    r_test_eachHour.columns = ['r']
    bias_test_eachHour=pd.DataFrame(bias_test_eachHour)
    bias_test_eachHour.columns = ['bias']
    KGE_test_eachHour=pd.DataFrame(KGE_test_eachHour)
    KGE_test_eachHour.columns = ['KGE']
    evaluation_result = pd.concat([NSE_test_eachHour, KGE_test_eachHour, r_test_eachHour, bias_test_eachHour], axis=1)
    evaluation_name = testname+'-'+str(station_id)+'-evaluation.csv'
    evaluation_result.to_csv(evaluation_name, index = True)
# + [markdown] id="HiiTH1onfuiX"
# Post evaluation...
#
# These are basic evaluation for each watershed and each hour.
# We can then, calculate the median of these watersheds among 120 prediction hours. (1*120 values, each value is the median of the 62 watersheds)
#
# + id="hrKdSdgYftSQ"
# Aggregate the per-station evaluation CSVs: for each of the 120 forecast
# hours, take the median of each metric across all watersheds.
import numpy as np
import pandas as pd
from pandas import DataFrame, concat
eval_station_list = station_list
NSE_median = []
KGE_median = []
r_median = []
bias_median = []
for i in range(120):
    NSEs = []
    KGEs = []
    rs = []
    biases = []
    for station_id in eval_station_list:
        # row i of each station's CSV holds the metrics for lead time i+1 hours
        a = pd.read_csv('./NRM_generalized_basic-'+str(station_id)+'-evaluation.csv')
        result_nse = a['NSE'][i]
        result_kge = a['KGE'][i]
        result_r = a['r'][i]
        result_bias = a['bias'][i]
        NSEs.append(result_nse)
        KGEs.append(result_kge)
        rs.append(result_r)
        biases.append(result_bias)
    NSE_median.append(np.median(NSEs))
    KGE_median.append(np.median(KGEs))
    r_median.append(np.median(rs))
    bias_median.append(np.median(biases))
print(NSE_median)
| model3_NRM-G.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dev
# language: python
# name: dev
# ---
import numpy as np
from scipy.special import expit, logsumexp
from scipy.optimize import minimize
from sklearn.datasets import load_iris, load_breast_cancer
from sklearn.linear_model import LogisticRegression as skLogisticRegression
# ### Implementation 1
# - convert multiclass classification problem to binary classification problem in a one-vs-all fashion
# - based on gradient decent
# - similar to sklearn multi_class='ovr' & solver='lbfgs'
# - reference: http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
class LogisticRegression():
    """One-vs-rest logistic regression trained with L-BFGS.

    Mimics sklearn's LogisticRegression(multi_class='ovr', solver='lbfgs').
    ``C`` is the inverse of the L2 regularization strength; the intercept is
    not penalized.
    """
    def __init__(self, C=1.0):
        self.C = C
    def _encode(self, y):
        """Encode labels as a {-1, +1} matrix, one column per one-vs-rest problem."""
        classes = np.unique(y)
        y_train = np.full((y.shape[0], len(classes)), -1)
        for i, c in enumerate(classes):
            y_train[y == c, i] = 1
        if len(classes) == 2:
            # binary case: a single column (the positive class) is enough
            y_train = y_train[:, 1].reshape(-1, 1)
        return classes, y_train
    @staticmethod
    def _cost_grad(w, X, y, alpha):
        """Regularized negative log-likelihood and gradient for one binary problem.

        w[:-1] holds the coefficients, w[-1] the (unpenalized) intercept;
        y is in {-1, +1}.
        """
        yz = y * (np.dot(X, w[:-1]) + w[-1])
        # -log(sigmoid(yz)) == logaddexp(0, -yz): numerically stable and fully
        # vectorized (replaces the per-element np.vectorize loop).
        cost = np.sum(np.logaddexp(0, -yz)) + 0.5 * alpha * np.dot(w[:-1], w[:-1])
        grad = np.zeros(len(w))
        t = (expit(yz) - 1) * y
        grad[:-1] = np.dot(X.T, t) + alpha * w[:-1]
        grad[-1] = np.sum(t)
        return cost, grad
    def _solve_lbfgs(self, X, y):
        """Run one L-BFGS fit per column of y; returns (coefficients, intercepts)."""
        result = np.zeros((y.shape[1], X.shape[1] + 1))
        for i in range(y.shape[1]):
            cur_y = y[:, i]
            w0 = np.zeros(X.shape[1] + 1)
            res = minimize(fun=self._cost_grad, jac=True, x0=w0,
                           args=(X, cur_y, 1 / self.C), method='L-BFGS-B')
            result[i] = res.x
        return result[:, :-1], result[:, -1]
    def fit(self, X, y):
        """Fit the model; sets classes_, coef_ and intercept_."""
        self.classes_, y_train = self._encode(y)
        self.coef_, self.intercept_ = self._solve_lbfgs(X, y_train)
        return self
    def decision_function(self, X):
        """Raw scores; 1-D for binary problems, (n_samples, n_classes) otherwise."""
        scores = np.dot(X, self.coef_.T) + self.intercept_
        if scores.shape[1] == 1:
            return scores.ravel()
        else:
            return scores
    def predict(self, X):
        """Predict class labels from the (per-class) scores."""
        scores = self.decision_function(X)
        if len(scores.shape) == 1:
            indices = (scores > 0).astype(int)
        else:
            indices = np.argmax(scores, axis=1)
        return self.classes_[indices]
    def predict_proba(self, X):
        """Sigmoid-transformed scores; multiclass rows are renormalized to sum to 1."""
        scores = self.decision_function(X)
        prob = expit(scores)
        if len(scores.shape) == 1:
            prob = np.vstack((1 - prob, prob)).T
        else:
            prob /= np.sum(prob, axis=1)[:, np.newaxis]
        return prob
# binary classification
# Shared checker: compare our OVR implementation against sklearn on the full public API.
def _check_ovr(clf1, clf2, X, n_rows):
    assert clf1.coef_.shape == (n_rows, X.shape[1])
    assert np.allclose(clf1.coef_, clf2.coef_)
    assert np.allclose(clf1.intercept_, clf2.intercept_)
    assert np.allclose(clf1.decision_function(X), clf2.decision_function(X))
    assert np.allclose(clf1.predict_proba(X), clf2.predict_proba(X))
    assert np.array_equal(clf1.predict(X), clf2.predict(X))

for C in [0.1, 1, 10]:
    X, y = load_breast_cancer(return_X_y=True)
    clf1 = LogisticRegression(C=C).fit(X, y)
    # tol/max_iter keep sklearn consistent with scipy's lbfgs defaults
    clf2 = skLogisticRegression(C=C, multi_class="ovr", solver="lbfgs",
                                tol=1e-5, max_iter=15000).fit(X, y)
    _check_ovr(clf1, clf2, X, 1)
# multiclass classification
for C in [0.1, 1, 10]:
    X, y = load_iris(return_X_y=True)
    clf1 = LogisticRegression(C=C).fit(X, y)
    clf2 = skLogisticRegression(C=C, multi_class="ovr", solver="lbfgs",
                                tol=1e-5, max_iter=15000).fit(X, y)
    _check_ovr(clf1, clf2, X, len(np.unique(y)))
# penalty = 'none'
X, y = load_iris(return_X_y=True)
clf1 = LogisticRegression(C=np.inf).fit(X, y)
clf2 = skLogisticRegression(penalty='none', multi_class="ovr", solver="lbfgs",
                            tol=1e-5, max_iter=15000).fit(X, y)
_check_ovr(clf1, clf2, X, len(np.unique(y)))
# ### Implementation 2
# - support multiclass classification problem directly
# - based on gradient decent
# - similar to sklearn multi_class='multinomial' & solver='lbfgs'
class LogisticRegression():
    """Softmax (multinomial) logistic regression trained with L-BFGS.

    Mirrors sklearn's LogisticRegression(multi_class='multinomial',
    solver='lbfgs'). ``C`` is the inverse L2 regularization strength; the
    intercepts are not penalized.
    """
    def __init__(self, C=1.0):
        self.C = C
    def _encode(self, y):
        """One-hot encode the labels: one column per class."""
        classes = np.unique(y)
        onehot = np.zeros((y.shape[0], len(classes)))
        for col, label in enumerate(classes):
            onehot[y == label, col] = 1
        return classes, onehot
    @staticmethod
    def _cost_grad(w, X, y, alpha):
        """Penalized cross-entropy and its flattened gradient for L-BFGS."""
        w = w.reshape(y.shape[1], -1)
        # log-softmax of the linear scores; the last entry of each row of w
        # is that class's intercept
        log_prob = np.dot(X, w[:, :-1].T) + w[:, -1]
        log_prob -= logsumexp(log_prob, axis=1)[:, np.newaxis]
        coef_flat = w[:, :-1].ravel()
        cost = -np.sum(y * log_prob) + 0.5 * alpha * np.dot(coef_flat, coef_flat)
        grad = np.zeros_like(w)
        residual = np.exp(log_prob) - y
        grad[:, :-1] = np.dot(residual.T, X) + alpha * w[:, :-1]
        grad[:, -1] = np.sum(residual, axis=0)
        return cost, grad.ravel()
    def _solve_lbfgs(self, X, y):
        """Single joint L-BFGS fit over all classes; returns (coefficients, intercepts)."""
        start = np.zeros(y.shape[1] * (X.shape[1] + 1))
        solution = minimize(fun=self._cost_grad, jac=True, x0=start,
                            args=(X, y, 1 / self.C), method='L-BFGS-B')
        weights = solution.x.reshape(y.shape[1], -1)
        if y.shape[1] == 2:
            # binary problems are reported with a single coefficient row
            weights = weights[1][np.newaxis, :]
        return weights[:, :-1], weights[:, -1]
    def fit(self, X, y):
        """Fit the model; sets classes_, coef_ and intercept_."""
        self.classes_, y_train = self._encode(y)
        self.coef_, self.intercept_ = self._solve_lbfgs(X, y_train)
        return self
    def decision_function(self, X):
        """Raw scores; 1-D for binary problems, (n_samples, n_classes) otherwise."""
        scores = np.dot(X, self.coef_.T) + self.intercept_
        return scores.ravel() if scores.shape[1] == 1 else scores
    def predict(self, X):
        """Predict class labels from the highest score."""
        scores = self.decision_function(X)
        if scores.ndim == 1:
            picked = (scores > 0).astype(int)
        else:
            picked = np.argmax(scores, axis=1)
        return self.classes_[picked]
    def predict_proba(self, X):
        """Softmax class probabilities (max-shifted for numerical stability)."""
        scores = self.decision_function(X)
        if scores.ndim == 1:
            scores = np.c_[-scores, scores]
        shifted = scores - np.max(scores, axis=1)[:, np.newaxis]
        prob = np.exp(shifted)
        return prob / np.sum(prob, axis=1)[:, np.newaxis]
# binary classification
# Shared checker: compare the multinomial implementation against sklearn on the full public API.
def _check_multinomial(clf1, clf2, X, n_rows):
    assert clf1.coef_.shape == (n_rows, X.shape[1])
    assert np.allclose(clf1.coef_, clf2.coef_)
    assert np.allclose(clf1.intercept_, clf2.intercept_)
    assert np.allclose(clf1.decision_function(X), clf2.decision_function(X))
    assert np.allclose(clf1.predict_proba(X), clf2.predict_proba(X))
    assert np.array_equal(clf1.predict(X), clf2.predict(X))

for C in [0.1, 1, 10]:
    X, y = load_breast_cancer(return_X_y=True)
    clf1 = LogisticRegression(C=C).fit(X, y)
    # tol/max_iter keep sklearn consistent with scipy's lbfgs defaults
    clf2 = skLogisticRegression(C=C, multi_class="multinomial", solver="lbfgs",
                                tol=1e-5, max_iter=15000).fit(X, y)
    _check_multinomial(clf1, clf2, X, 1)
# multiclass classification
for C in [0.1, 1, 10]:
    X, y = load_iris(return_X_y=True)
    clf1 = LogisticRegression(C=C).fit(X, y)
    clf2 = skLogisticRegression(C=C, multi_class="multinomial", solver="lbfgs",
                                tol=1e-5, max_iter=15000).fit(X, y)
    _check_multinomial(clf1, clf2, X, len(np.unique(y)))
# penalty = 'none'
X, y = load_iris(return_X_y=True)
clf1 = LogisticRegression(C=np.inf).fit(X, y)
clf2 = skLogisticRegression(penalty='none', multi_class="multinomial", solver="lbfgs",
                            tol=1e-5, max_iter=15000).fit(X, y)
_check_multinomial(clf1, clf2, X, len(np.unique(y)))
| linear_model/LogisticRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
# + pycharm={"name": "#%%\n"}
# Pitch locations (row, col) in the tracker's pixel frame, plus the
# umpire call for each pitch; calls are mapped to marker colours below.
cord_list = [(51, 88), (150, 37), (154, 74)]
result_list = ['strike', 'ball', 'ball']
color_dict = {'strike': 'y', 'ball': 'g'}
result_color_list = [color_dict[result] for result in result_list]
# Flip the vertical axis (image rows grow downward) and split pairs
# into plot coordinates.
# NOTE(review): cord[0] is treated as vertical and cord[1] as
# horizontal — confirm against the data source.
y_list = [175-cord[0] for cord in cord_list]
x_list = [cord[1] for cord in cord_list]
# Plot window bounds (pixel units) with margin around the zone
x_left = -13
x_right = 147
y_top = -13
y_bottom = 187
# + pycharm={"name": "#%%\n"}
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(x_left, x_right), ylim=(y_top, y_bottom))
ax.set_aspect(1.276)
x_len = (abs(x_left)+abs(x_right))
y_len = (abs(y_top)+abs(y_bottom))
# Draw a 5x5 grid; lines 1 and 4 bound the central 3x3 strike zone and
# are emphasised with thicker grey segments.
for i in range(1, 5):
    if i == 1 or i == 4:
        ax.axvline(x=i/5*x_len+x_left, ymin=1/5, ymax=4/5, c='0.6', lw=3, zorder=9)
        ax.axhline(y=i/5*y_len+y_top, xmin=1/5, xmax=4/5, c='0.6', lw=3, zorder=9)
    ax.axvline(x=i/5*x_len+x_left, c='0.8', lw=1)
    ax.axhline(y=i/5*y_len+y_top, c='0.8', lw=1)
ax.scatter(x_list, y_list, s=100*2, zorder=10, c=result_color_list)
# + pycharm={"name": "#%%\n"}
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Loading datasets on Jean-Zay
#
# This notebook illustrates how to access our various datasets from Jean-Zay
#
#
# +
import tensorflow_datasets as tfds
# %pylab inline
# Important !!!! path to shared tensorflow dataset
# All examples below read from this shared, pre-built TFDS directory,
# so the datasets are not rebuilt per user.
data_dir='/gpfsscratch/rech/qrc/commun/tensorflow_datasets'
# -
# ## SFH data
# +
# Importing the module registers the dataset builder with TFDS.
from sfh.datasets.sfh import sfh
dset = tfds.load('sfh', split='train', data_dir=data_dir)
# -
# plot/figure/subplot/imshow come from the `%pylab inline` star import.
for example in dset.take(1):
    print(example.keys())
    plot(example['time'], example['Mstar'])
# #### Interpolated version of the dataset
#
#
# +
from sfh.datasets.sfh_interp import sfh_interp
dset = tfds.load('sfh_interp', split='train', data_dir=data_dir)
# -
for example in dset.take(1):
    print(example.keys())
    plot(example['time'], example['Mstar'])
# ## Kinematic data
# +
from sfh.datasets.mergers import kinetic
dset = tfds.load('mergers_kinetic', split='train', data_dir=data_dir)
# -
# Each kinematic example carries three image planes, shown side by side.
for example in dset.take(1):
    print(example.keys())
    figure(figsize=[15, 5])
    subplot(131)
    imshow(example['image'][0], cmap='gray_r')
    subplot(132)
    imshow(example['image'][1], cmap='gray_r')
    subplot(133)
    imshow(example['image'][2], cmap='gray_r')
# ## TNG images
# +
from sfh.datasets.mergers import tng100_images
dset = tfds.load('tng100_images', split='train', data_dir=data_dir)
# -
# Band index 3 of the image stack — presumably channel-last; confirm.
for example in dset.take(1):
    print(example.keys())
    imshow(example['image'][:,:,3],cmap='gray_r')
# ## Noiseless TNG images
# +
from sfh.datasets.mergers import tng100_images_noiseless
dset = tfds.load('tng100_images_noiseless', split='train', data_dir=data_dir)
# -
for example in dset.take(1):
    print(example.keys())
    imshow(example['image'][:,:,3],cmap='gray_r')
# ## SED and lookback time data
# +
from sfh.datasets.sfhsed import sfhsed
dset = tfds.load('sfhsed', split='train', data_dir=data_dir)
# -
for example in dset.take(1):
    print(example.keys())
    plot(example['time'], example['mass'])
| notebooks/data_access_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.8 64-bit (''env'': venv)'
# name: python36864bitenvvenvccf248a9dfeb4d9f8dc856794b3af623
# ---
from covid_simulation import *
# from scipy.stats import *
# import numpy as np
import matplotlib.pyplot as plt
# Share of infections per age group; `np`, `stats` and `infection_by_age`
# come from the covid_simulation star import.
# NOTE: 'precentage' [sic] matches the key spelling used in covid_simulation.
p=[x['precentage'] for k, x in infection_by_age.items()]
print(p)
# Normalise the shares into a proper probability vector.
p /= np.sum(p)
p
# +
# normal dist
# Build a mixture-of-normals model of the infected-population age
# distribution: one normal component per reported age group (mean at the
# group's centre, sigma = half the group's width), weighted by that
# group's share of infections `p`.
age_stats=[(np.mean(x['age_group']), np.diff(x['age_group'])[0]/2) for k, x in infection_by_age.items()]
age_distribution_model=MixtureModel( submodels=[stats.norm(x[0], x[1]) for x in age_stats], p=p)
x_axis = np.arange(0, 100, 1)
mixture_pdf = age_distribution_model.pdf(x_axis)
plt.plot(x_axis,mixture_pdf)
# BUGFIX: the file is written in EPS format (format='eps'), so the
# filename extension must be ".eps" — it was misspelled ".esp".
plt.savefig('population_dist.eps', format='eps')
plt.show()
# -
# Per-component weighted densities, overlaid.
plt.plot(np.array([p[k] * stats.norm.pdf(x_axis, d[0], d[1]) for k,d in enumerate(age_stats)]).T)
plt.savefig('population_dist_split.eps', format='eps')
plt.show()
# Matrix of weighted component densities: one row per age group.
dd = np.array([p[k] * stats.norm.pdf(x_axis, d[0], d[1]) for k,d in enumerate(age_stats)])
# +
# Renormalised combined density of the first three age components.
plt.plot(sum(dd[:3])/sum(sum(dd[:3])))
plt.show()
# -
# Total probability mass carried by the first four components.
sum(sum(dd[:4]))
| src/population_dist_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # test the version without pandas
import sys
# Location of the local grevia package (not installed system-wide).
dossier = '/home/benjamin/Documents/eviacybernetics/Projets/Grevia'
sys.path.append(dossier)
import grevia
import importlib
# Reload so in-progress edits to the package are picked up by the notebook.
importlib.reload(grevia.graph_structure)
importlib.reload(grevia)
import networkx as nx
G = nx.read_gpickle('/media/benjamin/Largo/testspdfs/pickle/graph.pkl')
G.size()
# Sort all edges by descending weight.
edge_info = G.edges(data=True)
edge_sorted = sorted(edge_info, key=lambda edge: edge[2]['weight'], reverse=True)
edge_sorted[:None]  # [:None] is a full slice — displays the whole sorted list
CSV_FILE = '/media/benjamin/Largo/testspdfs/csv/table_classif.csv'
import csv
# Build a column-oriented dict: {column name: list of cell values}.
cluster_dic ={}
print('Loading: ',CSV_FILE)
with open(CSV_FILE, 'r') as csvfile:
    clusters_table = csv.DictReader(csvfile, delimiter=',')
    for row in clusters_table:
        for key in row.keys():
            if key in cluster_dic.keys():
                cluster_dic[key].append(row[key])
            else:
                cluster_dic[key]=[row[key]]
# Drop the unnamed index column produced by the CSV export.
# NOTE(review): raises KeyError if the file has no unnamed column — confirm.
del cluster_dic['']
cluster_dic
| version_without_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + gradient={"editing": false, "execution_count": 5, "id": "ba4196ce", "kernelId": ""}
# !pip install -Uqq fastbook
# + gradient={"editing": false, "execution_count": 29, "id": "f74424c5", "kernelId": ""}
# !pip install voila
# !jupyter serverextension enable --sys-prefix voila
# + gradient={"editing": false, "execution_count": 6, "id": "0d391624", "kernelId": ""}
import fastai
from fastbook import *
from fastai.vision.widgets import *
# + gradient={"editing": false, "execution_count": 3, "id": "f7370bcd", "kernelId": ""}
# (duplicate import kept from the original notebook cell)
import fastai
# + gradient={"editing": false, "execution_count": 7, "id": "5c2f78a3", "kernelId": ""}
# Load the trained fastai learner exported next to this notebook.
predictor = load_learner('./export.pkl')
# + gradient={"editing": false, "execution_count": 24, "id": "0ae3b60a", "kernelId": ""}
# UI widgets: file upload, an output area for the thumbnail, a label for
# the prediction text, and the classify button.
upload_btn = widgets.FileUpload()
output = widgets.Output()
label = widgets.Label()
predict_btn = widgets.Button(description='Classify')
# + gradient={"editing": false, "execution_count": 25, "id": "3a6c700d", "kernelId": ""}
def classify(change):
    """Button callback: classify the most recently uploaded image."""
    # data[-1] holds the raw bytes of the last uploaded file
    img = PILImage.create(upload_btn.data[-1])
    output.clear_output()
    with output:
        display(img.to_thumb(128,128))
    pred, pred_idx, prob = predictor.predict(img)
    label.value = f"Prediction = {pred}, Probability = {prob[pred_idx]}"
# + gradient={"editing": false, "execution_count": 26, "id": "5266a069", "kernelId": ""}
predict_btn.on_click(classify)
# + gradient={"editing": false, "execution_count": 27, "id": "24e65ac2", "kernelId": ""}
VBox([widgets.Label('Upload the picture of a bear'), upload_btn, predict_btn, output, label])
# + gradient={"id": "6eff63a6", "kernelId": ""}
| bear_classifier_app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 什么是集成学习
import numpy as np
import matplotlib.pyplot as plt
# +
from sklearn import datasets
# Two-moons toy set: 500 points with substantial class overlap (noise=0.3).
X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=42)
# -
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# +
# Baseline 1: logistic regression
from sklearn.linear_model import LogisticRegression
log_clf = LogisticRegression()
log_clf.fit(X_train, y_train)
log_clf.score(X_test, y_test)
# +
# Baseline 2: support-vector classifier
from sklearn.svm import SVC
svm_clf = SVC()
svm_clf.fit(X_train, y_train)
svm_clf.score(X_test, y_test)
# +
# Baseline 3: decision tree
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier(random_state=666)
dt_clf.fit(X_train, y_train)
dt_clf.score(X_test, y_test)
# -
# Hand-rolled hard voting: predict 1 when at least 2 of the 3 models agree.
y_predict1 = log_clf.predict(X_test)
y_predict2 = svm_clf.predict(X_test)
y_predict3 = dt_clf.predict(X_test)
y_predict = np.array((y_predict1 + y_predict2 + y_predict3) >= 2, dtype='int')
y_predict[:10]
# +
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_predict)
# -
# ### Using the Voting Classifier
# +
# Same majority vote, expressed with scikit-learn's VotingClassifier.
from sklearn.ensemble import VotingClassifier
voting_clf = VotingClassifier(estimators=[
    ('log_clf', LogisticRegression()),
    ('svm_clf', SVC()),
    ('dt_clf', DecisionTreeClassifier(random_state=666))],
    voting='hard')
# -
voting_clf.fit(X_train, y_train)
voting_clf.score(X_test, y_test)
| 10EnsembleLearningAndRandomForest/01What-is-Ensemble-Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Desktop 정리
# ### 마케팅 분석 Tool
#
# + 시장분석(SWOT / PEST)
#
#
# + 3C Analysis (Company / Competitor / Customer)
#
#
# + STP (Segmentation -> Targeting -> Positioning)
#
# + 군집분석 : 군집들간의 거리, 군집에 쓰인 변수들의 유의수준 확인 --> 군집들간의 통계값 비교로 군집 특성 확인
#
# + MDS : positioning map을 구성. stress 값과 RSQ 값으로 부터 분석의 정확성을 검증 가능
#
# +
#
# + Conjoint analysis
#
# - 소비자들이 제품을 구매할 때, 어떤 속성을 중요하게 여기는 지 확인 가능
#
# -
#
# + Strategy suggestion --> 기대효과
#
# * Marketing MIX : 4P (Promotion / Price / Place / Product)
#
# * New Targeting / Positioning
#
# * Promotion - ATL / BTL
#
#
# ### 마케팅 조사방법
#
# + 사전조사 : 본조사를 하기전에 소수의 표본을 대상으로 예비조사 시행 --> 설문지 문항을 구성하기 위해(대인면접 / 관찰조사)
#
# + 본조사 : 설문지조사(온라인 / 오프라인) --> 통계분석
#
#
# ## CRM
#
# + 보유 고객 관리 강화
#
# + 고객 이탈 방지 전략 : 이탈 고객 특성 분석을 통해
#
# + 휴면 고객 활성화
#
# + 교차 판매 및 Up-selling --> 기존 고객의 우량화
#
# + 고객 세분화를 통한 고객 차별화 전략
#
# +
#
# +
#
# +
#
# ## Internet Marketing
#
# + Internet Marketing requires
#
# + Ability to identify end-users
# + Ability to differentiate customers based on their value and their needs
# + Ability to interact with your customers
# + Ability to customize your products and services based on knowledge about your customers
#
# + Under what conditions Internet Marketing would have a greater impact…
#
# + Product’s target segment online & customers have to physically inspect the product but the quality
# of product is relatively homogenous.(Books, music/videos, SW)
#
# + Product’s target segment online & products require significant purchase consideration.(Automobile, real estate..)
#
# + Product’s target segment online & products are highly branded impulse goods(Fast food, tobacco, beer)
#
#
#
#
#
#
#
# ### About Marketing
#
# * 마케팅의 목적
#
# + 소비자 설득 : 설득의 최종 목표는 소비자의 생각을 바꾸는 것을 넘어 구체적인 행동까지 이끌어 내는 것.
#
# + 인지적 지도 : 소비자들의 머리 속에 그려져 있는 가상의 인지적 지도 (Cognitive Map)에서 내 제품의 영역을 얼마나 많이 차지하느냐
# 하는 것. 일종의 땅따먹기.
#
# + **소비자 만족** : 소비자의 불만을 해소하면서 편리하고 즐겁게 상품을 구매하도록 하는 방법.
# 마케팅이 성공하려면 시장(수요)조사, 상품화 계획, 선전 및 홍보, 판매촉진 활동이 잘 이루어져야 해.
#
#
# * 피곤해진 마케팅
#
# + 피곤 : 소비자니즈를 끝없이 자극하는 수많은 광고와 판촉전략들, 지나친 생산과 그로인해 어지러운 가격 차별화 전략 및 유통체계.
# 경쟁적이고 과도한 마케팅은 소비자들에게 불필요한 욕구를 유발할 뿐만 아니라 소비자들에게 잡음과 공해, 스트레스 요인으로 작용하기도
# 한다. IT기술의 발전으로 소비자에 대한 정보획득과 관리가 수월해지면서 기업은 소비자 개개인을 대상으로 맹공격을 펼치고 있다.
#
# 과도한 마케팅 비용은 상품 가격에 반영되어 결국 상품 가격 인상을 가져온다. 기업의 비효율적이고 적절하지 못한, 심지어 소비자를
# 불쾌하게 하고 피곤하게 하는 마케팅 비용까지도 소비자의 주머니에서 그 비용이 충당된다.
#
#
# ## Business
#
# + Five Forces Model : 기존경쟁 / 공급자 / 수요자 / 잠재적 진입자 / 대체재
# 산업의 수익성을 설명하기 위한 모형
#
# +
| Marketing/Marketing_khb(180618).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to simulate
#
# The unique feature of structural models compared to other econometric models is the ability to provide counterfactuals. There exist three different ways to simulate counterfactuals with ``respy``. Ordered from no data to a panel data on individuals, there is:
#
# 1. *n-step-ahead simulation with sampling*: The first observation of an individual is sampled from the distribution implied by the initial conditions, i.e., the distribution of observed variables or initial experiences, etc. in the first period. Then, the individuals are guided for $n$ periods by the decision rules from the solution of the model.
#
# 2. *n-step-ahead simulation with data*: Take the first observation of each individual from the data and do as in 1..
#
# 3. *one-step-ahead simulation*: Take the complete data and find for each observation the corresponding outcomes, e.g, choices and wages, using the decision rules from the model solution.
#
# You can find more information on initial conditions and how to express them with ``respy`` in [this tutorial](tutorial-initial-conditions.ipynb).
#
# In this notebook, you will learn how to use all of the three methods and what their purpose is. We take the basic model from Keane and Wolpin (1997) and their original data.
#
# We further restrict the model to 12 periods to reduce the complexity and runtime. Thus, the following simulations will differ from the real data a lot because the parameters belong to a model with fifty periods which distorts choice patterns for shorter time frames. We start by importing the model and data.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import respy as rp
params, options, df = rp.get_example_model("kw_97_basic")
# NOTE(review): the introduction above says "12 periods" while the code
# sets 11 — confirm the intended horizon.
options["n_periods"] = 11
# -
# ## 1. n-step-ahead simulation with sampling
#
# The first exercise is an *n-step-ahead simulation with sampling*. This method and the following *n-step-ahead simulation with data* are the cornerstone for providing predictions of behavior in counterfactual environments. Here, the first observation of each individual is sampled from the distribution implied by the initial conditions of the model. The initial conditions determine, for example, the distribution of observed variables, experiences, and previous choices.
simulate = rp.get_simulate_func(params, options, "n_step_ahead_with_sampling")
df_1 = simulate(params)
# Stacked bar chart of choice shares per period.
df_1.groupby("Period").Choice.value_counts().unstack().plot.bar(
    stacked=True, rot=0
)
# ## 2. n-step-ahead simulation with data
#
# For the second variant of the n-step-ahead simulation, you do not need to specify, e.g., the distribution of initial experiences. Instead, the first observations in the data are taken as the starting points for the simulation. Thus, restrict the data to first period and pass it to the constructor for the simulation function.
#
# This routine is especially useful if the policy simulation results should be very similar to sample or the distribution of characteristics in the first period is complex.
simulate = rp.get_simulate_func(params, options, "n_step_ahead_with_data", df)
df_2 = simulate(params)
df_2.groupby("Period").Choice.value_counts().unstack().plot.bar(
    stacked=True, rot=0
)
# ## 3. one-step-ahead simulation
#
# The one-step-ahead simulation takes the full dataset and simulates the outcomes for each observation. Thus, the original sample size is preserved. The results are used to measure the within-sample fit of the model.
simulate = rp.get_simulate_func(params, options, "one_step_ahead", df)
df_3 = simulate(params)
# +
# Side-by-side comparison of observed vs simulated choice shares, with a
# single shared legend below both panels.
fig, axs = plt.subplots(1, 2, figsize=(12, 4))
df.groupby("Period").Choice.value_counts().unstack().plot.bar(
    ax=axs[0], stacked=True, rot=0, legend=False, title="Original"
)
df_3.groupby("Period").Choice.value_counts().unstack().plot.bar(
    ax=axs[1], stacked=True, rot=0, title="Simulated"
)
handles, _ = axs[1].get_legend_handles_labels()
axs[1].get_legend().remove()
fig.legend(
    handles=handles, loc="lower center", bbox_to_anchor=(0.5, 0), ncol=5
)
plt.tight_layout(rect=[0, 0.05, 1, 1])
# ## References
#
# > <NAME>. and <NAME>. (1997). [The Career Decisions of Young Men](https://doi.org/10.1086/262080). *Journal of Political Economy*, 105(3): 473-522.
| docs/getting_started/tutorial-simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from scipy.cluster.vq import kmeans2
from skimage import io, color
# %matplotlib inline
# +
def diff(x, y):
    """Circular distance between orientations x and y in degrees (period 180, max 90)."""
    wrapped = np.mod(x - y + 90, 180) - 90
    return np.abs(wrapped)

def G(x, y, sigma):
    """Unnormalised Gaussian of the circular orientation distance diff(x, y)."""
    d = diff(x, y)
    return np.exp(-(d ** 2) / (2 * sigma ** 2))
def G2D(x_range, y_range, mean, sigma):
    """Isotropic 2-D Gaussian centred at *mean*, evaluated at (x_range, y_range)."""
    x0, y0 = mean[0], mean[1]
    sq_dist = (x_range - x0) ** 2 + (y_range - y0) ** 2
    return np.exp(-sq_dist / (2 * sigma ** 2))
def mean_connections(W_ab):
    """Mean number of non-zero connections per presynaptic unit.

    Parameters
    ----------
    W_ab : ndarray, shape (n_units, N, N)
        Stack of connection-weight maps, one N x N map per unit.

    Returns
    -------
    float
        Average count of non-zero weights over the first axis.
    """
    # np.count_nonzero over the whole stack replaces the per-unit Python
    # loop with one C-level pass; float() keeps the original true-division
    # semantics (the original accumulator started at 0.).
    return float(np.count_nonzero(W_ab)) / W_ab.shape[0]
def stimulus_size(x, length, sig_RF):
    """Smooth top-hat window of width *length*: product of two opposed logistic edges."""
    half = length / 2.
    rising = (1. + np.exp(-(x + half) / sig_RF)) ** -1.
    falling = (1. + np.exp(-(x - half) / sig_RF)) ** -1.
    return rising * (1. - falling)
# +
# Orientation preference map: for now, use k-means on Blasdel image
rgb_img = mpimg.imread('v1-topology-blasdel-figure6.png')
plt.figure()
plt.imshow(rgb_img)
plt.title('Original topographic image')
lab_img = color.rgb2lab(rgb_img) # convert to L*a*b* colourspace
# Cluster only on the chromaticity channels (a*, b*) so segmentation is
# driven by colour rather than lightness.
ab = lab_img[:,:,1:]
n_rows = np.shape(ab)[0]
n_cols = np.shape(ab)[1]
ab = np.reshape(ab, (n_rows*n_cols, 2))
n_colours = 6
centroids, labels = kmeans2(ab, n_colours, iter=3)
labels = np.reshape(labels, (n_rows, n_cols))
rgb_labels = np.tile(labels[:,:,None], [1,1,3])
# Map each of the 6 colour clusters to an orientation in [0, 180] deg.
OP_range = np.linspace(0, 180, n_colours)
full_OP_map = np.copy(labels)
for i in range(n_colours):
    seg_img = np.copy(rgb_img)
    seg_img[rgb_labels != i] = 0
    # assign an orientation preference (degrees) based on segmentation
    # (full_OP_map keeps labels' integer dtype; with n_colours=6 the
    # OP_range values are whole numbers, so no truncation loss)
    full_OP_map[full_OP_map == i] = OP_range[i]
    # Show the individual segmented images:
    # plt.figure()
    # plt.imshow(seg_img)
# +
# Determine the connection probabilities
N_pairs = 75 # no. of E/I pairs to a side of a grid
field_size = 16. # size of field to a side (degrees)
dx = field_size / N_pairs
xy_range = np.linspace(0, field_size, N_pairs, False)
# xy_range = np.linspace(-field_size/2, field_size/2, N_pairs)
xv, yv = np.meshgrid(xy_range, xy_range) # x and y grid values (degrees)
# sample the OP map uniformly
min_dim = np.min(np.shape(full_OP_map))
o_samples = np.round(np.linspace(0, min_dim-1, N_pairs))
xo, yo = np.meshgrid(o_samples, o_samples)
xo = xo.astype(int)
yo = yo.astype(int)
OP_map = full_OP_map[yo,xo]
# Rodent map
# OP_map = np.floor(np.random.rand(N_pairs, N_pairs)*180)
# Connection weight parameters (from supp. materials S1.1.2):
kappa_E = 0.1
kappa_I = 0.5
J_EE = 0.1
J_IE = 0.38
J_EI = 0.089
J_II = 0.096
sig_EE = 8*dx
sig_IE = 12*dx
sig_EI = 4*dx
sig_II = 4*dx
sig_ori = 45
# calculate probability of no connections for each neuron (sparse connectivity)
# One (N_pairs x N_pairs) map per presynaptic unit, flattened over the
# first axis (N_pairs**2 units in total).
G_EE = np.zeros((N_pairs**2, N_pairs, N_pairs))
G_IE = np.copy(G_EE)
# may not need these
G_EI = np.copy(G_EE)
G_II = np.copy(G_EE)
G_ori = np.copy(G_EE)
pW_EE = np.copy(G_EE)
pW_IE = np.copy(G_EE)
pW_EI = np.copy(G_EE)
pW_II = np.copy(G_EE)
rnd_EE = np.copy(G_EE)
rnd_IE = np.copy(G_EE)
rnd_EI = np.copy(G_EE)
rnd_II = np.copy(G_EE)
np.random.seed(1)  # reproducible connectivity draws
# iterate through each E/I pair:
for i in range(N_pairs):
    for j in range(N_pairs):
        # Spatial Gaussian fall-off around unit (i, j), per projection type
        G_EE[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_EE)
        G_IE[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_IE)
        # do we need these? it doesn't appear that way in the methods...
        G_EI[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_EI)
        G_II[N_pairs*i+j, :, :] = G2D( xv, yv, (xv[0,i] , yv[j,0]), sig_II)
        # Orientation-similarity fall-off relative to this unit's preference
        G_ori[N_pairs*i+j,:,:] = G(OP_map[j,i], OP_map, sig_ori)
        rnd_EE[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
        rnd_IE[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
        rnd_EI[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
        rnd_II[N_pairs*i+j, :, :] = np.random.rand(N_pairs, N_pairs)
# Connection probability = kappa * spatial Gaussian * orientation Gaussian.
# NOTE(review): pW_EI/pW_II reuse the excitatory spatial kernels
# (G_EE/G_IE) rather than G_EI/G_II; the commented alternatives below
# suggest this was deliberate — confirm against the methods.
for i in range(N_pairs**2):
    pW_EE[i,:,:] = kappa_E * np.multiply(G_EE[i,:,:], G_ori[i,:,:])
    pW_IE[i,:,:] = kappa_E * np.multiply(G_IE[i,:,:], G_ori[i,:,:])
    pW_EI[i,:,:] = kappa_I * np.multiply(G_EE[i,:,:], G_ori[i,:,:])
    pW_II[i,:,:] = kappa_I * np.multiply(G_IE[i,:,:], G_ori[i,:,:])
    # pW_EI[i,:,:] = kappa_I * np.multiply(G_EI[i,:,:], G_ori[i,:,:])
    # pW_II[i,:,:] = kappa_I * np.multiply(G_II[i,:,:], G_ori[i,:,:])
# find zero-weighted connections:
# Bernoulli draw per potential synapse: keep it when rnd < probability.
W_EE = np.ones((N_pairs**2, N_pairs, N_pairs))
W_IE = np.copy(W_EE)
W_EI = np.copy(W_EE)
W_II = np.copy(W_EE)
W_EE[pW_EE<rnd_EE] = 0
W_IE[pW_IE<rnd_IE] = 0
W_EI[pW_EI<rnd_EI] = 0
W_II[pW_II<rnd_II] = 0
# Mean number of realised connections per unit, per projection type.
u_EE = mean_connections(W_EE)
u_IE = mean_connections(W_IE)
u_EI = mean_connections(W_EI)
u_II = mean_connections(W_II)
# -
u_ab = np.mean([u_EE, u_EI, u_IE, u_II])
print u_EE, u_EI, u_IE, u_II, u_ab
# +
# Show the connection maps (optional to run)
# Inspect the kernels and sampled weights of one example unit.
nx = 30
ny = 55
neuron_index = ny+nx*N_pairs  # flattened index matching the (i, j) loop above
plt.figure()
plt.imshow(OP_map)
plt.colorbar()
plt.figure()
plt.imshow(G_EE[neuron_index,:,:])
plt.colorbar()
plt.figure()
plt.imshow(G_ori[neuron_index,:,:])
plt.colorbar()
plt.figure()
plt.imshow(W_EI[neuron_index,:,:])
plt.colorbar()
plt.figure()
plt.imshow(pW_II[neuron_index,:,:])
plt.colorbar()
print 'Neuron O preference: ', OP_map[ny,nx]
# +
# For non-zero connections, determine the weight
# Each realised weight is drawn from N(J_ab, (0.25*J_ab)^2).
W_EE[W_EE != 0] = np.random.normal(J_EE, 0.25*J_EE, W_EE[W_EE!=0].size)
W_IE[W_IE != 0] = np.random.normal(J_IE, 0.25*J_IE, W_IE[W_IE!=0].size)
W_EI[W_EI != 0] = np.random.normal(J_EI, 0.25*J_EI, W_EI[W_EI!=0].size)
W_II[W_II != 0] = np.random.normal(J_II, 0.25*J_II, W_II[W_II!=0].size)
# Set negative weights to zero:
W_EE[W_EE < 0] = 0
W_IE[W_IE < 0] = 0
W_EI[W_EI < 0] = 0
W_II[W_II < 0] = 0
# "Weights of a given type 'b' onto each unit
# are then scaled so that all units of a given type 'a' receive the same
# total type b synaptic weight, equal to Jab times the mean number of
# connections received under p (Wab(x, x′) ̸= 0)"
for i in range(N_pairs**2):
    if np.all(W_EE[i,:,:] == np.zeros((N_pairs, N_pairs))) == False:
        W_EE[i,:,:] = W_EE[i,:,:]*J_EE*u_EE/np.sum(W_EE[i,:,:])
    if np.all(W_IE[i,:,:] == np.zeros((N_pairs, N_pairs))) == False:
        W_IE[i,:,:] = W_IE[i,:,:]*J_IE*u_IE/np.sum(W_IE[i,:,:])
    if np.all(W_EI[i,:,:] == np.zeros((N_pairs, N_pairs))) == False:
        W_EI[i,:,:] = W_EI[i,:,:]*J_EI*u_EI/np.sum(W_EI[i,:,:])
    if np.all(W_II[i,:,:] == np.zeros((N_pairs, N_pairs))) == False:
        W_II[i,:,:] = W_II[i,:,:]*J_II*u_II/np.sum(W_II[i,:,:])
# +
# Sanity check: each unit's total incoming EE weight should equal J_EE*u_EE.
# NOTE(review): `samples` holds floats; float fancy indices were tolerated
# by the NumPy of this notebook's era but are an error in modern releases —
# cast with astype(int) if porting.
samples = np.floor(np.random.rand(10)*N_pairs**2)
print 'EE should sum to ', J_EE*u_EE
for i in range(10):
    print 'Neuron %d:' % samples[i]
    print np.sum(W_EE[samples[i],:,:])
    print '=================='
# +
# Model parameters (from supplementary methods)
# Each cellular parameter gets 5% multiplicative heterogeneity across the grid.
k = np.random.normal(0.012, 0.05*0.012, (N_pairs, N_pairs))
n_E = np.random.normal(2.0, 0.05*2.0, (N_pairs, N_pairs))
n_I = np.random.normal(2.2, 0.05*2.2, (N_pairs, N_pairs))
sig_FF = 32   # feed-forward orientation tuning width (degrees)
sig_RF = dx   # receptive-field edge width (degrees)
tau_E = np.random.normal(0.02, 0.05*0.02, (N_pairs, N_pairs))
tau_I = np.random.normal(0.01, 0.05*0.01, (N_pairs, N_pairs))
# -
def generate_ext_stimulus(ori, size, centre, full_frame=False):
    """Feed-forward drive for a grating of orientation *ori* (degrees).

    Orientation tuning comes from the map (G against OP_map with width
    sig_FF); unless full_frame is True, the drive is windowed by a smooth
    disc of diameter *size* centred at *centre* (visual-field degrees).
    """
    ori_drive = G(ori, OP_map, sig_FF)
    if full_frame == True:
        return ori_drive
    dx_off = xv - centre[0]
    dy_off = yv - centre[1]
    radius = np.sqrt(dx_off ** 2 + dy_off ** 2)
    window = stimulus_size(radius, size, sig_RF)
    return window * ori_drive
# +
def run_simulation( dt, timesteps, c, h, init_cond=[np.zeros((N_pairs, N_pairs)),np.zeros((N_pairs, N_pairs))]):
    """Euler-integrate the E/I rate network.

    dt        -- integration step (seconds)
    timesteps -- number of steps (first slot holds the initial condition)
    c         -- stimulus contrast, scales the external drive h
    h         -- (N_pairs, N_pairs) external input map
    init_cond -- [r_E0, r_I0] initial firing-rate maps
    Returns [r_E, r_I, I_E, I_I], each with time along the first axis.
    NOTE(review): the mutable default argument is shared across calls —
    safe only as long as it is never mutated, which holds here.
    """
    r_E = np.zeros((timesteps, N_pairs, N_pairs))
    r_I = np.copy(r_E)
    # add initial conditions:
    r_E[0,:,:] = init_cond[0]
    r_I[0,:,:] = init_cond[1]
    I_E = np.zeros((timesteps, N_pairs, N_pairs))
    I_I = np.copy(I_E)
    # rSS_E = np.copy(I_E)
    # rSS_I = np.copy(I_I)
    for t in range(1,timesteps):
        # Input drive from external input and network
        # (vectorised form of the commented per-unit loop kept below)
        # for i in range(N_pairs):
        #     for j in range(N_pairs):
        #         I_E[t,j,i] = c*h[j,i] + np.sum( np.multiply( W_EE[i*N_pairs+j,:,:], r_E[t-1,:,:]) ) - np.sum( np.multiply( W_EI[i*N_pairs+j,:,:], r_I[t-1,:,:]) )
        #         I_I[t,j,i] = c*h[j,i] + np.sum( np.multiply( W_IE[i*N_pairs+j,:,:], r_E[t-1,:,:]) ) - np.sum( np.multiply( W_II[i*N_pairs+j,:,:], r_I[t-1,:,:]) )
        I_E[t,:,:] = c*h + np.sum( np.reshape(np.multiply(W_EE, r_E[t-1,:,:]), (N_pairs, N_pairs, N_pairs**2)), 2).T - np.sum( np.reshape(np.multiply(W_EI, r_I[t-1,:,:]), (N_pairs, N_pairs, N_pairs**2)), 2).T
        I_I[t,:,:] = c*h + np.sum( np.reshape(np.multiply(W_IE, r_E[t-1,:,:]), (N_pairs, N_pairs, N_pairs**2)), 2).T - np.sum( np.reshape(np.multiply(W_II, r_I[t-1,:,:]), (N_pairs, N_pairs, N_pairs**2)), 2).T
        # steady state firing rates - power law I/O
        # (fmax rectifies the drive, so the power never sees negatives)
        rSS_E = np.multiply(k, np.power(np.fmax(0,I_E[t,:,:]), n_E))
        rSS_I = np.multiply(k, np.power(np.fmax(0,I_I[t,:,:]), n_I))
        # # set negative steady state rates to zero
        # rSS_E[rSS_E < 0] = 0
        # rSS_I[rSS_I < 0] = 0
        # instantaneous firing rates approaching steady state
        r_E[t,:,:] = r_E[t-1,:,:] + dt*(np.divide(-r_E[t-1,:,:]+rSS_E, tau_E))
        r_I[t,:,:] = r_I[t-1,:,:] + dt*(np.divide(-r_I[t-1,:,:]+rSS_I, tau_I))
    return [r_E, r_I, I_E, I_I]
# +
# run a single simulation for testing
h = generate_ext_stimulus(45, 16, (8,8))  # full-field 45-degree grating
dt = 0.001
timesteps = 100
c = 40
[r_E, r_I, I_E, I_I] = run_simulation(dt, timesteps, c, h)
plt.figure()
plt.plot(np.linspace(0,0.1,100), r_E[:,37,37],'r')
plt.title('Individual Excitatory Response')
plt.xlabel('Time (seconds)')
plt.ylabel('Firing Rate (Hz)')
# -
plt.figure()
# plt.hold(True)
plt.plot(np.linspace(0,0.1,100), I_E[:,37,37], 'r', np.linspace(0,0.1,100), I_I[:,37,37], 'b')
# Report which units blew up (NaN from the Euler integration), if any.
if np.isnan(r_E).any():
    print 'List of non-overflow neurons: '
    for i in range(N_pairs):
        for j in range(N_pairs):
            if np.isnan(r_E[:,i,j]).any() == False:
                print (i,j)
else:
    print 'No neurons overflow'
# +
# run simulations to reproduce mean length tuning curves, figure 6E
# We want to use neurons that demonstrate significant surround suppression - higher than 0.25
# (this will take a long time to run)
size_range = np.linspace(1,16,10) # stimulus diameters to sweep (degrees)
c = 10          # stimulus contrast
stim_ori = 45   # degrees
centre = (8,8)  # stimulus centre (degrees), middle of the 16-degree field
# Pre-compute the external drive for every stimulus size.
h_range = np.zeros((len(size_range), N_pairs, N_pairs))
for i in range(len(size_range)):
    h_range[i,:,:] = generate_ext_stimulus(stim_ori, size_range[i], centre)
dt = 0.001
timesteps = 100
avg_r_E = np.zeros(len(size_range))
avg_r_I = np.copy(avg_r_E)
# store all the firing rates for every trial
# BUGFIX: the closing parenthesis was misplaced — the original
# `np.zeros((len(size_range, timesteps, N_pairs, N_pairs))` was a syntax
# error (unbalanced parens; len() called with 4 arguments).
results_E = np.zeros((len(size_range), timesteps, N_pairs, N_pairs))
results_I = np.copy(results_E)
for i in range(len(size_range)):
    [r_E, r_I, I_E, I_I] = run_simulation(dt, timesteps, c, h_range[i])
    avg_r_E[i] = np.mean(r_E)
    avg_r_I[i] = np.mean(r_I)
    results_E[i,:,:,:] = r_E
    results_I[i,:,:,:] = r_I
# find which units demonstrate SSI > 0.25
SSI_list = []
plt.figure()
plt.plot(size_range, avg_r_E, 'r')
plt.title('Average Excitatory Response')
plt.ylabel('Average firing rate')
plt.xlabel('Stimulus size (degrees)')
plt.figure()
plt.plot(size_range, avg_r_I, 'b')
plt.title('Average Inhibitory Response')
plt.ylabel('Average firing rate')
plt.xlabel('Stimulus size (degrees)')
# +
# Plotting results
n_plots = 10
x = np.floor(N_pairs*np.random.rand(n_plots))
y = np.floor(N_pairs*np.random.rand(n_plots))
print x,y
plt.figure()
plt.hold(True)
t_range = np.linspace(0,timesteps*dt, timesteps)
for i in range(n_plots):
plt.plot( t_range, r_E[:,x[i],y[i]], 'r' )
plt.xlabel('Time (s)')
plt.ylabel('Firing Rate (Hz)')
plt.title('Excitatory firing rate')
plt.show()
plt.figure()
plt.plot(t_range, r_I[:,x[0],y[0]] )
plt.xlabel('Time (s)')
plt.ylabel('Firing Rate (Hz)')
plt.title('Inhibitory firing rate')
plt.figure()
plt.plot(t_range, I_E[:,x[0],y[0]], 'r')
plt.xlabel('Time (s)')
plt.ylabel('Drive')
plt.title('Excitatory Network Drive')
plt.figure()
plt.plot(t_range, I_I[:,x[0],y[0]])
plt.xlabel('Time (s)')
plt.ylabel('Drive')
plt.title('Inhibitory Network Drive')
| mechanistic/.ipynb_checkpoints/SSN_2D_model-May-15-clean-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1 - Constructing simple CSG geometry
#
# To perform a neutronics simulation the geometry of the model must be defined. The simplest way to do this is to use Constructive Solid Geometry (CSG) which involves using primitive shapes and boolean operations to create cells and void spaces.
#
# This python notebook allows users to make a simple CSG using OpenMC and plot 2D slices of the geometry.
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/Ovr7oYukYRw" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# This first code block creates a simple spherical shell geometry by defining two spherical surfaces at different radii. The region between the two surfaces is then defined and a cell created which is added to the geometry 'universe'.
#
# Run the cell to produce three 2D slices of the geometry in different planes.
# +
import openmc
import matplotlib.pyplot as plt
# example surfaces
inner_sphere_surface = openmc.Sphere(r=500)
outer_sphere_surface = openmc.Sphere(r=600)
# above (+) inner_sphere_surface and below (-) outer_sphere_surface
# ('+' selects the half-space outside a sphere, '-' the half-space inside it)
blanket_region = +inner_sphere_surface & -outer_sphere_surface
# example cell
blanket_cell = openmc.Cell(region=blanket_region)
# makes a universe to contain all the cells
universe = openmc.Universe(cells=[blanket_cell])
# shows the plots, as the geometry is symmetrical the plots look the same
color_assignment = {blanket_cell: 'blue'}
plt.show(universe.plot(width=(1200, 1200), basis='xz', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='xy', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='yz', colors=color_assignment))
# -
# The next code block adds a firstwall cell to the inner surface of the spherical shell.
# +
# example surfaces
inner_sphere_surface = openmc.Sphere(r=480)
middle_sphere_surface = openmc.Sphere(r=500) # note the extra surface
outer_sphere_surface = openmc.Sphere(r=600)
# above (+) middle_sphere_surface and below (-) outer_sphere_surface
blanket_region = +middle_sphere_surface & -outer_sphere_surface
# above (+) inner_sphere_surface and below (-) middle_sphere_surface
# (the firstwall fills the 480-500 shell just inside the 500-600 blanket)
firstwall_region = +inner_sphere_surface & -middle_sphere_surface
# now we have two cells
blanket_cell = openmc.Cell(region=blanket_region)
firstwall_cell = openmc.Cell(region=firstwall_region)
# there are now two cells in the list
universe = openmc.Universe(cells=[blanket_cell, firstwall_cell])
# shows the plots, which still look the same for all directions
color_assignment = {blanket_cell: 'blue', firstwall_cell: 'red'}
plt.show(universe.plot(width=(1200, 1200), basis='xz', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='xy', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='yz', colors=color_assignment))
# -
# When creating CSG there is another key aspect of the geometry required when we want to use it to simulate particle movement and interactions - the entire geometry must be defined.
#
# As well as defining the cells which contain materials, we must also define areas of the geometry with no materials, which are also called 'voids'. By default, cells are all voids until a material is assigned to those cells.
#
# Also, the outermost surface must be identified with the boundary_type='vacuum' keyword which tells OpenMC to not compute the movement of particles beyond this surface.
# The next code block shows how void regions and a vacuum boundary can be defined.
# +
# surfaces
inner_sphere_surface = openmc.Sphere(r=480)
middle_sphere_surface = openmc.Sphere(r=500)
outer_sphere_surface = openmc.Sphere(r=600, boundary_type='vacuum') # note the extra keyword
# regions
blanket_region = +middle_sphere_surface & -outer_sphere_surface
firstwall_region = +inner_sphere_surface & -middle_sphere_surface
inner_vessel_region = -inner_sphere_surface # this is the void region
# cells
blanket_cell = openmc.Cell(region=blanket_region)
firstwall_cell = openmc.Cell(region=firstwall_region)
inner_vessel_cell = openmc.Cell(region=inner_vessel_region) # here as the cell is the new void cell
universe = openmc.Universe(cells=[blanket_cell, firstwall_cell, inner_vessel_cell])
# colors here are still keyed on cells (the grey entry covers the new void cell);
# material-based coloring is demonstrated in the next code block
color_assignment = {blanket_cell: 'blue', firstwall_cell: 'red', inner_vessel_cell:'grey'}
# no color_by argument is passed, so the default coloring by 'cell' is used
plt.show(universe.plot(width=(1200, 1200), basis='xz', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='xy', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='yz', colors=color_assignment))
# -
# Assigning materials to cells is also necessary when wanting to use the geometry for particle transport.
#
# Material creation was covered in Task 2 so these are just minimal materials to demonstrate material assignment to cells.
#
# Additionally, this plot uses the color_by='material' argument to colour the plots by materials instead of cells.
# +
inner_sphere_surface = openmc.Sphere(r=480)
middle_sphere_surface = openmc.Sphere(r=500)
outer_sphere_surface = openmc.Sphere(r=600)
blanket_region = +middle_sphere_surface & -outer_sphere_surface
firstwall_region = +inner_sphere_surface & -middle_sphere_surface
inner_vessel_region = -inner_sphere_surface # this is the void region, it will not have a material
# This makes a minimal material
lithium_mat = openmc.Material(name='lithium')
lithium_mat.set_density('g/cm3', 2)
lithium_mat.add_element('Li', 1.0)
# This makes another minimal material
tungsten_mat = openmc.Material(name='tungsten')
tungsten_mat.set_density('g/cm3', 19)
tungsten_mat.add_element('W', 1.0)
blanket_cell = openmc.Cell(region=blanket_region)
blanket_cell.fill = lithium_mat # this assigns a material to a cell
firstwall_cell = openmc.Cell(region=firstwall_region)
firstwall_cell.fill = tungsten_mat # this assigns a material to a cell
inner_vessel_cell = openmc.Cell(region=inner_vessel_region)
# note there is no material assignment here as the cell is a void cell
universe = openmc.Universe(cells=[blanket_cell, firstwall_cell, inner_vessel_cell])
# note the new color scheme is based on materials not cells
color_assignment = {lithium_mat: 'green', tungsten_mat: 'yellow'}
# note the additional argument color_by, normally this defaults to 'cell'
plt.show(universe.plot(width=(1200, 1200), basis='xz', color_by='material', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='xy', color_by='material', colors=color_assignment))
plt.show(universe.plot(width=(1200, 1200), basis='yz', color_by='material', colors=color_assignment))
# -
# **Learning Outcomes for Part 1:**
#
# - Construction of simple Constructive Solid Geometry (CSG) geometry.
# - Visualisation of models using 2D slices.
# - Assigning materials to cells.
# - Defining the complete geometry including void cells.
# - Defining the edge of the model with the boundary keyword.
# - Coloring models by cell or by material.
| tasks/task_03_making_CSG_geometry/1_simple_csg_geometry.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Daily Assignment 7/9/19 - Hypothesis Tests and Confidence Intervals with Parameters
#
# Use the dataset _GPA.dta_ for this exercise. Suppose we are interested in predicting college GPA for students at Michigan State University. Consider the equation:
#
# $$colGPA = \beta_0 + \beta_1 hsGPA + \beta_2 ACT + \beta_3 skipped + u$$
#
# where $colGPA$ is cumulative college grade point average, $hsGPA$ is the student’s high school grade point
# average, $ACT$ is the student’s ACT score, and $skipped$ is the average number of lectures skipped per week.
# +
# Insert preamble code here (i.e. load packages, bring in data)
# -
# #### 1. What are your expectations for the coefficients in this equation? Which ones are you unsure about?
# +
# Insert code for Q1 here.
# -
# Add Discussion here.
# #### 2. Estimate the equation and report the results. Test for the hypothesis $\beta_3 = 0$.
#
# +
# Insert code for Q2 here.
# -
# Add discussion here.
# #### 3. Construct a 90% confidence interval for $\beta_3$. Interpret your results.
#
# +
# Insert code for Q3 here.
# -
# Add discussion here.
# #### 4. Test for the hypothesis $\beta_1 = 0.4$ against the two-sided alternative at the 5% significance level.
#
# +
#Insert code for Q4 here.
# -
# Add discussion here.
# #### 5. Test for the hypothesis $\beta_1 = 1$ against $\beta_1 < 1$ at the 10% significance level.
#
# +
#Insert code for Q5 here.
# -
# Add discussion here.
# +
# Another code cell, if you want to keep some stuff separate.
# +
# More space.
| Summer-19/DailyAssignments/Daily Assignment 7_9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1. session length - general stats on how long sessions are. means,
# percentiles, etc.
# 2. User 'profile' - how many 'kinds' of users do we have? Some who
# just pop in once? Some who pop in a few times a week for a fixed
# amount of time? some who are there all the time? what kinda user
# clusters do we have?
# 3. Who are those people using it 400 times in a semester?
# 4. are there user corelations? Can we spot 'groups' of users with
# similar behavior? What kinda behavior is it? etc
import pandas as pd
import numpy as np
from datetime import datetime
import altair as alt
from IPython import display
# Set the altair theme
def my_theme(*args, **kwargs):
    """Altair theme callback: bump axis label/title fonts to 20pt.

    Accepts (and ignores) arbitrary arguments, as the theme registry
    requires a callable.
    """
    axis_fonts = {'labelFontSize': 20, 'titleFontSize': 20}
    return {'config': {'axis': axis_fonts}}
# Register and activate the custom theme for every chart built below.
alt.themes.register('my-chart', my_theme)
alt.themes.enable('my-chart')
# # Params and functions
# Fall 2018 semester boundaries (tz-aware, US/Pacific); all loaded data
# below is filtered to this window.
semester_start = pd.Timestamp('2018-08-15').tz_localize('US/Pacific')
semester_end = pd.Timestamp('2018-12-15').tz_localize('US/Pacific')
def convert_tz(series):
    """Interpret naive datetimes in *series* as UTC and convert to US/Pacific."""
    return series.dt.tz_localize('UTC').dt.tz_convert('US/Pacific')
# # Load data
# ## User session data
# +
# Log data for user activity: one row per notebook-server session,
# with tz-aware 'start'/'stop' timestamps.
path_sessions = '../data/features/fall-2018/user-sessions.jsonl'
sessions = pd.read_json(path_sessions, convert_dates=['start', 'stop'])
for col in ['start', 'stop']:
    sessions[col] = convert_tz(sessions[col])
# -
# Only between start and end of semester.
# Use one combined boolean mask: the original chained sessions[a][b]
# indexing reindexes the second mask against a copy (pandas emits a
# UserWarning) and is a SettingWithCopy hazard.
sessions = sessions[(sessions['start'] > semester_start) & (sessions['start'] < semester_end)]
# ## Cost per day
# Daily cloud costs; 'start_time' marks the day each cost row accrued.
cost = pd.read_json('../data/processed/fall-2018/cloud-costs.jsonl', lines=True)
cost['start_time'] = convert_tz(cost['start_time'])
cost = cost.drop(columns=['end_time']).set_index('start_time')
# +
# Only between start and end of semester (single mask instead of the
# warn-prone chained cost[a][b] indexing).
cost = cost[(cost.index > semester_start) & (cost.index < semester_end)]
# We only use indexing timestamps to make the tz_localize easier.
# after that, we drop it to make everything else easier
cost = cost.reset_index()
# Fill in rows (cost = NaN) for dates between the semester start and the
# first recorded cost. The original positional
# pd.DataFrame(data, index, columns={...}) call mixed up the constructor
# arguments (index got the NaN array, columns got an unordered set);
# build the frame explicitly instead.
missing_dates = pd.date_range(semester_start, cost.start_time.min(), name='start_time')
missing_dates_cost = pd.DataFrame({'start_time': missing_dates, 'cost': np.nan})
# DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
cost = pd.concat([cost, missing_dates_cost], ignore_index=True)
# + [markdown] toc-hr-collapsed=false
# # Viz and analysis
# + [markdown] toc-hr-collapsed=false
# ## Daily Active user
#
# We count someone as a 'daily active user' if they start / stop their notebook server
# at least once. Due to anonymization techniques applied earlier, this might slightly
# undercount users
# -
# Unique daily users - we count anyone who has logged in at least once a day
# We want a dataframe with no index so we can use it easily with Altair
# resample('D').nunique() -> number of distinct users per calendar day.
daily_active_users = pd.DataFrame(sessions.set_index('start')['user'].resample('D').nunique()).reset_index()
# Line chart: daily active users over the semester.
alt.Chart(daily_active_users, width=900).mark_line().encode(
    x='start',
    y='user'
)
# Mean daily active users
mean_daily_active_users = daily_active_users['user'].mean()
display.HTML(f'<h3>Mean daily active users: <b>{mean_daily_active_users: .2f}</b></h3>')
# + [markdown] toc-hr-collapsed=true
# ## Daily cloud costs
# -
# Line chart: daily cloud cost over the semester.
alt.Chart(cost, width=900).mark_line().encode(
    x='start_time',
    y=alt.Y('cost', axis=alt.Axis(format="$.2f"))
)
# Mean daily cloud cost (original comment said "active users" - copy-paste)
mean_cost = cost['cost'].mean()
display.HTML(f'<h3>Mean daily cloud cost: <b>${mean_cost: .2f}</b></h3>')
# ## Daily cloud costs per **active** user
#
# This is cost per day for *active* users - those who used the cluster. This is only a fraction of your total users, so be careful using this for estimates.
# +
# Combine into a single dataframe based on day
# (outer join keeps days present in only one of the two frames; those rows
# get NaN for the missing side and hence NaN cost_per_active_user)
total = pd.merge(daily_active_users, cost, how='outer', left_on='start', right_on='start_time').drop(columns=['start_time'])
# Calculate daily cost per user
total['cost_per_active_user'] = total['cost'] / total['user']
# -
alt.Chart(total, width=900).mark_line().encode(
    x='start',
    y=alt.Y('cost_per_active_user', axis=alt.Axis(format="$.2f"), title="Cost per active user")
)
# + [markdown] toc-hr-collapsed=true
# ## Total Users
#
# One way to count the 'number of users on the JupyterHub' is to look at everyone who has ever started a notebook. Since anyone with a Berkeley.edu account can log in, this is not the most accurate count of people who *use* the hub - but it's useful nonetheless.
# -
# One row per distinct user; 'start' holds that user's total session count.
user_starts_count = sessions.groupby('user').count()['start'].to_frame()
display.HTML(f'<h4>Users who used JupyterHub at least once: <b>{user_starts_count.shape[0]}</b></h4>')
# Some basic stats about number of times users started their servers
user_starts_count.describe()
# We can calculate cost per day per user from this user count
# +
# user_starts_count.count()['start'] == number of rows == total user count.
total['cost_per_user'] = total['cost'] / user_starts_count.count()['start']
alt.Chart(total, width=900, title='Daily cost per user').mark_line().encode(
    x='start',
    y=alt.Y('cost_per_user', axis=alt.Axis(format="$.2f"), title="Cost per user")
)
# -
# We can also figure out how much they cost per month.
# +
# Resample daily costs to month totals, then divide by the total user count.
monthly = cost.copy().set_index('start_time').resample('M').sum()
monthly['cost_per_user'] = monthly['cost'] / user_starts_count.count()['start']
monthly = monthly.reset_index()
alt.Chart(monthly, width=900, title='Monthly cost per user').mark_bar().encode(
    x='month(start_time):O',
    y=alt.Y('cost_per_user', axis=alt.Axis(format="$.2f"), title="Cost per user")
)
# + [markdown] toc-hr-collapsed=true
# ## Realistic user count
#
# A lot of people might log in a few times to the JupyterHub to check it out, and then never really come back. We should avoid counting those as 'users' when doing our cost analysis.
#
# We can cut off outliers at the 99th percentile and plot a histogram to see how most people use the hub
# -
# Drop the heaviest 1% of users so the histogram's x-axis is not dominated
# by a handful of outliers.
outlier_cutoff = user_starts_count.quantile(0.99)
alt.Chart(user_starts_count[user_starts_count['start'] < outlier_cutoff['start']], width=900).mark_bar().encode(
    alt.X('start', bin=alt.BinParams(maxbins=100)),
    y='count()'
)
# There's a lot of users who use the hub <5 times, and many who use it 5-10 times. We pick an arbitrary cut off of 'ten notebook server starts' to count as a 'real' user and not someone just trying out the hub.
# Users with more than ten server starts count as 'realistic' users.
users_with_more_than_ten_starts = user_starts_count[user_starts_count['start'] > 10]
# The original f-string left the <b> and <h4> tags unclosed, which makes the
# rendered notebook bleed bold/heading formatting into subsequent output;
# close them like the other display.HTML calls in this notebook do.
display.HTML(f'<h4>Users who used the hub more than 10 times: <b>{users_with_more_than_ten_starts.count()["start"]}</b></h4>')
# We can use this to plot a daily and monthly cost per user
# +
# Daily cost divided by the count of 'realistic' users (>10 server starts).
total['cost_per_realistic_user'] = total['cost'] / users_with_more_than_ten_starts.count()['start']
alt.Chart(total, width=900, title='Daily cost per user (with >10 server starts)').mark_line().encode(
    x='start',
    y=alt.Y('cost_per_realistic_user', axis=alt.Axis(format="$.2f"), title="Cost per realistic user")
)
# +
# Same metric aggregated to month totals.
realistic_monthly = cost.copy().set_index('start_time').resample('M').sum()
realistic_monthly['cost_per_realistic_user'] = realistic_monthly['cost'] / users_with_more_than_ten_starts.count()['start']
realistic_monthly = realistic_monthly.reset_index()
alt.Chart(realistic_monthly, width=900, title='Monthly cost per user (with >10 starts)').mark_bar().encode(
    x='month(start_time):O',
    y='cost_per_realistic_user'
)
# -
# ## Session length
# Histogram of session durations (count of sessions per 'length_hours' value).
length_counts = sessions.groupby('length_hours').count().reset_index()
alt.Chart(length_counts, width=300).mark_bar().encode(
    x='length_hours',
    y=alt.Y('start', title="Count")
)
# # Summary
#
# Mean number of daily active users per day: **1006**
#
# Monthly cost per 'realistic' user: **~2$**
| notebooks/03-visualize-cost-and-usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demonstrate the use of `make_simlibs.py`
#
# The script `make_simlibs.py` is used to make Simlib files from OpSim DataBases. [Go to script.](https://github.com/LSSTDESC/OpSimSummary/blob/master/scripts/make_simlibs.py)
#
# The following commands can be run from the terminal or from a notebook.
#
# ## Index<a name="index"></a>
# * [Options](#options)
# * [Example of usage](#usage)
# 1. [Download the desired file](#download)
# * [Set up options](#chooseOptions)
# * [Run the script](#run)
# * [Output](#output)
#
# ## Options<a name="options"></a>
#
# First, we can look at all the options.
# !python ../scripts/make_simlibs.py -h
# ## Example of usage<a name="usage"></a>
#
# Consider the case where you want to transform an OpSim DataBase file into a Simlib file.
#
# ### 1. Download the desired file<a name="download"></a>
#
# In this example, we will download the `baseline_v1.4_10yrs.db` file from [here](https://lsst-web.ncsa.illinois.edu/sim-data/) using the *nix command-line `curl` tool. After it is downloaded, we will move it to the Downloads folder in order to showcase some more options of `make_simlibs`.
#
# ```shell
# curl -O https://lsst-web.ncsa.illinois.edu/sim-data/sims_featureScheduler_runs1.4/baseline/baseline_v1.4_10yrs.db
#
# # mv baseline_v1.4_10yrs.db ${HOME}/Downloads/
#
# # The database file is now in ${HOME}/Downloads/baseline_v1.4_10yrs.db
# ```
#
# ### 2. Set up options<a name="chooseOptions"></a>
#
# - `data_root` $\rightarrow$ path to the directory containing `dbname`. In this case the package is in `${HOME}`.
# - `dbname` $\rightarrow$ relative path to the OpSim DataBase file from `data_root`. In this case it is `Downloads/baseline_v1.4_10yrs.db`. (This could have also been done as `data_root` $\rightarrow$ `${HOME}/Downloads` and `dbname` $\rightarrow$ `baseline_v1.4_10yrs.db` as used below.)
#
# - `opsimversion` $\rightarrow$ version of opsim used. Here we used `fbsv1p3`.
#
# If no `ddf_simlibfilename` and `wfd_simlibfilename` paths are given, the generated files will be output in the current working directory given by `PWD`.
#
# ### 3. Run the script<a name="run"></a>
#
# Go to the folder with the `make_simlibs` script. In this case, the script is in:
#
# ```shell
# # cd ~/OpSimSummary/scripts
# ```
#
# We can then run the script
#
# ```shell
# python make_simlibs.py --data_root /Users/user_name/Downloads --dbname baseline_v1.4_10yrs.db --opsimversion 'fbsv1p3'
# ```
#
# All the above following commands can be run from the terminal but they can also be run from a notebook by adding `!` before the command. Ex:
# !pwd
# #### Alternatives
#
# The `make_simlibs` script prints several messages while running, and can take some time to run. Hence, it is often convenient to run in the background, and store the output in a log file. Naming the log file `simlib_v1.4.log`, the command to run the script then becomes:
#
# ```shell
# python make_simlibs.py --data_root /Users/user_name/Downloads --dbname baseline_v1.4_10yrs.db --opsimversion 'fbsv1p3' > simlib_v1.4.log 2>&1 &
# ```
#
# If you want to leave this running on a remote machine
#
# ```shell
# nice nohup python make_simlibs.py --data_root /Users/user_name/Downloads --dbname baseline_v1.4_10yrs.db --opsimversion 'fbsv1p3' > simlib_v1.4.log 2>&1 &
# ```
# should do the job.
#
#
# ### 4. Output<a name="output"></a>
#
# Using the above command, the output files will be in the `~/OpSimSummary/scripts` folder.
#
# The files generated are:
# - `baseline_v1.4_10yrs_DDF.simlib`
# - `baseline_v1.4_10yrs_DDF_avail.csv`
# - `baseline_v1.4_10yrs_DDF_sel.csv`
# - `baseline_v1.4_10yrs_WFD.simlib`
# - `baseline_v1.4_10yrs_WFD_avail.csv`
# - `baseline_v1.4_10yrs_WFD_sel.csv`
# - `ddf_minion_1016_sqlite.csv`
# - `simlib_v1.4.log`
# - `wfd_minion_1016_sqlite.csv`
#
#
# The files ending in `simlib` are the files that can be used as input to `SNANA`. If appropriate for the use, one can further using the `simlib_coadd` script of `SNANA`, which can coadd observations over time intervals to produce more compact simlib files that help `SNANA` generate faster simulations.
#
# `avail.csv` files list the spatial parts of the sky (in terms of healpixels) that are part of the survey (WFD or DDF).
# `sel.csv` is a selection of those healpixels, at which the pointings are used for calculating the SNANA observing library.
#
# Go to [Index](#index).
| example/Demo_make_simlibs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Relation Extraction
# Ensure that FastAPI is up and running. [instructions here](../fast_api/readme.md)
# # Table of contents
#
# 1. Data prep
# 2. Standard
# 2.1. Train Model
# 2.2. Predict Labels
# 2.3. Evaluate Model
# 3. NeXT
# 3.1. Soft Match
# 3.1.1. Train Model
# 3.1.2. Predict Labels
# 3.1.3. Evaluate Model
# 3.2. Strict Match
# 3.2.1. Train Model
# 3.2.2. Predict Labels
# 3.2.3. Evaluate Model
# + pycharm={"name": "#%%\n"}
# imports
import requests
import json
# -
FAST_API_URL = "http://localhost:9000"
# ---
# # 1. Data prep
# We use a subset of `SemEval 2010 Task 8` dataset in this demo.
# ---
# #### Prepare data for Standard training
# load labelled data without explanations for Standard training
def _load_json(path):
    """Parse a JSON file, closing the handle promptly.

    The original json.load(open(path)) calls leaked the file object until
    garbage collection; a context manager closes it deterministically.
    """
    with open(path) as fh:
        return json.load(fh)


re_standard_labeled_data = [
    {'text': doc['text'], 'label': doc['label']}
    for doc in _load_json('data/semeval_labeled.json')
]
re_standard_labeled_data[0]
re_standard_dev_data = _load_json('data/semeval_dev.json')
re_standard_dev_data[0]
# ---
# #### Prepare data for NeXT training
# load unlabelled data for NeXT training
re_next_unlabeled_data = _load_json('data/semeval_unlabeled.json')
re_next_unlabeled_data[0]
# load labelled data with explanations for NeXT training
re_next_explanation_triples = _load_json('data/semeval_labeled.json')
re_next_explanation_triples[0]
# ---
# #### Prepare data for evaluation
# load evaluation samples
re_eval_data = _load_json('data/semeval_test.json')
# [{text: str, label: str}] -> [sentence, label] pairs
re_eval_data = [[doc['text'], doc['label']] for doc in re_eval_data]
# [Sentence, Label] pairs
re_eval_data[0]
# ---
# #### Prepare data for prediction
# Prediction inputs: the first ten evaluation sentences (text only).
re_predict_data = [pair[0] for pair in re_eval_data][:10]
re_predict_data[0]
# ---
# #### Define label space
# dictionary of (key: String, value: Int), key being the label and value being unique integer ID
# 19 relation classes in total (18 directed relations + "no_relation");
# the IDs form the contiguous range 0-18.
re_label_space = {
    "Product-Producer(e1,e2)": 0,
    "Component-Whole(e2,e1)": 1,
    "Product-Producer(e2,e1)": 2,
    "Instrument-Agency(e2,e1)": 3,
    "Message-Topic(e1,e2)": 4,
    "Entity-Origin(e2,e1)": 5,
    "Entity-Origin(e1,e2)": 6,
    "Entity-Destination(e1,e2)": 7,
    "Cause-Effect(e1,e2)": 8,
    "Content-Container(e1,e2)": 9,
    "Member-Collection(e2,e1)": 10,
    "Instrument-Agency(e1,e2)": 11,
    "Message-Topic(e2,e1)": 12,
    "Content-Container(e2,e1)": 13,
    "no_relation": 14,
    "Member-Collection(e1,e2)": 15,
    "Entity-Destination(e2,e1)": 16,
    "Component-Whole(e1,e2)": 17,
    "Cause-Effect(e2,e1)": 18
}
# ---
# #### Define NER label space
# NER tag space shared by every training/predict/eval request below.
# Use a context manager so the file handle is closed promptly
# (json.load(open(...)) leaves it to the garbage collector).
with open('data/semeval_ner_space.json') as fh:
    re_ner_label_space = json.load(fh)
re_ner_label_space
# ---
# ---
# # 2. Standard
# Check [this](../fast_api/json_schema.py#L516) json schema for a list of all parameters.
#
# Check [this](../model_training/internal_api/defaults.py) for default values.
# ## 2.1. Train model
# ### Define training parameters
# Hyper-parameters for the Standard (fully supervised) RE model.
re_standard_train_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_standard",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # task type - "re" for Relation Extraction
    "task": "re",
    # training batch size
    "match_batch_size": 50,
    # learning rate
    "learning_rate": 0.1,
    # number of epochs
    "epochs": 5,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d",
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # number of hidden dimensions
    "hidden_dim": 100,
    # random seed
    "random_state": 7698,
    # when "True" data has to be passed,
    # "False" when re-training or the data was processed earlier and can be retrieved
    "build_data": True,
    # label representing the relation "no_relation"
    "none_label_key": "no_relation",
}
# ### Run model training
# depending on input size, and computing environment this might take time.
# please check FAST API logs for updates
# NOTE(review): requests.post is called without a timeout, so this call
# blocks until the (long-running) training request returns - confirm that
# is intended.
response = requests.post(
    FAST_API_URL + '/training/standard/api/',
    json={
        'params': re_standard_train_params,
        'label_space': re_label_space,
        'labeled_data': re_standard_labeled_data,
        'dev_data': re_standard_dev_data,
        'ner_label_space': re_ner_label_space
    }
)
# JSON with "save_path" key is returned when successful
response.text
# ---
# ## 2.2. Predict labels
# ### Define prediction parameters
# Inference settings - these mirror the architecture settings used at
# training time above (same experiment_name, embeddings and dimensions).
re_standard_predict_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_standard",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # task type - "re" for Relation Extraction
    "task": "re",
    # evaluation batch size
    "eval_batch_size": 50,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d",
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # number of hidden dimensions
    "hidden_dim": 100,
    # label representing the relation "no_relation"
    "none_label_key": "no_relation",
}
# ### Fetch predictions
response = requests.post(
    FAST_API_URL + '/training/standard/predict/',
    json={
        'params': re_standard_predict_params,
        'label_space': re_label_space,
        'prediction_data': re_predict_data,
        'ner_label_space': re_ner_label_space
    }
)
# returns JSON object with keys
# "class_probs" - list of list representing predictions for each label
# "class_preds" - list representing the label that was predicted
response.text
# ---
# ## 2.3. Evaluate model
# ### Define evaluation parameters
# Evaluation settings - identical to the prediction settings above.
re_standard_eval_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_standard",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # task type - "re" for Relation Extraction
    "task": "re",
    # evaluation batch size
    "eval_batch_size": 50,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d",
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # number of hidden dimensions
    "hidden_dim": 100,
    # label representing the relation "no_relation"
    "none_label_key": "no_relation",
}
# ### Run model evaluation
response = requests.post(
    FAST_API_URL + '/training/standard/eval/',
    json={
        'params': re_standard_eval_params,
        'label_space': re_label_space,
        'eval_data': re_eval_data,
        'ner_label_space': re_ner_label_space
    }
)
# returns JSON object with keys
# "avg_loss" and "avg_eval_f1_score"
response.text
# ---
# ---
# # 3. NeXT
# Check [this](../fast_api/json_schema.py#L293) json schema for a list of all parameters.
#
# Check [this](../model_training/internal_api/defaults.py) for default values.
# ## 3.1. Soft Match
# ## 3.1.1. Train model
# ### Define training parameters
# Hyper-parameters for NeXT training with soft matching enabled.
re_next_soft_match_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_nle_soft_match",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # integer representing the unlabelled dataset size
    "dataset_size": len(re_next_unlabeled_data),
    # task type - "re" for Relation Extraction
    "task": "re",
    # the total batch_size is "match_batch_size" + "unlabeled_batch_size"
    "match_batch_size": 50,
    "unlabeled_batch_size": 100,
    # learning rate
    "learning_rate": 0.1,
    # number of epochs
    "epochs": 5,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d", # usual default: "glove.840B.300d"
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # soft_match loss weightage
    "gamma": 0.5,
    # number of hidden dimensions
    "hidden_dim": 100,
    # random seed
    "random_state": 7698,
    # FIND module number of hidden dimensions
    "pre_train_hidden_dim": 300,
    # FIND module training_size
    "pre_train_training_size": 50000,
    # enable soft match
    "soft_match": True,
    # "find" to train only FIND module,
    # "clf" to train only the classifier,
    # "both" to train FIND module + classifier
    "stage": "both",
    # for the FIND module,
    # when "True" data has to be passed,
    # "False" when re-training or the data was processed earlier and can be retrieved
    "pre_train_build_data": True,
    # when "True" data has to be passed,
    # "False" when re-training or the data was processed earlier and can be retrieved
    "build_data": True,
    # number of pre-training epochs for the FIND module
    "pre_train_epochs": 5,
}
# ### Run model training
# depending on input size, and computing environment this might take time.
# please check FAST API logs for updates
response = requests.post(
    FAST_API_URL + '/training/next/api/',
    json={
        'params': re_next_soft_match_params,
        'label_space': re_label_space,
        'explanation_triples': re_next_explanation_triples,
        'unlabeled_text': re_next_unlabeled_data,
        'ner_label_space': re_ner_label_space
    }
)
# JSON with "save_path" key is returned when successful
response.text
# ---
# ## 3.1.2. Predict labels
# ### Define prediction parameters
# Inference settings for the soft-match NeXT model; these mirror the
# architecture settings used at training time above.
re_next_soft_predict_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_nle_soft_match",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # integer representing the unlabelled dataset size
    "train_dataset_size": len(re_next_unlabeled_data),
    # task type - "re" for Relation Extraction
    "task": "re",
    # evaluation batch size
    "eval_batch_size": 50,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d", # usual default: "glove.840B.300d"
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # number of hidden dimensions
    "hidden_dim": 100,
    # FIND module training_size
    "pre_train_training_size": 50000,
    # label representing the relation "no_relation"
    "none_label_key": "no_relation",
}
# ### Fetch predictions
response = requests.post(
    FAST_API_URL + '/training/next/predict/',
    json={
        'params': re_next_soft_predict_params,
        'label_space': re_label_space,
        'prediction_data': re_predict_data,
        'ner_label_space': re_ner_label_space
    }
)
# returns JSON object with keys
# "class_probs" - list of list representing predictions for each label
# "class_preds" - list representing the label that was predicted
response.text
# ---
# ## 3.1.3. Evaluate model
# ### Define evaluation parameters
# Evaluation settings - identical to the soft-match prediction settings above.
re_next_soft_eval_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_nle_soft_match",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # integer representing the unlabelled dataset size
    "train_dataset_size": len(re_next_unlabeled_data),
    # task type - "re" for Relation Extraction
    "task": "re",
    # evaluation batch size
    "eval_batch_size": 50,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d", # usual default: "glove.840B.300d"
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # number of hidden dimensions
    "hidden_dim": 100,
    # FIND module training_size
    "pre_train_training_size": 50000,
    # label representing the relation "no_relation"
    "none_label_key": "no_relation",
}
# ### Run model evaluation
response = requests.post(
    FAST_API_URL + '/training/next/eval/',
    json={
        'params': re_next_soft_eval_params,
        'label_space': re_label_space,
        'eval_data': re_eval_data,
        'ner_label_space': re_ner_label_space
    }
)
# returns JSON object with keys
# "avg_loss", "avg_eval_ent_f1_score", "avg_eval_ent_f1_score" and "no_relation_thresholds"
# NOTE(review): the key "avg_eval_ent_f1_score" is listed twice above; one of
# them is presumably a different key - verify against the eval endpoint schema.
response.text
# ---
# ---
# ## 3.2. Strict Match
# ## 3.2.1. Train model
# ### Define training parameters
# parameters are same as in "soft_match" except "soft_match" is set to "False" here
re_next_strict_match_params = {
    # a string name representing the model name
    "experiment_name": "semeval_re_nle_strict_match",
    # a string name representing the dataset name
    "dataset_name": "semeval",
    # integer representing the unlabelled dataset size
    "dataset_size": len(re_next_unlabeled_data),
    # task type - "re" for Relation Extraction
    "task": "re",
    # the total batch_size is "match_batch_size" + "unlabeled_batch_size"
    "match_batch_size": 50,
    "unlabeled_batch_size": 100,
    # learning rate
    "learning_rate": 0.1,
    # number of epochs
    "epochs": 5,
    # embedding to be used for training. usual default: "glove.840B.300d"
    "embeddings": "charngram.100d", # usual default: "glove.840B.300d"
    # embedding dimension of the "embeddings" provided
    "emb_dim": 100,
    # soft_match loss weightage
    "gamma": 0.5,
    # number of hidden dimensions
    "hidden_dim": 100,
    # random seed
    "random_state": 7698,
    # FIND module number of hidden dimensions
    "pre_train_hidden_dim": 300,
    # FIND module training_size
    "pre_train_training_size": 50000,
    # disable soft match
    "soft_match": False,
    # "find" to train only FIND module,
    # "clf" to train only the classifier,
    # "both" to train FIND module + classifier
    "stage": "both",
    # for the FIND module,
    # when "True" data has to be passed,
    # "False" when re-training or the data was processed earlier and can be retrieved
    "pre_train_build_data": True,
    # when "True" data has to be passed,
    # "False" when re-training or the data was processed earlier and can be retrieved
    "build_data": True,
    # number of pre-training epochs for the FIND module
    "pre_train_epochs": 5,
}
# ### Run model training
# depending on input size, and computing environment this might take time.
# please check FAST API logs for updates
response = requests.post(
    FAST_API_URL + '/training/next/api/',
    json={
        'params': re_next_strict_match_params,
        'label_space': re_label_space,
        'explanation_triples': re_next_explanation_triples,
        'unlabeled_text': re_next_unlabeled_data,
        'ner_label_space': re_ner_label_space
    }
)
# JSON with "save_path" key is returned when successful
response.text
# ---
# ## 3.2.2. Predict labels
# ### Define prediction parameters
re_next_strict_predict_params = {
# a string name representing the model name
"experiment_name": "semeval_re_nle_strict_match",
# a string name representing the dataset name
"dataset_name": "semeval",
# integer representing the unlabelled dataset size
"train_dataset_size": len(re_next_unlabeled_data),
# task type - "re" for Relation Extraction
"task": "re",
# evaluation batch size
"eval_batch_size": 50,
# embedding to be used for training. usual default: "glove.840B.300d"
"embeddings": "charngram.100d", # usual default: "glove.840B.300d"
# embedding dimension of the "embeddings" provided
"emb_dim": 100,
# number of hidden dimensions
"hidden_dim": 100,
# FIND module training_size
"pre_train_training_size": 50000,
# label representing the relation "no_relation"
"none_label_key": "no_relation",
}
# ### Fetch predictions
response = requests.post(
FAST_API_URL + '/training/next/predict/',
json={
'params': re_next_strict_predict_params,
'label_space': re_label_space,
'prediction_data': re_predict_data,
'ner_label_space': re_ner_label_space
}
)
# returns JSON object with keys
# "class_probs" - list of list representing predictions for each label
# "class_preds" - list representing the label that was predicted
response.text
# ---
# ## 3.2.3. Evaluate model
# ### Define evaluation parameters
re_next_strict_eval_params = {
# a string name representing the model name
"experiment_name": "semeval_re_nle_strict_match",
# a string name representing the dataset name
"dataset_name": "semeval",
# integer representing the unlabelled dataset size
"train_dataset_size": len(re_next_unlabeled_data),
# task type - "re" for Relation Extraction
"task": "re",
# evaluation batch size
"eval_batch_size": 50,
# embedding to be used for training. usual default: "glove.840B.300d"
"embeddings": "charngram.100d", # usual default: "glove.840B.300d"
# embedding dimension of the "embeddings" provided
"emb_dim": 100,
# number of hidden dimensions
"hidden_dim": 100,
# FIND module training_size
"pre_train_training_size": 50000,
# label representing the relation "no_relation"
"none_label_key": "no_relation",
}
# ### Run model evaluation
response = requests.post(
FAST_API_URL + '/training/next/eval/',
json={
'params': re_next_strict_eval_params,
'label_space': re_label_space,
'eval_data': re_eval_data,
'ner_label_space': re_ner_label_space
}
)
# returns JSON object with keys
# "avg_loss", "avg_eval_ent_f1_score", "avg_eval_ent_f1_score" and "no_relation_thresholds"
response.text
| example_notebooks/relation_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import Ipynb_importer
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import datetime as datetime
from mpl_finance import candlestick2_ohlc
from Algorithm.Demo import Demo as IW
# Load the price series; Sheet2 is read twice, once with the first column as
# the index (returns) and once with a default RangeIndex (date, unused below).
returns = pd.read_excel('Data/stock_data.xlsx', sheet_name='Sheet2', index_col=0)
date = pd.read_excel('Data/stock_data.xlsx', sheet_name='Sheet2')
plt.figure(1)
#print(date)
plt.subplot(511)
#returns.plot()
plt.title('Stock Data')
plt.xlabel('Time')
plt.legend(loc=0)
# Run the trading model; Core() returns the signal score, capital curve,
# daily capital change, and position series (defined in Algorithm.Demo).
oiw = IW.Model(returns)
score, cap, cap_daily_p, position = oiw.Core()
plt.plot(score)
plt.subplot(512)
plt.plot(cap_daily_p)
plt.subplot(513)
plt.plot(position)
plt.subplot(514)
plt.plot(cap)
# -
# Pad score/cap with a trailing 0 so all columns have equal length for the
# DataFrame export below - presumably Core() returns one fewer element than
# there are price rows; TODO confirm against Algorithm.Demo.
cap.append(0)
score.append(0)
px_last = returns['PX_LAST']
print len(score), len(position), len(px_last)
ex = pd.DataFrame({'Score': score, 'Capital': cap, 'Last_Price': px_last, 'Position': position, 'Daily %': cap_daily_p})
# +
ex.to_excel('Data/export_TS.xlsx', sheet_name='export')
'''
plt.subplot(311)
prices = pd.read_excel('stock_data.xlsx', sheet_name=2, index_col=0, usecols="A,B,C,D,E")
#prices.plot()
plt.title('Prices')
plt.xlabel('Time')
plt.legend(loc=0)
plt.plot(prices)
plt.subplot(312)
bb = pd.read_excel('stock_data.xlsx', sheet_name=2, index_col=0, usecols="A,B,E,M,N,O")
#bb.plot()
plt.title('BB')
plt.xlabel('Time')
plt.legend(loc=0)
plt.plot(bb)
'''
# Buy-and-hold return vs. strategy return (cap[-1] is the padding 0 appended
# above, hence cap[-2] is the last real capital value).
print float(returns['PX_LAST'][-1]) / float(returns['PX_LAST'][0]) - 1.
print cap[-2] / cap[0] - 1.
# Candlestick chart of the OHLC columns on the bottom subplot.
ax = plt.subplot(515)
candlestick2_ohlc(ax, returns['PX_OPEN'], returns['PX_HIGH'], returns['PX_LOW'], returns['PX_LAST'], width=0.3)
xdate = returns.index
#ax.xaxis.set_major_locator(ticker.MaxNLocator(6))
def mydate(x, pos):
    """Map a numeric tick position to the date at that index in ``xdate``.

    Returns an empty string for out-of-range positions so matplotlib can
    probe tick locations beyond the plotted data without crashing.
    """
    idx = int(x)
    try:
        label = xdate[idx]
    except IndexError:
        label = ''
    return label
# Render x-axis ticks through mydate, then lay out and display the figure.
ax.xaxis.set_major_formatter(ticker.FuncFormatter(mydate))
plt.tight_layout()
plt.show()
| TradingSystem/__main__.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import theano
from theano import tensor as T
# Use single-precision floats so fscalar/fvector/fmatrix below match the data.
theano.config.floatX = 'float32'
# making a train dataset: one feature (0..9) with a roughly linear target
X_train = np.asarray([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0], [9.0]],dtype=theano.config.floatX)
y_train = np.asarray([1.0, 1.3, 3.1, 2.0, 5.0, 6.3, 6.6, 7.4, 8.0, 9.0], dtype=theano.config.floatX)
# -
# OLS regression
def train_linreg(X_train, y_train, eta, epochs):
    """Train a linear regression by full-batch gradient descent in Theano.

    Parameters
    ----------
    X_train : np.ndarray, shape (n_samples, n_features)
    y_train : np.ndarray, shape (n_samples,)
    eta : float
        Learning rate.
    epochs : int
        Number of gradient-descent passes over the full batch.

    Returns
    -------
    costs : list of float
        Sum-of-squared-errors cost after each epoch.
    w : theano shared variable
        Learned weights; w[0] is the bias, w[1:] the feature weights.
    """
    costs = []
    # initialize symbolic inputs and a zero-initialized shared weight vector
    eta0 = T.fscalar('eta0')
    y = T.fvector(name='y')
    X = T.fmatrix(name='X')
    w = theano.shared(np.zeros(shape=(X_train.shape[1] + 1), dtype=theano.config.floatX), name='w')
    # calculate cost: SSE of the linear model X.w[1:] + bias
    net_input = T.dot(X, w[1:]) + w[0]
    errors = y - net_input
    cost = T.sum(T.pow(errors, 2))
    # perform gradient update: plain gradient-descent step on w
    gradient = T.grad(cost, wrt=w)
    update = [(w, w - eta0 * gradient)]
    # compile model; the training data is baked into the graph via `givens`
    train = theano.function(inputs=[eta0], outputs=cost, updates=update, givens={X: X_train, y: y_train})
    for _ in range(epochs):
        costs.append(train(eta))
    return costs, w
# training the linear regression model & plotting the values of the Sum of Squared Errors (SSE)
# (the cost should decrease over epochs for a suitable learning rate)
import matplotlib.pyplot as plt
costs, w = train_linreg(X_train, y_train, eta=0.001, epochs=10)
plt.plot(range(1, len(costs)+1), costs)
plt.tight_layout()
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.show()
# compile a new predict_linreg function to make predictions based on the input features
def predict_linreg(X, w):
    """Return predictions of the trained linear model for input matrix X.

    Parameters
    ----------
    X : np.ndarray, shape (n_samples, n_features)
    w : theano shared variable returned by ``train_linreg``.

    Returns
    -------
    np.ndarray of predictions, shape (n_samples,).
    """
    Xt = T.matrix(name='X')
    net_input = T.dot(Xt, w[1:]) + w[0]
    # `w` is a shared variable, so it is already part of the compiled graph;
    # the original `givens={w: w}` self-substitution was redundant and raises
    # a TypeError on newer Theano versions (shared variables are not allowed
    # as explicit `givens` substitutions there).
    predict = theano.function(inputs=[Xt], outputs=net_input)
    return predict(X)
# plotting the linear regression fit on the training data
plt.scatter(X_train, y_train, marker='s', s=50)
# x-axis uses the sample index, which here equals the feature value (0..9)
plt.plot(range(X_train.shape[0]), predict_linreg(X_train, w), color='gray', marker='o', markersize=4, linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| ch13/02-linear-regression-with-theano.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# IPython magic: prints the current working directory (not valid plain Python).
pwd
# Root of the ACL IMDB sentiment dataset on disk.
data_path='/home/jlam/projects/dl_w_python_nb/data/aclImdb/'
# ls {data_path}
# ### data prep
import os
train_dir = os.path.join(data_path,'train')
# ls {train_dir}
def load_data(dir_name):
    """Load review texts and sentiment labels from *dir_name*.

    Expects ``neg`` and ``pos`` subdirectories in *dir_name*, each
    containing ``.txt`` review files.

    Parameters
    ----------
    dir_name : str
        Directory holding the ``neg``/``pos`` subdirectories.

    Returns
    -------
    (texts, labels) : tuple of lists
        Review strings and their labels (0 for ``neg``, 1 for ``pos``).
    """
    labels = []
    texts = []
    for label_type in ['neg', 'pos']:
        # BUG FIX: the original rebound ``dir_name`` from the module-level
        # ``train_dir``, silently ignoring its own argument; use the parameter.
        label_dir = os.path.join(dir_name, label_type)
        for fname in os.listdir(label_dir):
            if fname.endswith('.txt'):
                # context manager guarantees the file handle is closed
                with open(os.path.join(label_dir, fname), encoding='utf-8') as f:
                    texts.append(f.read())
                labels.append(0 if label_type == 'neg' else 1)
    return texts, labels
texts, labels = load_data(train_dir)
len(texts),len(labels)
# ### build word indexes
# parameters
max_words = 10000  # vocabulary size kept by the tokenizer
max_length = 100  # each review is truncated/padded to this many tokens
embedding_dim = 8  # dimensionality of the learned embedding vectors
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
tokenizer = Tokenizer(num_words=max_words) #initialize by keeping 10,000 words
tokenizer.fit_on_texts(texts) # train
sequences = tokenizer.texts_to_sequences(texts)
len(sequences)
# #### get word index from tokenizer
word_index = tokenizer.word_index
print('found %s unique tokens.' % len(word_index))
word_index['man']
# #### pad sequence to max_len & convert labels to 1d array
data = pad_sequences(sequences,max_length)
labels = np.asarray(labels)
len(data[0])
# ### building model
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
model = Sequential()
# input dim - len of vocab + 1
# output dim - dimension of dense embedding
# max length - required to connect to flatten layer
# after embedding, the shape is (samples, maxlen, 8)
model.add(Embedding(max_words, embedding_dim, input_length=max_length))
# flatten the 3D tensor of embeddings into a 2D tensor of shape
# , shape is (samples, maxlen * 8)
model.add(Flatten())
# Dense+relu adds a non-linear hidden layer on top of the flattened embeddings
model.add(Dense(32, activation='relu'))
# add classification on top
model.add(Dense(1, activation='sigmoid'))
# add optimizer, loss function and metric
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
x_train=data
y_train=labels
# NOTE(review): validation_split takes the LAST 20% of the data, which here
# is all-positive because load_data appends neg then pos without shuffling -
# consider shuffling before fit.
history = model.fit(x_train,y_train,
                    epochs=10,
                    batch_size=32,
                    validation_split=0.2)
# +
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot training/validation accuracy and loss curves from a Keras History."""
    hist = history.history
    epoch_axis = range(1, len(hist['acc']) + 1)
    # accuracy curves (dots = training, line = validation)
    plt.plot(epoch_axis, hist['acc'], 'bo', label='Training acc')
    plt.plot(epoch_axis, hist['val_acc'], 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    plt.figure()
    # loss curves on a separate figure
    plt.plot(epoch_axis, hist['loss'], 'ro', label='Training loss')
    plt.plot(epoch_axis, hist['val_loss'], 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
# -
# Render the accuracy/loss curves for the training run above.
plot_history(history)
| .ipynb_checkpoints/2019_01_26_raw_text_to_embed-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/berilldindar/Pneumonia-Detection-/blob/main/PneumoniaDetection_ALEXNet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="a0HPi4ZEa_Ed"
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
# + id="9UBo1-q0bCbi"
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img
from keras.layers import Flatten,Dense
from keras.models import Sequential
import glob, os, random
# + colab={"base_uri": "https://localhost:8080/"} id="5TG5udUybHLA" outputId="4ff7b7ee-5b67-4cc3-fe23-c6082e10dd9e"
# Mount Google Drive; the dataset paths below live under /content/drive.
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="<KEY>"
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.models import Model, load_model
from keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten
from keras.preprocessing.image import ImageDataGenerator
# Fix the NumPy RNG so the random sample grid below is reproducible.
np.random.seed(22)
# + id="yvWEW47tbMiA"
def load_normal(norm_path):
    """Load all NORMAL chest X-ray images from *norm_path*.

    Each image is resized to 200x200 and converted to grayscale.

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        Grayscale image array of shape (n, 200, 200) and a matching array
        of the string label 'normal'.
    """
    norm_files = np.array(os.listdir(norm_path))
    norm_labels = np.array(['normal']*len(norm_files))
    norm_images = []
    for image in tqdm(norm_files):
        # os.path.join works whether or not norm_path has a trailing slash
        # (the original string concatenation silently required one).
        img = cv2.imread(os.path.join(norm_path, image))
        img = cv2.resize(img, dsize=(200,200))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        norm_images.append(img)
    norm_images = np.array(norm_images)
    return norm_images, norm_labels
def load_pneumonia(pneu_path):
    """Load all PNEUMONIA chest X-ray images from *pneu_path*.

    Labels are taken from the token after the first underscore in each
    file name (presumably 'bacteria' or 'virus' - confirm against the
    dataset's naming scheme).

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        Grayscale image array of shape (n, 200, 200) and the per-file
        label array.
    """
    pneu_files = np.array(os.listdir(pneu_path))
    pneu_labels = np.array([pneu_file.split('_')[1] for pneu_file in pneu_files])
    pneu_images = []
    for image in tqdm(pneu_files):
        # os.path.join tolerates a missing trailing slash in pneu_path,
        # unlike the original string concatenation.
        img = cv2.imread(os.path.join(pneu_path, image))
        img = cv2.resize(img, dsize=(200,200))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        pneu_images.append(img)
    pneu_images = np.array(pneu_images)
    return pneu_images, pneu_labels
# + colab={"base_uri": "https://localhost:8080/"} id="7hW_VR-xbQEj" outputId="7248f011-0e4e-4988-b12e-4937531983e3"
norm_images, norm_labels = load_normal('/content/drive/MyDrive/FinalOdevDerin/train/NORMAL/')
pneu_images, pneu_labels = load_pneumonia('/content/drive/MyDrive/FinalOdevDerin/train/PNEUMONIA/')
# + id="Z3R1T3s0bQAL"
# Stack normal + pneumonia samples into a single training set.
X_train = np.append(norm_images, pneu_images, axis=0)
y_train = np.append(norm_labels, pneu_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="IiRdgBn0bW78" outputId="21574ac7-c42b-47e0-ca32-ba5ccffc9388"
np.unique(y_train,return_counts=True)
# Since OpenCV reads images with a default dtype of 'uint8', everything that
# operates on these images must be 'uint8' for compatibility.
# + colab={"base_uri": "https://localhost:8080/", "height": 267} id="-I8h1weLbZZI" outputId="ba2c7d25-15ed-4f16-9ab2-23127d1f25af"
# Show a random 2x7 grid of training images with their labels.
fig, axes = plt.subplots(ncols=7, nrows=2, figsize=(16, 4))
indices = np.random.choice(len(X_train), 14)
counter = 0
for i in range(2):
    for j in range(7):
        axes[i,j].set_title(y_train[indices[counter]])
        axes[i,j].imshow(X_train[indices[counter]], cmap='gray')
        axes[i,j].get_xaxis().set_visible(False)
        axes[i,j].get_yaxis().set_visible(False)
        counter += 1
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="LEX6f7iBbcTY" outputId="c0c320eb-7993-4d31-a79a-f8849570dca2"
norm_images_test, norm_labels_test = load_normal('/content/drive/MyDrive/FinalOdevDerin/test/NORMAL/')
pneu_images_test, pneu_labels_test = load_pneumonia('/content/drive/MyDrive/FinalOdevDerin/test/PNEUMONIA/')
X_test = np.append(norm_images_test, pneu_images_test, axis=0)
y_test = np.append(norm_labels_test, pneu_labels_test)
# + id="BvuSGFRibeeA"
# Cache the prepared arrays to disk, then reload them (convenient for
# restarted Colab sessions).
with open('pneumonia_data.pickle', 'wb') as f:
    pickle.dump((X_train, X_test, y_train, y_test), f)
with open('pneumonia_data.pickle', 'rb') as f:
    (X_train, X_test, y_train, y_test) = pickle.load(f)
# + id="Xy6l42K6bgRC"
# Make the label vectors 2-D column vectors as required by OneHotEncoder.
y_train = y_train[:, np.newaxis]
y_test = y_test[:, np.newaxis]
# + id="U7pJSpkWbiSG"
# NOTE(review): `sparse=False` was renamed `sparse_output` in
# scikit-learn >= 1.2 - adjust if upgrading.
one_hot_encoder = OneHotEncoder(sparse=False)
# + id="mq-ynjwrbkmp"
y_train_one_hot = one_hot_encoder.fit_transform(y_train)
y_test_one_hot = one_hot_encoder.transform(y_test)
# + id="taGFuBTvbmQo"
# Add an explicit single channel axis: (n, 200, 200) -> (n, 200, 200, 1).
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
# + id="b8XDTXCMbomP"
# Light augmentation: small rotations, zooms and shifts.
datagen = ImageDataGenerator(
    rotation_range = 10,
    zoom_range = 0.1,
    width_shift_range = 0.1,
    validation_split=0.1,
    height_shift_range = 0.1)
# + id="UWSe7O_pby0f"
datagen.fit(X_train)
train_gen = datagen.flow(X_train, y_train_one_hot, batch_size = 32)
# + colab={"base_uri": "https://localhost:8080/"} id="9tf_EojXb1Nf" outputId="10a8a9f3-399b-4d71-f0c8-f31d1a2ca51b"
input_shape = (X_train.shape[1], X_train.shape[2], 1)
print(input_shape)
from keras.layers import Conv2D,MaxPool2D,ZeroPadding2D
# + colab={"base_uri": "https://localhost:8080/"} id="OObufQnkb64h" outputId="908dd1d9-95a3-4ec7-faaa-cefa0f5acdcb"
input1 = Input(shape=(X_train.shape[1], X_train.shape[2], 1))
alexnet=tf.keras.Sequential()
alexnet.add(Conv2D(128,kernel_size=(11,11),strides=(4,4),activation='relu'))
alexnet.add(MaxPool2D(pool_size=(3,3),strides=(2,2)))
alexnet.add(ZeroPadding2D((2,2))) #4tane ekliyor 2,2
alexnet.add(Conv2D(128,kernel_size=(5,5),strides=(1,1),activation='relu'))
alexnet.add(MaxPool2D(pool_size=(3,3),strides=(2,2)))
alexnet.add(ZeroPadding2D((1,1)))
alexnet.add(Conv2D(256,kernel_size=(3,3),strides=(1,1),activation='relu'))
alexnet.add(ZeroPadding2D((1,1)))
alexnet.add(Conv2D(256,kernel_size=(3,3),strides=(1,1),activation='relu'))
alexnet.add(MaxPool2D(pool_size=(3,3),strides=(2,2)))
alexnet.add(Flatten())
alexnet.add(Dense(1024,activation='relu'))
alexnet.add(Dense(1024,activation='relu'))
alexnet.add(Dense(3,activation='softmax'))
alexnet.build(X_train.shape)
alexnet.summary()
# + id="hkDiyqSacQvY"
alexnet.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['acc'])
# + colab={"base_uri": "https://localhost:8080/"} id="TqUhjFBzcQuf" outputId="8cb86174-81e7-4077-a5ed-b2c4336f0f52"
history = alexnet.fit_generator(train_gen, epochs=20,
validation_data=(X_test, y_test_one_hot))
# + colab={"base_uri": "https://localhost:8080/", "height": 795} id="F0kNFSkRcQtg" outputId="9ebdfdc9-7011-4c1a-fd98-19688ccddf44"
# Training curves: accuracy then loss, each with its validation counterpart.
print('Displaying accuracy')
plt.figure(figsize=(8,6))
plt.title('Accuracy scores')
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['acc', 'val_acc'])
plt.show()
print('Displaying loss')
plt.figure(figsize=(8,6))
plt.title('Loss value')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="b__4yvTyccY3" outputId="9760dec4-fcd7-4b52-95f7-872bb3171bc8"
# Raw softmax probabilities for the test set.
predictions = alexnet.predict(X_test)
print(predictions)
# + id="HRuP5uTscfE4"
# Convert one-hot probabilities back to string class labels.
predictions = one_hot_encoder.inverse_transform(predictions)
# + colab={"base_uri": "https://localhost:8080/"} id="cf9UvspDchEN" outputId="a8c81db8-486d-47f5-901c-ace846bbcbc0"
print('Model evaluation')
print(one_hot_encoder.categories_)
classnames = ['bacteria', 'normal', 'virus']
# + colab={"base_uri": "https://localhost:8080/", "height": 512} id="tXqdtaaDci-E" outputId="3b51f496-9cae-43cd-e064-9c38b9022011"
cm = confusion_matrix(y_test, predictions)
plt.figure(figsize=(8,8))
plt.title('Confusion matrix')
sns.heatmap(cm, cbar=False, xticklabels=classnames, yticklabels=classnames, fmt='d', annot=True, cmap=plt.cm.Blues)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="2qal51t-Rv9E" outputId="c0a86807-6d13-430f-de5a-8b4beecc8af5"
print('Classification Report')
# NOTE(review): this ordering differs from `classnames` above; target_names
# must follow the sorted label order used internally - verify before trusting
# the per-class rows of the report.
target_names = ['normal', 'bacteria', 'virus']
print(classification_report(y_test,predictions, target_names=target_names))
| PneumoniaDetection_ALEXNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="text-align: center;">
# <h1>Interactive Charts - Bokeh</h1>
# <a href="https://docs.bokeh.org/en/latest/docs/user_guide/interaction.html" target="_blank">Interactive Bokeh - Main</a>
# </div>
# <div style="text-align: left;">
# <h3>Hiding Glyphs</h3>
# </div>
# +
from bokeh.palettes import Spectral4
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.stocks import AAPL, GOOG, IBM, MSFT
import pandas as pd
# -
local_output_folder = "visuals-html"


def joiner(filename, folder=local_output_folder):
    """Return the Windows-style path of *filename* inside *folder*.

    Fixes the original lambda, which ignored *filename* entirely and
    always produced the same literal path.
    """
    return rf"{folder}\{filename}"
# +
# NOTE(review): this first `data` assignment is dead - the loop variable
# below immediately rebinds `data` for each ticker.
data = [AAPL, GOOG, IBM, MSFT]
p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
p.title.text = "Click ticker name in legend to hide data"
# One closing-price line per ticker, colored from the Spectral4 palette.
for data, name, color in zip([AAPL, GOOG, IBM, MSFT], ["AAPL", "GOOG", "IBM", "MSFT"], Spectral4):
    df = pd.DataFrame(data)
    df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"])
    p.line(df.loc[:, "date"], df.loc[:, "close"], line_width=2, color=color, alpha=0.8, legend_label=name)
p.legend.location = "top_left"
# clicking a legend entry toggles the corresponding line's visibility
p.legend.click_policy="hide"
output_file(joiner("stocks-1.html"), title="Interactive Plot Example")
show(p)
| notebook-samples/.ipynb_checkpoints/bokeh-stocks-hide-show-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: p36workshop
# language: python
# name: p36workshop
# ---
# ## SageMaker Pipelines integration with Model Monitor and Clarify
#
# This notebook showcases how Model Monitor and Clarify steps can be integrated with SageMaker Pipelines. This allows users to calculate
# baselines for data quality and model quality checks by running the underlying Model Monitor and Clarify containers.
# ## Data/Model Quality, Bias, and Model Explainability Checks in SageMaker Pipelines
#
# This notebook introduces two new step types in SageMaker Pipelines -
# * `QualityCheckStep`
# * `ClarifyCheckStep`
#
# With these two steps, the pipeline is able to perform baseline calculations that are needed as a standard against which data/model quality issues can be detected (including bias and explainability).
#
# These steps leverage SageMaker pre-built containers:
#
# * `QualityCheckStep` (for Data/Model Quality): [sagemaker-model-monitor-analyzer](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-pre-built-container.html)
# * `ClarifyCheckStep` (for Data/Model Bias and Model Explainability): [sagemaker-clarify-processing](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-configure-processing-jobs.html#clarify-processing-job-configure-container)
#
# The training dataset that you used to train the model is usually a good baseline dataset. The training dataset data schema and the inference dataset schema should exactly match (the number and order of the features). Note that the prediction/output columns are assumed to be the first columns in the training dataset. From the training dataset, you can ask SageMaker to suggest a set of baseline constraints and generate descriptive statistics to explore the data.
#
# These two new steps will always calculate new baselines using the dataset provided.
# ### Drift Check Baselines in the Model Registry
#
# The `RegisterStep` has a new parameter called `drift_check_baselines`. This refers to the baseline files associated with the model. When deployed, these baseline files are used by Model Monitor for Model Quality/Data Quality checks. In addition, these baselines can be used in `QualityCheckStep` and `ClarifyCheckStep` to compare newly trained models against models that have already been registered in the Model Registry.
#
# ### Step Properties
#
# The `QualityCheckStep` has the following properties -
#
# * `CalculatedBaselineStatistics` : The baseline statistics file calculated by the underlying Model Monitor container.
# * `CalculatedBaselineConstraints` : The baseline constraints file calculated by the underlying Model Monitor container.
# * `BaselineUsedForDriftCheckStatistics` and `BaselineUsedForDriftCheckConstraints` : These are the two properties used to set `drift_check_baseline` in the Model Registry. The values set in these properties vary depending on the parameters passed to the step. The different behaviors are described in the table below.
#
# The `ClarifyCheckStep` has the following properties -
#
# * `CalculatedBaselineConstraints` : The baseline constraints file calculated by the underlying Clarify container.
# * `BaselineUsedForDriftCheckConstraints` : This property is used to set `drift_check_baseline` in the Model Registry. The values set in this property will vary depending on the parameters passed to the step. The different behaviors are described in the table below.
# ### Notebook Overview
#
# This notebook should be run with `Python 3.9` using the SageMaker Studio `Python3 (Data Science)` kernel. The `sagemaker` sdk version required for this notebook is `>2.70.0`.
# Let's start by installing preview wheels of the Python SDK, boto and aws cli
# ! pip install sagemaker botocore boto3 awscli --upgrade
# +
import os
import json
import boto3
import sagemaker
import sagemaker.session
from sagemaker import utils
from sagemaker.estimator import Estimator
from sagemaker.inputs import TrainingInput, CreateModelInput, TransformInput
from sagemaker.model import Model
from sagemaker.transformer import Transformer
from sagemaker.model_metrics import MetricsSource, ModelMetrics, FileSource
from sagemaker.drift_check_baselines import DriftCheckBaselines
from sagemaker.processing import (
ProcessingInput,
ProcessingOutput,
ScriptProcessor,
)
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.functions import JsonGet
from sagemaker.workflow.parameters import (
ParameterBoolean,
ParameterInteger,
ParameterString,
)
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.steps import (
ProcessingStep,
TrainingStep,
CreateModelStep,
TransformStep,
)
from sagemaker.workflow.step_collections import RegisterModel
# Importing new steps and helper functions
from sagemaker.workflow.check_job_config import CheckJobConfig
from sagemaker.workflow.clarify_check_step import (
DataBiasCheckConfig,
ClarifyCheckStep,
ModelBiasCheckConfig,
ModelPredictedLabelConfig,
ModelExplainabilityCheckConfig,
SHAPConfig,
)
from sagemaker.workflow.quality_check_step import (
DataQualityCheckConfig,
ModelQualityCheckConfig,
QualityCheckStep,
)
from sagemaker.workflow.execution_variables import ExecutionVariables
from sagemaker.workflow.functions import Join
from sagemaker.model_monitor import DatasetFormat, model_monitoring
from sagemaker.clarify import BiasConfig, DataConfig, ModelConfig
# -
# ### Create the SageMaker Session
# Resolve the AWS region from the default SageMaker session.
region = sagemaker.Session().boto_region_name
sm_client = boto3.client("sagemaker")
boto_session = boto3.Session(region_name=region)
# A single Session object is reused by every step so all jobs share the same
# region, credentials, and SageMaker client.
sagemaker_session = sagemaker.session.Session(boto_session=boto_session, sagemaker_client=sm_client)
# S3 prefix under which this example stores its artifacts.
prefix = "model-monitor-clarify-step-pipeline"
# ### Define variables and parameters needed for the Pipeline steps
# IAM role assumed by the processing, training, and check jobs.
role = sagemaker.get_execution_role()
default_bucket = sagemaker_session.default_bucket()
base_job_prefix = "model-monitor-clarify"  # prefix for job names and S3 output paths
model_package_group_name = "model-monitor-clarify-group"  # Model Registry group
pipeline_name = "model-monitor-clarify-pipeline"
# ### Define pipeline parameters
#
# Both `QualityCheckStep` and `ClarifyCheckStep` use two boolean flags `skip_check` and `register_new_baseline` to control their behavior.
#
# * `skip_check` : This determines if a drift check is executed or not.
# * `register_new_baseline` : This determines if the newly calculated baselines (in the step property `CalculatedBaselines`) should be set in the step property `BaselineUsedForDriftCheck`.
# * `supplied_baseline_statistics` and `supplied_baseline_constraints` : If `skip_check` is set to False, baselines can be provided to this step through this parameter. If provided, the step will compare the newly calculated baselines (`CalculatedBaselines`) against those provided here instead of finding the latest baselines from the Model Registry. In the case of `ClarifyCheckStep`, only `supplied_baseline_constraints` is a valid parameter, for `QualityCheckStep`, both parameters are used.
# * `model_package_group_name` : The step will use the `drift_check_baselines` from the latest approved model in the model package group for the drift check. If `supplied_baseline_*` is provided, this field will be ignored.
#
# The first time the pipeline is run, the `skip_check` value should be set to True using the pipeline execution parameters so that new baselines are registered and no drift check is executed.
# ### Combining Pipeline parameters
#
# This table summarizes how the pipeline parameters work when combined.
#
# The parameter `drift_check_baselines` is used to supply baselines to the `RegisterStep` that will be used for all drift checks involving the model.
#
# Newly calculated baselines can be referenced by the properties `CalculatedBaselineStatistics` and `CalculatedBaselineConstraints` on the `QualityCheckStep` and `CalculatedBaselineConstraints` on the `ClarifyCheckStep`.
#
# For example, `data_quality_check_step.properties.CalculatedBaselineStatistics` and `data_quality_check_step.properties.CalculatedBaselineConstraints`. This property refers to the baseline that is calculated when the data quality check step is executed.
#
# | `skip_check` / `register_new_baseline` | Does step do a drift check? | Value of step property `CalculatedBaseline` | Value of step property `BaselineUsedForDriftCheck` | Possible Circumstances for this parameter combination|
# | -------------------------------------- | ---------------------------------------------------------|------------------------------------------------------------ |------------------------------------------------- | -----------------------------------------------------|
# | F / F | Drift Check executed against existing baselines. | New baselines calculated by step execution | Baseline from latest approved model in Model Registry or baseline supplied as step parameter | Regular re-training with checks enabled to get a new model version, but carry over previous baselines as DriftCheckBaselines in Registry for new model version. |
# | F / T | Drift Check executed against existing baselines. | New baselines calculated by step execution | Newly calculated baseline by step execution (value of property `CalculatedBaseline`) | Regular re-training with checks enabled to get a new model version, but refresh DriftCheckBaselines in Registry with newly calculated baselines for the new model version. |
# | T / F | No Drift Check. | New baselines calculated by step execution | Baseline from latest approved model in Model Registry or baseline supplied as step parameter | Violation detected by the model monitor on endpoint for a particular type of check and the pipeline is triggered for retraining a new model. Skip the check against previous baselines, but carry over previous baselines as DriftCheckBaselines in Registry for new model version. |
# | T / T | No Drift Check. | New baselines calculated by step execution | Newly calculated baseline by step execution (value of property `CalculatedBaseline`) | a. Initial run of the pipeline, building the first model version and generate initial baselines. <br>b. Violation detected by the model monitor on endpoint for a particular type of check and the pipeline is triggered for retraining a new model. Skip the check against previous baselines and refresh DriftCheckBaselines with newly calculated baselines in Registry directly. |
# +
# Generic pipeline parameters: instance sizing, model approval status, and the
# location of the raw input data. All of them can be overridden per execution.
processing_instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)
processing_instance_type = ParameterString(
    name="ProcessingInstanceType", default_value="ml.m5.xlarge"
)
training_instance_type = ParameterString(name="TrainingInstanceType", default_value="ml.m5.xlarge")
model_approval_status = ParameterString(
    name="ModelApprovalStatus", default_value="PendingManualApproval"
)
# The dataset used here is the open source Abalone dataset that can be found
# here - https://archive.ics.uci.edu/ml/datasets/abalone
input_data = ParameterString(
    name="InputDataUrl",
    # Plain string literal: the previous f-string had no placeholders (F541).
    default_value="s3://sagemaker-sample-files/datasets/tabular/uci_abalone/abalone.csv",
)
# Each check step gets its own skip/register flags plus optional supplied
# baselines (see the parameter-combination table above).
# for data quality check step
skip_check_data_quality = ParameterBoolean(name="SkipDataQualityCheck", default_value=False)
register_new_baseline_data_quality = ParameterBoolean(
    name="RegisterNewDataQualityBaseline", default_value=False
)
supplied_baseline_statistics_data_quality = ParameterString(
    name="DataQualitySuppliedStatistics", default_value=""
)
supplied_baseline_constraints_data_quality = ParameterString(
    name="DataQualitySuppliedConstraints", default_value=""
)
# for data bias check step (ClarifyCheckStep: constraints only, no statistics)
skip_check_data_bias = ParameterBoolean(name="SkipDataBiasCheck", default_value=False)
register_new_baseline_data_bias = ParameterBoolean(
    name="RegisterNewDataBiasBaseline", default_value=False
)
supplied_baseline_constraints_data_bias = ParameterString(
    name="DataBiasSuppliedBaselineConstraints", default_value=""
)
# for model quality check step
skip_check_model_quality = ParameterBoolean(name="SkipModelQualityCheck", default_value=False)
register_new_baseline_model_quality = ParameterBoolean(
    name="RegisterNewModelQualityBaseline", default_value=False
)
supplied_baseline_statistics_model_quality = ParameterString(
    name="ModelQualitySuppliedStatistics", default_value=""
)
supplied_baseline_constraints_model_quality = ParameterString(
    name="ModelQualitySuppliedConstraints", default_value=""
)
# for model bias check step
skip_check_model_bias = ParameterBoolean(name="SkipModelBiasCheck", default_value=False)
register_new_baseline_model_bias = ParameterBoolean(
    name="RegisterNewModelBiasBaseline", default_value=False
)
supplied_baseline_constraints_model_bias = ParameterString(
    name="ModelBiasSuppliedBaselineConstraints", default_value=""
)
# for model explainability check step
skip_check_model_explainability = ParameterBoolean(
    name="SkipModelExplainabilityCheck", default_value=False
)
register_new_baseline_model_explainability = ParameterBoolean(
    name="RegisterNewModelExplainabilityBaseline", default_value=False
)
supplied_baseline_constraints_model_explainability = ParameterString(
    name="ModelExplainabilitySuppliedBaselineConstraints", default_value=""
)
# -
# ### Processing step for feature engineering
# +
# %%writefile preprocess.py
"""Feature engineers the abalone dataset."""
import argparse
import logging
import os
import pathlib
import requests
import tempfile
import boto3
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# Root logger echoing to stdout so messages show up in the processing job's
# CloudWatch logs.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
    "sex",
    "length",
    "diameter",
    "height",
    "whole_weight",
    "shucked_weight",
    "viscera_weight",
    "shell_weight",
]
# Target column: ring count (a proxy for the abalone's age).
label_column = "rings"
# Explicit read_csv dtypes: "sex" is categorical, everything else numeric.
feature_columns_dtype = {
    "sex": str,
    "length": np.float64,
    "diameter": np.float64,
    "height": np.float64,
    "whole_weight": np.float64,
    "shucked_weight": np.float64,
    "viscera_weight": np.float64,
    "shell_weight": np.float64,
}
label_column_dtype = {"rings": np.float64}
def merge_two_dicts(x, y):
    """Merge two dicts into a new dict without mutating either input.

    On key collisions the value from ``y`` wins, matching the original
    ``copy()``/``update()`` semantics (PEP 448 unpacking form).
    """
    return {**x, **y}
if __name__ == "__main__":
    logger.debug("Starting preprocessing.")
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-data", type=str, required=True)
    args = parser.parse_args()
    # Processing containers mount their inputs/outputs under /opt/ml/processing.
    base_dir = "/opt/ml/processing"
    pathlib.Path(f"{base_dir}/data").mkdir(parents=True, exist_ok=True)
    # Split the s3://bucket/key URI into bucket and key components.
    input_data = args.input_data
    bucket = input_data.split("/")[2]
    key = "/".join(input_data.split("/")[3:])
    logger.info("Downloading data from bucket: %s, key: %s", bucket, key)
    fn = f"{base_dir}/data/abalone-dataset.csv"
    s3 = boto3.resource("s3")
    s3.Bucket(bucket).download_file(key, fn)
    logger.debug("Reading downloaded data.")
    df = pd.read_csv(
        fn,
        header=None,
        names=feature_columns_names + [label_column],
        dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype),
    )
    # The raw file is no longer needed once the DataFrame is in memory.
    os.unlink(fn)
    logger.debug("Defining transformers.")
    # Numeric features: median-impute missing values, then standardize.
    numeric_features = list(feature_columns_names)
    numeric_features.remove("sex")
    numeric_transformer = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler()),
        ]
    )
    # Categorical feature "sex": constant-impute, then one-hot encode.
    categorical_features = ["sex"]
    categorical_transformer = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
            ("onehot", OneHotEncoder(handle_unknown="ignore")),
        ]
    )
    preprocess = ColumnTransformer(
        transformers=[
            ("num", numeric_transformer, numeric_features),
            ("cat", categorical_transformer, categorical_features),
        ]
    )
    logger.info("Applying transforms.")
    y = df.pop("rings")
    X_pre = preprocess.fit_transform(df)
    y_pre = y.to_numpy().reshape(len(y), 1)
    # XGBoost's CSV input convention expects the label in the first column.
    X = np.concatenate((y_pre, X_pre), axis=1)
    logger.info("Splitting %d rows of data into train, validation, test datasets.", len(X))
    # 70% train / 15% validation / 15% test after an in-place shuffle.
    np.random.shuffle(X)
    train, validation, test = np.split(X, [int(0.7 * len(X)), int(0.85 * len(X))])
    logger.info("Writing out datasets to %s.", base_dir)
    pd.DataFrame(train).to_csv(f"{base_dir}/train/train.csv", header=False, index=False)
    pd.DataFrame(validation).to_csv(
        f"{base_dir}/validation/validation.csv", header=False, index=False
    )
    pd.DataFrame(test).to_csv(f"{base_dir}/test/test.csv", header=False, index=False)
# +
# Runs preprocess.py in a managed scikit-learn container.
sklearn_processor = SKLearnProcessor(
    framework_version="0.23-1",
    instance_type=processing_instance_type,
    instance_count=processing_instance_count,
    base_job_name=f"{base_job_prefix}/sklearn-abalone-preprocess",
    sagemaker_session=sagemaker_session,
    role=role,
)
# The three named outputs are consumed downstream by the training, quality
# check, bias check, and evaluation steps via step_process.properties.
step_process = ProcessingStep(
    name="PreprocessAbaloneData",
    processor=sklearn_processor,
    outputs=[
        ProcessingOutput(output_name="train", source="/opt/ml/processing/train"),
        ProcessingOutput(output_name="validation", source="/opt/ml/processing/validation"),
        ProcessingOutput(output_name="test", source="/opt/ml/processing/test"),
    ],
    code="preprocess.py",
    job_arguments=["--input-data", input_data],
)
# -
# ### Calculating the Data Quality
#
# `CheckJobConfig` is a helper function that's used to define the job configurations used by the `QualityCheckStep`. By separating the job configuration from the step parameters, the same `CheckJobConfig` can be used across multiple steps for quality checks.
#
# The `DataQualityCheckConfig` is used to define the Quality Check job by specifying the dataset used to calculate the baseline, in this case, the training dataset from the data processing step, the dataset format, in this case, a csv file with no headers, and the output path for the results of the data quality check.
# +
# Shared job configuration reused by every quality/Clarify check step below.
check_job_config = CheckJobConfig(
    role=role,
    instance_count=1,
    instance_type="ml.c5.xlarge",
    volume_size_in_gb=120,
    sagemaker_session=sagemaker_session,
)
# Baseline the training split. The CSV has no header and the label sits in the
# first column ("START"). Join("/") on "s3:/" + parts yields an s3:// URI.
data_quality_check_config = DataQualityCheckConfig(
    baseline_dataset=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri,
    dataset_format=DatasetFormat.csv(header=False, output_columns_position="START"),
    output_s3_uri=Join(
        on="/",
        values=[
            "s3:/",
            default_bucket,
            base_job_prefix,
            ExecutionVariables.PIPELINE_EXECUTION_ID,
            "dataqualitycheckstep",
        ],
    ),
)
data_quality_check_step = QualityCheckStep(
    name="DataQualityCheckStep",
    skip_check=skip_check_data_quality,
    register_new_baseline=register_new_baseline_data_quality,
    quality_check_config=data_quality_check_config,
    check_job_config=check_job_config,
    supplied_baseline_statistics=supplied_baseline_statistics_data_quality,
    supplied_baseline_constraints=supplied_baseline_constraints_data_quality,
    model_package_group_name=model_package_group_name,
)
# -
# ### Calculating the Data Bias
#
# The job configuration from the previous step is used here and the `DataConfig` class is used to define how the `ClarifyCheckStep` should compute the data bias. The training dataset is used again for the bias evaluation, the column representing the label is specified through the `label` parameter, and a `BiasConfig` is provided.
#
# In the `BiasConfig`, we specify a facet name (the column that is the focal point of the bias calculation), the value of the facet that determines the range of values it can hold, and the threshold value for the label.
#
# More details on `BiasConfig` can be found [here](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.clarify.BiasConfig).
# +
# Where Clarify writes the generated analysis configuration for this check.
data_bias_analysis_cfg_output_path = (
    f"s3://{default_bucket}/{base_job_prefix}/databiascheckstep/analysis_cfg"
)
# label=0: the label is the first column of the headerless training CSV.
data_bias_data_config = DataConfig(
    s3_data_input_path=step_process.properties.ProcessingOutputConfig.Outputs[
        "train"
    ].S3Output.S3Uri,
    s3_output_path=Join(
        on="/",
        values=[
            "s3:/",
            default_bucket,
            base_job_prefix,
            ExecutionVariables.PIPELINE_EXECUTION_ID,
            "databiascheckstep",
        ],
    ),
    label=0,
    dataset_type="text/csv",
    s3_analysis_config_output_path=data_bias_analysis_cfg_output_path,
)
# Facet column 8 with threshold 0.5 — presumably one of the one-hot encoded
# "sex" columns in the featurized vector; label threshold 15.0 rings.
data_bias_config = BiasConfig(
    label_values_or_threshold=[15.0], facet_name=[8], facet_values_or_threshold=[[0.5]]
)
data_bias_check_config = DataBiasCheckConfig(
    data_config=data_bias_data_config,
    data_bias_config=data_bias_config,
)
data_bias_check_step = ClarifyCheckStep(
    name="DataBiasCheckStep",
    clarify_check_config=data_bias_check_config,
    check_job_config=check_job_config,
    skip_check=skip_check_data_bias,
    register_new_baseline=register_new_baseline_data_bias,
    supplied_baseline_constraints=supplied_baseline_constraints_data_bias,
    model_package_group_name=model_package_group_name,
)
# -
# ### Train an XGBoost Model
# +
# S3 prefix for the training job's model artifacts.
model_path = f"s3://{sagemaker_session.default_bucket()}/{base_job_prefix}/AbaloneTrain"
# Built-in XGBoost container for this region/version.
image_uri = sagemaker.image_uris.retrieve(
    framework="xgboost",
    region=region,
    version="1.0-1",
    py_version="py3",
    instance_type=training_instance_type,
)
xgb_train = Estimator(
    image_uri=image_uri,
    instance_type=training_instance_type,
    instance_count=1,
    output_path=model_path,
    base_job_name=f"{base_job_prefix}/abalone-train",
    sagemaker_session=sagemaker_session,
    role=role,
)
xgb_train.set_hyperparameters(
    # "reg:squarederror" is the canonical name of the squared-error regression
    # objective; "reg:linear" is its deprecated alias in XGBoost >= 0.90, so
    # this trains the exact same objective without the deprecation warning.
    objective="reg:squarederror",
    num_round=50,
    max_depth=5,
    eta=0.2,
    gamma=4,
    min_child_weight=6,
    subsample=0.7,
    # NOTE(review): "silent" is deprecated in favor of "verbosity" in
    # XGBoost 1.0 — kept as-is to avoid changing the container's behavior.
    silent=0,
)
# Training consumes the processed train/validation splits; the explicit
# depends_on makes sure both check steps finish before training starts.
step_train = TrainingStep(
    name="TrainAbaloneModel",
    estimator=xgb_train,
    inputs={
        "train": TrainingInput(
            s3_data=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri,
            content_type="text/csv",
        ),
        "validation": TrainingInput(
            s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
                "validation"
            ].S3Output.S3Uri,
            content_type="text/csv",
        ),
    },
    depends_on=[data_bias_check_step.name, data_quality_check_step.name],
)
# -
# ### Create the model
#
# The model is created so that a batch transform job can be used to get predictions from the model on a test dataset. These predictions are used when calculating model quality, model bias, and model explainability.
# +
# Wrap the trained artifact as a deployable Model for batch transform.
model = Model(
    image_uri=image_uri,
    model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
    sagemaker_session=sagemaker_session,
    role=role,
)
# NOTE(review): Elastic Inference (ml.eia1.*) accelerators are deprecated on
# AWS — confirm this accelerator_type is still supported in your region.
inputs = CreateModelInput(
    instance_type="ml.m5.large",
    accelerator_type="ml.eia1.medium",
)
step_create_model = CreateModelStep(
    name="AbaloneCreateModel",
    model=model,
    inputs=inputs,
)
# -
# ### Transform Output
#
# The output of the transform step combines the prediction and the input label. The output format is <br>
# `prediction, original label`
# +
# Batch transform over the test split to produce predictions for the model
# quality check.
transformer = Transformer(
    model_name=step_create_model.properties.ModelName,
    instance_type="ml.m5.xlarge",
    instance_count=1,
    accept="text/csv",
    assemble_with="Line",
    output_path=f"s3://{default_bucket}/AbaloneTransform",
)
# input_filter "$[1:]" drops the label column before inference;
# join_source "Input" appends the prediction to each input record;
# output_filter "$[0,-1]" keeps only the first field (label) and the last
# field (prediction) of the joined record.
step_transform = TransformStep(
    name="AbaloneTransform",
    transformer=transformer,
    inputs=TransformInput(
        data=step_process.properties.ProcessingOutputConfig.Outputs["test"].S3Output.S3Uri,
        input_filter="$[1:]",
        join_source="Input",
        output_filter="$[0,-1]",
        content_type="text/csv",
        split_type="Line",
    ),
)
# -
# ### Check the Model Quality
#
# In this `QualityCheckStep` we calculate the baselines for statistics and constraints using the predictions that the model generates from the test dataset (output from the TransformStep). We define the problem type as 'Regression' in the `ModelQualityCheckConfig` along with specifying the columns which represent the input and output. Since the dataset has no headers, `_c0`, `_c1` are auto-generated header names that should be used in the `ModelQualityCheckConfig`.
# +
# Baseline model quality from the batch-transform output (headerless CSV, so
# Model Monitor auto-generates _c0, _c1 column names).
model_quality_check_config = ModelQualityCheckConfig(
    baseline_dataset=step_transform.properties.TransformOutput.S3OutputPath,
    dataset_format=DatasetFormat.csv(header=False),
    output_s3_uri=Join(
        on="/",
        values=[
            "s3:/",
            default_bucket,
            base_job_prefix,
            ExecutionVariables.PIPELINE_EXECUTION_ID,
            "modelqualitycheckstep",
        ],
    ),
    problem_type="Regression",
    # NOTE(review): with join_source="Input" and output_filter "$[0,-1]" the
    # joined record appears to be [label, prediction], which would make _c0
    # the ground truth and _c1 the inference — the opposite of the mapping
    # below. MSE is symmetric so the baseline is self-consistent either way,
    # but confirm the column order against the actual transform output.
    inference_attribute="_c0",  # use auto-populated headers since we don't have headers in the dataset
    ground_truth_attribute="_c1",  # use auto-populated headers since we don't have headers in the dataset
)
model_quality_check_step = QualityCheckStep(
    name="ModelQualityCheckStep",
    skip_check=skip_check_model_quality,
    register_new_baseline=register_new_baseline_model_quality,
    quality_check_config=model_quality_check_config,
    check_job_config=check_job_config,
    supplied_baseline_statistics=supplied_baseline_statistics_model_quality,
    supplied_baseline_constraints=supplied_baseline_constraints_model_quality,
    model_package_group_name=model_package_group_name,
)
# -
# ### Check for Model Bias
#
# Similar to the Data Bias check step, a `BiasConfig` is defined and Clarify is used to calculate the model bias using the training dataset and the model.
# +
# Where Clarify writes the generated analysis configuration for this check.
model_bias_analysis_cfg_output_path = (
    f"s3://{default_bucket}/{base_job_prefix}/modelbiascheckstep/analysis_cfg"
)
# Same training split and label position as the data bias check.
model_bias_data_config = DataConfig(
    s3_data_input_path=step_process.properties.ProcessingOutputConfig.Outputs[
        "train"
    ].S3Output.S3Uri,
    s3_output_path=Join(
        on="/",
        values=[
            "s3:/",
            default_bucket,
            base_job_prefix,
            ExecutionVariables.PIPELINE_EXECUTION_ID,
            "modelbiascheckstep",
        ],
    ),
    s3_analysis_config_output_path=model_bias_analysis_cfg_output_path,
    label=0,
    dataset_type="text/csv",
)
# Clarify spins up a shadow endpoint for the model to get predictions.
model_config = ModelConfig(
    model_name=step_create_model.properties.ModelName,
    instance_count=1,
    instance_type="ml.m5.xlarge",
)
# We are using this bias config to configure Clarify to detect bias based on the first feature in the featurized vector for Sex
model_bias_config = BiasConfig(
    label_values_or_threshold=[15.0], facet_name=[8], facet_values_or_threshold=[[0.5]]
)
model_bias_check_config = ModelBiasCheckConfig(
    data_config=model_bias_data_config,
    data_bias_config=model_bias_config,
    model_config=model_config,
    model_predicted_label_config=ModelPredictedLabelConfig(),
)
model_bias_check_step = ClarifyCheckStep(
    name="ModelBiasCheckStep",
    clarify_check_config=model_bias_check_config,
    check_job_config=check_job_config,
    skip_check=skip_check_model_bias,
    register_new_baseline=register_new_baseline_model_bias,
    supplied_baseline_constraints=supplied_baseline_constraints_model_bias,
    model_package_group_name=model_package_group_name,
)
# -
# ### Check Model Explainability
#
# SageMaker Clarify uses a model-agnostic feature attribution approach, which you can use to understand why a model made a prediction after training and to provide per-instance explanation during inference. The implementation includes a scalable and efficient implementation of SHAP, based on the concept of a Shapley value from the field of cooperative game theory that assigns each feature an importance value for a particular prediction.
#
# For Model Explainability, Clarify requires an explainability configuration to be provided. In this example, we use `SHAPConfig`. For more information of `explainability_config`, visit the [Clarify documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/clarify-model-explainability.html).
# +
# Where Clarify writes the generated analysis configuration for this check.
# f-string form, matching the sibling databiascheckstep / modelbiascheckstep
# cells, instead of str.format.
model_explainability_analysis_cfg_output_path = (
    f"s3://{default_bucket}/{base_job_prefix}/modelexplainabilitycheckstep/analysis_cfg"
)
# Same training split and label position as the bias checks.
model_explainability_data_config = DataConfig(
    s3_data_input_path=step_process.properties.ProcessingOutputConfig.Outputs[
        "train"
    ].S3Output.S3Uri,
    s3_output_path=Join(
        on="/",
        values=[
            "s3:/",
            default_bucket,
            base_job_prefix,
            ExecutionVariables.PIPELINE_EXECUTION_ID,
            "modelexplainabilitycheckstep",
        ],
    ),
    s3_analysis_config_output_path=model_explainability_analysis_cfg_output_path,
    label=0,
    dataset_type="text/csv",
)
# Small num_samples keeps the SHAP computation fast for this example.
shap_config = SHAPConfig(seed=123, num_samples=10)
model_explainability_check_config = ModelExplainabilityCheckConfig(
    data_config=model_explainability_data_config,
    model_config=model_config,
    explainability_config=shap_config,
)
model_explainability_check_step = ClarifyCheckStep(
    name="ModelExplainabilityCheckStep",
    clarify_check_config=model_explainability_check_config,
    check_job_config=check_job_config,
    skip_check=skip_check_model_explainability,
    register_new_baseline=register_new_baseline_model_explainability,
    supplied_baseline_constraints=supplied_baseline_constraints_model_explainability,
    model_package_group_name=model_package_group_name,
)
# -
# ### Evaluate the performance of the model
#
# Using a processing job, evaluate the performance of the model. The performance is used in the Condition Step to determine if the model should be registered or not.
# +
# %%writefile evaluate.py
"""Evaluation script for measuring mean squared error."""
import json
import logging
import pathlib
import pickle
import tarfile
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import mean_squared_error
# Root logger echoing to stdout for the processing job's CloudWatch logs.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == "__main__":
    logger.debug("Starting evaluation.")
    # Model artifact mounted here by the evaluation step's ProcessingInput.
    model_path = "/opt/ml/processing/model/model.tar.gz"
    with tarfile.open(model_path) as tar:
        # NOTE(review): extractall() is acceptable only because this tarball
        # is produced by our own training step; for untrusted archives use a
        # filtered/validated extraction.
        tar.extractall(path=".")
    logger.debug("Loading xgboost model.")
    # NOTE(review): pickle.load is safe here only because the artifact comes
    # from our own pipeline; never unpickle untrusted data.
    model = pickle.load(open("xgboost-model", "rb"))
    logger.debug("Reading test data.")
    test_path = "/opt/ml/processing/test/test.csv"
    df = pd.read_csv(test_path, header=None)
    logger.debug("Reading test data.")
    # First column is the label; the remaining columns are features.
    y_test = df.iloc[:, 0].to_numpy()
    df.drop(df.columns[0], axis=1, inplace=True)
    X_test = xgboost.DMatrix(df.values)
    logger.info("Performing predictions against test data.")
    predictions = model.predict(X_test)
    logger.debug("Calculating mean squared error.")
    mse = mean_squared_error(y_test, predictions)
    std = np.std(y_test - predictions)
    # Shape matches the json_path used by the ConditionStep
    # ("regression_metrics.mse.value").
    report_dict = {
        "regression_metrics": {
            "mse": {"value": mse, "standard_deviation": std},
        },
    }
    output_dir = "/opt/ml/processing/evaluation"
    pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
    logger.info("Writing out evaluation report with mse: %f", mse)
    # The report is read back by the pipeline via a PropertyFile.
    evaluation_path = f"{output_dir}/evaluation.json"
    with open(evaluation_path, "w") as f:
        f.write(json.dumps(report_dict))
# -
# Runs evaluate.py inside the same XGBoost container used for training.
script_eval = ScriptProcessor(
    image_uri=image_uri,
    command=["python3"],
    instance_type=processing_instance_type,
    instance_count=1,
    base_job_name=f"{base_job_prefix}/script-abalone-eval",
    sagemaker_session=sagemaker_session,
    role=role,
)
# PropertyFile lets the ConditionStep read values out of evaluation.json.
evaluation_report = PropertyFile(
    name="AbaloneEvaluationReport",
    output_name="evaluation",
    path="evaluation.json",
)
step_eval = ProcessingStep(
    name="EvaluateAbaloneModel",
    processor=script_eval,
    inputs=[
        ProcessingInput(
            source=step_train.properties.ModelArtifacts.S3ModelArtifacts,
            destination="/opt/ml/processing/model",
        ),
        ProcessingInput(
            source=step_process.properties.ProcessingOutputConfig.Outputs["test"].S3Output.S3Uri,
            destination="/opt/ml/processing/test",
        ),
    ],
    outputs=[
        ProcessingOutput(output_name="evaluation", source="/opt/ml/processing/evaluation"),
    ],
    code="evaluate.py",
    property_files=[evaluation_report],
)
# ### Define the metrics to be registered with the model in the Model Registry
# +
# ModelMetrics: the freshly calculated baselines from this pipeline run
# (step property CalculatedBaseline*), registered alongside the model.
model_metrics = ModelMetrics(
    model_data_statistics=MetricsSource(
        s3_uri=data_quality_check_step.properties.CalculatedBaselineStatistics,
        content_type="application/json",
    ),
    model_data_constraints=MetricsSource(
        s3_uri=data_quality_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
    bias_pre_training=MetricsSource(
        s3_uri=data_bias_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
    model_statistics=MetricsSource(
        s3_uri=model_quality_check_step.properties.CalculatedBaselineStatistics,
        content_type="application/json",
    ),
    model_constraints=MetricsSource(
        s3_uri=model_quality_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
    bias_post_training=MetricsSource(
        s3_uri=model_bias_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
    explainability=MetricsSource(
        s3_uri=model_explainability_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
    # NOTE(review): "bias" points at the same source as "bias_post_training"
    # above — presumably intentional (the aggregate bias slot), but confirm.
    bias=MetricsSource(
        s3_uri=model_bias_check_step.properties.CalculatedBaselineConstraints,
        content_type="application/json",
    ),
)
# DriftCheckBaselines: the baselines future drift checks and model monitors
# compare against (step property BaselineUsedForDriftCheck*).
drift_check_baselines = DriftCheckBaselines(
    model_data_statistics=MetricsSource(
        s3_uri=data_quality_check_step.properties.BaselineUsedForDriftCheckStatistics,
        content_type="application/json",
    ),
    model_data_constraints=MetricsSource(
        s3_uri=data_quality_check_step.properties.BaselineUsedForDriftCheckConstraints,
        content_type="application/json",
    ),
    bias_pre_training_constraints=MetricsSource(
        s3_uri=data_bias_check_step.properties.BaselineUsedForDriftCheckConstraints,
        content_type="application/json",
    ),
    # Clarify-generated analysis configs are registered as FileSources so
    # monitors can rerun the same analysis.
    bias_config_file=FileSource(
        s3_uri=model_bias_check_config.monitoring_analysis_config_uri,
        content_type="application/json",
    ),
    model_statistics=MetricsSource(
        s3_uri=model_quality_check_step.properties.BaselineUsedForDriftCheckStatistics,
        content_type="application/json",
    ),
    model_constraints=MetricsSource(
        s3_uri=model_quality_check_step.properties.BaselineUsedForDriftCheckConstraints,
        content_type="application/json",
    ),
    bias_post_training_constraints=MetricsSource(
        s3_uri=model_bias_check_step.properties.BaselineUsedForDriftCheckConstraints,
        content_type="application/json",
    ),
    explainability_constraints=MetricsSource(
        s3_uri=model_explainability_check_step.properties.BaselineUsedForDriftCheckConstraints,
        content_type="application/json",
    ),
    explainability_config_file=FileSource(
        s3_uri=model_explainability_check_config.monitoring_analysis_config_uri,
        content_type="application/json",
    ),
)
# -
# ### Register the model
#
# The two parameters in `RegisterModel` that hold the metrics calculated by the `ClarifyCheckStep` and `QualityCheckStep` are `model_metrics` and `drift_check_baselines`.
#
# `drift_check_baselines` - these are the baseline files that will be used for drift checks in `QualityCheckStep` or `ClarifyCheckStep` and model monitoring jobs that are set up on endpoints hosting this model.
#
# `model_metrics` - these should be the latest baselines calculated in the pipeline run. This can be set using the step property `CalculatedBaseline`
#
# The intention behind these parameters is to give users a way to configure the baselines associated with a model so they can be used in drift checks or model monitoring jobs. Each time a pipeline is executed, users can choose to update the `drift_check_baselines` with newly calculated baselines. The `model_metrics` can be used to register the newly calculated baselines or any other metrics associated with the model.
#
# Every time a baseline is calculated, it is not necessary that the baselines used for drift checks are updated to the newly calculated baselines. In some cases, users may retain an older version of the baseline file to be used for drift checks and not register new baselines that are calculated in the Pipeline run.
# Register the model with its metrics and drift-check baselines.
step_register = RegisterModel(
    name="RegisterAbaloneModel",
    estimator=xgb_train,
    model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
    content_types=["text/csv"],
    response_types=["text/csv"],
    inference_instances=["ml.t2.medium", "ml.m5.large"],
    transform_instances=["ml.m5.large"],
    model_package_group_name=model_package_group_name,
    approval_status=model_approval_status,
    model_metrics=model_metrics,
    drift_check_baselines=drift_check_baselines,
)
# condition step for evaluating model quality and branching execution
# Register only if the test-set MSE from evaluation.json is <= 6.0.
cond_lte = ConditionLessThanOrEqualTo(
    left=JsonGet(
        step_name=step_eval.name,
        property_file=evaluation_report,
        json_path="regression_metrics.mse.value",
    ),
    right=6.0,
)
step_cond = ConditionStep(
    name="CheckMSEAbaloneEvaluation",
    conditions=[cond_lte],
    if_steps=[step_register],
    else_steps=[],
)
# ### Create the Pipeline
# pipeline instance
# pipeline instance
# Every parameter defined above must be listed here to be overridable at
# execution time; step ordering is otherwise inferred from data dependencies
# (plus the explicit depends_on on the training step).
pipeline = Pipeline(
    name=pipeline_name,
    parameters=[
        processing_instance_type,
        processing_instance_count,
        training_instance_type,
        model_approval_status,
        input_data,
        skip_check_data_quality,
        register_new_baseline_data_quality,
        supplied_baseline_statistics_data_quality,
        supplied_baseline_constraints_data_quality,
        skip_check_data_bias,
        register_new_baseline_data_bias,
        supplied_baseline_constraints_data_bias,
        skip_check_model_quality,
        register_new_baseline_model_quality,
        supplied_baseline_statistics_model_quality,
        supplied_baseline_constraints_model_quality,
        skip_check_model_bias,
        register_new_baseline_model_bias,
        supplied_baseline_constraints_model_bias,
        skip_check_model_explainability,
        register_new_baseline_model_explainability,
        supplied_baseline_constraints_model_explainability,
    ],
    steps=[
        step_process,
        data_quality_check_step,
        data_bias_check_step,
        step_train,
        step_create_model,
        step_transform,
        model_quality_check_step,
        model_bias_check_step,
        model_explainability_check_step,
        step_eval,
        step_cond,
    ],
    sagemaker_session=sagemaker_session,
)
# ### Get Pipeline definition
# +
import json

# Render the pipeline as JSON and parse it back so the generated step and
# parameter definitions can be inspected in the notebook.
definition = json.loads(pipeline.definition())
definition
# -
# Create the pipeline in SageMaker, or update it if it already exists,
# executing under the given IAM role.
pipeline.upsert(role_arn=role)
# ### First time executing
#
# The first time the pipeline is run the parameters need to be overridden so that the checks are skipped and newly calculated baselines are registered
# On the very first run no baselines exist yet, so every check is skipped
# and every freshly calculated baseline is registered.
first_run_overrides = {
    "SkipDataQualityCheck": True,
    "RegisterNewDataQualityBaseline": True,
    "SkipDataBiasCheck": True,
    "RegisterNewDataBiasBaseline": True,
    "SkipModelQualityCheck": True,
    "RegisterNewModelQualityBaseline": True,
    "SkipModelBiasCheck": True,
    "RegisterNewModelBiasBaseline": True,
    "SkipModelExplainabilityCheck": True,
    "RegisterNewModelExplainabilityBaseline": True,
}
execution = pipeline.start(parameters=first_run_overrides)
# ### Wait for the pipeline execution to complete
execution.wait()
# ### Cleaning up resources
#
# Users are responsible for cleaning up resources created when running this notebook. Specify the ModelName, ModelPackageName, and ModelPackageGroupName that need to be deleted. The model names are generated by the CreateModel step of the Pipeline and the property values are available only in the Pipeline context. To delete the models created by this pipeline, navigate to the Model Registry and Console to find the models to delete.
#
# +
# Create a SageMaker client
# sm_client = boto3.client("sagemaker")
# # Delete SageMaker Models
# sm_client.delete_model(ModelName="...")
# # Delete Model Packages
# sm_client.delete_model_package(ModelPackageName="...")
# # Delete the Model Package Group
# sm_client.delete_model_package_group(ModelPackageGroupName="model-monitor-clarify-group")
# # Delete the Pipeline
# sm_client.delete_pipeline(PipelineName="model-monitor-clarify-pipeline")
| sagemaker-pipelines/tabular/model-monitor-clarify-pipelines/sagemaker-pipeline-model-monitor-clarify-steps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Paillier Homomorphic Encryption Example
#
# DISCLAIMER: This is a proof-of-concept implementation. It does not represent a remotely product ready implementation or follow proper conventions for security, convenience, or scalability. It is part of a broader proof-of-concept demonstrating the vision of the OpenMined project, its major moving parts, and how they might work together.
#
#
from syft.he.paillier import KeyPair, PaillierTensor
from syft import TensorBase
import numpy as np
# # Basic Ops
# Generate a fresh public/private Paillier key pair.
pubkey,prikey = KeyPair().generate()
# Encrypt a plaintext numpy vector under the public key.
x = PaillierTensor(pubkey, np.array([1, 2, 3, 4, 5.]))
# Inspect the internals of one encrypted element (the ciphertext object).
x.data[0].__dict__
x.decrypt(prikey)
# Homomorphic operations: each result decrypts to the corresponding
# plaintext arithmetic result.
(x+x[0]).decrypt(prikey)
(x*5).decrypt(prikey)
(x+x/5).decrypt(prikey)
# # Key SerDe
pubkey,prikey = KeyPair().generate()
x = PaillierTensor(pubkey, np.array([1, 2, 3, 4, 5.]))
# Keys round-trip through string serialization...
pubkey_str = pubkey.serialize()
prikey_str = prikey.serialize()
pubkey2,prikey2 = KeyPair().deserialize(pubkey_str,prikey_str)
# ...and the deserialized private key still decrypts tensors that were
# encrypted with the original public key.
prikey2.decrypt(x)
y = PaillierTensor(pubkey,(np.ones(5))/2)
prikey.decrypt(y)
# # Value SerDe
# Encrypted tensors themselves are picklable.
import pickle
y_str = pickle.dumps(y)
y2 = pickle.loads(y_str)
prikey.decrypt(y2)
| notebooks/Syft - Paillier Homomorphic Encryption Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spark SQL Examples
#
# Run the code cells below. This is the same code from the previous screencast.
# +
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from pyspark.sql.types import IntegerType
from pyspark.sql.functions import desc
from pyspark.sql.functions import asc
from pyspark.sql.functions import sum as Fsum
import datetime
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
# -
# Create (or reuse) a SparkSession — the entry point for DataFrame and
# Spark SQL operations.
spark = SparkSession \
    .builder \
    .appName("Data wrangling with Spark SQL") \
    .getOrCreate()
# Load the event log (one JSON object per line) into a DataFrame.
path = "data/sparkify_log_small.json"
user_log = spark.read.json(path)
user_log.take(1)
user_log.printSchema()
# # Create a View And Run Queries
#
# The code below creates a temporary view against which you can run SQL queries.
user_log.createOrReplaceTempView("user_log_table")
spark.sql("SELECT * FROM user_log_table LIMIT 2").show()
spark.sql('''
SELECT *
FROM user_log_table
LIMIT 2
'''
).show()
spark.sql('''
SELECT COUNT(*)
FROM user_log_table
'''
).show()
# All logged events for a single user.
spark.sql('''
SELECT userID, firstname, page, song
FROM user_log_table
WHERE userID == '1046'
'''
).collect()
spark.sql('''
SELECT DISTINCT page
FROM user_log_table
ORDER BY page ASC
'''
).show()
# # User Defined Functions
# Register a UDF callable from SQL: converts a millisecond epoch timestamp
# to the local hour of day.
spark.udf.register("get_hour", lambda x: int(datetime.datetime.fromtimestamp(x / 1000.0).hour))
spark.sql('''
SELECT *, get_hour(ts) AS hour
FROM user_log_table
LIMIT 1
'''
).collect()
# Count song plays per hour of day, ordered numerically by hour.
songs_in_hour = spark.sql('''
SELECT get_hour(ts) AS hour, COUNT(*) as plays_per_hour
FROM user_log_table
WHERE page = "NextSong"
GROUP BY hour
ORDER BY cast(hour as int) ASC
'''
)
songs_in_hour.show()
# # Converting Results to Pandas
songs_in_hour_pd = songs_in_hour.toPandas()
print(songs_in_hour_pd)
| spark-example/7_data_wrangling-sql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **Note:** In most sessions you will be solving exercises posed in a Jupyter notebook that looks like this one. Because you are cloning a Github repository that only we can push to, you should **NEVER EDIT** any of the files you pull from Github. Instead, what you should do, is either make a new notebook and write your solutions in there, or **make a copy of this notebook and save it somewhere else** on your computer, not inside the `sds` folder that you cloned, so you can write your answers in there. If you edit the notebook you pulled from Github, those edits (possible your solutions to the exercises) may be overwritten and lost the next time you pull from Github. This is important, so don't hesitate to ask if it is unclear.
# # Exercise Set 17: Text as Data 2
#
# *Morning, August 23, 2018*
#
# In this Exercise Set you will practice methods within Information Extraction in python.
# You will practice the following:
# * Practice doing look ups using set operations.
# * Implement and compare different lexical based methods for sentiment analysis.
# * Furthermore you get to play with the output from a Word2Vec model and a Topic Model, both trained on 4 million reviews from the TrustPilot Review dataset, that we practiced scraping.
# ## Exercise Section 17.1: Look-ups and Dictionary Methods
# In ths exercise you will practice using curated lexicons to extract knowledge from text.
#
# First we load the dataset. Again we use the Review Data Set. Load it by running the following:
# ```python
# import pandas as pd
# df = pd.read_csv('https://raw.githubusercontent.com/snorreralund/scraping_seminar/master/english_review_sample.csv')
# ```
#
# > **Ex 17.1.1:**
# Define two concepts you want to measure in the reviews. And curate a list of words expressing that concept. E.g. words related to Travelling, Computers, or for the bold define words that indicate Trolling.
# * Convert the two lists into sets, and assign to variables of choice.
#
# These will be your Lexicons that you want to match up with the documents.
#
# +
#[Answer 17.1.1]
# -
# > **Ex 17.1.2:**
# Now we design a simple preprocessing function to:
# * first tokenize the string using the nltk.word_tokenize function.
# * And secondly it converts capital letters to noncapital letters for each token, using a list comprehension.
# +
#[Answer 17.1.2]
# -
# >**Ex.17.1.3:**
# *Now we apply the preprocessing scheme to all of our documents assigning it to a variable: tokenized_docs.
# *Secondly we convert all of the tokenized docs into sets, by loop through the documents and applying the set() command.
# +
#[Answer 17.1.3]
# -
# >**Ex 17.1.4:** Now we shall find the overlap between our curated lists, and each document set.
# We do this by defining a container named `overlap`.
# Then we run through all document sets:
# * And take the length of the overlap between the document set and our curated lexicons.
# * Append the length to the `overlap` container.
# HINT: Overlaps between sets are found using the `&` sign. And length you get from the `len()` builtin function.
# * Finally assign the overlap values. a new column in the dataframe with the overlap values.
# +
#[Answer 17.1.4]
# -
# ## Exercise section 17.2 Lexical Based Sentiment Analysis using Dictionaries
# Here I want you to test 4 different dictionaries for sentiment analysis on the review dataset.
#
# * You will compare each document to the nltk.corpus.opinion_lexicon build into nltk.
# * You will try to use the Afinn package. `pip install afinn`
# * And finally you will compare the rulebased version of the VADER (Valence Aware Dictionary and sEntiment Reasoner - "VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text" Hutto and Gilbert 2014) sentiment analyser, to a simple lookup version.
#
# This means comparing 4 different lexical based sentiment analysis. Two of them you will use your `set` operations for checking overlap between documents and a set. And two them will be prepackaged with builtin methods.
#
#
# > **17.2.1:** First we need to get our curated lists of words with strong signals of sentiment. The wordlists you will need for this exercise is in the nltk.corpus.opinion_lexicon. The other one is the wordlist used in the VADER method, this you will download from github using the following link: https://raw.githubusercontent.com/cjhutto/vaderSentiment/master/vaderSentiment/vader_lexicon.txt
# * First assign nltk.corpus.opinion_lexicon to the variable `lexicon_1`
# * next run the following command to parse the VADER lexicon from github:
# ```python
# import pandas as pd
# vader_df = pd.read_csv('https://raw.githubusercontent.com/cjhutto/vaderSentiment/master/vaderSentiment/vader_lexicon.txt'
# ,sep='\t',header=None) # changing the separator to tab, and specifying no header
# vader_df.columns = ['token','average_score','variance','ratings'] # adding the header
# ```
# * To get a list - that will be converted to a set - of posive and negative words we will extract it from the dataframe by filtering on the column average score. Take the tokens where the average score is less than 0 and assign to a variable `negative`, and do the opposite for a variable `positive`. Remember to convert them to a set.
# * Define two dictionaries for each of the curated lexicons looking like this:
# ```python
# opinion_lexicon = {'positive':positive_words,'negative':negative_words}
# vader_lexicon = {'positive':vader_positive,'negative':vader_negative}
# ```
#
# +
#[Answer 17.2.1]
# -
# >**Ex. 17.2.3:** Scoring a document using the dictionary.
# Now we write a function that takes in a document and a dictionary containing negative and positive words. The function will tokenize the document and return a sentiment score based on the overlapping words.
#
# * First you apply the `preprocess` function you created in exercise 17.1 on the document.
# * Then you filter all words from the documents that are not in the positive word set and take the length of the resulting list.
# * You do the same with the negative word set.
# * Finally you calculate a polarity score by subtracting the negative overlap from the positive overlap and divide it by the length of the document : `pos-neg/len(doc)`
# (Hint1: Filter like this [w for w in doc if w in pos])
#
# Wrap the above in a function called apply_sentiment_dictionary.
# +
#[Answer 17.2.3]
# -
# > **Ex. 17.2.4:** Make two new columns in the dataframe;'opinion' ,'vader_raw', by applying the function with their respective dictionaries as input. This means you will have to give the .apply function another argument: `.apply(apply_sentiment_dictionary,args=(vader_lexicon,)`
# +
#[Answer Ex. 17.2.4]
# -
# >**Ex.17.2.5:** Applying the prepackaged.
# * Figure out how to apply the Afinn method here: https://github.com/fnielsen/afinn
# * Apply the afinn score on each document and define a column 'afinn_score'.
# +
#[Answer Ex.17.2.5]
# -
# >**Ex.17.2.6:** Applying the prepackaged(2).
# The MIT VADER Analyzer is run by initializing the analyzer = nltk.sentiment.SentimentIntensityAnalyzer(). And then using the builtin function of the analyzer: `.polarity_score(string)`. The function has more than one output, so we will only use the 'compound' variable.
# * apply the .polarity_score function to each document and extract the 'compound' value from the dictionary output of the sentiment analyzer. And define a new column called 'vader_compound' in the dataframe.
# +
#[Answer 17.2.6]
# -
# > **Ex. 17.2.7:** Comparing the performance of the Sentiment Analyzers.
# How to actually evaluate the performance of their scores does not have a definite answer, since we do not have a Human label score of each review, also they are on different scales, so what scale to use. However we might do the following:
# * Convert all ratings into a binary: 1 if above 3 and 0 if below.
# * Do the same with the Scores from the sentiment analyzers.
# * And calculate an accuracy score.
#
# or
# >
# * we could do a simple correlation between the score and the rating. And compare which has the best fit.
# * use np.correcoef()
# * or even train a classifier to predict the Rating using the output from the classifier.
# +
#[Answer 17.2.7]
# -
# ## Exercise Section 17.3: Playing around with Outputs from Unsupervised Models
# Here I want you to get acquainted with the capabilities and the syntax in the python implementation of the two famous unsupervised methods for text data: Topic Modelling and Word2Vec.
#
# You need to install the pyldaviz package: `conda install -c conda-forge pyldavis`
#
#
# Download the Word2Vec model here: https://www.dropbox.com/sh/lwpoyipspunzojl/AABSoO8j7EUjPLixSBkOe7Uda?dl=0
#
# Download the TopicModel here: https://www.dropbox.com/sh/fmmxcyvnti0c1y7/AAAOgHmnD2mbbHEQiwtJsjW-a?dl=0
# >**Ex.17.3.1:** The Word2Vec model object.
# Here we will use the `model.wv.most_similar()` method to seach the vector space. This can be used when developing lexicons.
# We will see how the model has embedded the negative and positive words from our lexicons.
# * First we define a union between the Vader lexicon and the Opinion lexicon. The union of two sets can be done using the `|` operator.
# * Then we filter which of these words are actually in the vocabulary of the model. The vocabulary of the model can be found under the model.vocab property, and then you use an `if in` statement to filter.
# +
#[Answer 17.3.1]
# -
# > **Ex 17.3.2:**
# Now we pick a random sample from the negative and apply the .most_similar command.
# * We use the `random` module and the random.choice method to get a word.
# * And then we `print(word,model.wv.most_similar(word))`
# +
#[Answer 17.3.2]
# -
# > **Ex.17.3.3:** Now we do some of the famous linear algebra (King - Man + Women = Queen)
# But instead we say: What is good- :) + :( = ?
# We do this by applying the same function:
# `.most_similar(positive=['good',':('],negative=[':)'])`
# +
#[Answer 17.3.3]
# -
# > **Exercise 17.3.5:** Interactive Plotting of Word Embedding
# ** Inspecting clusters of Words **
# Run PCA on a subsample of the wordvectors found by applying this command.
# * Inspect what the different dimensions seem to represent by hovering over the words.
# >**Ex.17.3.6:** Inspecting TOPIC MODELS using pyldaviz
# Lets look at the ldamodel object. We shall use the pyldaviz package to "discover" what the topic model have found.
# Load the pre-sampled corpus used to train/visualize the topic model.
import pickle
sample_corpus = pickle.load(open('topicmodel_review/lda_sample_corpus.pkl','rb'))
import pyLDAvis.gensim
pyLDAvis.enable_notebook()
# NOTE(review): `lda` (the trained gensim LDA model) is not defined in this
# notebook — presumably it should be loaded from the downloaded TopicModel
# files before this cell runs; confirm before executing.
vis = pyLDAvis.gensim.prepare(lda,corpus=sample_corpus,dictionary=lda.id2word) # this takes a while to run
# +
#vis
| material/session_17/exercise_17.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''buddhalight'': conda)'
# name: python3
# ---
# # Lab 10.3.1 Visdom Example
#
# **<NAME> 2021**
#
# **[Deep Learning By Torch] End to End study scripts of Deep Learning by implementing code practice with Pytorch.**
#
# If you have an any issue, please PR below.
#
# [[Deep Learning By Torch] - Github @JonyChoi](https://github.com/jonychoi/Deep-Learning-By-Torch)
# Here, we are going to learn about the usage of the Visdom, which is the tool to visualize the graphs and other various things during experiment.
#
# You can find more about here:
#
# https://pypi.org/project/visdom/
# ## Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# ## Import Visdom
#
# Jupyter Notebook > Terminal
#
# Type ```python -m visdom.server```
import visdom
# Connect to a running Visdom server (start one with `python -m visdom.server`).
vis = visdom.Visdom()
# ## Text
# Render a simple text pane in the "main" environment.
vis.text("Hello, world", env="main")
# ### Take a Moment!
#
# **TORCH.RANDN**
#
# torch.randn(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
#
# Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution).
#
# out(i)∼N(0,1)
#
# The shape of the tensor is defined by the variable argument ```size```.
#
# **Parameters**
# > **size** (int...) – a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple.
#
# **Keyword Arguments**
# > - **generator** (torch.Generator, optional) – a pseudorandom number generator for sampling
#
# > - **out** (Tensor, optional) – the output tensor.
#
# > - **dtype** (torch.dtype, optional) – the desired data type of returned tensor. Default: if None, uses a global default (see torch.set_default_tensor_type()).
#
# > - **layout** (torch.layout, optional) – the desired layout of returned Tensor. Default: torch.strided.
#
# > - **device** (torch.device, optional) – the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see torch.set_default_tensor_type()). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
#
# > - **requires_grad** (bool, optional) – If autograd should record operations on the returned tensor. Default: False.
#
# Example:
#
# ```
# >>> torch.randn(4)
# tensor([-2.1436, 0.9966, 2.3426, -0.6366])
# >>> torch.randn(2, 3)
# tensor([[ 1.5954, 2.8929, -1.0923],
# [ 1.1719, -0.4709, -0.1996]])
# ```
# ## Image
# A single random (C, H, W) = (3, 200, 200) tensor displayed as one image.
a = torch.randn(3, 200, 200)
vis.image(a)
# ### Take a Moment!
#
# ## Q What is the difference between torch.tensor and torch.Tensor?
#
# Since version 0.4.0, it is possible to use torch.tensor and torch.Tensor
#
# What is the difference? What was the reasoning for providing these two very similar and confusing alternatives?
#
# ## A
#
# In PyTorch torch.Tensor is the main tensor class. So all tensors are just instances of torch.Tensor.
#
# When you call torch.Tensor() you will get an empty tensor without any data.
#
# In contrast torch.tensor is a function which returns a tensor. In the documentation it says:
#
# torch.tensor(data, dtype=None, device=None, requires_grad=False) → Tensor
# Constructs a tensor with data.
#
# This also also explains why it is no problem creating an empty tensor instance of `torch.Tensor` without `data` by calling:
# tensor_without_data = torch.Tensor()
# But on the other side:
#
# ```tensor_without_data = torch.tensor()```
#
# Will lead to an error:
#
# ```---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-12-ebc3ceaa76d2> in <module>()
# ----> 1 torch.tensor()
#
# TypeError: tensor() missing 1 required positional arguments: "data"
# ```
#
# But in general there is no reason to choose `torch.Tensor` over `torch.tensor`. Also `torch.Tensor` lacks a docstring.
# Similar behaviour for creating a tensor without data like with: torch.Tensor() can be achieved using:
#
# ```torch.tensor(())```
#
# Output:
#
# ```tensor([])```
# ## Images
# vis.images shows a whole batch (N, C, H, W) as a grid of images.
# NOTE(review): torch.Tensor(3,3,28,28) is uninitialized memory, so the
# displayed pixels are arbitrary.
vis.images(torch.Tensor(3,3,28,28))
# ## Example (using MNIST and CIFAR10)
# Download both datasets (if needed) and convert samples to tensors.
MNIST = datasets.MNIST(root = 'MNIST_data/',
                       download = True,
                       transform = transforms.ToTensor(),
                       train = True)
cifar10 = datasets.CIFAR10(root = 'cifar10/',
                           download = True,
                           transform = transforms.ToTensor(),
                           train = True)
# ### CIFAR10
# __getitem__ returns a (image, label) tuple; index 0 is the image tensor.
data = cifar10.__getitem__(0)
print(data[0].shape)
vis.image(data[0], env = 'main')
# ### MNIST
data = MNIST.__getitem__(0)
print(data[0].shape) #make this since data is tuple of (X, Y)
vis.image(data[0], env = "main")
# ### Check dataset
data_loader = torch.utils.data.DataLoader(dataset = MNIST,
                                          shuffle = False,
                                          drop_last = True,
                                          batch_size = 32)
# Display just the first batch of 32 images, then stop.
for num, value in enumerate(data_loader):
    #print(value) tuple of (datas, labels)
    value = value[0] # get only datas
    print(value.shape)
    vis.images(value)
    break
# Close every window in the "main" environment.
vis.close(env="main")
# A line plot with only Y values: X defaults to the sample index.
Y_data = torch.randn(5)
plt = vis.line(Y = Y_data)
# ## Line Plot
# Same data but with explicit X coordinates.
X_data = torch.Tensor([1,2,3,4,5])
plt = vis.line(Y=Y_data, X = X_data)
# ## Line Update
# +
# Appending a new (X, Y) point to the existing window `plt`.
Y_append = torch.randn(1)
X_append = torch.Tensor([6])
vis.line(Y = Y_append, X =X_append, win=plt, update="append")
# -
# ## Multiple Line on Single Windows
# +
# Two lines in one window: Y is (10, 2), so X must also be (10, 2) —
# the single X column is duplicated via torch.cat.
num = torch.Tensor(list(range(0, 10)))
num = num.view(-1 ,1)
num = torch.cat((num, num), dim = 1)
plt = vis.line(Y = torch.randn(10, 2), X = num)
# -
# ## Line info
# `opts` controls title and legend display.
plt = vis.line(Y = Y_data, X = X_data, opts = dict(title = 'Test', showlegend = True))
plt = vis.line(Y=Y_data, X = X_data, opts= dict(title='Test', legend = ['no.1'], showlegend=True))
plt = vis.line(Y=torch.randn(10, 2), X = num, opts=dict(title = 'Test', legend = ['no.1', 'no.2'], showlegend = True))
# ## Make Function for update line
def loss_tracker(loss_plot, loss_value, num):
    """Append one (num, loss_value) point to the existing Visdom line window."""
    vis.line(
        Y=loss_value,
        X=num,
        win=loss_plot,
        update='append',
    )
# +
# Start an (almost) empty plot, then simulate a 500-step training loop:
# each step appends one noisy, steadily increasing "loss" value.
plt = vis.line(Y = torch.Tensor(1).zero_())
for i in range(500):
    loss = torch.randn(1) + i
    loss_tracker(plt, loss, torch.Tensor([i]))
# -
# ## Close the window
vis.close(env = "main")
| 12. Convolutional Neural Network/12-4. Visdom Example.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .fs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (F#)
// language: F#
// name: .net-fsharp
// ---
// <h2>--- Day 1: Sonar Sweep ---</h2>
// [](https://mybinder.org/v2/gh/oddrationale/AdventOfCode2021FSharp/main?urlpath=lab%2Ftree%2FDay01.ipynb)
// <p>You're minding your own business on a ship at sea when the overboard alarm goes off! You rush to see if you can help. Apparently, one of the Elves tripped and accidentally sent the sleigh keys flying into the ocean!</p>
// <p>Before you know it, you're inside a submarine the Elves keep ready for situations like this. It's covered in Christmas lights (because of course it is), and it even has an experimental antenna that should be able to track the keys if you can boost its signal strength high enough; there's a little meter that indicates the antenna's signal strength by displaying 0-50 <em class="star">stars</em>.</p>
// <p>Your instincts tell you that in order to save Christmas, you'll need to get all <em class="star">fifty stars</em> by December 25th.</p>
// <p>Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants <em class="star">one star</em>. Good luck!</p>
// <p>As the submarine drops below the surface of the ocean, it automatically performs a sonar sweep of the nearby sea floor. On a small screen, the sonar sweep report (your puzzle input) appears: each line is a measurement of the sea floor depth as the sweep looks further and further away from the submarine.</p>
// <p>For example, suppose you had the following report:</p>
// <pre><code>199
// 200
// 208
// 210
// 200
// 207
// 240
// 269
// 260
// 263
// </code></pre>
// <p>This report indicates that, scanning outward from the submarine, the sonar sweep found depths of <code>199</code>, <code>200</code>, <code>208</code>, <code>210</code>, and so on.</p>
// <p>The first order of business is to figure out how quickly the depth increases, just so you know what you're dealing with - you never know if the keys will get <span title="Does this premise seem fishy to you?">carried into deeper water</span> by an ocean current or a fish or something.</p>
// <p>To do this, count <em>the number of times a depth measurement increases</em> from the previous measurement. (There is no measurement before the first measurement.) In the example above, the changes are as follows:</p>
// <pre><code>199 (N/A - no previous measurement)
// 200 (<em>increased</em>)
// 208 (<em>increased</em>)
// 210 (<em>increased</em>)
// 200 (decreased)
// 207 (<em>increased</em>)
// 240 (<em>increased</em>)
// 269 (<em>increased</em>)
// 260 (decreased)
// 263 (<em>increased</em>)
// </code></pre>
// <p>In this example, there are <em><code>7</code></em> measurements that are larger than the previous measurement.</p>
// <p><em>How many measurements are larger than the previous measurement?</em></p>
// + dotnet_interactive={"language": "fsharp"}
// Read the puzzle input: one depth measurement per line.
let input = File.ReadAllLines @"input/01.txt"
// + dotnet_interactive={"language": "fsharp"}
#!time
// Part 1: count how many depth readings are larger than the previous one.
input
|> Seq.map int
|> Seq.pairwise
|> Seq.filter (fun (previous, current) -> previous < current)
|> Seq.length
// -
// <h2 id="part2">--- Part Two ---</h2>
// <p>Considering every single measurement isn't as useful as you expected: there's just too much noise in the data.</p>
// <p>Instead, consider sums of a <em>three-measurement sliding window</em>. Again considering the above example:</p>
// <pre><code>199 A
// 200 A B
// 208 A B C
// 210 B C D
// 200 E C D
// 207 E F D
// 240 E F G
// 269 F G H
// 260 G H
// 263 H
// </code></pre>
// <p>Start by comparing the first and second three-measurement windows. The measurements in the first window are marked <code>A</code> (<code>199</code>, <code>200</code>, <code>208</code>); their sum is <code>199 + 200 + 208 = 607</code>. The second window is marked <code>B</code> (<code>200</code>, <code>208</code>, <code>210</code>); its sum is <code>618</code>. The sum of measurements in the second window is larger than the sum of the first, so this first comparison <em>increased</em>.</p>
// <p>Your goal now is to count <em>the number of times the sum of measurements in this sliding window increases</em> from the previous sum. So, compare <code>A</code> with <code>B</code>, then compare <code>B</code> with <code>C</code>, then <code>C</code> with <code>D</code>, and so on. Stop when there aren't enough measurements left to create a new three-measurement sum.</p>
// <p>In the above example, the sum of each three-measurement window is as follows:</p>
// <pre><code>A: 607 (N/A - no previous sum)
// B: 618 (<em>increased</em>)
// C: 618 (no change)
// D: 617 (decreased)
// E: 647 (<em>increased</em>)
// F: 716 (<em>increased</em>)
// G: 769 (<em>increased</em>)
// H: 792 (<em>increased</em>)
// </code></pre>
// <p>In this example, there are <em><code>5</code></em> sums that are larger than the previous sum.</p>
// <p>Consider sums of a three-measurement sliding window. <em>How many sums are larger than the previous sum?</em></p>
// + dotnet_interactive={"language": "fsharp"}
#!time
// Part 2: sum each three-measurement sliding window, then count the
// increases between consecutive window sums.
input
|> Seq.map int
|> Seq.windowed 3
|> Seq.map Seq.sum
|> Seq.pairwise
|> Seq.filter (fun (previous, current) -> previous < current)
|> Seq.length
// -
// [Next](Day02.ipynb)
| Day01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1. Introduction
# BioCRNpyler is a software tool designed to rapidly compile large biological chemical reaction networks (CRNs) from simple user specifications (written in python). It has built in support of a number of models for transcription, translation, and gene expression regulation using components common in _E. coli_ synthetic biology. This tutorial explains the inner workings of BioCRNpyler and shows how to create custom mixtures, components, and mechanisms. Specifically, we will go through making a custom gene expression model:
# <br>
# >$G \to G + P \rightleftharpoons G:P \to G + P + X$
#
# here $G$ is a gene and $P$ is a polymerase and $X$ is the protein expressed by $G$. No translational machinery is included in this model, making it one of the simplest possible for expression. Note that we are ignoring translation for simplicity, not because it isn't important.
#
# On the top level, BioCRNpyler uses three kinds of objects:
# * __Mechanisms__: are the details of how a physics process is implemented as a CRN. These take the form of black box reaction schemas which compile into a CRN containing all the intermediate steps required to get from a specified input to an output.
# * __Components__: are the ingredients one might imagine adding to a test tube, say from a pipette. They do not include all chemical species involved in a reaction, but just the key ones we might experimentally modulate. Components may contain their own mechanisms or default to those used by a mixture. An example of a component is a piece of DNA encoding a gene. A DNA-Transcription factor complex, on the other hand, would not normally be a component.
# * __Mixtures__: can be thought of as the "reaction soup" we are working in. Mixtures contain default components and mechanisms. Components are added to mixtures to create different reaction conditions.
#
# Internally, BioCRNpyler tells the Mixture to compile all its Components. Each Component contains its own Mechanisms (or defaults to Mechanisms defined in the Mixture) and calls each Mechanism (read: reaction schema) to generate a set of chemical species and reactions which are combined into a complete CRN. BioCRNpyler also has its own internal CRN representation, which we will discuss next.
# ### Chemical Reaction Network (CRN) model
# A CRN is a set of species $S$ and a set of reactions $R$ where each reaction is expressed $I \rightarrow O$ where $I$ are the inputs species, $O$ are the output species. Each reaction occurs with a rate function (propensity) $\rho(x)$ which takes the state of the CRN (the values of all the species) as an input. By default, reactions use massaction rates: $\rho(x) = k \Pi_{s \in I} x_s$ here $k$ is some constant and $x_s$ is the value of the species $s$. A number of built in propensities exist and are described in the documentation, including a general propensity allowing for an arbitrary function.
#
# Internally, BioCRNpyler represents species as strings involving a type identifier and a name: type_name. This is to allow for species to be identified as "gene_X", "mrna_X", etc. Complexes between species can be created automatically using the ComplexSpecies constructor or given custom defined names. By default, a complex of gene_X and protein_Y would be called complex_gene_X_protein_Y. This would be considered different from complex_protein_Y_gene_X in Bioscrape's CRN semantics because species here are effectively strings.
#
# Reactions are stored as lists of species (for the inputs and outputs) and a rate constant k. Non massaction reactions also require a parameter dictionary of their relevant parameter values. Massaction reactions are allowed to be reversible, in which case they are thought of as two irreversible reactions. Reaction rates default to 1.0.
#
# Now, we will create the CRN described above directly and approximate it with a non-massaction propensity.
# +
from biocrnpyler.chemical_reaction_network import Species, Reaction, ComplexSpecies, ChemicalReactionNetwork
#create the three species in the CRN
G = Species(name = "G", material_type = "dna")
P = Species(name = "P", material_type = "protein")
X = Species(name = "X", material_type = "protein")
PG = ComplexSpecies([P, G]) #complex takes a list of species and returns a complex
species = [P, G, X, PG] #a list of species
#Create the reversible reaction: G + P <--> G:P
kf = 100 #Forward reaction rate
kr = .01 #Reverse reaction rate
inputs1 = [G, P]
outputs1 = [PG]
rxn1 = Reaction(inputs1, outputs1, k = kf, k_rev = kr) #type defaults to massaction
#Create the irreversible reaction G:P --> G + P + X
inputs2 = [PG]
outputs2 = [G, P, X]
kexpress = 1. #Catalytic (expression) rate
rxn2 = Reaction(inputs2, outputs2, k = kexpress)
rxns = [rxn1, rxn2] #a list of reactions
CRN = ChemicalReactionNetwork(species, rxns)
#Species, reactions, and CRNs can all be directly printed
print("species representation:\n", species)
print("\nrxns representation:\n", rxns)
print("\nCRN Representation:\n", CRN)
#We will now create a third reaction which models the production of X as a positive hill function of P
inputs3 = [G, P]
outputs3 = [G, P, X]
khill = 10
params_hill = {"K":"K", "n":2, "s1":P} #parameters can be numbers or strings; s1 is the Hill species
rxn3 = Reaction(inputs3, outputs3, k=khill, propensity_type = "hillpositive", propensity_params = params_hill)
CRN2 = ChemicalReactionNetwork(species, [rxn3])
print("\nCRN2:\n",CRN2)
# -
# ### 2. Creating a Custom Mechanism: GeneExpression
# To create custom Mechanism objects, subclass the Mechanism class and rewrite the object constructor, the update_species function, and the update_reactions function. Briefly:
# * In the constructor we will set the name of the mechanism and the name of the polymerase species, rnap.
# * In update_species, we will create a list of all the species used in the reaction schema: the gene, gene-rnap complex, and the product species.
# * In update_reactions we create a list of all the reactions required for our reaction schema: the polymerase binding and unbinding reactions as well as the reaction producing the gene product X.
#
# Note that this code could be generated much faster using the built in MichalisMentenRXN Mechanism, but we will do it by hand here for educational purposes.
# +
from biocrnpyler.mechanism import Mechanism
class GeneExpression(Mechanism):
    """Reaction schema for one-step gene expression catalyzed by a polymerase.

    Generates the reactions:
        dna + rnap <--> dna:rnap --> dna + rnap + product
    """
    # Overwrite the constructor.
    #  name: the name of the Mechanism (set when it is instantiated).
    #  rnap: the polymerase, which we will allow to be multiple types of object for user convenience
    #  type: this is the "kind" of mechanism - used as a key in mechanism dictionaries
    def __init__(self, name, rnap, type = "gene_expression", **keywords):
        #rnap may be given as a Species, a string (converted to a protein Species),
        #or a Component with a defined get_species()
        if isinstance(rnap, Species):
            self.rnap = rnap
        elif isinstance(rnap, str):
            self.rnap = Species(name = rnap, material_type = "protein")
        #someone might make RNAP a Component if they want to add it to a mixture,
        #as you might with a T7 polymerase in a cell-free system
        elif isinstance(rnap, Component) and rnap.get_species() != None:
            self.rnap = rnap.get_species()
        else:
            #FIX: message now matches the actual API names (get_species / .species),
            #consistent with the identical check in ExpressionMixture below.
            raise ValueError("'rnap' parameter must be a string, a Component with defined get_species(), or a chemical_reaction_network.species")
        #The superclass constructor will take care of the name
        Mechanism.__init__(self = self, name = name, mechanism_type = type, **keywords) #MUST CALL THE SUPER CONSTRUCTOR!

    def update_species(self, dna, product):
        """Return all species used by this schema.

        dna: the gene Species to be expressed
        product: the gene-product Species
        Returns [dna, rnap, product, dna:rnap complex].
        """
        #We do not need to do a check on the DNA or product types because that
        #will be performed at the Component level.
        species = [dna, self.rnap, product]
        #ComplexSpecies builds the bound dna:rnap complex from a list of species
        species += [ComplexSpecies([dna, self.rnap])]
        return species

    def update_reactions(self, dna, product, component, part_id = None):
        """Return the reactions of this schema as a list of Reaction objects.

        component and part_id are used so get_parameter can find the most
        specific matching entries ('kexpress', 'kb', 'ku') in the parameter file.
        """
        kexpress = component.get_parameter("kexpress", part_id = part_id, mechanism = self)
        kb = component.get_parameter("kb", part_id = part_id, mechanism = self)
        ku = component.get_parameter("ku", part_id = part_id, mechanism = self)
        #the bound dna:rnap complex species
        comp = ComplexSpecies([dna, self.rnap])
        #Binding Reaction: dna + rnap <--> dna:rnap
        binding_rxn = Reaction(inputs=[dna, self.rnap], outputs=[comp], k=kb, k_rev=ku)
        #Catalytic Reaction: dna:rnap --> dna + rnap + product
        cat_rxn = Reaction(inputs=[comp], outputs=[dna, product, self.rnap], k=kexpress)
        return [binding_rxn, cat_rxn]
# -
# ### 3. Creating a Custom Component: Gene
# To create custom Component objects, subclass the Component class and rewrite constructor, update_species, and update_reactions functions.
# * The Constructor: will set the name of the DNA specie and the name of the protein product
# * update_species: will call each mechanism (in this case just GeneExpression) to get their species
# * update_reactions: will call each mechanism (in this case just GeneExpression) to get their reactions
#
# In general, each component's functions update_species and update_reactions need to know (via you, the programmer) what the names of the mechanisms they are expected to use are. These mechanisms will be automatically inherited from the Mixture object the Component is added to (by default) but can also be overwritten with the mechanisms keyword in the Component constructor.
# +
from biocrnpyler.component import Component
class Gene(Component):
    """A Component modeling a single gene expressed through the 'gene_expression' mechanism."""

    def __init__(self, dna_name, product, **keywords):
        """dna_name: gene Species or its string name; product: product Species or its string name."""
        #check types for dna_name and set the internal dna Species
        if isinstance(dna_name, Species):
            self.dna = dna_name
        elif isinstance(dna_name, str):
            self.dna = Species(name = dna_name, material_type = "dna")
        else:
            raise ValueError("dna_name must be a string or a chemical_reaction_network.species")
        if isinstance(product, Species):
            #BUGFIX: store the product Species (this previously stored dna_name,
            #so a Species product was silently replaced by the gene itself)
            self.product = product
        elif isinstance(product, str):
            self.product = Species(name = product, material_type = "protein")
        else:
            raise ValueError("product must be a string or a chemical_reaction_network.species")
        Component.__init__(self = self, name = dna_name, **keywords) #MUST CALL THE SUPERCLASS CONSTRUCTOR!

    def update_species(self):
        """Return the species of every mechanism used by this Component (here just one)."""
        #The Component will automatically search for a mechanism called "gene_expression":
        # 1: inherited from its Mixture (which must define a "gene_expression" mechanism), or
        # 2: passed into the Gene constructor as mechanisms={'gene_expression': Mechanism instance}
        mech_express = self.mechanisms["gene_expression"] #key is the mechanism type
        return mech_express.update_species(self.dna, self.product)

    def update_reactions(self):
        """Return the reactions of every mechanism used by this Component (here just one)."""
        mech_express = self.mechanisms["gene_expression"] #key is the mechanism type
        #part_id=self.name lets parameter lookup match ("mechanism", part, param) keys
        return mech_express.update_reactions(self.dna, self.product, component = self, part_id = self.name)
# -
# ### 4. Creating a Custom Mixture: ExpressionMixture
# To create custom Mixture objects, subclass the Mixture class and rewrite the object constructor function to contain the appropriate default mechanisms and components. All other functionalities will be inherited from the Mixture super class.
# +
#ExpressionMixture
from biocrnpyler import Mixture
class ExpressionMixture(Mixture):
    """A Mixture whose default mechanism is GeneExpression catalyzed by an RNAP species."""

    def __init__(self, name="", rnap = "RNAP", **keywords):
        #rnap may be a Species, a string (converted to a protein Species),
        #or a Component with a defined get_species()
        if isinstance(rnap, Species):
            self.rnap = rnap
        elif isinstance(rnap, str):
            self.rnap = Species(name = rnap, material_type = "protein")
        #someone might make RNAP a component if they want to add it to a mixture,
        #as you might with a T7 polymerase in a cell-free system
        elif isinstance(rnap, Component) and rnap.get_species() != None:
            self.rnap = rnap.get_species()
        else:
            raise ValueError("'rnap' parameter must be a string, a Component with defined get_species(), or a chemical_reaction_network.species")
        #Create an instance of the GeneExpression mechanism and register it as
        #the default mechanism of this Mixture, keyed by its mechanism type
        mech_express = GeneExpression("default_gene_expression", self.rnap)
        default_mechanisms = {
            mech_express.mechanism_type: mech_express
        }
        #NOTE(review): self.rnap is stored but never passed to the Mixture as a
        #component or species here - confirm whether it should be added.
        #MUST CALL THE SUPERCLASS CONSTRUCTOR!
        Mixture.__init__(self, name = name, default_mechanisms=default_mechanisms, **keywords)
# -
# ### 5. Combine everything and compile a CRN and print it.
# +
#Create a fake parameter dictionary for the example
#Fake parameter dictionary for the example, keyed by
#(mechanism name, part_id, parameter name)
parameters = {
    ("default_gene_expression", "Reporter", "kexpress"): 1.0,
    ("default_gene_expression", "Reporter", "ku"): .01,
    ("default_gene_expression", "Reporter", "kb"): 100.0,
}
#Instantiate a gene, put it in a mixture, and compile the CRN
reporter_gene = Gene("Reporter", "GFP", parameters = parameters)
mixture = ExpressionMixture(components = [reporter_gene])
CRN = mixture.compile_crn()
#Print the compiled CRN
print(CRN)
# -
| examples/Developer Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Load the tree dataset; expects TreeData.csv in the working directory
df=pd.read_csv('TreeData.csv')
#Preview the first 22 rows
df.head(22)
# +
# Preprocessing :
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from itertools import product
from sklearn.preprocessing import StandardScaler
# Classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn import tree
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
# +
#Separate features from the target column, standardize the features,
#and hold out 30% of the rows for testing (fixed seed for reproducibility).
feature_matrix = df.drop(['Label'], axis=1).values
Y = df['Label']
X = StandardScaler().fit_transform(feature_matrix)
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size=0.30, random_state=101)
# -
#Fit each classifier on the training split and report its confusion matrix
#and precision/recall/F1 on the held-out test split.
trainedmodel = LogisticRegression().fit(X_Train,Y_Train)
predictions =trainedmodel.predict(X_Test)
print(confusion_matrix(Y_Test,predictions))
print(classification_report(Y_Test,predictions))
# +
# trainedforest = RandomForestClassifier().fit(X_Train,Y_Train)
# predictionforest = trainedforest.predict(X_Test)
# print(confusion_matrix(Y_Test,predictionforest))
# print(classification_report(Y_Test,predictionforest))
# -
#Linear support vector machine
trainedsvm = svm.LinearSVC().fit(X_Train, Y_Train)
predictionsvm = trainedsvm.predict(X_Test)
print(confusion_matrix(Y_Test,predictionsvm))
print(classification_report(Y_Test,predictionsvm))
#Decision tree
trainedtree = tree.DecisionTreeClassifier().fit(X_Train, Y_Train)
predictionstree = trainedtree.predict(X_Test)
print(confusion_matrix(Y_Test,predictionstree))
print(classification_report(Y_Test,predictionstree))
# predictionstree = trainedtree.predict_proba(X_Test)
# print(predictionstree)
#Linear discriminant analysis
trainedlda = LinearDiscriminantAnalysis().fit(X_Train, Y_Train)
predictionlda = trainedlda.predict(X_Test)
print(confusion_matrix(Y_Test,predictionlda))
print(classification_report(Y_Test,predictionlda))
#Gaussian naive Bayes
trainednb = GaussianNB().fit(X_Train, Y_Train)
predictionnb = trainednb.predict(X_Test)
print(confusion_matrix(Y_Test,predictionnb))
print(classification_report(Y_Test,predictionnb))
# +
#Project the standardized features onto the first two principal components,
#then re-split and re-fit four classifiers in the reduced 2D space.
#NOTE(review): PCA is fit on the full dataset *before* the train/test split,
#so information from the test rows leaks into the projection. Fine for the
#visualization below, but not for an unbiased accuracy estimate - confirm.
pca = PCA(n_components=2,svd_solver='full')
X_pca = pca.fit_transform(X)
# print(pca.explained_variance_)
X_reduced, X_test_reduced, Y_Train, Y_Test = train_test_split(X_pca, Y, test_size = 0.30, random_state = 101)
# pca = PCA(n_components=2,svd_solver='full')
# X_reduced = pca.fit_transform(X_Train)
#X_reduced = TSNE(n_components=2).fit_transform(X_Train, Y_Train)
trainednb = GaussianNB().fit(X_reduced, Y_Train)
trainedtree = tree.DecisionTreeClassifier().fit(X_reduced, Y_Train)
trainedforest = RandomForestClassifier(n_estimators=700).fit(X_reduced,Y_Train)
trainedmodel = LogisticRegression().fit(X_reduced,Y_Train)
# pca = PCA(n_components=2,svd_solver='full')
# X_test_reduced = pca.fit_transform(X_Test)
#X_test_reduced = TSNE(n_components=2).fit_transform(X_Test, Y_Test)
#Report each model's performance on the PCA-reduced test split
print('Naive Bayes')
predictionnb = trainednb.predict(X_test_reduced)
print(confusion_matrix(Y_Test,predictionnb))
print(classification_report(Y_Test,predictionnb))
print('Decision Tree')
predictionstree = trainedtree.predict(X_test_reduced)
print(confusion_matrix(Y_Test,predictionstree))
print(classification_report(Y_Test,predictionstree))
print('Random Forest')
predictionforest = trainedforest.predict(X_test_reduced)
print(confusion_matrix(Y_Test,predictionforest))
print(classification_report(Y_Test,predictionforest))
print('Logistic Regression')
predictions =trainedmodel.predict(X_test_reduced)
print(confusion_matrix(Y_Test,predictions))
print(classification_report(Y_Test,predictions))
# +
# Thanks to: https://scikit-learn.org/stable/auto_examples/ensemble/plot_voting_decision_regions.html
# Plotting decision regions: refit the four classifiers on the 2D PCA data and
# draw each model's decision surface over a grid covering the training points.
reduced_data = X_reduced
trainednb = GaussianNB().fit(reduced_data, Y_Train)
#CONSISTENCY FIX: use reduced_data like the sibling fits (it is the same array
#as X_reduced, but mixing the two names obscured that)
trainedtree = tree.DecisionTreeClassifier().fit(reduced_data, Y_Train)
trainedforest = RandomForestClassifier(n_estimators=700).fit(reduced_data,Y_Train)
trainedmodel = LogisticRegression().fit(reduced_data,Y_Train)
#Evaluation grid spanning the data with a 1-unit margin on each side
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                     np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
#One subplot per classifier: filled contour of predictions + training scatter
for idx, clf, tt in zip(product([0, 1], [0, 1]),
                        [trainednb, trainedtree, trainedforest, trainedmodel],
                        ['Naive Bayes Classifier', 'Decision Tree',
                         'Random Forest', 'Logistic Regression']):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    axarr[idx[0], idx[1]].contourf(xx, yy, Z,cmap=plt.cm.coolwarm, alpha=0.4)
    axarr[idx[0], idx[1]].scatter(reduced_data[:, 0], reduced_data[:, 1], c=Y_Train,
                                  s=20, edgecolor='k')
    axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| AI/Tree Data/ML-TreeData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A comparison between LR and NWAY methods
# ======================================
#
# We crossmatch XXL-North X-ray sources with SDSS optical sources, both selected within a 1 deg radius circle, using the LR and NWAY methods, and compare the results.
# Definition of the catalogues and set crossmatch:
# +
from astropy import units as u
from astropy.coordinates import SkyCoord
from mocpy import MOC
from astromatch import Catalogue, Match
# Common MOC
#Build a circular sky region (a=b makes the elliptical cone a circle) at
#max_depth=14 HEALPix resolution; both catalogues share this footprint.
region_center = SkyCoord(ra=35.30109, dec=-4.45962, unit='deg')
search_radius = 1*u.deg
moc_xxl = MOC.from_elliptical_cone(
    lon=region_center.ra,
    lat=region_center.dec,
    a=search_radius,
    b=search_radius,
    pa=0*u.deg,
    max_depth=14
)
# X-rays: circular positional errors
xcat = Catalogue(
    'xxl_1deg.fits',
    name='xxl',
    id_col='Xseq',
    coord_cols=['RABdeg', 'DEBdeg'],
    poserr_cols=['e_Bpos'],
    poserr_type='circle',
    area=moc_xxl,
)
# Optical: RA/Dec error ellipse, plus magnitudes used to build priors
ocat = Catalogue(
    'sdss_1deg.fits',
    name='sdss',
    id_col='objID',
    coord_cols=['RA_ICRS', 'DE_ICRS'],
    poserr_cols=['e_RA_ICRS', 'e_DE_ICRS'],
    poserr_type='rcd_dec_ellipse',
    area=moc_xxl,
    mag_cols=['umag', 'gmag', 'rmag', 'imag', 'zmag'],
)
#Crossmatch object used for all methods below
xm = Match(xcat, ocat)
# -
# Cross-matching using LR method:
#Run the likelihood-ratio match within a 10 arcsec radius and keep the
#best ("primary") counterpart of each X-ray source
results_lr = xm.run(method='lr', radius=10.0*u.arcsec)
matchs_lr_primary = xm.get_matchs(match_type='primary')
# Save calculated magnitude priors for later use:
prior_table = xm.priors.to_table(include_bkg_priors=True)
prior_table.write("lrpriors.fits", format="fits", overwrite=True)
# Cross-matching using NWAY (default magnitude priors):
results_nway_dfprior = xm.run(method='nway', radius=10.0*u.arcsec, use_mags=True, prior_completeness=0.55)
matchs_nway_dfprior_primary = xm.get_matchs(match_type='primary')
# Cross-matching using NWAY (LR magnitude priors):
# +
from astromatch.priors import Prior
#Reload the priors computed by the LR run so NWAY uses the same ones
priors = {'sdss': Prior.from_table("lrpriors.fits", ['umag', 'gmag', 'rmag', 'imag', 'zmag'])}
results_nway_lrprior = xm.run(
    method='nway', radius=10.0*u.arcsec, prior_completeness=0.55, use_mags=True, priors=priors
)
matchs_nway_lrprior_primary = xm.get_matchs(match_type='primary')
# -
# Comparison of results
# -------------------------------
#
# Identification rate:
# +
#Fraction of X-ray sources that received a primary counterpart, per method
lr_id_rate = 100 * len(matchs_lr_primary) / len(xcat)
print(f"LR identification rate :{lr_id_rate:.1f}%")
lr_nway_dfprior_rate = 100 * len(matchs_nway_dfprior_primary) / len(xcat)
print(f"NWAY (default prior) identification rate :{lr_nway_dfprior_rate:.1f}%")
lr_nway_lrprior_rate = 100 * len(matchs_nway_lrprior_primary) / len(xcat)
print(f"NWAY (LR prior) identification rate :{lr_nway_lrprior_rate:.1f}%")
# -
# prob_has_match distributions:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
#Overlay the prob_has_match distributions of the three runs using the same
#bin edges (taken from the first, auto-binned histogram)
plt.figure(figsize=(9,7))
_, bins, _ = plt.hist(matchs_lr_primary["prob_has_match"], label="LR",
                      bins="auto")
plt.hist(matchs_nway_dfprior_primary["prob_has_match"], label="NWAY (default prior)",
         histtype='step', lw=5, ls=":", bins=bins)
plt.hist(matchs_nway_lrprior_primary["prob_has_match"], label="NWAY (LR prior)",
         histtype='step', lw=5, ls="--", bins=bins)
plt.legend()
plt.xlim(0, 1)
plt.xlabel("prob_has_match")
plt.show()
# +
import numpy as np
from astropy.table import join
#Compare, source by source, which SDSS counterpart each method chose
matchs_lr_primary_ids = matchs_lr_primary[["SRCID_xxl", "SRCID_sdss"]]
matchs_nway_dfprior_primary_ids = matchs_nway_dfprior_primary[["SRCID_xxl", "SRCID_sdss"]]
matchs_nway_lrprior_primary_ids = matchs_nway_lrprior_primary[["SRCID_xxl", "SRCID_sdss"]]
#Left join on the X-ray id; sources unmatched by NWAY get masked values in
#SRCID_sdss_nway, which compare unequal below and so count as disagreements
lr_nway_dfprior = join(matchs_lr_primary_ids, matchs_nway_dfprior_primary_ids,
                       join_type="left", keys=["SRCID_xxl"], table_names=["lr", "nway"])
equal_ids = len(np.where(lr_nway_dfprior["SRCID_sdss_lr"] == lr_nway_dfprior["SRCID_sdss_nway"])[0])
percent_equal_ids = 100 * equal_ids / len(lr_nway_dfprior)
print(f"LR and NWAY (default prior) get the same counterpart for {percent_equal_ids:.1f}% of the X-ray sources.")
lr_nway_lrprior = join(matchs_lr_primary_ids, matchs_nway_lrprior_primary_ids,
                       join_type="left", keys=["SRCID_xxl"], table_names=["lr", "nway"])
equal_ids = len(np.where(lr_nway_lrprior["SRCID_sdss_lr"] == lr_nway_lrprior["SRCID_sdss_nway"])[0])
percent_equal_ids = 100 * equal_ids / len(lr_nway_lrprior)
print(f"LR and NWAY (LR prior) get the same counterpart for {percent_equal_ids:.1f}% of the X-ray sources.")
| docs/astromatch/comparison_lr_nway.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
import tempfile
from fastai2.basics import *
from fastai2.learner import Callback
from nbdev.showdoc import *
# +
#all_slow
# +
#default_exp callback.captum
# -
# # Captum
# Captum is the Model Interpretation Library from PyTorch as available [here](https://captum.ai)
#
# To use this we need to install the package using
#
# `conda install captum -c pytorch`
#
# or
#
# `pip install captum`
#
# This is a callback for using Captum with fastai.
# +
# export
# Dirty hack as json_clean doesn't support CategoryMap type
from ipykernel import jsonutil
#Keep a reference to the original cleaner, then monkey-patch it with a
#version that first converts fastai CategoryMap objects to plain lists.
_json_clean = jsonutil.json_clean
def json_clean(o):
    """Make *o* JSON-serializable, unwrapping CategoryMap before delegating."""
    if isinstance(o, CategoryMap):
        o = list(o.items)
    return _json_clean(o)
jsonutil.json_clean = json_clean
# +
#export
from captum.attr import IntegratedGradients
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.features import ImageFeature
# -
#export
class IntegradedGradientsCallback(Callback):
    "Captum Callback for Resnet Interpretation"
    def __init__(self):
        pass

    def after_fit(self):
        #Wrap the trained model once training has finished
        self.integrated_gradients = IntegratedGradients(self.model)

    def visualize(self, inp_data, n_steps=200, cmap_name='custom blue', colors=None, N=256,
                  methods=None, signs=None, outlier_perc=1):
        """Compute Integrated Gradients attributions for one item of `inp_data`
        and render them next to the original image.

        n_steps: number of approximation steps passed to Captum.
        cmap_name/colors/N: custom colormap specification for the heat map.
        methods/signs/outlier_perc: forwarded to visualize_image_attr_multiple.
        """
        if methods is None: methods=['original_image','heat_map']
        if signs is None: signs=["all", "positive"]
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=1)
        self.enc_inp,self.enc_preds= dl.one_batch()
        dec_data=dl.decode((self.enc_inp,self.enc_preds))
        self.dec_img,self.dec_pred=dec_data[0][0],dec_data[1][0]
        self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if colors is None else colors
        #BUGFIX: honor the n_steps argument (it was hard-coded to 200) and move
        #the batch to the device of the dataloader just created above rather
        #than self.dl, which may refer to a different loader.
        self.attributions_ig = self.integrated_gradients.attribute(self.enc_inp.to(dl.device), target=self.enc_preds, n_steps=n_steps)
        default_cmap = LinearSegmentedColormap.from_list(cmap_name,
                                                         self.colors, N=N)
        _ = viz.visualize_image_attr_multiple(np.transpose(self.attributions_ig.squeeze().cpu().detach().numpy(), (1,2,0)),
                                              np.transpose(self.dec_img.numpy(), (1,2,0)),
                                              methods=methods,
                                              cmap=default_cmap,
                                              show_colorbar=True,
                                              signs=signs,
                                              outlier_perc=outlier_perc, titles=[f'Original Image - ({self.dec_pred})', 'IG'])
# +
from fastai2.vision.all import *
#Train a cats-vs-dogs classifier on the Pets dataset with the IG callback attached
path = untar_data(URLs.PETS)/'images'
def is_cat(x): return x[0].isupper()  #Pets convention: cat breeds are capitalized
dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(128))
learn = cnn_learner(dls, resnet34, metrics=error_rate,cbs=IntegradedGradientsCallback())
learn.fine_tune(1)
# +
#fastai exposes the callback as learn.integraded_gradients (snake_case of the class name)
paths=list(path.iterdir())
learn.integraded_gradients.visualize(paths,n_steps=1000)
# -
#export
class CaptumInsightsCallback(Callback):
    "Captum Insights Callback for Image Interpretation"
    def __init__(self): pass
    def _formatted_data_iter(self, dl, normalize_func):
        # Yield de-normalized batches wrapped in Captum Batch objects.
        # NOTE(review): the `while True` relies on next() ending the generator
        # when dl is exhausted; since PEP 479 a StopIteration inside a
        # generator becomes RuntimeError - confirm the dataloader cycles.
        dl_iter=iter(dl)
        while True:
            images,labels=next(dl_iter)
            images=normalize_func.decode(images).to(dl.device)
            yield Batch(inputs=images, labels=labels)
    def visualize(self, inp_data, debug=True):
        # Launch the interactive Captum Insights widget on `inp_data`.
        # _baseline_func: all-zeros baseline image for attribution
        _baseline_func= lambda o: o*0
        # Captum needs string class names; bool vocabs (e.g. is_cat) are mapped to "True"/"False"
        _get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
        dl = self.dls.test_dl(L(inp_data),with_labels=True, bs=4)
        # Find the Normalize transform of the pipeline (noop if there is none)
        normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
        visualizer = AttributionVisualizer(
            models=[self.model],
            score_func=lambda o: torch.nn.functional.softmax(o, 1),
            classes=_get_vocab(dl.vocab),
            features=[
                ImageFeature(
                    "Image",
                    baseline_transforms=[_baseline_func],
                    input_transforms=[normalize_func],
                )
            ],
            dataset=self._formatted_data_iter(dl,normalize_func)
        )
        visualizer.render(debug=debug)
# +
from fastai2.vision.all import *
#Same training setup as the IG example above, but with the Insights callback
path = untar_data(URLs.PETS)/'images'
def is_cat(x): return x[0].isupper()  #Pets convention: cat breeds are capitalized
dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(128))
learn = cnn_learner(dls, resnet34, metrics=error_rate,cbs=CaptumInsightsCallback())
learn.fine_tune(1)
# +
#fastai exposes the callback as learn.captum_insights (snake_case of the class name)
paths=list(path.iterdir())
learn.captum_insights.visualize(paths)
| nbs/73_callback.captum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Supplementary Materials
# This code accompanies the paper *Asymptotic Convergence of Gradient Descent for Linear Regression Least Squares Optimization* (Lipshitz, 2017)
#
# ## Initialization
from pylab import *
from numpy import random as random
#Synthetic linear-regression data: y = w.x + noise (Python 2 notebook).
random.seed(1)  #fixed seed for reproducibility
N=1000.  #kept as a float so 1/N below is a float division under Python 2
w = array([14., 30.]);  #true slope and intercept
x = zeros((2, int(N))).astype(float32)
x[0,:] = arange(N).astype(float32)  #regressor values 0..N-1
x[1,:] = 1  #constant row for the intercept term
y = w.dot(x) + random.normal(size=int(N), scale=100.)  #noisy observations
# ## Defining Regression
# +
#Prediction: yh = w . x
yh = lambda xs, ws: \
    ws.dot(xs)
#Scalar gradient of the mean squared error (summed over all entries)
grad = lambda ys, yhs, xs: \
    (1./xs.shape[1])*sum((yhs-ys)*xs).astype(float32)
#Update step: delta = alpha * gradient
delta = lambda gs, a: \
    a*gs
def regress(y, x, alpha, T=1000, wh=None, **kwargs):
    """Run T gradient-descent steps and return (final weights, trajectory).

    y: observations; x: (2, N) design matrix; alpha: step size.
    wh: optional length-2 ndarray with the initial weights; drawn at random
        when omitted.
    BUGFIX: a caller-supplied wh is now actually used - it was previously
    overwritten unconditionally by the random initialization.
    """
    if wh is None:
        wh = random.normal(2, size=2)
    whs = zeros((T, 2))
    whs[0,:] = wh
    for i in range(1,T):  #range (not xrange) works on both Python 2 and 3
        wh+=delta(grad(y,yh(x,wh), x), alpha)
        whs[i,:] = wh.copy()
    return wh, whs
# +
def regrSample(y, x, alpha, T=1000, N=10, **kwargs):
    """Repeat regress() N times; return (final weights per run, trajectories per run)."""
    #FIX: a list comprehension instead of map()+xrange. On Python 3, map()
    #returns a one-shot iterator which the two comprehensions below would
    #exhaust after the first pass (wDist would come out empty); on Python 2
    #the behavior is unchanged.
    out = [regress(y, x, alpha, T=T) for _ in range(N)]
    trains = array([o[1] for o in out])  #shape (N, T, 2) trajectories
    wDist = array([o[0] for o in out])   #shape (N, 2) final weights
    return wDist, trains

def statsRegr(*args, **kwargs):
    """Mean and standard deviation of the trajectories across repetitions."""
    wDist, trains = regrSample(*args, **kwargs)
    return np.mean(trains, axis=0), np.std(trains, axis=0)
# -
# ## Running Regression above and Below the Upper Bound on $\alpha$
# The theoretically derived bounds on $\alpha$ are $$\alpha \in \left( -2\frac{N}{|\mathbf{x}|^2}, 0 \right]$$
#
# Other $\alpha$ values diverge
# +
def plotDynamicsForAlpha(alpha, axTitle, T=1000, N=10):
    """Plot mean +/- std of the weight trajectories for step size `alpha`.

    Uses the module-level data (y, x) and the pylab state-machine API.
    NOTE: `fs` is a module-level font size defined later in the notebook,
    before the first call to this function.
    Returns (mu, sig), the per-step mean and std over N repetitions.
    """
    t = np.arange(T)
    mu, sig = statsRegr(y, x, alpha, T=T, N=N)
    plot(mu[:,0], 'r:', label='$w_1$')
    plot(mu[:,1], 'b:', label='$w_2$')
    #shaded one-sigma bands around each weight's mean trajectory
    fill_between(t, \
                 mu[:,0]+sig[:,0], \
                 mu[:,0]-sig[:,0], \
                 facecolor='red', alpha=0.5)
    fill_between(t,\
                 mu[:,1]+sig[:,1], \
                 mu[:,1]-sig[:,1], \
                 facecolor='blue', alpha=0.5)
    xlabel("t [Iterations]", fontdict={'fontsize':fs*.8})
    yl = ylabel("$w_{i,t}$",fontdict={'fontsize':fs*.8})
    yl.set_rotation('horizontal')
    title(axTitle, fontdict={'fontsize':fs})
    tight_layout()
    return mu, sig
# -
#(label, alpha) pairs spanning the stability interval (-2N/|x|^2, 0]
#derived in the paper, plus values outside it that are expected to diverge
alphaData = [
    ("$a=2$", 2),
    ("$a=0$",0.),
    ("$a=-0.5N/x^2$",-0.5*N/linalg.norm(x[0,:])**2),
    ("$a=-N/x^2$", -N/linalg.norm(x[0,:])**2),
    ("$a=-1.3N/x^2$", -1.3*N/linalg.norm(x[0,:])**2),
    ("$a=-1.6N/x^2$", -1.6*N/linalg.norm(x[0,:])**2),
    ("$a=-1.99N/x^2$", -1.99*N/linalg.norm(x[0,:])**2),
    ("$a=-2N/x^2$", -2*N/linalg.norm(x[0,:])**2)
]
# +
# %matplotlib inline
from scipy.stats import norm
import seaborn as sns
fs = 15  #font size used by plotDynamicsForAlpha
#One figure per alpha value; keep each (mu, sig) pair for the next cell
figure(figsize=(10,3*len(alphaData)))
outs = []
for i, d in enumerate(alphaData):
    k, v = d
    # subplot(len(alphaData),1, i+1)
    figure(figsize=(10,3))
    outs.append(plotDynamicsForAlpha(v, k, T=150 ))
    tight_layout()
# suptitle("Dynamical Learning Trajectories for Significant Alpha Values", y=1.08, fontdict={'fontsize':20});
# +
#Plot the (approximately normal) distribution of the final weights for each
#alpha; for diverged runs (NaNs in mu) use the last finite trajectory index.
for i, axtitle in enumerate(alphaData):
    axtitle, axnum = axtitle
    mu, sig = outs[i]
    figure(figsize=(10,3))
    if np.sum(np.isnan(mu)) > 0:
        k=2
        #last index where mean and std are still finite
        idx0=argwhere(~np.isnan(mu[:,0]))[-1]-1
        idx1=argwhere(~np.isnan(sig[:,0]))[-1]-1
        idx = min(idx0, idx1)
        #NOTE(review): both arguments of max()/min() here are identical
        #(component 0 twice) - presumably one should use component 1, as in
        #the pdf plots below; confirm intent before changing.
        xmin = max(mu[idx,0]-k*sig[idx,0], mu[idx,0]-k*sig[idx,0])
        xmax = min(mu[idx,0]+k*sig[idx,0], mu[idx,0]+k*sig[idx,0])
        x_axis = np.linspace(xmin,xmax, num=300);
    else:
        #NOTE(review): same duplicated-argument pattern as above.
        xmin = max(mu[-1,0]-3*sig[-1,0], mu[-1,0]-3*sig[-1,0])
        xmax = min(mu[-1,0]+3*sig[-1,0], mu[-1,0]+3*sig[-1,0])
        x_axis = np.linspace(xmin,xmax, num=300);
    plt.plot(x_axis, norm.pdf(x_axis,mu[-1,0],sig[-1,0]),'r:');
    plt.plot(x_axis, norm.pdf(x_axis,mu[-1,1],sig[-1,1]), 'b:');
    xlim(xmin = xmin, xmax=xmax)
    p, v = yticks()
    plt.yticks(p,map(lambda w: round(w, 2),linspace(0, 1, num=len(p))))
    title(axtitle)
    tight_layout()
# -
#Sanity check of the design-matrix shape
x.shape
# +
#Compare the closed-form trajectory of w_1 with a direct simulation for a
#step size just outside the stability interval (a = -2.1 N/|x|^2).
figure(figsize=(10,10))
subplot(2,1,1)
title("Closed From Expression", fontdict={'fontsize':10})
T = 30
w0 = random.normal(2, size=2)
t = np.arange(T)
a = -2.1*N/linalg.norm(x[0,:])**2
#beta2/beta1 are the coefficients of the linear recurrence w_{t+1} = (1+beta2) w_t - beta1;
#1/N is float division because N was defined as a float (Python 2 notebook)
beta2 = (1/N)*a*x[0,:].dot(x[0,:])
beta1 = -(1/N)*a*x[0,:].dot(y)
ws = w0[0]*(beta2+1)**t - beta1*(1-(beta2+1)**t)/beta2
# ws = w0[0]*(-1)**t + ((-1)**t -1)*x[0,:].dot(y)/linalg.norm(x[0,:])**2
plot(ws)
subplot(2,1,2)
title("Simulation", fontdict={'fontsize':10})
#Direct gradient-descent simulation (same update as regress; xrange is Python 2)
wh = w0
whs = zeros((T, 2))
whs[0,:] = wh
for i in xrange(1,T):
    wh+=delta(grad(y,yh(x,wh), x), a)
    whs[i,:] = wh.copy()
plot(whs[:,0])
suptitle(("Asymptotic Behavior "
          "of Closed form and Simulated Learning: $a = -2.1N/x^2$"), fontdict={"fontsize":20})
# -
# ## $\alpha = \sup A$
#Closed-form trajectory at the supremum of the stable interval (alpha -> 0):
#0**t collapses the transient, leaving the least-squares fixed point
t = arange(0,10)
ws = (0**t)*(w0[0]+x[0,:].dot(y)/linalg.norm(x[0,:])**2) + x[0,:].dot(y)/linalg.norm(x[0,:])**2
figure()
ax = subplot(111)
ax.set_title("alpha = sup A")
ax.plot(ws)
#Oscillatory/divergent closed form at the other boundary of the interval
t = arange(0,10)
ws = ((-1)**t)*w0[0] - (x[0,:].dot(y)/linalg.norm(x[0,:])**2) + (-2)**t*x[0,:].dot(y)/linalg.norm(x[0,:])**2
figure()
ax = subplot(111)
ax.set_title("alpha = sup A")
ax.plot(ws)
| _notebooks/.ipynb_checkpoints/Asymptotic Convergence of Gradient Descent for Linear Regression Least Squares Optimization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convert pt3 files to Photon-HDF5
#
#
# # Prepare the data file
#
#
# Before starting, you need to get a data file to be converted to Photon-HDF5.
# You can use one of our example data files available
# [on figshare](https://figshare.com/articles/data_files_for_phconvert/5421565).
#
# Specify the input data file in the following cell:
#Path of the input .pt3 file to convert (relative to this notebook)
filename = 'data/DNA_FRET_0.5nM.pt3'
import os
#EAFP existence/readability check before doing any work
try:
    with open(filename): pass
    print('Data file found, you can proceed.')
except IOError:
    print('ATTENTION: Data file not found, please check the filename.\n'
          ' (current value "%s")' % filename)
# %matplotlib inline
import numpy as np
import phconvert as phc
print('phconvert version: ' + phc.__version__)
# ## Load Data
#Parse the pt3 file into phconvert's dict representation plus raw metadata.
#Detector channels: 2 = donor, 1 = acceptor. The alternation periods are in
#nanotime bins - presumably TCSPC bins; confirm against the hardware config.
d, meta = phc.loader.nsalex_pq(filename,
                               donor = 2,
                               acceptor = 1,
                               alex_period_donor = (0, 2000),
                               alex_period_acceptor = (2000, 3200),
                               excitation_wavelengths = (470e-9, 635e-9),
                               detection_wavelengths = (525e-9, 690e-9),
                               )
meta['hardware_name']
# +
#Photon counts per detector channel before any filtering
detectors = d['photon_data']['detectors']
print("Detector Counts")
print("-------- --------")
for det, count in zip(*np.unique(detectors, return_counts=True)):
    print("%8d %8d" % (det, count))
# -
# ## Remove the overflow counts
# +
nanotimes = d['photon_data']['nanotimes']
detectors = d['photon_data']['detectors']
timestamps = d['photon_data']['timestamps']
not_overflow = d['photon_data']['nanotimes'] != 0
detectors = detectors[not_overflow]
timestamps = timestamps[not_overflow]
nanotimes = nanotimes[not_overflow]
# -
print("Detector Counts")
print("-------- --------")
for det, count in zip(*np.unique(detectors, return_counts=True)):
print("%8d %8d" % (det, count))
d['photon_data']['nanotimes'] = nanotimes
d['photon_data']['detectors'] = detectors
d['photon_data']['timestamps'] = timestamps
phc.plotter.alternation_hist(d)
# ## Metadata
#Free-form metadata that will be embedded in the Photon-HDF5 file.
#NOTE(review): the sample metadata (Copper Azurin, ATTO655) does not match
#the DNA-FRET filename set earlier in this notebook - confirm which is right.
author = 'Biswajit'
author_affiliation = 'Leiden University'
description = 'A demonstrative pt3 data file.'
sample_name = 'Copper Azurin in 1mM Ascorbate'
dye_names = 'ATTO655'
buffer_name = 'HEPES pH7 with 100 mM NaCl'
# ### Add meta data
# +
d['description'] = description
d['sample'] = dict(
    sample_name=sample_name,
    dye_names=dye_names,
    buffer_name=buffer_name,
    num_dyes = len(dye_names.split(',')))  #number of comma-separated dye names
d['identity'] = dict(
    author=author,
    author_affiliation=author_affiliation)
# -
# Remove some empty groups that may cause errors on saving
_ = meta.pop('dispcurve', None)
_ = meta.pop('imghdr', None)
#Keep the remaining raw PicoQuant metadata under the user group
d['user'] = {'picoquant': meta}
# ## Save to Photon-HDF5
phc.hdf5.save_photon_hdf5(d, overwrite=True)
# ## Load Photon-HDF5
from pprint import pprint
# Reopen the file that was just written and inspect its groups
filename = d['_data_file'].filename
h5data = phc.hdf5.load_photon_hdf5(filename)
phc.hdf5.dict_from_group(h5data.identity)
phc.hdf5.dict_from_group(h5data.setup)
pprint(phc.hdf5.dict_from_group(h5data.photon_data))
# Release the underlying HDF5 file handle
h5data._v_file.close()
| notebooks/Convert ns-ALEX PT3 files to Photon-HDF5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WDbhGHtG8jFE"
# # Part 3: Build an Embeddings index from a data source
#
# In Part 1, we gave a general overview of txtai, the backing technology and examples of how to use it for similarity searches. Part 2 covered how to use txtai for extractive question-answer systems.
#
# The previous examples worked on data stored in memory for demo purposes. For real world large-scale use cases, data is usually stored in a database (Elasticsearch, SQL, MongoDB, files, etc). This example covers reading data from SQLite, building an Embeddings index backed by word embeddings and running queries against the generated Embeddings index.
#
# This example covers functionality found in the [paperai](https://github.com/neuml/paperai) library. See that library for a full solution that can be used with the dataset discussed below.
# + [markdown] id="UQ0fCwXn9bcH"
# # Install dependencies
#
# Install txtai and all dependencies
# + id="czPYSA2Q9ZHO"
# %%capture
# !pip install git+https://github.com/neuml/txtai
# + [markdown] id="SN9SCZKQ9fJF"
# # Download data
#
# This example is going to work off a subset of the [CORD-19](https://www.semanticscholar.org/cord19) dataset. COVID-19 Open Research Dataset (CORD-19) is a free resource of scholarly articles, aggregated by a coalition of leading research groups, covering COVID-19 and the coronavirus family of viruses.
#
# The following download is a SQLite database built from a subset of CORD-19, generated from a [Kaggle notebook](https://www.kaggle.com/davidmezzetti/cord-19-slim/output). More information on this data format can be found in the [CORD-19 Analysis](https://www.kaggle.com/davidmezzetti/cord-19-analysis-with-sentence-embeddings) notebook.
# + id="TONQ4_Kv9dtd" colab={"base_uri": "https://localhost:8080/"} outputId="f0c7ee46-f427-458e-f3d3-913cd13bd473"
# !wget https://github.com/neuml/txtai/releases/download/v1.1.0/tests.gz
# !gunzip tests.gz
# !mv tests articles.sqlite
# + [markdown] id="bzdaJiZYBIHE"
# # Build Word Vectors
#
# This example will build a search system backed by word embeddings. While not quite as powerful as transformer embeddings, they often provide a good tradeoff of performance to functionality for an embedding based search system.
#
# For this notebook, we'll build our own custom embeddings for demo purposes. A number of pre-trained word embedding models are available:
#
# - [General language models from pymagnitude](https://github.com/plasticityai/magnitude)
# - [CORD-19 fastText](https://www.kaggle.com/davidmezzetti/cord19-fasttext-vectors)
# + id="fJcn-CAH-u3K" colab={"base_uri": "https://localhost:8080/"} outputId="af389b28-4016-4d1b-8ade-643c1b6ef8dd"
import os
import sqlite3
import tempfile
from txtai.tokenizer import Tokenizer
from txtai.vectors import WordVectors
print("Streaming tokens to temporary file")
# Stream tokens to temp working file
# delete=False: the file must survive the with-block so WordVectors.build can read it
with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as output:
    # Save file path
    tokens = output.name
    db = sqlite3.connect("articles.sqlite")
    cur = db.cursor()
    cur.execute("SELECT Text from sections")
    # One tokenized section per line, tokens separated by spaces
    for row in cur:
        output.write(" ".join(Tokenizer.tokenize(row[0])) + "\n")
# Free database resources
db.close()
# Build word vectors model - 300 dimensions, 3 min occurrences
WordVectors.build(tokens, 300, 3, "cord19-300d")
# Remove temporary tokens file
os.remove(tokens)
# Show files
# !ls -l
# + [markdown] id="_UxcC1-JGH-d"
# # Build an embeddings index
#
# The following steps builds an embeddings index using the word vector model just created. This model builds a BM25 + fastText index. BM25 is used to build a weighted average of the word embeddings for a section. More information on this method can be found in this [Medium article](https://towardsdatascience.com/building-a-sentence-embedding-index-with-fasttext-and-bm25-f07e7148d240?gi=79da927aa10).
# + id="5PrrxGRPGHqX" colab={"base_uri": "https://localhost:8080/"} outputId="dae51d29-676f-41b4-c9a0-0e2b7e837405"
import sqlite3
import regex as re
from txtai.embeddings import Embeddings
from txtai.tokenizer import Tokenizer
def stream():
    """Yield (id, tokens, None) tuples for every indexable section.

    Pulls tagged, informative sections from articles.sqlite, skips
    boilerplate sections (background/discussion/introduction/reference)
    and tokenizes the remaining text for the embeddings index.
    """
    # Connection to database file
    connection = sqlite3.connect("articles.sqlite")
    cursor = connection.cursor()
    # Select tagged sentences without a NLP label. NLP labels are set for non-informative sentences.
    cursor.execute("SELECT Id, Name, Text FROM sections WHERE (labels is null or labels NOT IN ('FRAGMENT', 'QUESTION')) AND tags is not null")
    streamed = 0
    for uid, name, text in cursor:
        # Guard clause: skip boilerplate document sections by name
        if name and re.search(r"background|(?<!.*?results.*?)discussion|introduction|reference", name.lower()):
            continue
        tokens = Tokenizer.tokenize(text)
        streamed += 1
        if streamed % 1000 == 0:
            print("Streamed %d documents" % (streamed), end="\r")
        # Skip documents with no tokens parsed
        if tokens:
            yield (uid, tokens, None)
    print("Iterated over %d total rows" % (streamed))
    # Free database resources
    connection.close()
# BM25 + fastText vectors
# BM25 + fastText vectors
# pca=3: strip the first 3 principal components from sentence vectors
embeddings = Embeddings({"path": "cord19-300d.magnitude",
                         "scoring": "bm25",
                         "pca": 3})
# Build scoring index if scoring method provided
# (the corpus is streamed twice: once for BM25 term statistics, once for indexing)
if embeddings.config.get("scoring"):
    embeddings.score(stream())
# Build embeddings index
embeddings.index(stream())
# + [markdown] id="zHk24su3e_gb"
# # Query data
#
# The following runs a query against the embeddings index for the terms "risk factors". It finds the top 5 matches and returns the corresponding documents associated with each match.
# + id="CRbDhvvDKEl-" colab={"base_uri": "https://localhost:8080/", "height": 293} outputId="b982c2b4-97fd-4d22-b275-78eeba3ba04e"
import pandas as pd
from IPython.display import display, HTML
pd.set_option("display.max_colwidth", None)
db = sqlite3.connect("articles.sqlite")
cur = db.cursor()
results = []
# Top 5 section matches for the query; each hit is joined back to its article
for uid, score in embeddings.search("risk factors", 5):
    cur.execute("SELECT article, text FROM sections WHERE id = ?", [uid])
    # NOTE: uid is rebound here — it now holds the parent article id, not the section id
    uid, text = cur.fetchone()
    cur.execute("SELECT Title, Published, Reference from articles where id = ?", [uid])
    results.append(cur.fetchone() + (text,))
# Free database resources
db.close()
df = pd.DataFrame(results, columns=["Title", "Published", "Reference", "Match"])
# It has been reported that displaying HTML within VSCode doesn't work.
# When using VSCode, the data can be exported to an external HTML file to view.
# See example below.
# htmlData = df.to_html(index=False)
# with open("data.html", "w") as file:
#    file.write(htmlData)
display(HTML(df.to_html(index=False)))
# + [markdown] id="XSf68I-ZfXOG"
# # Extracting additional columns from query results
#
# The example above uses the Embeddings index to find the top 5 best matches. In addition to this, an Extractor instance is used to ask additional questions over the search results, creating a richer query response.
# + id="TLVOTQJchvTi"
# %%capture
from txtai.extractor import Extractor
# Create extractor instance using qa model designed for the CORD-19 dataset
extractor = Extractor(embeddings, "NeuML/bert-small-cord19qa")
# + id="19fmKawThs6d" colab={"base_uri": "https://localhost:8080/", "height": 293} outputId="550db0d6-2a86-4766-8bf0-14c18f8eb5f3"
db = sqlite3.connect("articles.sqlite")
cur = db.cursor()
results = []
# For each top match, build a per-article context and ask follow-up questions
for uid, score in embeddings.search("risk factors", 5):
    cur.execute("SELECT article, text FROM sections WHERE id = ?", [uid])
    # NOTE: uid is rebound here — it now holds the parent article id, not the section id
    uid, text = cur.fetchone()
    # Get list of document text sections to use for the context
    cur.execute("SELECT Id, Name, Text FROM sections WHERE (labels is null or labels NOT IN ('FRAGMENT', 'QUESTION')) AND article = ?", [uid])
    sections = []
    # Same boilerplate-section filter used when building the index
    for sid, name, txt in cur.fetchall():
        if not name or not re.search(r"background|(?<!.*?results.*?)discussion|introduction|reference", name.lower()):
            sections.append((sid, txt))
    cur.execute("SELECT Title, Published, Reference from articles where id = ?", [uid])
    article = cur.fetchone()
    # Use QA extractor to derive additional columns
    # Each tuple is (column name, search query, question, snippet-only flag)
    answers = extractor(sections, [("Risk Factors", "risk factors", "What risk factors?", False),
                                   ("Locations", "hospital country", "What locations?", False)])
    results.append(article + (text,) + tuple([answer[1] for answer in answers]))
# Free database resources
db.close()
df = pd.DataFrame(results, columns=["Title", "Published", "Reference", "Match", "Risk Factors", "Locations"])
display(HTML(df.to_html(index=False)))
# + [markdown] id="ColTLy--rWfR"
# In the example above, the Embeddings index is used to find the top N results for a given query. On top of that, a question-answer extractor is used to derive additional columns based on a list of questions. In this case, the "Risk Factors" and "Location" columns were pulled from the document text.
# + [markdown] id="KWyoysauy7Pr"
# # Next
# In part 4 of this series, we'll combine the power of Elasticsearch with Extractive QA to build a large-scale, advanced search system.
#
| examples/03_Build_an_Embeddings_index_from_a_data_source.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="gUCuOKl0Sqnb"
# <img src="https://pandas.pydata.org/static/img/pandas.svg" width="250">
#
# ## <center> Getting Started with Pandas
# + id="-d5w2i8gysmB"
import pandas as pd
# + [markdown] id="0zyp-ohKSqnh"
# ### Installing `pandas`
# + [markdown] id="7375wPMXSqni"
# To install pandas, use the following commands:
#
# `pip install pandas` <br>
# -or- <br>
# `conda install pandas`
# + [markdown] id="STHclt8FSqni"
# ### Download Anaconda
#
# https://www.anaconda.com/products/individual
| Pandas - Advanced Pandas/01.Beginner to Advanced Pandas/01-01-getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutoriel numpy pour la création et la manipulation de base des matrices
#
# Commandes de base python notebook
#
# Pour executer une cellule : Shift + Entrée
# + jupyter={"outputs_hidden": false}
# gestion des bibliothèques externes
import numpy as np
print("Import OK") # affichage en fin de boite pour visualiser que l'exécution a bien eu lieue
# -
# ## Créations de matrice par différentes méthodes
# + jupyter={"outputs_hidden": true}
# Creating vectors
v1 = np.arange(0, 10, 1) # create a range
# arguments: start, stop, step
v1 = np.arange(0, 10) # with default step=1
v1 = np.arange(10) # default start=0
print(v1)
v2 = np.linspace(0, 10, 15) # with linspace, start and stop ARE both included
print(v2)
# + jupyter={"outputs_hidden": false}
# Creating matrices
m0 = np.array([[1, 2], [3, 4]]) # matrix
# a matrix = a vector of vectors
# building lists with nested comprehensions:
mp = [[n+m*10 for n in range(5)] for m in range(5)]
# building a numpy structure from a list or a list of lists:
mn = np.array([[n+m*10 for n in range(5)] for m in range(5)]) # or np.array(mp)
print(mn)
m1 = np.ones((10,2)) # matrix of ones, argument = tuple holding the dimensions
# WARNING: np.ones(10,2) does not work (the shape must be a single tuple)
m2 = np.zeros((5,4)) # matrix of zeros
m3 = np.eye(4) # square identity matrix, arg = dimension
print(m3)
m4 = np.random.rand(5,6) # matrix of independent uniform random numbers, args = dimensions
m5 = np.random.randn(5,6) # draws from a Gaussian(mu=0,var=1), args = dimensions
print(m5)
# + jupyter={"outputs_hidden": true}
# matrix concatenation (vstack stacks rows, hstack stacks columns)
m6 = np.vstack((np.array([[1, 2], [3, 4]]), np.ones((3,2))))
m7 = np.vstack((np.array([1, 2, 3]), np.hstack((np.ones((3,2)), np.zeros((3,1))))))
print(m6)
# + jupyter={"outputs_hidden": true}
# playing with the element types inside matrices
# an integer matrix
matInt = np.zeros((5,6), int) # 5x6 matrix of 0 (integers)
matBool = np.zeros((5,6), bool) # 5x6 matrix of False (booleans)
matBool2 = np.ones((5,6), bool) # 5x6 matrix of True (booleans)
# -
# ## Récupération/affectation de valeurs
# + jupyter={"outputs_hidden": true}
# a matrix
mat = np.ones((5,6))
mat[0,0] # get the first value
mat[0,:] # get the first row
mat[0,0:2] # get the values at indices 0 and 1
# a few extra tricks
mat[0,1:] # the whole row except the first cell
mat[0,:-1] # the whole row except the last cell
mat[0,:-2] # the whole row except the last two cells
A = np.array([1,2,3,4,5])
A[1:3] # array([2, 3])
# Any of the arguments in M[start:stop:step] can be omitted:
A[::] # start, stop and step with their default values
# array([ 1, -2, -3, 4, 5])
A[::2] # step = 2, default start and stop
# array([ 1, -3, 5])
A[:3] # the first three elements (indices 0,1,2)
# array([ 1, -2, -3])
A[3:] # from index 3 onwards
# array([4, 5])
# Negative indices can be used:
A[-1] # the last element
# 5
A[-3:] # the last 3 elements
# array([3, 4, 5])
# Assignment:
# an integer matrix
mat = np.ones((5,6))
mat[0,0:2] = 1 # block assignment of a scalar
mat[0,0:2] = np.zeros((1,2)) # block assignment from another matrix
# Matrix VS vector !!
A = np.random.rand(5,3) # 5x3 matrix
B = A[2,:] # extracting the third row...
# this result is a vector !!!
B = A[2:3,:] # extracting the third row...
# but this one is a (transposable) matrix !!!
# -
# ## Tailles des matrices
# + jupyter={"outputs_hidden": true}
# for a given variable:
mat.shape # (5,6)
mat.shape[0] # 5
mat.shape[1] # 6
n, m = mat.shape # multiple return values via tuple unpacking
# -
# ## Fonctions de base sur les matrices
# Additions, transposées etc...
# + jupyter={"outputs_hidden": true}
ma = np.random.rand(5,6)
# Transposition
mat = ma.T # transpose
mat = ma.transpose(); # or
mat = np.transpose(ma); # or
# most numpy functions accept both the object syntax and the plain function syntax.
# + jupyter={"outputs_hidden": true}
# Addition / subtraction
v1 = v1+ 3 # or v1 += 3: matrix + scalar
# the change applies to every value of v1
# NB: subtraction works the same way
# multiplication:
# BEWARE of *
m1 = np.ones((10,1)) * np.array([1,2,3]) # careful: broadcasting — (10,1)*(3,) expands to (10,3)
m2 = np.ones((10,3)) * 2 # multiplication by a scalar
m3 = m1 * m2; # element-wise multiplication
# use .dot => always a true matrix product
m1 = np.ones((10,1)).dot(np.array([[1,2,3]])) # much better: no ambiguity!
# + jupyter={"outputs_hidden": false}
# finding the min of a matrix
m1.min() # object syntax
np.min(m1) # alternative function syntax
# min vs argmin (value of the minimum vs its flat index)
m1.argmin()
# working row-wise / column-wise
m1 = np.random.rand(3,4)
# array([[ 0.77846102, 0.22597046, 0.217657 , 0.28958186],
#        [ 0.02133707, 0.03258567, 0.81939161, 0.2834734 ],
#        [ 0.92120271, 0.68409416, 0.24285983, 0.61582659]])
m1.argmin() # 4
m1.argmin(0) # array([1, 1, 0, 1])
m1.argmin(1) # array([2, 0, 2])
# rounding
np.round(m1)
np.ceil(m1)
np.floor(m1)
# sorting
np.sort(m1) # row by row
np.sort(m1,0) # column by column
np.sort(m1,1) # row by row
# basic statistics
m1.mean() # 0.427 -> over the whole matrix
m1.mean(0) # array([ 0.57366693, 0.31421676, 0.42663615, 0.39629395])
# column by column
m1.mean(1) # row by row
# m1.std...
# m1.sum...
# m1.prod...
# m1.cumsum...
# -
# ### Jouons avec les minima
#
# Gestion particulière du minimum: on a souvent besoin de retourner la valeur minimum parmi 2. En C/JAVA/Matlab, cela est réalisé avec min... Pas en python! => minimum
# + jupyter={"outputs_hidden": true}
# minimum of two scalars
np.minimum(2,3) # 2
# element-wise minimum of two matrices
# FIX: the original called random.rand, but only numpy is imported (as np),
# so it raised a NameError; np.random.rand is the intended call.
m1 = np.random.rand(3,4)
m2 = np.random.rand(3,4)
np.minimum(m1,m2) # 3x4 matrix holding the element-wise minima
# between a matrix and a scalar: useful for thresholding (capping values)
np.minimum(m1,0.5)
# array([[ 0.5       ,  0.22597046,  0.217657  ,  0.28958186],
#        [ 0.02133707,  0.03258567,  0.5       ,  0.2834734 ],
#        [ 0.5       ,  0.5       ,  0.24285983,  0.5       ]])
# -
# ## boucles avancées (bien pratiques)
# + jupyter={"outputs_hidden": true}
v0 =np.arange(10)
v1 = np.random.rand(10)
# zip iterates over two sequences in lockstep
for val0, val1 in zip(v0, v1):
    print('indice ',val0, ' et valeur associée ', val1)
# note: the same result could be obtained with enumerate:
for i, val in enumerate(v1):
    print('indice ',i, ' et valeur associée ', val)
# -
# ## Tests en bloc
# Exercice intéressant pour deux raisons
# 1. connaitre cette syntaxe particulière
# 1. comprendre les messages d'erreur lorsqu'on essaie de faire des tests sur une matrice sans ces instructions
# + jupyter={"outputs_hidden": true}
# Small test matrix used to demonstrate whole-array boolean tests.
m = np.array([[1, 2], [3, 4]])
# .all(): True only if every entry satisfies the condition
print("(1) sup to 1" if (m > 1).all() else "(1) NOT sup to 1")
# .any(): True as soon as one entry satisfies the condition
print("(2) sup to 1" if (m > 1).any() else "(2) NOT sup to 1")
# -
# ## Fonctions et vectorisation des fonctions de base
# Il est évidemment possible de définir des fonctions prenant des structures numpy en argument. Mais il est aussi possible de *vectoriser* une fonction qui n'était pas prévue pour fonctionner sur des matrices. Il s'agit d'une nouvelle manière d'éviter les boucles.
# + jupyter={"outputs_hidden": false}
def theta(x): # classic scalar signature
    """
    Scalar implementation of the Heaviside step function.
    """
    return 1 if x >= 0 else 0
# np.vectorize lifts the scalar function to arrays (a function on functions)
theta_vec = np.vectorize(theta)
res = theta_vec(np.array([-3,-2,-1,0,1,2,3]))
print(res) # [0 0 0 1 1 1 1]
# -
# ## Vérification de l'état de la mémoire
# + jupyter={"outputs_hidden": true}
# dir() => also returns environment names, so filtering is needed:
print([s for s in dir() if '_' not in s])
# to get the type of each name:
print([(s,eval('type({})'.format(s))) for s in dir() if '_' not in s])
# the who and whos commands are elegant but only work in ipython
# -
# ## Sauvegarde / chargement depuis numpy
# + jupyter={"outputs_hidden": true}
np.savetxt("random-matrix.txt", m5)
# resulting file:
# 1.000000000000000000e+00 2.000000000000000000e+00
# 3.000000000000000000e+00 4.000000000000000000e+00
np.savetxt("random-matrix.csv", m5, fmt='%.5f', delimiter=',')
# resulting file:
# 1.00000,2.00000
# 3.00000,4.00000
# -
# ## De numpy à python, usage de pickle
#
# loadtxt/savetxt: idéal pour numpy...
# * Chargement/sauvegarde des matrices, format lisible de l'extérieur si besoin
# * Echanges possibles avec d'autres langages: matlab, JAVA...
# ... Mais pour le python en général, on préfère pickle
# * Serialization généralisé: pour les valeurs, les objets (dont les matrices), les listes, les dictionnaires...
# * Très facile à utiliser
# * Utilisé par tout le monde en python... Donc à connaitre
#
# + jupyter={"outputs_hidden": true}
import pickle as pkl # required before use
# saving a dictionary (pickle serializes values, objects, lists, dicts...)
pkl.dump({"m1":m1, "m2":m2}, open("deuxmatrices.pkl","wb"))
# loading data back
data = pkl.load(open('deuxmatrices.pkl','rb')) # careful: pass a file object + read mode (not just a file name)
print(data['m1']) # standard dictionary access
# -
# # Exercices de synthèse
# ### Génération de données
#
# Nous souhaitons créer une matrice 10x3 dont la première colonne contient les indices 1 à 10 dans l'ordre. La seconde colonne contiendra des nombres aléatoires entre 0 et 1. La troisième colonne ne contiendra que des 0.
# Vous ajouterez ensuite une ligne en haut de la matrice contenant les indices de colonne 1 à 3.
# NB: vous pouvez créer des matrices dans des matrices, c'est-à-dire faire appel à des fonctions dans les [].
#
# Exemple de résultat possible:
#
# 1.00000 2.00000 3.00000
# 1.00000 0.03479 0.00000
# 2.00000 0.66074 0.00000
# 3.00000 0.15187 0.00000
# 4.00000 0.03640 0.00000
# 5.00000 0.62497 0.00000
# 6.00000 0.54774 0.00000
# 7.00000 0.68919 0.00000
# 8.00000 0.86146 0.00000
# 9.00000 0.72030 0.00000
# 10.0000 0.84590 0.00000
#
# + jupyter={"outputs_hidden": true}
# a vous de jouer !
# -
# ### Récupération de données 'réelles'
#
# Soit le jeu de données suivant:
#
# 14.5 8.5
# 15.5 8.5
# 9 14.5
# 9.5 15.5
# 11 9.5
# 3.5 6
# 11.5 11
# 8.5 5.5
# 3 2
# 17 12
# 6 13
# 10 12.5
# 10 4
# 11.5 5.5
# 13.5 8
#
# 1. Copier ces valeurs (notes d'une classe sur deux épreuves) dans un fichier 'college.dat'
# 2. Importer ces valeurs dans une matrice numpy
# + jupyter={"outputs_hidden": true}
# -
# ### Génération de notes
#
# Nous souhaitons générer aléatoirement les notes de la question précédente sachant que:
# * Le nombre d'élèves est n=15
# * Les notes sont tirées selon une loi gaussienne d'écart-type 4.
# * Utiliser la commande np.random.randn pour générer les tirages puis multiplier par l'écart-type.
# * La première épreuve à une moyenne approximative de 10, la seconde, une moyenne de 8 (on veut donc décaler la première colonne de 10 et la seconde de 8)
# * On veut être sûr que les notes sont supérieures ou égales à 0 (utiliser maximum)
# * On veut être sûr que les notes sont inférieures ou égales à 20 (utiliser minimum)
# * On veut des notes entières (utiliser round)
# + jupyter={"outputs_hidden": true}
# -
# ### Traduction de matrice
# * Générer une matrice aléatoire `m` de taille (15,2) contenant des indices aléatoires entre 0 et 3.
# * Construire le dictionnaire `dico` {1:'titi', 2:'toto', ...}
# * La méthode `get` du dictionnaire permet de traduire une valeur de `m`
# * Utiliser la commande `np.vectorize` pour traduire en une ligne et sans boucle toute la matrice `m` en une matrice `mtxt`
# + jupyter={"outputs_hidden": true}
| S1/MAPSI/TME/TME1/Tuto_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lovely-Rita EDA
#
# ### Notebook by [<NAME>](https://github.com/Atomahawk)
# #### [OpenOakland - Code for America](http://openoakland.org/)
#
# Phases:
# - I. Exploratory
# - II. Display information/ pull out learnings
# - III. Create tool for other people to use (Flask)
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Lovely-Rita-EDA" data-toc-modified-id="Lovely-Rita-EDA-1"><span class="toc-item-num">1 </span>Lovely-Rita EDA</a></div><div class="lev3 toc-item"><a href="#Notebook-by-Andrew-Tom" data-toc-modified-id="Notebook-by-Andrew-Tom-101"><span class="toc-item-num">1.0.1 </span>Notebook by <a href="https://github.com/Atomahawk" target="_blank"><NAME></a></a></div><div class="lev2 toc-item"><a href="#The-Specs" data-toc-modified-id="The-Specs-11"><span class="toc-item-num">1.1 </span>The Specs</a></div><div class="lev2 toc-item"><a href="#Required-Libraries-(Boilerplate-documentation)" data-toc-modified-id="Required-Libraries-(Boilerplate-documentation)-12"><span class="toc-item-num">1.2 </span>Required Libraries (Boilerplate documentation)</a></div><div class="lev2 toc-item"><a href="#Step-1:-Problem-Domain-&-Answering-the-question" data-toc-modified-id="Step-1:-Problem-Domain-&-Answering-the-question-13"><span class="toc-item-num">1.3 </span>Step 1: Problem Domain & Answering the question</a></div><div class="lev2 toc-item"><a href="#Step-2:-Checking-the-data" data-toc-modified-id="Step-2:-Checking-the-data-14"><span class="toc-item-num">1.4 </span>Step 2: Checking the data</a></div><div class="lev3 toc-item"><a href="#Filter-columns" data-toc-modified-id="Filter-columns-141"><span class="toc-item-num">1.4.1 </span>Filter columns</a></div><div class="lev2 toc-item"><a href="#Step-3:-Tidying-the-data" data-toc-modified-id="Step-3:-Tidying-the-data-15"><span class="toc-item-num">1.5 </span>Step 3: Tidying the data</a></div><div class="lev3 toc-item"><a href="#Check-for-Nulls" data-toc-modified-id="Check-for-Nulls-151"><span class="toc-item-num">1.5.1 </span>Check for Nulls</a></div><div class="lev3 toc-item"><a href="#Make-fine_amount-Integers" data-toc-modified-id="Make-fine_amount-Integers-152"><span class="toc-item-num">1.5.2 </span>Make <code>fine_amount</code> Integers</a></div><div class="lev3 toc-item"><a 
href="#[IN-PROGRESS]-Standardize-datetimes" data-toc-modified-id="[IN-PROGRESS]-Standardize-datetimes-153"><span class="toc-item-num">1.5.3 </span>[IN PROGRESS] Standardize datetimes</a></div><div class="lev3 toc-item"><a href="#Export-cleaned-data-to-reference-later" data-toc-modified-id="Export-cleaned-data-to-reference-later-154"><span class="toc-item-num">1.5.4 </span>Export cleaned data to reference later</a></div><div class="lev2 toc-item"><a href="#Step-4:-Exploratory-analysis" data-toc-modified-id="Step-4:-Exploratory-analysis-16"><span class="toc-item-num">1.6 </span>Step 4: Exploratory analysis</a></div><div class="lev3 toc-item"><a href="#Citations-by-Violation" data-toc-modified-id="Citations-by-Violation-161"><span class="toc-item-num">1.6.1 </span>Citations by Violation</a></div><div class="lev3 toc-item"><a href="#Officer-Citations" data-toc-modified-id="Officer-Citations-162"><span class="toc-item-num">1.6.2 </span>Officer Citations</a></div><div class="lev3 toc-item"><a href="#Citations-by-Zone" data-toc-modified-id="Citations-by-Zone-163"><span class="toc-item-num">1.6.3 </span>Citations by Zone</a></div><div class="lev3 toc-item"><a href="#Findings" data-toc-modified-id="Findings-164"><span class="toc-item-num">1.6.4 </span>Findings</a></div><div class="lev1 toc-item"><a href="#TO-DO" data-toc-modified-id="TO-DO-2"><span class="toc-item-num">2 </span>TO DO</a></div><div class="lev2 toc-item"><a href="#[IN-PROGRESS]-Step-5:-Data-Visualization" data-toc-modified-id="[IN-PROGRESS]-Step-5:-Data-Visualization-21"><span class="toc-item-num">2.1 </span>[IN PROGRESS] Step 5: Data Visualization</a></div><div class="lev3 toc-item"><a href="#These-don't-work...yet" data-toc-modified-id="These-don't-work...yet-211"><span class="toc-item-num">2.1.1 </span>These don't work...yet</a></div>
# -
# ## The Specs
#
# [<NAME>](http://sebastianraschka.com/) created a handy [notebook tool](https://github.com/rasbt/watermark) for documenting what software and hardware we used to perform our analysis. The output is below
# %load_ext watermark
# %watermark -a '<NAME>' -nmv --packages numpy,pandas,pandas_profiling,matplotlib,seaborn
# ## Required Libraries (Boilerplate documentation)
#
# If you don't have Python on your computer, you can use the [Anaconda Python distribution](http://continuum.io/downloads) to install most of the Python packages you need. Anaconda provides a simple double-click installer for your convenience.
#
# This notebook uses several Python packages that come standard with the Anaconda Python distribution. The primary libraries that we'll be using are:
#
# * **NumPy**: Provides a fast numerical array structure and helper functions.
# * **pandas**: Provides a DataFrame structure to store data in memory and work with it easily and efficiently.
# * **scikit-learn**: The essential Machine Learning package in Python.
# * **matplotlib**: Basic plotting library in Python; most other Python plotting libraries are built on top of it.
# * **Seaborn**: Advanced statistical plotting library.
#
# To make sure you have all of the packages you need, install them with `conda`:
#
# conda install numpy pandas scikit-learn matplotlib seaborn
#
# `conda` may ask you to update some of them if you don't have the most recent version. Allow it to do so.
# +
import numpy as np
import pandas as pd
import time
import datetime as dt
import matplotlib.pyplot as plt
import pandas_profiling
import seaborn as sns
import pandas
# %matplotlib inline
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
# -
# ## Step 1: Problem Domain & Answering the question
#
# [[ go back to the top ]](#Table-of-Contents)
#
# Here is defined the question or problem we're looking to solve, and an outcome (or set of outcomes) for our success at solving that task.
#
# **Did you specify the type of data analytic question (e.g. exploration, association causality) before touching the data?**
#
# > We're looking to explore Oakland citation data to reveal any insights or findings relevant for stakeholders and the general public.
#
# **Did you define the metric for success before beginning?**
#
# > - Leading measure of success: any products (interactive visualizations, informative website, or infographic) developed and released to the general public to better communicate these results.
# > - Lagging measure of success: high engagement on information products via web traffic, views/shares, or stakeholder interest / meetings.
# > - Lagging measure of success: parking policies or enforcement policies altered as a result of our analysis.
#
# **Did you understand the context for the question and the policy application?**
#
#
# [](https://youtu.be/vbG7eAe8ZOQ "Watch the video from the International Parking Institute")
#
# > Citations are a consequence of parking mismangement:
# > - poor accessibility to urban spaces,
# > - obstructions to public works operations like street sweepers,
# > - and miscommunication between city planners and citizens over competing priorities.
#
# > Citations themselves are a hindrance - both to residents that struggle or fail to pay on time, and to cities that must respect due process and are overwhelmed by the review of hundreds of appealed citations. What results are poor resident and visitor experiences at best, and animosity and contempt at worst. Parking is about access management, and working to minimize citations city-wide is central to the experience of residents and visitors that enjoy shared spaces around the City of Oakland. Minimizing citations is also part of a larger effort to achieve community and economic development goals.
#
# **Did you consider whether the question could be answered with the available data?**
#
# > Initial examination of the 2016 data yielded the following limitations:
# > - Only about 40% of the data is geo-encoded
# ## Step 2: Checking the data
#
# [[ go back to the top ]](#Table-of-Contents)
#
# The next step is to look at the data we're working with. Even curated data sets from the government can have errors in them, and it's vital that we spot these errors before investing too much time in our analysis.
#
# Generally, we're looking to answer the following questions:
#
# * Is there anything wrong with the data?
# * Are there any quirks with the data?
# * Do I need to fix or remove any of the data?
#
# Let's start by reading the data into a pandas DataFrame.
filename = '/Users/eastblue/ds/CivicTech/OpenOakland/Archive/2016complete-output.csv'
def download(file):
    """Read the citation CSV at *file* into a DataFrame, printing load timing.

    'Ticket Issue Date' and 'Ticket Issue Time' are parsed as datetimes,
    and the placeholder values '0' and 'Unknown' are treated as missing.
    """
    start = time.time()
    print(start)
    # edited for NA values, and datetime objects
    frame = pd.read_csv(file, parse_dates=['Ticket Issue Date', 'Ticket Issue Time'], na_values=['0', 'Unknown'], infer_datetime_format=True, low_memory=False)
    stop = time.time()
    print(stop)
    elapsed = stop - start
    print('elapsed: ', elapsed, 'seconds')
    return frame
# %%time
df = download(filename)
df[:3]
# standardize column names: strip brackets and turn spaces into underscores
# so columns are attribute-friendly.
# regex=False: '[' and ']' are regex metacharacters; without it, pandas
# versions whose str.replace defaults to regex=True raise a regex error.
df.columns = df.columns.str.replace('[', '', regex=False)
df.columns = df.columns.str.replace(']', '', regex=False)
df.columns = df.columns.str.replace(' ', '_', regex=False)
# 68 original columns
# 10 are empty
#
df.info()
# ### Filter columns
df.tail(3)
# +
# drop 10 empty columns
df.drop(['Unnamed:_13', 'addressee', 'delivery_line_2', 'ews_match', 'suitelink_match', 'urbanization', 'extra_secondary_number', 'extra_secondary_designator', 'pmb_designator', 'pmb_number'], axis = 1, inplace=True)
#drop empty last row
# NOTE(review): the row label 326625 is hard-coded to this specific 2016 export
df.drop([326625], axis=0, inplace=True)
# -
# pull only the columns of interest
# (Uppercase names come from the citation data, lowercase ones from the geocoder output)
citation_cols = ['street', 'city', 'state', 'Ticket_Issue_Date',
                 'Ticket_Issue_Time', 'Violation_External_Code', 'Violation_Desc_Long',
                 'Street_No', 'Street_Name', 'Street_Suffix', 'Fine_Amount', 'Badge_#',
                 'sequence', 'summary', 'delivery_line_1', 'city_name',
                 'state_abbreviation', 'full_zipcode', 'notes', 'county_name', 'rdi',
                 'latitude', 'longitude']
df = df[citation_cols]
# Uppercase column names from citation data, lowercase names from UPS delivery data
# df.columns
# +
# reconcile addresses
# Drop the geocoder output columns that duplicate the citation address fields.
df = df.drop(['sequence','delivery_line_1', 'city_name', 'state_abbreviation'], axis = 1)
# Note: I can't throw df.fullzipcode because not all locations were a match _. results in NAN
# Build a single lower-cased "street, Oakland, CA" address string per citation.
df['street'] = df.street.str.lower() + ', ' + 'Oakland' + ', ' + 'CA'
df.rename(columns={'street': 'citation_address'}, inplace=True)
# The individual address components are now redundant.
df = df.drop(['city', 'state', 'Street_No', 'Street_Name', 'Street_Suffix'], axis = 1)
# rename zone & match columns
# 'rdi' (residential delivery indicator) becomes 'zone'; 'summary' becomes 'match'.
df.rename(columns={'rdi': 'zone'}, inplace=True)
df.rename(columns={'summary': 'match'}, inplace=True)
df['citation_address'][:3]
# -
# Normalise all remaining column names to lowercase.
df.columns = df.columns.str.lower()
df.columns
# ## Step 3: Tidying the data
#
# [[ go back to the top ]](#Table-of-Contents)
#
# Now that we've identified several errors in the data set, we need to fix them before we proceed with the analysis.
#
# Let's walk through the issues one-by-one.
# ### Check for Nulls
# Number of missing - You can investigate these more later
for col in list(df.columns) :
k = sum(pd.isnull(df[col]))
print(col, '{} nulls'.format(k))
# +
# Fill unknown zones with 'unknown'
df.zone.fillna('Unknown', inplace=True)
df.county_name.fillna('Alameda', inplace=True)
# -
df.zone.unique()
for col in list(df.columns) :
k = sum(pd.isnull(df[col]))
print(col, '{} nulls'.format(k))
# ### Make `fine_amount` Integers
# - in retrospect, try this: https://medium.com/towards-data-science/5-methods-to-remove-the-from-your-data-in-python-and-the-fastest-one-281489382455
df.fine_amount.unique()
# +
def string_dollar_to_float(value):
    """Convert a currency string like '$58' (or a bare number) to a float.

    Generalized to also tolerate surrounding whitespace and thousands
    separators (e.g. ' $1,234.50 ').  Returns None when the value cannot
    be parsed, which .apply() then surfaces as NaN — same contract as the
    original implementation.
    """
    stripped = str(value).strip().strip('$').replace(',', '')
    try:
        return float(stripped)
    except ValueError:
        return None
df['fine_amount'] = df['fine_amount'].apply(string_dollar_to_float)
# -
max(df['fine_amount']), min(df['fine_amount'])
# ### [IN PROGRESS] Standardize datetimes
# +
# originally trying to make a unified datetime...or something
# idx = pd.DatetimeIndex(df['ticket_issue_date'])
# df['date'] = idx.date
# idx2 = pd.DatetimeIndex(df['ticket_issue_date'])
# df['time'] = idx2.time
# -
# #### NTS: You were trying to get this to work.
# +
# new_list = []
# def make_datetime(a, b)
# for i in range(len(a)):
# try:
# return dt.datetime.combine(a[i], b[i]))
# except TypeError:
# return None
# df['datetime'] = pd.Series(new_list)
# # df['datetime'] = dt.datetime.combine(idx.date, idx2.time)
# -
# ### Export cleaned data to reference later
# +
df.to_csv('citation-data-clean.csv', index=False)
# READ IT IN USING
# df = pd.read_csv('citation-data-clean.csv')
# -
# ## Step 4: Exploratory analysis
#
# [[ go back to the top ]](#Table-of-Contents)
#
# Exploratory analysis is the step where we start delving deeper into the data set beyond the outliers and errors. We'll be looking to answer questions such as:
#
# * How is my data distributed?
#
# * Are there any correlations in my data?
#
# * Are there any confounding factors that explain these correlations?
#
# Charts for internal use. Save figures with `plt.savefig("image.png")`.
# +
geolocated = df.copy()
geolocated.dropna(inplace=True)
print('Total number of citations: {}'.format(df.shape[0]))
print('Data retention after null omission: {:.2f}%'.format(100*geolocated.shape[0]/df.shape[0]))
# Bug fix: `idx` was only defined in a commented-out cell above, so this cell
# raised a NameError.  Rebuild the DatetimeIndex from the cleaned column here.
idx = pd.DatetimeIndex(df['ticket_issue_date'])
total_days = (idx.max() - idx.min()).days
print('Total days:', total_days)
# I'm only dropping data here for convenience, keeping only fully completed records results in 86% data retention (a 9% data loss). Eventually, we want to better reconcile null values.
# -
df.describe(include=['object'])
df.describe(include=['float'])
df.describe(include=['datetime'])
# ### Citations by Violation
# +
# Common citation violations from parking during certain hours and expired meters
fig = df['violation_desc_long'].value_counts()[:10].plot('barh') #title='Top Citations by Violation')
plt.savefig("by_violation_desc.png")
# -
hrs_violation = df[df['violation_desc_long']=='NO PARK CERTAIN HRS']
meter_violation = df[df['violation_desc_long']=='EXPIRED METER']
display_violation = df[df['violation_desc_long']=='NON DISP PKG RECEIPT']
red_zone_violation = df[df['violation_desc_long']=='NO PARKING RED ZONE']
resi_violation = df[df['violation_desc_long']=='RESIDENTIAL PARKING']
print('Num of citations, ','Cost of Citations')
print(len(hrs_violation.fine_amount), sorted(hrs_violation.fine_amount.unique()))
print(len(meter_violation.fine_amount), meter_violation.fine_amount.unique())
print(len(display_violation.fine_amount), display_violation.fine_amount.unique())
print(len(red_zone_violation.fine_amount), sorted(red_zone_violation.fine_amount.unique()))
print(len(resi_violation.fine_amount), resi_violation.fine_amount.unique())
# ### Officer Citations
print(len(df['badge_#'].unique()), 'individual traffic officers.')
# +
officer_citations = pd.DataFrame(df['badge_#'].value_counts())
fig2 = officer_citations['badge_#'][:10].plot('barh', title='Top Citations by Officer Badge Number')
# rects = ax.patches
# # Now make some labels
# labels = ["label%d" % i for i in range(len(rects))]
# for rect, label in zip(rects, labels):
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
# -
print('Legend for the above:\n', officer_citations['badge_#'][:10])
def describe(df, col):
    """Return ``df[col].describe()`` with the '50%' row relabelled 'median'."""
    stats = df[col].describe()
    relabelled = stats.index.tolist()
    # Position 5 of the numeric describe() output is the 50% quantile.
    relabelled[5] = 'median'
    stats.index = relabelled
    return stats
describe(officer_citations, 'badge_#')
# Officer citations are wayyy skewed
sns.countplot(x='badge_#', data=df)
test = officer_citations[officer_citations['badge_#'] > 2000]
61 / 656, sum(test['badge_#']) / 326625
# Label fix: quantile(0.85) is the 85th percentile, not the 97.5th.
print('85th percentile: ', officer_citations['badge_#'].quantile(0.85))
# NOTE(review): the line below looks like leftover narration from a different
# (income) tutorial dataset; verify before quoting it anywhere.
print('Two standard deviations above the median, and the cutoff income is $175,000')
# ### Citations by Zone
# +
zoneinfo = df.zone.value_counts()
# value_counts() sorts by frequency; per the Findings section below the order
# for this dataset is Residential > Unknown > Commercial.
print('The percentage of citations in residential areas: {:.2f} % of {} observations.'.format((100*zoneinfo[0] / df.shape[0]), df.shape[0]))
# Label fix: index 1 is the *Unknown* zone count, not residential.
print('The percentage of citations in unknown areas: {:.2f} %'
      .format(100*zoneinfo[1] / df.shape[0]))
# Label fix: index 2 is the *Commercial* zone count, not unknown.
print('The percentage of citations in commercial areas: {:.2f} %'
      .format(100*zoneinfo[2] / df.shape[0]))
# Geolocation exists for rows whose zone is known, i.e. Residential +
# Commercial (indices 0 and 2); the original summed in the Unknown bucket.
print('We have geolocation data (lat, long, zone, county, fullzip) for {:.2f} % of the data.'
      .format(100*(zoneinfo[0] + zoneinfo[2]) / df.shape[0]))
# -
# Common citation violations in residential over commercial areas
fig3 = df['zone'].value_counts().plot('barh', title='Citations by Zone Category')
df['zone'].value_counts()
# ### Findings
# 1. '1 airport drive' is the number 1 location for citations, receiving 2138 in 2016.
# 2. 174891 'No parking during certain hours' violations (\$66 - \$83) are the most common (Code 10.28.240).
# - Reasons could include: easiest to cite, poor signage, lack of sufficient (free) residential parking (so residents park on the street).
# - Other top violations:
# - 174891 'No parking during certain hours' violations (\$66 - \$83)
# - 36214 expired meter parking citations (\$58)
# - 25656 non-display of parking receipt (\$58)
# - 16659 red zone parking (\$66 - \$171)
# - 11296 residential parking violations (\$83)
# 3. Officer 11 issues the most citations, nearly 2000 more than the next-highest officer. You had to issue more than 6960 citations in 2016 to land in the top 10.
# - The force is clearly partitioned into those who regularly issue parking citations, and those who clearly don't.
# - According to the 5-number summary, 75% of the force issues less than 14 citations.
# - Only 97 officers issue more than 60 citations/yr
# - Only 61 officers issue more than 2000 citations/yr
# - 9\% (61) of the officers issue 95% (308,711) of the citations.
# - Reasons could include: enforcement abuse, erroneous enforcement, recurring offenders along similar routes, high density of violations (i.e. airport, parking garages, event parking areas, etc.)
# 4. Most citations occurred in residential areas (135,480). There were 100,986 citations in unknown areas, and 90,159 citations in commercial areas.
# - Reasons could include: citations are happening in the dead of night, people forget to move their cars in the daytime
#
# ***Need to clean up the timeseries data to be sure***
#
# # TO DO
#
# Investigate:
# - geographic distribution of violations
# - time pattern of violations in oakland
# - transform the datetime objects
# - consolidate dates
# - consolidate and transform times
# - investigate time of day questions
# - upload to anaconda cloud
# - import into tableau
# - work on **presentation slide deck for tonight!!!**
#
# at cfa:
# - upload to github
# - get feedback on presentation
# ## [IN PROGRESS] Step 5: Data Visualization
# [[ go back to the top ]](#Table-of-Contents)
#
# - Tableau! Or plotly...
# - Edit: Definitely going with Tableau Public. Plotly has to pay and has a limit of API limit calls
# - Maybe we'll even ship a d3 vis once we replicate this on all the data.
# - inspiration: usaspending.gov
# ### These don't work...yet
# +
# pandas_profiling.ProfileReport(df)
# +
# this probably isn't the right plot...
# sns.pairplot(df['violation_desc_long', 'fine_amount', hue='zone')
# -
| notebooks/Lovely-Rita.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.0 64-bit
# name: python3
# ---
# ## Importing libraries & getting data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('./dataset/train.csv')
data.head()
data.info()
# ## Cleaning the Data
data.columns
# Drop identifier and free-text columns that will not be used as features.
columns_to_drop = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked']
clean_data = data.drop(columns_to_drop , axis=1)
clean_data.head()
# ## Encoding
from sklearn.preprocessing import LabelEncoder
# Encode the 'Sex' string column as integers (0/1).
le = LabelEncoder()
clean_data["Sex"] = le.fit_transform(clean_data["Sex"])
clean_data.head()
clean_data.info()
# ## Handling missing values
sns.heatmap(clean_data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
# +
# age has only 714 entries instead of 891 i.e it consists of missing values
# NOTE(review): this fills *every* column with the Age mean; after the drops
# above, Age is presumably the only column with NaNs so the result is the same
# as filling Age alone — confirm, or target clean_data['Age'] explicitly.
clean_data= clean_data.fillna(clean_data['Age'].mean())
sns.heatmap(clean_data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
# -
# ## Selecting Features
# +
input_cols = [ 'Pclass', 'Sex', 'Age', 'SibSp','Parch', 'Fare']
output_cols = ['Survived']
X = clean_data[input_cols]
y = clean_data[output_cols]
# -
X.shape , y.shape
# ## Entropy
def entropy(col):
    """Return the Shannon entropy (base 2) of the label array *col*.

    Parameters
    ----------
    col : np.ndarray or pd.Series
        1-D array of discrete labels.

    Returns
    -------
    float
        Entropy in bits; 0.0 for a pure (single-valued) column.
    """
    # Only the counts are needed; the original bound the unique values to an
    # unused variable and accumulated in a Python loop.
    _, counts = np.unique(col, return_counts=True)
    # np.unique never reports zero counts, so log2 is always defined.
    probs = counts / float(col.shape[0])
    return float(-np.sum(probs * np.log2(probs)))
col = np.array([4, 4, 4, 3, 3, 32, 3, 4, 1])
entropy(col)
# ## Information Gain
#
# ### --> Note: We are making a Binary Tree, hence split node into 2.
#
# ### --> CONDITION : if a person will buy ps5 or not. Lets say split this across salaries, then fkey = Salaries.
#
# ### --> say you want to split like : salary < 10 lac (left child) & sal > 10 lac (right child) ,then fval = 10
def divide_data(x_data, key, f_val):
    """Split *x_data* into (left_child, right_child) on feature *key*.

    Rows with ``x_data[key] > f_val`` go to the right child, the rest to
    the left child.  Row order, index and dtypes are preserved.

    The original built the children one row at a time with
    ``DataFrame.append`` (removed in pandas 2.0) and mixed positional
    (``iloc``) lookups with label-based (``loc``) appends, which is wrong
    for any non-default index.  Boolean masking is correct for every index
    and O(n).
    """
    mask = x_data[key] > f_val
    right_child = x_data[mask]
    left_child = x_data[~mask]
    return left_child, right_child
def info_gain(x_data, key, f_val):
    """Information gain of splitting *x_data* on feature *key* at threshold
    *f_val*, measured against the 'Survived' label column."""
    left, right = divide_data(x_data, key, f_val)
    total = x_data.shape[0]
    # Fraction of rows landing in each child.
    w_left = float(left.shape[0]) / total
    w_right = float(right.shape[0]) / total
    parent_entropy = entropy(x_data.Survived)
    children_entropy = (w_left * entropy(left.Survived)
                        + w_right * entropy(right.Survived))
    return parent_entropy - children_entropy
# for feature in X.columns:
# print(feature)
# print(info_gain(clean_data , feature , clean_data[feature].mean()))
# Bug fix: train_test_split was used here before its import (which sat two
# cells below); import it in this cell so the notebook runs top-to-bottom.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# ## Decision Tree using Sklearn
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
tree = DecisionTreeClassifier(criterion='entropy' , max_depth=5)
tree.fit(X_train, y_train)
tree.predict(X_test[:10])
y_test[:10]
tree.score(X_test , y_test)
# ## Custom Implementation of DT
# +
class CustomDecisionTree:
    """Binary decision tree classifier for the Titanic data.

    Each internal node splits one feature at its mean value, choosing the
    feature with the highest information gain (via the notebook-level
    ``info_gain`` / ``divide_data`` helpers).  Leaves predict the string
    "Survived" or "Dead".
    """
    def __init__(self, depth=0, max_depth=5):
        # Children are filled in by fit(); both None marks a leaf.
        self.left = None
        self.right = None
        self.key = None      # feature this node splits on
        self.f_val = None    # split threshold (mean of that feature)
        self.max_depth = max_depth
        self.depth = depth
        self.target = None   # leaf prediction ("Survived"/"Dead"); None on internal nodes
    def fit(self , X_train):
        """Recursively grow the tree on X_train (must contain 'Survived')."""
        features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
        info_gains = []
        # calculating info_gain for each feature
        for ix in features:
            i_gain = info_gain(X_train , ix , X_train[ix].mean())
            info_gains.append(i_gain)
        # selecting the feature with max info_gain
        self.key = features[np.argmax(info_gains)]
        self.f_val = X_train[self.key].mean()
        # Creating the Tree & splitting the data
        data_left , data_right = divide_data(X_train , self.key , self.f_val)
        # reset the index again from starting ,for each subpart
        data_left = data_left.reset_index(drop=True)
        data_right = data_right.reset_index(drop=True)
        # Base Case 1 --> reached leaf node (the split put every row on one side)
        if data_left.shape[0] == 0 or data_right.shape[0] == 0:
            if X_train.Survived.mean() >= 0.5 :
                self.target = "Survived"
            else:
                self.target = "Dead"
            return
        # Base Case 2 --> stop early when the depth >= max depth
        if self.depth >= self.max_depth:
            if X_train.Survived.mean() >= 0.5:
                self.target = "Survived"
            else:
                self.target = "Dead"
            return
        # calling recursion
        # Bug fix: max_depth is now forwarded to the children; previously they
        # silently fell back to the default of 5, so any non-default max_depth
        # was ignored below the root.
        self.left = CustomDecisionTree(depth=self.depth + 1, max_depth=self.max_depth)
        self.left.fit(data_left)
        self.right = CustomDecisionTree(depth=self.depth + 1, max_depth=self.max_depth)
        self.right.fit(data_right)
    def predict(self , test):
        """Classify a single row (Series-like) by walking the tree."""
        if test[self.key] > self.f_val:
            # go to right subtree
            if self.right is None:
                return self.target
            return self.right.predict(test)
        else:
            # go to left subtree
            if self.left is None:
                return self.target
            return self.left.predict(test)
# -
dt = CustomDecisionTree()
# +
# creating our own train-test split
split = int(0.7*clean_data.shape[0])
train_data = clean_data[:split]
test_data = clean_data[split:]
test_data = test_data.reset_index(drop=True)
# -
dt.fit(train_data)
y_predict = []
for i in range(test_data.shape[0]):
y_predict.append(dt.predict(test_data.loc[i]))
y_actual = test_data[output_cols]
y_predict[:10]
y_actual[:10]
data[split:][:10]
# ## Visualizing Decision Tree
import pydotplus
from six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import graphviz
# +
dot_data = StringIO()
export_graphviz(tree ,out_file=dot_data ,filled=True ,rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# saving the image
graph.write_png('titanic_decisiontree.png')
Image(graph.create_png())
| Custom Implementation of ML-algos/Decision Tree/decisiontree_custom_titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import syft as sy
import torch as th
from syft.lib.python.collections import OrderedDict
import collections
alice = sy.VirtualMachine()
alice_client = alice.get_root_client()
# +
# Linear
# Build an ordinary torch layer locally...
fc = th.nn.Linear(4,2)
# send
# ...then serialise it to the virtual machine; fc_ptr is a remote handle.
fc_ptr = fc.send(alice_client)
print(f"----fc_ptr----\n{fc_ptr}\n")
# remote call
# Calling the pointer runs the layer on the VM and returns a pointer to the result.
res_ptr = fc_ptr(th.rand([1,4]))
print(f"----res_ptr----\n{res_ptr}\n")
print(f"----res_ptr.get()----\n{res_ptr.get()}\n")
# remote update state dict
# Push fresh weights to the remote layer via a pointer to an OrderedDict.
sd2 = OrderedDict(th.nn.Linear(4,2).state_dict())
sd2_ptr = sd2.send(alice_client)
fc_ptr.load_state_dict(sd2_ptr)
# get
# Retrieving the layer should now show the weights we just loaded (== sd2).
print(f"----fc_ptr.get().state_dict()----\n{fc_ptr.get().state_dict()}\n")
print(f"----sd2----\n{sd2}\n")
# +
# ReLU
relu = th.nn.ReLU(inplace=True)
# send
relu_ptr = relu.send(alice_client)
print(f"----relu_ptr----\n{relu_ptr}\n")
# remote call
res_ptr = relu_ptr(th.rand([1,4]))
print(f"----res_ptr----\n{res_ptr}\n")
print(f"----res_ptr.get()----\n{res_ptr.get()}\n")
# get
print(f"----relu_ptr.get()----\n{relu_ptr.get()}\n")
# +
# Sequential
seq = th.nn.Sequential()
seq.add_module("fc1", th.nn.Linear(4,2))
seq.add_module("fc2", th.nn.Linear(2,1))
# send
seq_ptr = seq.send(alice_client)
print(f"----seq_ptr----\n{seq_ptr}\n")
# remote call
res_ptr = seq_ptr(th.rand([1,4]))
print(f"----res_ptr----\n{res_ptr}\n")
print(f"----res_ptr.get()----\n{res_ptr.get()}\n")
# remote update state dict
sd2 = OrderedDict(
th.nn.Sequential(
collections.OrderedDict([
("fc1", th.nn.Linear(4,2)),
("fc2", th.nn.Linear(2,1))
])
).state_dict()
)
sd2_ptr = sd2.send(alice_client)
seq_ptr.load_state_dict(sd2_ptr)
# get
print(f"----seq_ptr.get().state_dict()----\n{seq_ptr.get().state_dict()}\n")
print(f"----sd2----\n{sd2}\n")
# +
# user defined model
class M(th.nn.Module):
    """Tiny two-layer MLP used to test sending a user-defined module
    through a syft pointer."""
    def __init__(self):
        super(M, self).__init__()
        self.fc1 = th.nn.Linear(4,2)
        self.fc2 = th.nn.Linear(2,1)
    # NOTE(review): the unconventional signature is presumably deliberate for
    # syft's remote execution: the first parameter is the (possibly remote)
    # model rather than `self`, `th=th` captures the torch module as a
    # default, and `x=th.rand(4)` is evaluated once at class-definition
    # time — confirm against syft's module-serialisation rules.
    def forward(model, x=th.rand(4), th=th):
        x = model.fc1(x)
        x = model.fc2(x)
        return x
m = M()
# local call
x = th.rand(1,4)
print(f"----m(m)----\n{m(x)}\n")
# send
m_ptr = m.send(alice_client)
print(f"----m_ptr----\n{m_ptr}\n")
# remote call
x_ptr = x.send(alice_client)
print(f"----m_ptr(x=x_ptr)).get()----\n{m_ptr(x=x_ptr).get()}\n")
# remote update state dict
sd2 = OrderedDict(M().state_dict())
sd2_ptr = sd2.send(alice_client)
m_ptr.load_state_dict(sd2_ptr)
# get
m_get = m_ptr.get()
print(f"----m_get.state_dict()----\n{m_get.state_dict()}\n")
print(f"----sd2----\n{sd2}\n")
print(f"----type(m_get)----\n{type(m_get)}")
| examples/experimental/tongye/torch_nn_Module.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QuTiP Example: Quantum System Subject to to Coherent Feedback with Discrete Time-Delay
# [<NAME>](http://arnegrimsmo.weebly.com/) <br>
# Université de Sherbrooke <br>
# [<EMAIL>](email:<EMAIL>)
# $\newcommand{\ket}[1]{\left|#1\right\rangle}$
# $\newcommand{\bra}[1]{\left\langle#1\right|}$
# ## Introduction
# This notebook shows how to use the `memorycascade` module, one of the modules for non-Markovian systems in qutip. This module is an implementation of the method introduced in [Phys. Rev. Lett 115, 060402 (2015)](http://journals.aps.org/prl/abstract/10.1103/PhysRevLett.115.060402) ([arXiv link](http://arxiv.org/abs/1502.06959)) to integrate the dynamics of open quantum systems coupled to a coherent feedback loop with a time-delay.
#
# At the end of the notebook we also show how the `memorycascade` module can be used in conjunction with the `transfertensormethod` module.
#
# In this notebook we consider a paradigmatic quantum optics example of a system subject to coherent feedback, namely a two-level atom in front of a mirror. The setup is illustrated in the figure below:
#
# 
#
# An atom is placed a distance $l$ in front of a mirror. The incomming field on the left side, $b_{\text{in}}(t)$, we take to be a vacuum field. The field on the right side of the atom, i.e., the field between the atom and the mirror, creates a coherent feedback loop with time-delay $\tau = l/c$, where $c$ is the speed of light. The atom couples to the input field via a system operator $L_1$, and to the returning feedback field via a system operator $L_2$. We assume that an arbitrary phase shift, $\phi$, can be applied to the feedback field (e.g., there could be a phase-shifter placed in the loop [not shown]). In addition, there can be Markovian non-radiative decay, described by a rate $\gamma_{\rm nr}$. The red arrow denotes a classical drive field, assumed to couple to the atom via a side-channel.
# ## Preamble
# ### Imports
# +
import numpy as np
import scipy as sp
import qutip as qt
from qutip.ipynbtools import version_table
import qutip.nonmarkov.memorycascade as mc
# %matplotlib inline
import matplotlib.pyplot as plt
# -
# ## Problem setup
# +
gamma = 1.0  # coupling strength to feedback reservoir
gamma_nr = 0.2*gamma  # non-radiative decay
eps = 1.0*np.pi*gamma  # classical drive strength, eps/2 = Rabi frequency
delta = 0.  # detuning from the drive frequency
tau = np.pi/(eps)  # time-delay, chosen to exactly match the Rabi period due to the drive
print('tau=', tau)
phi = 1.0*np.pi  # phase shift in feedback loop
# Hamiltonian and jump operators
H_S = delta*qt.sigmap()*qt.sigmam() + 1j*eps*(qt.sigmam()-qt.sigmap())
# coupling at first port of feedback loop (into the loop)
# Fix: the NumPy aliases that used to live in the scipy namespace
# (sp.sqrt, sp.exp) were deprecated and have been removed from SciPy;
# use numpy directly.
L1 = np.sqrt(gamma)*qt.sigmam()
# coupling at second port of feedback loop (out of the loop)
L2 = np.exp(1j*phi)*L1
# Markovian decay channels
c_ops_markov = [np.sqrt(gamma_nr)*qt.sigmam()]
# initial state
rho0 = qt.ket2dm(qt.basis(2,0))  # atom starts in the excited state
# integration times
times = np.arange(0.0, 3.0*tau, 0.01*tau)
# -
# ## Memory cascade simulation
# The memory cascade method works by mapping the non-Markovian feedback problem onto a problem of $k$ identical cascaded quantum systems, where $(k-1)\tau < t < k\tau$ for a time $t$.
#
# To use the memory cascade method in qutip, first create a `MemoryCascade` object. The syntax is
#
# ````
# sim = MemoryCascade(H_S, L1, L2, S_matrix=None, c_ops_markov=None, integrator='propagator', parallel=False, options=None)
# ````
#
# where
#
# `H_S` is a system Hamiltonian (or a Liouvillian).
#
# `L1` and `L2` are either single system operators, or lists of system operators. `L1` couples the system into the feedback loop, and `L2` couples out of the loop. If `L1` and `L2` are lists, the optional argument `S_matrix` can be used to specify an $S$-matrix that determines which operator in `L1` couples to which operator in `L2` (note that `L1` and `L2` must have the same number of elements). By default `S_matrix` will be set to an $n \times n$ identity matrix, where n is the number of elements in `L1`/`L2`. Having multiple coupling operators into and out of the feedback loop can for example be used to describe composite systems emitting at multiple frequencies. The $S$-matrix can then be used to include, e.g., beam splitters mixing the different signals in the feedback loop.
#
# `c_ops_markov` is an optional list of additional Lindblad operators describing conventional Markovian noise channels, e.g., non-radiative decay.
#
# `integrator` is a string which can be either 'propagator' or 'mesolve', referring to which method will be used to integrate the dynamics. "propagator" tends to be faster for larger systems (longer times)
#
# `parallel` if set to True means the time-integration is parallelized. This is only implemented for `integrator='propagator'`
#
# `options` an instance of the `qutip.Options` class for generic solver options, used in internal calls to `qutip.propagator()`.
sim = mc.MemoryCascade(H_S, L1, L2, c_ops_markov=c_ops_markov, integrator='mesolve')
# ### Reduced atom dynamics
# To compute the reduced density matrix of the atom at time $t$ with time-delay $\tau$, simply call the method `rhot` of the `MemoryCascade` object.
# %time rho = [sim.rhot(rho0, t, tau) for t in times]
# Now lets plot the atomic inversion as a function of time:
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(times, qt.expect(qt.sigmaz(), rho), linewidth=3.0)
ax.set_xlabel(r'$\gamma t$', fontsize=20)
ax.set_ylabel(r'$\langle \sigma_z \rangle$', fontsize=20)
# ### Output field dynamics
# The `MemoryCascade` class also has a convenient method called `outfieldcorr` that allows you to compute any ouput field correlation function of the type
#
# $$
# \langle c_1(t_1) c_{2}(t_{2}) \dots c_n(t_n) \rangle
# $$
#
# where each $c_i(t_i)$ is one of $b_{\rm out}(t_i)$ or $b_{\rm out}^\dagger (t_i)$ (see the figure at the top). Below we use `outfieldcorr` to compute the photon number and the $g^{(2)}(0,t)$ correlation function of the output field.
#
# The syntax of `outfieldcorr` is
#
# ````
# outfieldcorr(rho0, blist, tlist, tau)
# ````
#
# where
#
# `rho0` is the atom's initial state
#
# `blist` is a list of integers specifying the operators $c_i$, where an entry of `1` means $b_{\rm out}$ and an entry of `2` means $b_{\rm out}^\dagger$. So, for example `blist = [1, 2, 2, 1]` means that we want to compute $\langle b_{\rm out}(t_1) b_{\rm out}^\dagger(t_2) b_{\rm out}^\dagger(t_3) b_{\rm out}(t_4)\rangle$.
#
# `tlist` is the corresponding list of times, $t_1, t_2, \dots, t_n$.
#
# `tau` is as usual the time-delay.
# #### Output field photon number
# %time bdb = [sim.outfieldcorr(rho0, [2, 1], [t, t], tau) for t in times]
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(times, bdb, linewidth=3.0)
ax.set_xlabel(r'$\gamma t$', fontsize=20)
ax.set_ylabel(r'$\langle b^\dagger b \rangle$', fontsize=20)
# #### Output field second order correlation function
# %time g2 = [sim.outfieldcorr(rho0, [2, 1, 2, 1], [times[i], times[i], 0., 0.], tau)/(bdb[0]*bdb[i]) for i in range(len(times))]
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(times, g2, linewidth=3.0)
ax.set_xlabel(r'$\gamma t$', fontsize=20)
ax.set_ylabel(r'$g^{(2)}(0,t)$', fontsize=20)
# ## Extrapolate to large times using the Transfer Tensor Method
# Since the memory cascade method maps the non-Markovian problem onto a chain of $k$ cascaded systems, where $(k-1)\tau < t < k\tau$, it is intractable for large times due to the exponential growth of the Hilbert space with $k$.
#
# A useful approach is therefore to use the memory cascade method in conjunction with the Transfer Tensor Method (TTM), implemented in qutip in the `transfertensormethod` module in the `nonmarkov` subpackage.
#
# The usage of the `transfertensormethod` module is discussed in more detail in the [example-transfer-tensor-method](example-transfer-tensor-method.ipynb) notebook.
import qutip.nonmarkov.transfertensor as ttm
# ### Construct a list of exact timepropagators to learn from
# The `MemoryCascade` class also has a method `propagator` that returns the time-propagator for the atom at a time $t$, i.e., the superoperator $\mathcal{E}(t)$ such that
#
# $$
# \rho(t) = \mathcal{E}(t)\rho(0),
# $$
#
# where $\rho(t)$ is the state of the atom. We compute a list of exact propagators $\mathcal{E}(t_k)$ for a set of "learning times" $t_k$, which we then use as input to the TTM.
learningtimes = np.arange(0, 3*tau, 0.1*tau) # short times to learn from
# %time learningmaps = [sim.propagator(t, tau) for t in learningtimes] # generate exact dynamical maps to learn from
# ### Compute approximate solution for long times using the TTM
longtimes = np.arange(0, 10*tau, 0.1*tau) # long times for extrapolation
# %time ttmsol = ttm.ttmsolve(learningmaps, rho0, longtimes) # extrapolate using TTM
# ### Plot and compare
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(times, qt.expect(qt.sigmaz(), rho), linewidth=3.0)
ax.plot(longtimes, qt.expect(qt.sigmaz(), ttmsol.states), '--k', linewidth=3.0)
ax.set_xlabel(r'$\gamma t$', fontsize=20)
ax.set_ylabel(r'$\langle \sigma_z \rangle$', fontsize=20)
# ## Discussion
# The above example shows how the memory cascade method can work well in conjunction with the TTM. The list of learning times necessary to get good results with the TTM will vary from problem to problem and from parameter set to parameter set. There is also no guarantee that the result is correct, but one can check convergence with increasing learning times.
version_table()
| qutip-notebooks-master/examples/nonmarkov-coherent-feedback.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Xdak5ueCSYdW" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
import copy
# + id="7hhDl51kTH94" colab_type="code" colab={}
torch.set_printoptions(precision=10)
manualSeed = 1
def set_seed(seed):
    """Seed every RNG in use (torch CPU/CUDA, numpy, stdlib random) and
    force deterministic cuDNN kernels so runs are reproducible."""
    # cuDNN: pick deterministic algorithms and disable auto-tuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Seed each random source explicitly, in the same order as before.
    for seeder in (
        torch.manual_seed,
        torch.cuda.manual_seed_all,
        np.random.seed,
        random.seed,
    ):
        seeder(seed)
set_seed(manualSeed)
# + id="A_CaNX_WVmCM" colab_type="code" colab={}
n = 2
coeff = [1,0,0]
orig_p = np.poly1d(coeff)
first, last = [-3, 2]
# + [markdown] id="9Mu4zvdHYKK7" colab_type="text"
# # Create Datapoints
# + id="e0SFX_eaX-5P" colab_type="code" colab={}
npoints = 10
def create_data(npoints):
    """Sample ``npoints`` uniform x values on [first, last] and evaluate the
    module-level target polynomial ``orig_p`` on them.

    Returns (xs, ys) as float tensors, each of shape (npoints, 1).
    """
    span = last - first
    raw_xs = span * np.random.random_sample((npoints,)) + first
    raw_ys = orig_p(raw_xs)
    ys = torch.tensor(raw_ys).float().view(npoints, 1)
    xs = torch.tensor(raw_xs).float().view(npoints, 1)
    return xs, ys
def create_data_cheb(d):
    """Build ``d`` Chebyshev-spaced nodes on [first, last] and evaluate the
    module-level target polynomial ``orig_p`` on them.

    Returns (xs, ys); xs is a float tensor of shape (d, 1), ys of shape (d,).
    """
    # cos(pi*i/(d-1)) for i = 0..d-1 gives the node layout on [-1, 1];
    # vectorised form of the original per-element loop.
    nodes = np.cos(np.pi * np.arange(d) / (d - 1))
    # Affine map from [-1, 1] onto [first, last].
    nodes = (last - first) * (nodes + 1) / 2.0 + first
    ys = torch.tensor(orig_p(nodes)).float()
    xs = torch.tensor(nodes).float().view(d, 1)
    return xs, ys
xs, ys = create_data_cheb(npoints)
# + id="4qiTTvzVYEnk" colab_type="code" outputId="49f57a2f-f1c4-426a-dafa-244da15ab5b9" executionInfo={"status": "ok", "timestamp": 1591664070280, "user_tz": 420, "elapsed": 2809, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 282}
p_xs = xs.data.numpy()
p_ys = ys.data.numpy()
plt.plot(p_xs, p_ys, 'x', label='data points')
plt.legend()
# + [markdown] id="K5aJCQuAYQXN" colab_type="text"
# # DNN approximating $x^2$ on $[a,b]$ interval
# + id="493xNEkGYGn9" colab_type="code" colab={}
class Net_x2(nn.Module):
    """Fixed-weight ReLU network approximating x**2 on [a, b].

    Approximates x**2 by subtracting scaled compositions of a "tent"
    function h from a linear start; the number of compositions is derived
    from the target accuracy ``epsilon``.
    NOTE(review): parameters ``d``, ``w1``, ``b1`` and ``p_k`` are accepted
    but never used, and ``self.xrange`` reads the module-level globals
    ``first``/``last`` — confirm that is intended.
    """
    def __init__(self, n, d, epsilon, a=0., b=1., w1=None, b1=None, p_k=None):
        super(Net_x2, self).__init__()
        self.n = n
        # Number of tent-function compositions needed for accuracy epsilon.
        self.iter = int(np.log(1/epsilon))
        #self.lrelu = nn.LeakyReLU(0.1)
        print("number of h compositions %d" % self.iter)
        # Dense grid used only by the plot=True visualisation path.
        self.xrange = np.arange(first, last, 0.0005)
        self.a = a
        self.b = b
        # Scale factor (b-a)^2/4 applied to each correction term.
        self.c = (b-a)**2/4.
    def h(self, x):
        # Tent ("sawtooth") function on [0, 1] built from three ReLUs.
        return 2*F.relu(x) - 4*F.relu(x-0.5) + 2*F.relu(x-1)
    def g1(self, x):
        # Same tent function rescaled from [0, 1] to [a, b].
        return 2/(self.b-self.a)*F.relu(x-self.a) - 4/(self.b-self.a)*F.relu(x-(self.a+self.b)/2.) + 2/(self.b-self.a)*F.relu(x-self.b)
    def h_leaky(self, x):
        # NOTE(review): dead code — self.lrelu is commented out in __init__,
        # so calling this method would raise AttributeError.
        return 2*self.lrelu(x) - 4*self.lrelu(x-0.5) + 2*self.lrelu(x-1)
    def forward(self, x, plot=False):
        """Return the x**2 approximation of x; with plot=True also draw the
        intermediate approximation levels on the current matplotlib figure."""
        out = F.relu((self.a+self.b)*x - self.a*self.b)
        x = self.g1(x)
        if plot:
            test_x = torch.tensor(self.xrange).float().view(self.xrange.shape[0],1)
            plt.plot(self.xrange, test_x.data.numpy(), label='level 00')
            nn_ys = F.relu((self.a+self.b)*test_x - self.a*self.b).data.numpy()
            test_x = self.g1(test_x)
        for j in range(self.iter):
            # Subtract the j-th scaled tent-composition correction term.
            out = out - self.c*x*(2.0**(-(j)*self.n))
            if plot:
                nn_ys = nn_ys - (self.c*test_x*(2.0**(-(j)*self.n))).data.numpy()
                plt.plot(self.xrange, (test_x*(2.0**(-(j)*self.n))).data.numpy(), ':', label='level %d'%j)
                plt.plot(self.xrange, nn_ys, label='out %d'%j)
                test_x = self.h(test_x)
            x = self.h(x)
        return out
# + id="wO2m234iYbTB" colab_type="code" outputId="7e74e2db-8194-490f-b8b5-6e26763ab828" executionInfo={"status": "ok", "timestamp": 1591664070281, "user_tz": 420, "elapsed": 2802, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Build the x^2-approximating network on [first, last] (globals from earlier
# cells, as is `n`) with target accuracy eps; no training is needed since all
# weights are constructed analytically.
eps = 0.00001
net = Net_x2(n, 1, eps, a=first, b=last)
#net = copy.deepcopy(net0)
criterion = nn.MSELoss()
net.eval()
# + [markdown] id="SNH0ex7sbXLd" colab_type="text"
# # test accuracy
# + id="H_Ge6ewYbfJ1" colab_type="code" colab={}
# Held-out sample for measuring the approximation error of the constructed net.
test_xs, test_ys = create_data(10000)
# + id="JMuF2AvaYbVs" colab_type="code" outputId="644807cf-6bca-4f40-d5b0-38e048226618" executionInfo={"status": "ok", "timestamp": 1591664070549, "user_tz": 420, "elapsed": 3061, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# MSE between the network output and the true x^2 values on the test sample.
output = net(test_xs)
loss = criterion(output, test_ys)
print("loss ", loss.data)
# + id="kbwEXE1Aod04" colab_type="code" outputId="7fce1d8f-9ce2-4778-cee8-8ec74eb3a995" executionInfo={"status": "ok", "timestamp": 1591664071000, "user_tz": 420, "elapsed": 3506, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 550}
f, axes = plt.subplots(1,1, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')
def plot_p_nn(net):
    """Plot the first 100 test targets against the network's predictions.

    Uses the notebook globals test_xs/test_ys and draws onto the global axes.
    """
    xs, xys = test_xs[:100], test_ys[:100]
    nn_ys = net(xs).data.numpy()
    axes.plot(xys.numpy().squeeze(), ':', label='orig_x^2', color='r', linewidth=3)
    axes.plot(nn_ys, color='k', label='nn')
    axes.legend()
plot_p_nn(net)
axes.set_title('Approximation p^%d(x) using NN'%n)
plt.show()
# + [markdown] id="58WERux8boAQ" colab_type="text"
# # plot NN approximation and original $p(x)=x^2$
# + id="5Dh2PcHhYbXm" colab_type="code" outputId="5ca364f5-070e-4d9f-d09e-641eba8112a0" executionInfo={"status": "ok", "timestamp": 1591664071001, "user_tz": 420, "elapsed": 3501, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 68}
# Spot-check the network at a few points (expected values 0.04, 0.16, 0.64
# when the interval is [0, 1]).
xs = torch.tensor(np.array([0.2, 0.4, 0.8])).float().view(3,1)
nn_ys = net(xs).data.numpy()
print(nn_ys)
# + id="5DEChZuyYbaH" colab_type="code" outputId="4461d12a-f7f0-4b8d-d291-f424b962cc87" executionInfo={"status": "ok", "timestamp": 1591664072383, "user_tz": 420, "elapsed": 4877, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 550}
f, axes = plt.subplots(1,1, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')
def plot_p_nn(orig_p, net, n1):
    """Plot the target function and the NN approximation on a dense grid.

    Calling net(..., plot=True) additionally draws every intermediate
    correction level onto the current figure.  `n1` is unused (kept for the
    call-site signature).
    """
    x = np.arange(first, last, 0.0001)
    xs = torch.tensor(x).float().view(x.shape[0],1)
    nn_ys = net(xs, plot=True).data.numpy()
    axes.plot(x, orig_p(x), '--', label='orig_p')
    axes.plot(x, nn_ys, color='k', label='nn')
    axes.legend()
plot_p_nn(orig_p, net, 0)
axes.plot(p_xs, p_ys, 'o', color='k', label='data points')
axes.set_title('Approximation p^%d(x) using NN'%n)
plt.show()
# + [markdown] id="FidXnTmPab1k" colab_type="text"
# # error dependency on $\epsilon$
#
#
# + id="0yA0o3gVaa7j" colab_type="code" outputId="18b3bd35-9b00-4ece-a6eb-b3c5c35034ec" executionInfo={"status": "ok", "timestamp": 1591664072715, "user_tz": 420, "elapsed": 5203, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg41VcyjOlxvSSwaaqMiBm1XEWP9CJdo2pAEg3OVg=s64", "userId": "17331026392526770865"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
f, axes = plt.subplots(1,1, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')
test_xs, test_ys = create_data(10000)
# Sweep target accuracies 1, 0.1, ..., 1e-9 and record the realized MSE.
epsilons = [0.1**i for i in range(10)]
losses = []
for eps in epsilons:
    net = Net_x2(n, 1, eps, a=first, b=last)
    net.eval()
    output = net(test_xs)
    print(output.size(), test_ys.size())
    loss = criterion(output, test_ys).data
    print("eps", eps, "loss ", loss)
    losses += [loss]
# Log-log plot: realized MSE should decay with the requested epsilon.
axes.plot(epsilons, losses, '--', label='orig_p')
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_xlabel("epsilon")
axes.set_ylabel("MSE")
axes.legend()
# + id="Odpi-ahrYbcd" colab_type="code" colab={}
# + id="19ayBY9aYbfG" colab_type="code" colab={}
# + id="5UXEiJlfYbhh" colab_type="code" colab={}
| net_x2_ab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep MicroBiome
#
# Aug. 14. 2019
# @ Youngwon (<EMAIL>)
# +
import os
import json
import numpy as np
import pandas as pd
import copy
import logging
import sys
import keras.backend as k
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
# Hide all GPUs so TensorFlow/Keras runs on CPU for this analysis notebook.
os.environ['CUDA_VISIBLE_DEVICES']=''
# -
from deepbiome.deepbiome import *
# TensorFlow 1.x only: create a session with GPU memory growth enabled and
# register it as the Keras backend session (no-op under TF 2.x).
if not tf.__version__.startswith('2'):
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    k.set_session(tf.Session(config=config))
# ## Pick Models
# +
# Analysis settings: number of simulation folds and the config keys to echo
# for each model later in the notebook.
save = False
kfold=1000
# kfold=20
network_model_keys = ['optimizer','lr','decay']
architecture_keys = ['weight_decay', 'weight_l1_penalty', #'weight_l2_penalty',
                     'tree_thrd', 'weight_initial',
                     'batch_normalization','drop_out']
network_training_keys = ['batch_size','epochs']
logging.basicConfig(format = '[%(name)-8s|%(levelname)s|%(filename)s:%(lineno)s] %(message)s',
                    level=logging.DEBUG)
log = logging.getLogger()
# +
#######################################################################
# filenames = 'simulation_s0.Rmd'
# models = [
# 'simulation_s0/simulation_s0_deep',
# 'simulation_s0/simulation_s0_deep_l1',
# 'simulation_s0/simulation_s0_deepbiome',
# ]
# models_aka = [
# 'DNN',
# 'DNN+$\ell_1$',
# 'DeepBiome',
# ]
# num_classes = 0
########################################################################
# filenames = 'simulation_s1.Rmd'
# models = [
# 'simulation_s1/simulation_s1_deep',
# 'simulation_s1/simulation_s1_deep_l1',
# 'simulation_s1/simulation_s1_deepbiome',
# ]
# models_aka = [
# 'DNN',
# 'DNN+$\ell_1$',
# 'DeepBiome',
# ]
# num_classes = 0
# ########################################################################
# filenames = 'simulation_s2.Rmd'
# models = [
# 'simulation_s2/simulation_s2_deep',
# 'simulation_s2/simulation_s2_deep_l1',
# 'simulation_s2/simulation_s2_deepbiome',
# ]
# models_aka = [
# 'DNN',
# 'DNN+$\ell_1$',
# 'DeepBiome',
# ]
# num_classes = 1
# #######################################################################
# filenames = 'simulation_s3.Rmd'
# models = [
# 'simulation_s3/simulation_s3_deep',
# 'simulation_s3/simulation_s3_deep_l1',
# 'simulation_s3/simulation_s3_deepbiome',
# ]
# models_aka = [
# 'DNN',
# 'DNN+$\ell_1$',
# 'DeepBiome',
# ]
# num_classes = 3
# # ########################################################################
# filenames = 'simulation_s4.Rmd'
# models = [
# 'simulation_s4/simulation_s4_deep',
# 'simulation_s4/simulation_s4_deep_l1',
# 'simulation_s4/simulation_s4_deepbiome',
# ]
# models_aka = [
# 'DNN',
# 'DNN+$\ell_1$',
# 'DeepBiome',
# ]
# num_classes = 0
######################################################################
# Active scenario: simulation s5 (the commented sections above select s0-s4).
# num_classes selects the task type below: 0 = regression, 1 = binary, >1 = multiclass.
filenames = 'simulation_s5.Rmd'
models = [
    'simulation_s5/simulation_s5_deep',
    'simulation_s5/simulation_s5_deep_l1',
    'simulation_s5/simulation_s5_deepbiome',
]
models_aka = [
    'DNN',
    'DNN+$\ell_1$',  # NOTE(review): '\e' is an invalid escape in a non-raw string; consider r'...'
    'DeepBiome',
]
num_classes = 0
########################################################################
# +
model_network_info = {}
model_path_info = {}
for model_path in models:
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
model_path_info[model_path] = config_data.get_config_map()
model_network_info[model_path] = config_network.get_config_map()
if num_classes == 0: y_names = ['loss','correlation_coefficient']
elif num_classes==1: y_names = ['loss','binary_accuracy','sensitivity','specificity','gmeasure', 'auc']
else: y_names=['loss','categorical_accuracy','precision','recall','f1', 'auc']
if num_classes == 0: measure_index = np.array([0,1])
elif num_classes==1: measure_index = np.array([2,3,4,1,5])
else: measure_index = np.array([1,2,3,4,5])
# -
# ## Accuracy
results = []
# log.info('%20s & %s' % ('model', '& '.join(['%s ' % name for name in np.array(y_names)[[measure_index]]])))
# print('%10s & %s \\\\\ \hline' % ('model', '& '.join(['%7s & (sd) ' % name for name in np.array(y_names)[[measure_index]]])))
# for model, aka in zip(models, models_aka):
#     evaluation = np.load('%s/eval.npy' % model)
#     log.info('%20s: %s' % (aka, ''.join(['%10.4f (%10.4f)'%(mean, std) for mean, std in zip(np.mean(evaluation, axis=0),np.std(evaluation, axis=0))])))
#     results.append(np.vstack([np.mean(evaluation, axis=0),np.std(evaluation, axis=0)]).transpose())
# Print a LaTeX table row per model: mean & sd of each selected metric over
# folds (nan-aware, since some folds may have missing metrics).
for model, aka in zip(models, models_aka):
    train_evaluation = np.load('%s/train_eval.npy' % model)[:,measure_index]
    train_res = '&'.join(['%7.3f & %7.3f'%(mean, std) for mean, std in zip(np.nanmean(train_evaluation, axis=0),np.nanstd(train_evaluation, axis=0))])
    test_evaluation = np.load('%s/test_eval.npy' % model)[:,measure_index]
    test_res = '&'.join(['%7.3f & %7.3f'%(mean, std) for mean, std in zip(np.nanmean(test_evaluation, axis=0),np.nanstd(test_evaluation, axis=0))])
    # log.info('%s & %s & %s \\\\' % (aka, train_res, test_res))
    print('%10s & %s & %s \\\\' % (aka, test_res, train_res))
    # results.append(np.vstack([np.mean(evaluation, axis=0),np.std(evaluation, axis=0)]).transpose())
# # Weight estimation of DeepBiome
#
# We identify the largest weight estimates of neurons in two hidden layers; by doing this, we can identify the strongest phylogenetic connections. We compute the True Positive Rate (``TPR``, sensitivity), True Negative Rate (``TNR``, specificity), and their geometric mean (i.e., ``g-Measure``). The false discovery rate (FDR) would be ``FDR = 1-TPR`` in our case.
# ## DNN + $\ell_1$
# +
# Select the DNN+l1 model (index 1) and load its configs; path entries are
# rewritten to be relative to this notebook's working directory.
num=1
model_path = models[num]
model_aka = models_aka[num]
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
# Strip the first two path components ('../..'-style prefixes) from each data path.
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
# Best-effort: these keys are optional and may be absent from the config.
try: path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
except: pass
try: path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
except: pass
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
path_info['model_info']['model_dir'] = './%s/%s'%(model_path,path_info['model_info']['model_dir'])
# Echo the selected model and its hyper-parameters.  The loop variable is
# named `key`: the original used `k`, which shadowed the `keras.backend as k`
# alias imported at the top of this file.
log.info('%22s : %s' % ('model', model_path))
log.info('%22s : %s' % ('model_aka', model_aka))
for key in architecture_keys:
    log.info('%22s : %s' % (key, network_info['architecture_info'].get(key, None)))
for key in network_model_keys:
    log.info('%22s : %s' % (key, network_info['model_info'].get(key, None)))
for key in network_training_keys:
    log.info('%22s : %s' % (key, network_info['training_info'].get(key, None)))
# -
# Load the true tree-weight arrays (one per phylogenetic level) and assemble
# the per-fold ground truth used to score taxa selection.
tw_1 = np.load('%s/tw_1.npy' % path_info['data_info']['data_path'])
tw_2 = np.load('%s/tw_2.npy' % path_info['data_info']['data_path'])
tw_3 = np.load('%s/tw_3.npy' % path_info['data_info']['data_path'])
tw_4 = np.load('%s/tw_4.npy' % path_info['data_info']['data_path'])
true_tree_weight_list = []
for fold in range(kfold):
    true_tree_weight_list.append(np.array([tw_1[fold],tw_2[fold],tw_3[fold],tw_4[fold]]))
# true_tree_weight_list = np.array(true_tree_weight_list)
# np.save('../deepbiome/tests/data/true_weight_list.npy', true_tree_weight_list)
# One trained-weight file per fold; compare them against the truth.
trained_weight_path_list = ['%s/weight/weight_%d.h5' % (path_info['model_info']['model_dir'], i) for i in range(kfold)]
lvl_category_dict = np.load('%s/lvl_category.npy' % '/'.join(path_info['data_info']['tree_info_path'].split('/')[:-1]),
                            allow_pickle=True)
summary = deepbiome_taxa_selection_performance(log, network_info, path_info, num_classes,
                                               true_tree_weight_list, trained_weight_path_list,
                                               lvl_category_dict = lvl_category_dict)
# Label the first row with the model's display name.
summary.iloc[0,0] = model_aka
summary
# +
# Render the taxa-selection summary as LaTeX table rows.
print('%7s & %7s & %12s & %s' % ('Model', 'PhyloTree', 'True (Total)', ' & '.join(summary.columns[4:])))
print('---------------------------------------------------------------------------------------------------------------')
for i in range(summary.shape[0]):
    print('%10s & %7s & %7d (%d) & ' % tuple(summary.iloc[i,:4]) + ' &'.join(['%6.3f' % val for val in summary.iloc[i,4:]]) + ' \\\\')
# if save:
# # filenametexa = '.'.join(["%s_select_texa_1" % filename.split('.')[0], filename.split('.')[1]])
# colname = ['Tree','True (Total)','Selected','Sensitivity','Specificity','gMeasure','Accuracy']
# with open('%s/%s' % (analysis_dir, filename), mode='a') as f:
# # f.write('---\ntitle: "%s texa selection ver.1"\noutput: html_document\n---\n\n' % filename.split('.')[0])
# f.write('\n## Texa Selection Preformance (ver 1): %s\n\n' % model_aka)
# f.write('| %s |\n' % ('|'.join([v for v in colname])))
# f.write('|'+'---|'*len(colname)+'\n')
# for value in values:
# f.write('| %s |\n' % ('|'.join(value)))
# -
# ## DeepBiome
# +
# Same config loading as the DNN+l1 cell above, but for the DeepBiome model
# (index 2).
num=2
model_path = models[num]
model_aka = models_aka[num]
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
# Strip the first two path components from each data path.
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
# Best-effort: these keys are optional and may be absent from the config.
try: path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
except: pass
try: path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
except: pass
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
path_info['model_info']['model_dir'] = './%s/%s'%(model_path,path_info['model_info']['model_dir'])
# Echo the selected model and its hyper-parameters.  The loop variable is
# named `key`: the original used `k`, which shadowed the `keras.backend as k`
# alias imported at the top of this file.
log.info('%22s : %s' % ('model', model_path))
log.info('%22s : %s' % ('model_aka', model_aka))
for key in architecture_keys:
    log.info('%22s : %s' % (key, network_info['architecture_info'].get(key, None)))
for key in network_model_keys:
    log.info('%22s : %s' % (key, network_info['model_info'].get(key, None)))
for key in network_training_keys:
    log.info('%22s : %s' % (key, network_info['training_info'].get(key, None)))
# -
# ### Performance
# Same taxa-selection scoring as the DNN+l1 cell above, for DeepBiome.
tw_1 = np.load('%s/tw_1.npy' % path_info['data_info']['data_path'])
tw_2 = np.load('%s/tw_2.npy' % path_info['data_info']['data_path'])
tw_3 = np.load('%s/tw_3.npy' % path_info['data_info']['data_path'])
tw_4 = np.load('%s/tw_4.npy' % path_info['data_info']['data_path'])
true_tree_weight_list = []
for fold in range(kfold):
    true_tree_weight_list.append(np.array([tw_1[fold],tw_2[fold],tw_3[fold],tw_4[fold]]))
# true_tree_weight_list = np.array(true_tree_weight_list)
# np.save('../deepbiome/tests/data/true_weight_list.npy', true_tree_weight_list)
trained_weight_path_list = ['%s/weight/weight_%d.h5' % (path_info['model_info']['model_dir'], i) for i in range(kfold)]
lvl_category_dict = np.load('%s/lvl_category.npy' % '/'.join(path_info['data_info']['tree_info_path'].split('/')[:-1]),
                            allow_pickle=True)
summary = deepbiome_taxa_selection_performance(log, network_info, path_info, num_classes,
                                               true_tree_weight_list, trained_weight_path_list,
                                               lvl_category_dict = lvl_category_dict)
summary.iloc[0,0] = model_aka
summary
# +
# Render the taxa-selection summary as LaTeX table rows.
print('%7s & %7s & %12s & %s' % ('Model', 'PhyloTree', 'True (Total)', ' & '.join(summary.columns[4:])))
print('---------------------------------------------------------------------------------------------------------------')
for i in range(summary.shape[0]):
    print('%10s & %7s & %7d (%d) & ' % tuple(summary.iloc[i,:4]) + ' &'.join(['%6.3f' % val for val in summary.iloc[i,4:]]) + ' \\\\')
# if save:
# # filenametexa = '.'.join(["%s_select_texa_1" % filename.split('.')[0], filename.split('.')[1]])
# colname = ['Tree','True (Total)','Selected','Sensitivity','Specificity','gMeasure','Accuracy']
# with open('%s/%s' % (analysis_dir, filename), mode='a') as f:
# # f.write('---\ntitle: "%s texa selection ver.1"\noutput: html_document\n---\n\n' % filename.split('.')[0])
# f.write('\n## Texa Selection Preformance (ver 1): %s\n\n' % model_aka)
# f.write('| %s |\n' % ('|'.join([v for v in colname])))
# f.write('|'+'---|'*len(colname)+'\n')
# for value in values:
# f.write('| %s |\n' % ('|'.join(value)))
| examples/analysis-simulation_s5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tinydata] *
# language: python
# name: conda-env-tinydata-py
# ---
import pandas as pd
from pandas import Series, DataFrame
import re
import numpy as np
google_rumors=pd.read_csv('../data/Google_rumors.csv')
# Drop search-result metadata; keep summary text plus keyword/label columns.
google_rumors = google_rumors.drop(['title','link','visible_link','date','rank'],axis=1)
# Strip the leading "Y年M月D日 - " date prefix from each snippet.
# NOTE(review): relies on str.replace treating the pattern as a regex by
# default; newer pandas requires regex=True -- confirm the pinned version.
google_rumors.summary = google_rumors.summary.str.replace(r'.*?年.*?月.*?日\s*\-\s','')
google_rumors
# result = pd.DataFrame()
# Build (lang, label, sentence, keyword) NLI rows from the Google snippets.
google_rumors_result = []
# NLI label names indexed by the numeric label column of the CSV.
labels = ["contradiction", "entailment", "neutral"]
for value in google_rumors.values:
    # for sentence in re.findall(r'(.*?)。',value[1]):
    sentence = value[1]
    # NOTE(review): the lazy '.*?' matches empty, so this pattern matches any
    # single non-'。' character and the sub removes everything except full
    # stops.  That looks unintended -- confirm against the commented-out
    # sentence-splitting findall above.
    sentence = re.sub(r'[^。].*?','',sentence)
    # sentence = '' if '...' in sentence else sentence
    sentence = '' if '没有此网页的信息' in sentence else sentence
    sentence = sentence.strip()
    if sentence:
        google_rumors_result += [[
            'zh',
            labels[value[2]],
            sentence,
            value[0],
        ]]
# + jupyter={"outputs_hidden": true}
google_rumors_result
# +
# DXY RUMOR
# -
dxy_rumors=pd.read_csv('../data/DXY_rumors.csv')
# Keep only the rumor title (keyword), body text and numeric label.
dxy_rumors = dxy_rumors.drop(['_id','id','mainSummary','summary','sourceUrl','crawlTime'],axis=1)
dxy_rumors.columns = ['keyword', 'summary','label']
dxy_rumors
# result = pd.DataFrame()
# Build (lang, label, sentence, keyword) NLI rows from the DXY rumor bodies.
dxy_rumors_result = []
# NLI label names indexed by the numeric label column of the CSV.
labels = ["contradiction", "entailment", "neutral"]
for value in dxy_rumors.values:
    sentence = value[1]
    # sentence = '' if '...' in sentence else sentence
    # Drop Google's "no information for this page" placeholder text.
    sentence = '' if '没有此网页的信息' in sentence else sentence
    sentence = sentence.strip()
    if sentence:
        dxy_rumors_result += [[
            'zh',
            labels[value[2]],
            sentence,
            value[0],
        ]]
# + jupyter={"outputs_hidden": true}
dxy_rumors_result
# -
# +
# DXY News
# -
dxy_news=pd.read_csv('../data/DXY_news.csv')
# Keep only the news title (keyword) and summary text.
dxy_news = dxy_news.drop(['_id','id','pubDate','infoSource','sourceUrl','provinceId','crawlTime','entryWay','infoType','dataInfoState','dataInfoOperator','dataInfoTime','provinceName','createTime','modifyTime','adoptType','body'],axis=1)
dxy_news.columns = ['keyword', 'summary']
# All news items are treated as true statements: label 1 == "entailment".
dxy_news['label'] = 1
dxy_news
# result = pd.DataFrame()
# Build (lang, label, sentence, keyword) NLI rows from the DXY news summaries.
dxy_news_result = []
# NLI label names indexed by the numeric label column (news rows use label 1).
labels = ["contradiction", "entailment", "neutral"]
for value in dxy_news.values:
    # Split the summary into sentences on the Chinese full stop.
    for sentence in re.findall(r'(.*?)。',value[1]):
        # Keep only CJK characters (same pattern demonstrated at the end of
        # this notebook).  The original wrote r'[[^\u4e00-\u9fa5]]' with
        # doubled brackets, which matched '[', '^' or a CJK char followed by
        # ']' instead of stripping non-CJK characters.
        sentence = re.sub(r'[^\u4e00-\u9fa5]','',sentence)
        # The remaining filters are kept as-is ('...' can no longer occur once
        # non-CJK characters are removed).
        sentence = '' if '...' in sentence else sentence
        sentence = '' if '例' in value[0] else sentence
        sentence = '' if '例' in sentence else sentence
        sentence = '' if '没有此网页的信息' in sentence else sentence
        sentence = sentence.strip()
        if sentence:
            dxy_news_result += [[
                'zh',
                labels[value[2]],
                sentence,
                value[0],
            ]]
# + jupyter={"outputs_hidden": true}
dxy_news_result
# -
# Dump the combined (unsplit) corpus.
# NOTE(review): the filename 'trainn.tsv' (double n) differs from the
# 'train.tsv' written by the split cell below -- confirm whether intentional.
data = pd.DataFrame(
    google_rumors_result+dxy_news_result
)
data.to_csv('../data/trainn.tsv', sep='\t',index=False)
# rumor_nli = pd.DataFrame(dxy_rumors_result)
# rumor_nli.to_csv('../data/test.tsv', sep='\t',index=False)
# result = google_rumors_result+dxy_news_result
# 80/20 train/test split per corpus, shuffled, written as TSV.
num_train = [int(0.8*len(google_rumors_result)), int(0.8*len(dxy_news_result))]
data_train = pd.DataFrame(
    google_rumors_result[:num_train[0]] +
    dxy_news_result[:num_train[1]]
).sample(frac=1)
# The test set takes everything from index num_train[i] onward.  The original
# sliced from num_train[i]+1, silently dropping one sample from each corpus.
data_test = pd.DataFrame(
    google_rumors_result[num_train[0]:] +
    dxy_news_result[num_train[1]:]
).sample(frac=1)
data_train.to_csv('../data/train.tsv', sep='\t',index=False)
data_test.to_csv('../data/test.tsv', sep='\t',index=False)
data_train
# NOTE(review): pandas has no module-level `where`; this bare attribute access
# raises AttributeError when run.  Looks like a leftover scratch cell.
pd.where
# Round-trip the train split (re-read and re-write without the index).
train_data=pd.read_csv('../data/train.tsv', sep='\t')
train_data.to_csv('../data/train.tsv', sep='\t',index=False)
import re
# Demo of the keep-only-CJK regex: strips latin letters, keeps 阿斯顿.
re.sub(r'[^\u4e00-\u9fa5]','','dsajhdasDAs阿斯顿')
| notebook/nli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## listas, tuplas y conjuntos
#
# Es un tipo de dato que se crea a partir de otros de una manera secuenciada, por ejemplo, si queremos la lista con los digitos debemos escribir como sigue;
#
# ```Python
# Lista=[El1,El2,...,Eln]
# ```
#
# por ejemplo, la lista de digitos seria la siguiente:
# Tutorial cells: list creation, length and positional indexing.
Digitos=[0,1,2,3,4,5,6,7,8,9]
type(Digitos)
# si quiero saber cuantos elementos tiene la lista entonces utilizo `len`
len(Digitos)
# Recordemos que en los textos podiamos determinar el caracter n-esimo del tecto de la siguiente forma
texto='hola'
texto[1]
texto[3]
# en las listas podemos hacer algo similar
Digitos
Digitos[5]
Lista2=['Uno','Dos','Tres','Cuatro','Cinco','Seis','Siete','Ocho','Nueve','Cero']
Lista2[3]
# Observe qu el elemento n de la lista es el elemento n+1-esimo pues Python cuenta desde cero.
# El primer elemento
print(Digitos[0])
print(Lista2[0])
# el ultimo elemento
print(Digitos[9])
print(Lista2[9])
# En este ultimo ejercicio consultamos el ultimo elemento, en Python lo podemos hacer usando indices negativos (indice: el valor que hace referencia al n+1-esimo elemnto de la lista, el valor de los parentesis cuadrados que permite acceder a los elementos dentr de la lista)
# Tutorial cells: negative indices, reversing, count/append/insert/remove.
Digitos[-1]
Digitos[-2]
# Los indices negativos permiten que se corra la lista de forma invertida
Digitos[-10]
# invertir lista
Digitos[::-1]
Digitos.reverse()
# podemos usar un metodo de listas llamado `'.reverse()'`
Digitos
Digitos.reverse()
Digitos
# Podemos contar las veces que aparece un elemento en una lista
Digitos.count(4)  # press Tab after the dot to see the available list methods
Lista3=[1,2,2,3,3,3,4,4,4,4]
Lista3[5]
Lista3.append(5)
Lista3
# al final agrega el nuevo digito con `append` si quiero agregarlo al inicio utilizo `insert`
Lista3.insert(3,10)
Lista2
Lista2.insert(5,'hola')
Lista2
Lista2.remove('hola')
Lista2
# esta funcion solo quita el primer elemento
# Tutorial cells: remove/extend/concatenation and a first conditional.
Lista3.remove(4)
Lista3
# PARA ELIMINAR TODOS SE USA CLEAR
# El metodo `pop` remueve atraves del indice
Lista2+Lista3
# con el operador + se concatenan listas, tambien se puede usar el metodo `extend`
Lista2.extend(Lista3)
Lista2
# # Condicional
# es una estructura de control que permite ejecutar una subrutina segun se satisfaga una condicion inicial:
# 'Si__________ condicion____ entonces_________resultado__'
if 4>5:
    print('Cuatro es mayor que cinco')
if 4<5:
    print('Cuatro es menor que cinco')
# Un iterable especial:
# No todas las secuencias (iterables) se definen como listas tambien se puede utilizar funciones previas que generan listas secuenciadas de manera automatica
# # Bucle
# Un bucle es una estructura especial que permite recorrer los elementos de un iterable, en particular podemos recorrer los elementos de una lista:
# ```Python
# for i in Lista:
# ejercicio_i
# resp
# fin
# Tutorial cells: for loops over lists and range objects.
Lista2[:8]
for i in Lista2:
    print(i.replace('o','a'))
    print('----')
list(range(10))
# `range (m,n,p)` es un objeto especial de python lo convertiremos en lista con list para entender que contiene
range(10)  # range(0,n) genera una lista de enteros ordenada iniciando en 0 terminando en n-1
list(range(10))
list(range(2,10))  # range(m,n) genera una lista de enteros ordenada iniciando en m terminando en n-1 y en saltos de a p
list(range(2,10,2))
list(range(2,10,5))
lista_100=range(1,101)
lista_100
# Usando `for` podemos hacer cosas interesantes
for i in range(1,101):
    if i%6==0:
        print(i)  # the indented block is what belongs to the loop/if (indentation defines scope)
# NOTE(review): the condition below checks multiples of 6 while the printed
# message says "multiplo de 3" -- likely one of the two was meant to match.
for i in range(1,21):
    print('El numero que vamos a estudiar es',i)
    print(i,'es un numero que al dividirse por 3 tiene residuo',i%3)
    if i%6==0:
        print('Este numero es especial porque es un multiplo de 3')
    print('***********************************************************')
    print()
    print()
| clase6.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .ts
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: TypeScript
// language: typescript
// name: tslab
// ---
// # Getting started with tslab
//
// This notebook show the basic usage of tslab with TypeScript.
//
// ## More examples
//
// After you go through this notebook, see these notebooks to understand advanced topics.
//
// - [Getting started for JavaScript users](getting_started_javascript.ipynb)
// - [Deep learning in JavaScript with TensorFlow.js](tensorflow.ipynb)
// ## Writing and running your code interactively
//
// - Write and run TypeScript interactively.
// - To complete code, press `Tab`.
// - To inspect code, press `Shift-Tab`.
// +
import * as tslab from 'tslab';
console.log('Hello, tslab!')
console.log('Versions:', tslab.versions)
// -
/** naiveFib computes the n-th Fibonacci-style number (f(0) = f(1) = 1) by plain double recursion — intentionally slow. */
function naiveFib(n: number): number {
    // Recurse for n > 1; both base cases collapse to 1.
    return n > 1 ? naiveFib(n - 1) + naiveFib(n - 2) : 1;
}
{
    // This is 40x slower if you use Python.
    // Time naiveFib(40) to show the kernel runs at V8 speed.
    const n = 40;
    const start = Date.now()
    console.log(`naiveFib(${n}) = ${naiveFib(n)} (took ${Date.now() - start}ms)`)
}
// # class and interface
//
// You can use all TypeScript features including `class` and `interface` with the power of type-safety.
// +
/** Contract for anything that can produce a greeting message. */
interface Hello {
    getMessage(): string;
}

/** Greets by name; the name is captured via a constructor parameter property. */
class HelloImpl implements Hello {
    constructor(private name: string) {}

    getMessage(): string {
        return `Hello, ${this.name}!`;
    }
}

/** Logs the greeting of any Hello implementation. */
const printMessage = (h: Hello): void => {
    console.log(h.getMessage());
};

printMessage(new HelloImpl('tslab'));
// -
// ## Promise and async await
//
// tslab supports `async` and `await` from ES2017. tslab also supports top-level `await`.
// You can call asynchronous functions easily in tslab.
// Promise-based sleep helper; setTimeout resolves it after `ms` milliseconds.
let sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
const start = Date.now();
// Top-level await.
await sleep(500);
`Slept ${Date.now() - start}[ms]`
// If you don't `await`, tslab shows `Promise` immediately.
sleep(500);
// ## display
//
// To display non-plain text data, use `tslab.display`.
import fs from "fs";
import * as tslab from "tslab";
// ### display HTML
// Render rich HTML output in the notebook cell.
tslab.display.html('Hello <span style="color:#007ACC;font-size:x-large"><b>Type</b>Script</span>!')
// ### display images
// Show a JPEG loaded from disk as the cell output.
tslab.display.jpeg(fs.readFileSync('images/nodejs-new-pantone-black.jpg'))
// ### animation
//
// `tslab.newDisplay` creates a new `Display` instance to display and update rich contents.
{
    // Repeatedly overwrite the same display slot to animate a progress line.
    let display = tslab.newDisplay();
    for (let progress = 1; progress <= 100; progress++) {
        await sleep(20);
        display.text(`progress: ${progress}%`);
    }
}
// ## Supported JavaScript/TypeScript language features
//
// `tslab` uses `TypeScript 3.7` internally. You can use all of latest JavaScript language features and even the latest features supported from `TypeScript 3.7`.
//
// ### ES2015 (ES6)
//
// Demostrations of selected [ES2015 features](https://babeljs.io/docs/en/learn/).
// Demo of selected ES2015 features; block-scoped so names don't leak into later cells.
{
    // class
    class MyObj {
        abc: number;
        xyz: string;
        constructor(abc: number, xyz: string) {
            this.abc = abc;
            this.xyz = xyz;
        }
    }
    // const and let
    const obj = new MyObj(123, 'hello');
    // Destructuring
    let {abc, xyz: klm} = obj;
    // Enhanced Object Literals
    console.log({abc, klm});
    // Spread
    let [x, y, z] = [1, 2, 3];
    console.log({x, y, z})
    // Generator and for-of
    let array: number[] = [];
    for (let e of (function*(n: number) {
        for (let i = 0; i < n; i += 2) {
            yield i;
        }
    })(10)) {
        array.push(e * e);
    }
    console.log('array =', array);
}
// ### ES2017
//
// tslab supports `async` and `await` from ES2017. tslab also supports top-level `await`.
// You can call asynchronous functions easily in tslab.
// Async function demo: sleeps 500ms and reports the elapsed time.
let fn = async () => {
    let sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
    const start = Date.now();
    await sleep(500);
    return `Slept ${Date.now() - start}[ms]`
}
await fn();
// ### ESNext
// TypeScript supports some advanced JavaScript features.
// Demo of newer operators: both chains short-circuit on null.
{
    // 1. Optional Chaining
    // https://devblogs.microsoft.com/typescript/announcing-typescript-3-7/#optional-chaining
    let obj = {x: null};
    console.log('obj?.x?.z ==', obj?.x?.z);
    // 2. Nullish Coalescing
    // https://devblogs.microsoft.com/typescript/announcing-typescript-3-7/#nullish-coalescing
    // Unlike ||, ?? skips only null/undefined, so the 0 in y is kept.
    let x = null, y = 0, z = 'hello';
    console.log('x ?? y ?? z ==', x ?? y ?? z);
}
| notebooks/getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this project, we'll work with data from the [CIA World Factbook](https://www.cia.gov/library/publications/the-world-factbook/), a compendium of statistics about all of the countries on Earth. The Factbook contains demographic information like:
#
# - `population` - The population as of 2015.
# - `population_growth` - The annual population growth rate, as a percentage.
# - `area` - The total land and water area.
# In this guided project, we'll use SQL in Jupyter Notebook to explore and analyze data from this database. If you want to work on this project in your computer, you can download the [SQLite factbook.db database](https://dsserver-prod-resources-1.s3.amazonaws.com/257/factbook.db)
#
# We'll use the following code to connect our Jupyter Notebook to our database file:
# %%capture
# %load_ext sql
# %sql sqlite:///factbook.db
# To run SQL queries in this project we add `%%sql` on its own line to the start of our query. So to execute the query above, we'll use this code:
# + language="sql"
# SELECT *
# FROM sqlite_master
# WHERE type='table';
# -
# Here are the descriptions for some of the columns:
#
# - `name` - The name of the country.
# - `area` - The total land and sea area of the country.
# - `population` - The country's population.
# - `population_growth`- The country's population growth as a percentage.
# - `birth_rate` - The country's birth rate, or the number of births a year per 1,000 people.
# - `death_rate` - The country's death rate, or the number of death a year per 1,000 people.
# - `area`- The country's total area (both land and water).
# - `area_land` - The country's land area in [square kilometers](https://www.cia.gov/library/publications/the-world-factbook/rankorder/2147rank.html).
# - `area_water` - The country's water area in square kilometers.
# + language="sql"
# SELECT *
# FROM facts
# LIMIT 5;
# -
# #### Summary Statistics
#
# Let's start by calculating some summary statistics and look for any outlier countries.
#
# We'll Write a single query that returns the:
#
# - Minimum population
# - Maximum population
# - Minimum population growth
# - Maximum population growth
# + language="sql"
# SELECT MIN(population) AS 'Minimum_population',
# MAX(population) AS 'Maximum_population',
# MIN(population_growth) AS 'Minimum_population_growth',
# MAX(population_growth) AS 'Maximum_population_growth'
# FROM facts
# -
# A few things stick out from the summary statistics in the last screen:
#
# - There's a country with a population of `0`
# - There's a country with a population of `7256490011` (or more than 7.2 billion people)
#
# Let's use subqueries to zoom in on just these countries without using the specific values.
#
# Specifically, we will:
#
# 1. Write a query that returns the country (or countries) with the minimum population.
# 2. Write a query that returns the country (or countries) with the maximum population.
# + language="sql"
# SELECT name
# FROM facts
# WHERE population = (SELECT MIN(population)
# FROM facts)
# + language="sql"
# SELECT name
# FROM facts
# WHERE population = (SELECT MAX(population)
# FROM facts)
# -
# It seems like the table contains a row for the whole world, which explains the population of over 7.2 billion. It also seems like the table contains a row for Antarctica, which explains the population of 0. This seems to match the CIA Factbook [page for Antarctica](https://www.cia.gov/library/publications/the-world-factbook/geos/ay.html):
# 
# Now that we know this, we should recalculate the summary statistics we calculated earlier, while excluding the row for the whole world and the row for Antarctica.
# + language="sql"
# SELECT MIN(population) AS 'Minimum_population',
# MAX(population) AS 'Maximum_population',
# MIN(population_growth) AS 'Minimum_population_growth',
# MAX(population_growth) AS 'Maximum_population_growth'
# FROM facts
# WHERE (population <> (SELECT MAX(population)
# FROM facts)) AND (population <> (SELECT MIN(population)
# FROM facts))
# -
# Above, we find that the minimum population of a country in the dataset is `48` while the maximum is `1,367,485,388` (over 1 billion!)
#
# #### Finding the average world population
#
# Next we'll calculate the average value for the following columns:
#
# - `population`
# - `area`
#
# We should take care of discarding the row for the whole planet and Antarctica.
# + language="sql"
# SELECT AVG(population) AS 'Average world population',
# AVG(area) AS 'Average world area'
# FROM facts
# WHERE (population <> (SELECT MAX(population)
# FROM facts)) AND (population <> (SELECT MIN(population)
# FROM facts))
# -
# #### Finding the densely populated countries
#
# To finish, we'll build on the query we wrote for the previous query to find countries that are densely populated. We'll identify countries that have:
#
# - Above average values for population.
# - Below average values for area.
#
# Specifically, We'll Write a query that finds all countries meeting both of the following conditions:
#
# - The `population` is above average.
# - The `area` is below average.
#
# These parameters determine the population density of a location.
# + language="sql"
# SELECT *
# FROM facts
# WHERE population > (SELECT AVG(population)
# FROM facts
# WHERE population <> (SELECT MAX(population)
# FROM facts)
# )
#
# AND area < (SELECT AVG(area)
# FROM facts
# WHERE population <> (SELECT MAX(population)
# FROM facts)
# )
# -
# Some of these countries are generally known to be densely populated, so we have confidence in our results!
#
# That's it for the guided steps. Here are some next steps for you to explore:
#
# - What country has the most people? What country has the highest growth rate?
# - Which countries have the highest ratios of water to land? Which countries have more water than land?
# - Which countries will add the most people to their population next year?
# - Which countries have a higher death rate than birth rate?
# - What countries have the highest `population/area` ratio, and how does it compare to the list we found previously?
| Analytics_Projects/Analyzing_CIA_Factbook_Data_Using_SQL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Distances & Algo
# python-Levenshtein: C-backed edit-distance implementation (third-party).
import Levenshtein
# Quick sanity check: edit distance between 'saturday' and 'sunday' is 3.
Levenshtein.distance('saturday','sunday')
# Fixed vocabulary used as the auto-correct dictionary below.
all_words =['car','hexadecimal','fridge','chair','water',
            'pickle','pizza','burger','Register','mobile',
            'utensils','painting','curtains','window','pencil']
def auto_correct(word):
    """Return the entry of ``all_words`` closest to *word* by Levenshtein distance.

    Ties are broken in favour of the earliest vocabulary entry, matching the
    original ``distances.index(min(distances))`` behaviour.
    """
    # min() with a key scans the vocabulary once, instead of building a full
    # distance list, taking its min, and then rescanning it with list.index().
    return min(all_words, key=lambda candidate: Levenshtein.distance(candidate, word))
auto_correct('frdg')  # closest dictionary word (expected: 'fridge')
auto_correct('pncl')  # closest dictionary word (expected: 'pencil')
| Levenshtein_distance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Distributed PyTorch with Horovod
# In this tutorial, you will train a PyTorch model on the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset using distributed training via [Horovod](https://github.com/uber/horovod) across a GPU cluster.
# ## Prerequisites
# * If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`
# * Review the [tutorial](../train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) on single-node PyTorch training using Azure Machine Learning
# +
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
# -
# ## Diagnostics
# Opt-in diagnostics for better experience, quality, and security of future releases.
# + tags=["Diagnostics"]
from azureml.telemetry import set_diagnostics_collection
# Opts this session in to sending usage diagnostics to Microsoft.
set_diagnostics_collection(send_diagnostics=True)
# -
# ## Initialize workspace
#
# Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
# +
from azureml.core.workspace import Workspace
# Reads subscription id, resource group and workspace name from a local config.json.
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
# -
# ## Create or attach existing AmlCompute
# You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.
#
# **Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.
#
# As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "gpucluster"
try:
    # EAFP: reuse the compute target if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target.')
except ComputeTargetException:
    print('Creating a new compute target...')
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
                                                           max_nodes=4)
    # create the cluster
    compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    # Blocks until provisioning finishes (~5 minutes), printing progress.
    compute_target.wait_for_completion(show_output=True)
# use get_status() to get a detailed status for the current AmlCompute.
print(compute_target.get_status().serialize())
# -
# The above code creates GPU compute. If you instead want to create CPU compute, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`.
# ## Train model on the remote compute
# Now that we have the AmlCompute ready to go, let's run our distributed training job.
# ### Create a project directory
# Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.
# +
import os
project_folder = './pytorch-distr-hvd'
os.makedirs(project_folder, exist_ok=True)
# -
# ### Prepare training script
# Now you will need to create your training script. In this tutorial, the script for distributed training of MNIST is already provided for you at `pytorch_horovod_mnist.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.
#
# However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.
#
# To do so, in `pytorch_horovod_mnist.py`, we will first access the Azure ML `Run` object within the script:
# ```Python
# from azureml.core.run import Run
# run = Run.get_context()
# ```
# Later within the script, we log the loss metric to our run:
# ```Python
# run.log('loss', loss.item())
# ```
# Once your script is ready, copy the training script `pytorch_horovod_mnist.py` into the project directory.
# +
import shutil
# Copy the training script into the snapshot folder that gets uploaded with the run.
shutil.copy('pytorch_horovod_mnist.py', project_folder)
# -
# ### Create an experiment
# Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed PyTorch tutorial.
# +
from azureml.core import Experiment
experiment_name = 'pytorch-distr-hvd'
experiment = Experiment(ws, name=experiment_name)
# -
# ### Create a PyTorch estimator
# The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch).
# +
from azureml.core.runconfig import MpiConfiguration
from azureml.train.dnn import PyTorch
# 2 nodes, MPI/Horovod distribution, GPU images; entry script from project_folder.
estimator = PyTorch(source_directory=project_folder,
                    compute_target=compute_target,
                    entry_script='pytorch_horovod_mnist.py',
                    node_count=2,
                    distributed_training=MpiConfiguration(),
                    use_gpu=True)
# -
# The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.
# ### Submit job
# Run your experiment by submitting your estimator object. Note that this call is asynchronous.
run = experiment.submit(estimator)
print(run)
# ### Monitor your run
# You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run.
# +
from azureml.widgets import RunDetails
RunDetails(run).show()
# -
# Alternatively, you can block until the script has completed training before running more code.
run.wait_for_completion(show_output=True) # this provides a verbose log
| code/2.distributed-pytorch-with-horovod.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## String representation
#
# * By default, `str()` simply calls `repr()`
# * But `repr()` does not call `str()`
# * By default, `__format__()` calls `__str__()`
#
# ### repr
#
# * Exactness is more important than human-friendliness
# * Suited for debugging
# * Includes identifying information
# * Generally best for logging
#
# ### str
#
# * Human-friendly representation of and object
# * Suited to display information to the user
# * Does not include identifying information
#
# ### format
#
# * Used in the string's format method
# * Receives a parameter with the format specification
class Point2D:
    """A 2-D point demonstrating the __str__, __repr__ and __format__ protocols."""

    def __init__(self, x, y):
        self._x = x
        self._y = y

    def __str__(self):
        # Human-friendly form, e.g. "(23, 42)".
        return f'({self._x}, {self._y})'

    def __repr__(self):
        # Debug form including the type name, e.g. "Point2D(x=23,y=42)".
        return f'Point2D(x={self._x},y={self._y})'

    def __format__(self, f):
        # Echo the received format spec so '{:spec}'.format(p) shows it verbatim.
        return f'[Formatted point: {self._x}, {self._y}, {f}]'
p = Point2D(x=23, y=42)
str(p)   # human-friendly: '(23, 42)'
repr(p)  # debug form: 'Point2D(x=23,y=42)'
print('{}'.format(p))      # calls __format__ with an empty spec
print('{:r4e}'.format(p))  # the spec 'r4e' is passed through to __format__
| String_representation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fridaruh/Curso_Intro_AI_Crehana/blob/master/Plantilla_PLN_Spotify.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="YNtf59igZYK4"
# Importa el archivo de datos de Spotify
# + id="A4zPEj3qY-Te"
# + [markdown] id="Yp4SOjcQGtXl"
# Visualiza los primeros 5 registros
# + id="OKtSg-yiZrZy"
# + [markdown] id="hR2rjfqjZ7-8"
# Convierte los tweets a listas
# + id="aZ6Q5lpQZstP"
# + [markdown] id="S00dAjUwHBPz"
# Ejecuta el patrón
# + id="YERcfSWNaCLW"
pattern = r'''(?x) # Flag para iniciar el modo verbose
(?:[A-Z]\.)+ # Hace match con abreviaciones como U.S.A.
| \w+(?:-\w+)* # Hace match con palabras que pueden tener un guión interno
| \$?\d+(?:\.\d+)?%? # Hace match con dinero o porcentajes como $15.5 o 100%
| \.\.\. # Hace match con puntos suspensivos
| [][.,;"'?():-_`] # Hace match con signos de puntuación
'''
# + [markdown] id="XJuaJzStHDGR"
# Importa nltk, y el tokenizador
# + id="wQ1R3esHacVp"
# + [markdown] id="XR-RT4YBHHKW"
# Genera un vector vacío y un ciclo for que:
#
# * Conviérta a minúsculas las palabras
# * Tokenize los tweets y los pase por el patrón
# * Pegue los tokens en el vector vacío
#
#
#
#
# + id="8w5AxkqObNJA"
# + [markdown] id="pBk1yCeNHcGe"
# "Aplana" la lista de listas para que se convierta a **una sola** lista
# + id="ef-V26M_bnFW"
# + [markdown] id="M8X1BLfxHian"
# Importa la librería de string y convierte los signos de puntuación a una lista
# + id="OHrob5uKb0SP"
# + [markdown] id="34eWDfWkHoix"
# Descarga la lista de stopwords en español
# + id="der1Z0BNcXgW"
# + [markdown] id="7xFVy1v1HsgM"
# Genera una nueva variable donde pase por un ciclo for para eliminar las stop words
# + id="BPQ2s5nFcx_f"
# + [markdown] id="lh9m3YcRH0FH"
# Genera una nueva variable donde pase por un ciclo for para eliminar los signos de puntuación
# + id="fdZr3c4Sc__W"
# + [markdown] id="pwS9_iwHH4GS"
# Calcula con la función FreqDist la frecuencia de las palabras y almacenalos en una variable
# + id="AeJqPmEQdFiV"
# + [markdown] id="oEJ2kXpUH-ZK"
# Imprime las 20 palabras más comunes
# + id="smu5U9XCdM99"
# + [markdown] id="I36KAFENICML"
# Si crees que es necesario genera una lista de palabras a omitir
# + id="QD9SSIk8dQ2B"
# + [markdown] id="ocI2q0n9IGip"
# Corre un ciclo for para omitir las palabras de nuestra lista
# + id="omnXYfMSdeVW"
# + [markdown] id="jWJ36SXWIKU7"
# Calcula nuevamente las frecuencias de las palabras
# + id="5up2DDpvdkW2"
# + [markdown] id="PeUUE5qJIN_I"
# Imprime las 20 palabras más comunes
# + id="Mpfuj9mCdneH"
# + [markdown] id="tp9XjXubISRX"
# Importa las librerías para generar la nube de palabras y poder visualizarlas
# + id="GUC2ZRyGdo8m"
# + [markdown] id="8QOZAJlXIW1g"
# Genera la gráfica de la nube de palabras
# + id="NO4PNlhgeRyt"
| Plantilla_PLN_Spotify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is a Jupyter notebook?
#
# #### Application for creating and sharing documents that contain:
# - live code
# - equations
# - visualizations
# - explanatory text
#
# Home page: http://jupyter.org/
# # Notebook tutorials
# - [Quick Start Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/)
# - [User Documentation](http://jupyter-notebook.readthedocs.io/en/latest/)
# - [Examples Documentation](http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/examples_index.html)
# - [Cal Tech](http://bebi103.caltech.edu/2015/tutorials/t0b_intro_to_jupyter_notebooks.html)
# # Notebook Users
# - students, readers, viewers, learners
# - read a digital book
# - interact with a "live" book
# - notebook developers
# - create notebooks for students, readers, ...
# # Notebooks contain cells
# - Code cells
# - execute computer code (Python, or many other languages)
# - Markdown cells
# - documentation, "narrative" cells
# - guide a reader through a notebook
# # Following cells are "live" cells
print ("Hello Jupyter World!; You are helping me learn")
# A bare expression: the notebook echoes its value (3.0).
(5+7)/4
import numpy as np
my_first_array = np.arange(11)  # integers 0..10
print (my_first_array)
| numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 1/dashboard/.ipynb_checkpoints/Introduction to Jupyter Notebooka-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import Modules and Data
# +
import pickle
import pandas as pd
import nltk
from nltk import sent_tokenize
from nltk.tokenize import MWETokenizer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from project4 import processing, display_topics, inertia_curve, display_cluster, pickle_stuff, group_by_sentiment
# +
filepath = '/Users/tim/src/Metis/Project_4/data/interim/aloha_reviews2.pkl'
with open(filepath, 'rb') as pkl:
    df = pickle.load(pkl)
# Show long review texts in full when displaying DataFrames.
pd.options.display.max_colwidth = 1000
# -
raw_reviews = df['text']
# ### Clean Data
# `processing` is a project helper (see project4 import) that cleans the raw review text.
processed_text = processing(raw_reviews)
filepath = '/Users/tim/src/Metis/Project_4/data/interim/processed_text.pkl'
with open(filepath, 'wb') as pkl:
    # Cache the cleaned text so later runs can load it instead of reprocessing.
    pickle.dump(processed_text, pkl)
# +
# filepath = '/Users/tim/src/Metis/Project_4/data/interim/processed_text.pkl'
# with open(filepath, 'rb') as pkl:
# processed_text = pickle.load(pkl)
# -
# ### Tokenize
# Multi-word expressions to merge into single tokens after sentence splitting.
mwe_tokenizer = MWETokenizer([('customer','service'), ('hard', 'to'), ('service', 'calls'),
                              ('over', 'seas'), ('follows', 'up'), ('user', 'friendly'), ('long','time'),
                              ('front', 'of', 'house'), ('back', 'of', 'house'), ('behind', 'the', 'times'),
                              ('out', 'of', 'date'), ('easy', 'to')])
# Split the cleaned text into sentences, then merge the MWEs above.
sent_token = mwe_tokenizer.tokenize(sent_tokenize(processed_text))
len(sent_token)
# ### Vectorize Data
stopset = set(nltk.corpus.stopwords.words('english'))
# Domain-specific stop words (presumably product-related terms — verify against the corpus).
new_list= ('pos', 'aloha')
stopset.update(new_list)
#stopset
# #### Unigrams
# +
# Count and TF-IDF unigram features over the tokenized sentences.
cv = CountVectorizer(stop_words=stopset)
cv_tfidf = TfidfVectorizer(stop_words=stopset)
x_cv = cv.fit_transform(sent_token)
x_tfidf = cv_tfidf.fit_transform(sent_token).toarray()
# Wrap the matrices as DataFrames with vocabulary terms as column names.
cv_vector = pd.DataFrame(x_cv.toarray(), columns=cv.get_feature_names())
tf_vector = pd.DataFrame(x_tfidf,columns=cv_tfidf.get_feature_names())
# -
vectors = {}
vectors['cv_vector'] = cv_vector
vectors['tf_vector'] = tf_vector
pickle_stuff(vectors)
# #### Bigrams
gram2_cv = CountVectorizer(ngram_range=(2, 2), stop_words = stopset)
x_gram2_cv = gram2_cv.fit_transform(sent_token)
gram2_cv_vector = pd.DataFrame(x_gram2_cv.toarray(), columns=gram2_cv.get_feature_names())
gram2_tf = TfidfVectorizer(ngram_range=(2, 2), stop_words = stopset)
x_gram2_tf = gram2_tf.fit_transform(sent_token)
gram2_tf_vector = pd.DataFrame(x_gram2_tf.toarray(), columns=gram2_tf.get_feature_names())
vectors = {}
vectors['gram2_cv_vector'] = gram2_cv_vector
vectors['gram2_tf_vector'] = gram2_tf_vector
pickle_stuff(vectors)
# #### Trigrams
gram3_cv = CountVectorizer(ngram_range=(3, 3), stop_words = stopset)
x_gram3_cv = gram3_cv.fit_transform(sent_token)
gram3_cv_vector = pd.DataFrame(x_gram3_cv.toarray(), columns=gram3_cv.get_feature_names())
gram3_tf = TfidfVectorizer(ngram_range=(3, 3), stop_words = stopset)
x_gram3_tf = gram3_tf.fit_transform(sent_token)
gram3_tf_vector = pd.DataFrame(x_gram3_tf.toarray(), columns=gram3_tf.get_feature_names())
vectors = {}
vectors['gram3_cv_vector'] = gram3_cv_vector
vectors['gram3_tf_vector'] = gram3_tf_vector
pickle_stuff(vectors)
| Notebooks/2_clean_and_tokenize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
import os
import sys
import pandas as pd
import muscope_loader
# +
print(sys.modules['muscope_loader'].__file__)
# Resolve the spreadsheet path relative to the installed muscope_loader package.
muscope_loader_dp = os.path.dirname(sys.modules['muscope_loader'].__file__)
downloads_dp = os.path.join(muscope_loader_dp, 'downloads')
hl2a_18Sdiel_xls_fp = os.path.join(downloads_dp, 'Caron_HL2A_18Sdiel_seq_attrib_v2.xls')
print(hl2a_18Sdiel_xls_fp)
os.path.exists(hl2a_18Sdiel_xls_fp)
# -
# Read the second metadata block of the README sheet.
# FIX: the original passed `skip_rows=range(23)`, which is not a pandas
# keyword (the real one is `skiprows`); read_excel silently swallowed it via
# **kwds, so it never had any effect — it is dropped here (header=22 already
# selects the intended header row). `skip_footer` was renamed `skipfooter`
# (deprecated in pandas 0.23, removed in 1.0).
readme_block_2_df = pd.read_excel(
    hl2a_18Sdiel_xls_fp,
    sheet_name='README',
    header=22,
    index_col='Cast #',
    skipfooter=5,
    usecols=range(9, 17)
)
readme_block_2_df.head()
core_attr_plus_data_df = pd.read_excel(
    hl2a_18Sdiel_xls_fp,
    sheet_name='core attributes + data',
    skiprows=(0,2)
)
# column 10 header is on the wrong line
column_10_header = core_attr_plus_data_df.columns[9]
core_attr_plus_data_df.rename(columns={column_10_header: 'seq_name'}, inplace=True)
core_attr_plus_data_df.head()
# Build iRODS-style absolute paths for each sequence file.
core_attr_plus_data_df = core_attr_plus_data_df.assign(
    file_=[os.path.join('/iplant', s) for s in core_attr_plus_data_df.seq_name])
core_attr_plus_data_df.head()
import datetime
core_attr_plus_data_df.collection_date[0]
core_attr_plus_data_df.collection_time[0]
# collection_time entries support isoformat(timespec=...), i.e. they parsed as time objects.
core_attr_plus_data_df.collection_time[0].isoformat(timespec='seconds')
for r, row in core_attr_plus_data_df.iterrows():
    print(row.collection_time)
    break
# Strip a one-character prefix from non-float station labels and convert to float
# (e.g. 'S2' -> 2.0 — presumed label format; verify against the sheet).
core_attr_plus_data_df.station = [ (s if isinstance(s, float) else float(s[1:])) for s in core_attr_plus_data_df.station]
core_attr_plus_data_df.head()
| muscope/cruise/notebooks/parse_hl2a_xls.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Depth Controller
# ## Dynamic model based on BlueROV2 Heavy using the Fossen's Model
# <img src="bluerov.png" style="width:550px;height:270px" />
# ## **Control Strategy**
# <img src="strategy.png" style="width:600px;height:400px"/>
# ## **Code**
import numpy as np
import yaml
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
# +
##Global
## Rigid-body parameters of the BlueROV2 Heavy (Fossen-style model).
## W and B feed the (W-B) restoring terms in Bluerov.model; Zg is the
## vertical offset used in the m*Zg coupling terms.
m = 11.5 #kg
W = 112.8 #N
B = 114.8 #N 114.8
Zg = 0.02 #m
Ix = 0.16 #kg*m^2
Iy = 0.16 #kg*m^2
Iz = 0.16 #kg*m^2
##Added Mass
X_du = -5.5 #kg
Y_dv = -12.7 #kg
Z_dw = -14.57 #kg
K_dp = -0.12 #kg*m^2/rad
M_dq = -0.12 #kg*m^2/rad
N_dr = -0.12 #kg*m^2/rad
##Linear Damping
Xu = -4.03 #Ns/m
Yv = -6.22 #Ns/m
Zw = -5.18 #Ns/m
Kp = -0.07 #Ns/rad
Mq = -0.07 #Ns/rad
Nr = -0.07 #Ns/rad
##Quadratic Damping
Xuu = -18.18 #Ns^2/m^2
Yvv = -21.66 #Ns^2/m^2
Zww = -36.99 #Ns^2/m^2
Kpp = -1.55 #Ns^2/rad^2
Mqq = -1.55 #Ns^2/rad^2
Nrr = -1.55 #Ns^2/rad^2
# Saturation limit applied to the vertical (heave) thrust command [N].
MAX_Z_FORCE = 160.0
# -
class Controller:
    """PID controller with anti-windup, plus a model-based altitude controller.

    ``altitud_controller`` relies on the module-level BlueROV constants
    (m, Z_dw, Zw, Zww, W, B, MAX_Z_FORCE).
    """

    def __init__(self, Kp, Ki, Kd, origin_time=None):
        if origin_time is None:
            origin_time = 0
        # Gains for each term
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        # NOTE(review): with the default guard of 0.0 the integral term is
        # clamped to zero until setWindup() is called — confirm callers set it.
        self.windup_guard = 0.0
        # Corrections (outputs)
        self.Cp = 0.0
        self.Ci = 0.0
        self.Cd = 0.0
        self.previous_time = origin_time
        self.sp_previous_time = origin_time
        self.previous_error = 0.0
        self.previous_setpoint = 0.0
        self.previous_altitud_error = 0.0
        self.previous_altitud = 0.0

    def setWindup(self, windup):
        """Set the symmetric saturation limit applied to the integral term."""
        self.windup_guard = windup

    def Update(self, error, current_time):
        """Update the PID state and return the control output.

        Returns 0 when no time has elapsed since the previous update.
        """
        dt = current_time - self.previous_time
        if dt <= 0.0:
            return 0
        de = error - self.previous_error
        self.Cp = error
        self.Ci += error * dt
        self.Cd = de / dt
        # Anti-windup: clamp the accumulated integral term.
        if (self.Ci < -self.windup_guard):
            self.Ci = -self.windup_guard
        elif (self.Ci > self.windup_guard):
            self.Ci = self.windup_guard
        # Update previous values
        self.previous_time = current_time
        self.previous_error = error
        return (
            (self.Kp * self.Cp)    # proportional term
            + (self.Ki * self.Ci)  # integral term
            + (self.Kd * self.Cd)  # derivative term
        )

    def derivative_error(self, current_altitud_error, dt):
        """Numerical derivative of the altitude error over time step dt.

        BUGFIX: the original stored the computed *difference* ``de`` as
        ``previous_altitud_error``, so every call after the second
        differentiated against the last delta instead of the last error.
        We now store the current error value.
        """
        de = current_altitud_error - self.previous_altitud_error
        self.previous_altitud_error = current_altitud_error
        return (de/dt)

    def altitud_controller(self, error, current_time, x_, v_):
        """Feedback-linearizing heave controller: PID output plus model-based
        compensation of damping and net buoyancy, saturated to MAX_Z_FORCE."""
        # position (roll, pitch)
        phi = x_[3]
        theta = x_[4]
        # heave velocity
        w = v_[2]
        c1 = np.cos(phi)
        c2 = np.cos(theta)
        control_signal = self.Update(error, current_time)
        # Scale by the effective mass (rigid-body plus heave added mass).
        temp = -control_signal*(m - Z_dw)
        # Cancel the damping and restoring (W-B) terms of the heave equation.
        tau_linear = temp - ((Zw + Zww*np.absolute(w))*w + (W-B)*c2*c1)
        # Saturate to the thruster capability.
        if tau_linear > MAX_Z_FORCE:
            tau_linear = MAX_Z_FORCE
        if tau_linear < -MAX_Z_FORCE:
            tau_linear = -MAX_Z_FORCE
        return tau_linear
class Bluerov:
    """BlueROV2 Heavy 6-DOF model (Fossen-style).

    Holds the state vectors, the 8-thruster configuration/allocation
    matrices, the rigid-body dynamics (``model``), body-to-world
    kinematics, simulated noisy sensors and a one-step Euler integrator.
    Relies on the module-level hydrodynamic constants (m, W, B, Zg,
    Ix/Iy/Iz, added-mass and damping coefficients).
    """
    def __init__(self, position, velocity, acceleration, tau):
        # State: position/attitude [x y z phi theta psi], body velocity
        # [u v w p q r], acceleration and applied wrench — each length 6.
        self.x_ = position
        self.v_ = velocity
        self.acc = acceleration
        self.tau = tau
        # Thruster configuration matrix (6 DOF x 8 thrusters): maps the
        # 8 individual thruster forces onto body-frame forces/torques.
        self.T = np.array([[0.707, 0.707, -0.707, -0.707, 0, 0, 0, 0],
                           [-0.707, 0.707, -0.707, 0.707, 0, 0, 0, 0],
                           [0, 0, 0, 0, -1, 1, 1, -1],
                           [0.06, -0.06, 0.06, -0.06, -0.218, -0.218, 0.218, 0.218],
                           [0.06, 0.06, -0.06, -0.06, 0.120, -0.120, 0.120, -0.120],
                           [-0.1888, 0.1888, 0.1888, -0.1888, 0, 0, 0, 0 ]])
        T_trans = self.T.transpose()
        temp = np.dot(self.T, T_trans)
        temp = np.linalg.inv(temp)
        # Right pseudo-inverse T+ = T'(TT')^-1, used for control allocation.
        self.T_plus = np.dot(T_trans, temp)
        # Identical command->force gain of 40 for each of the 8 thrusters.
        K = np.array([40, 40, 40, 40, 40, 40, 40, 40])
        self.K_ = np.diag(K)
        self.K_inv = np.linalg.inv(self.K_)
        self.model(self.x_, self.v_, self.acc, self.tau)
    def model(self, x_, v_, acc, tau) :
        """Return the body-frame acceleration for state (x_, v_) under wrench tau."""
        ##velocity
        u = v_[0]
        v = v_[1]
        w = v_[2]
        p = v_[3]
        q = v_[4]
        r = v_[5]
        ##position
        #x = x_(1); y = x_(2); z = x_(3);
        phi = x_[3]
        theta = x_[4]
        psi = x_[5]
        ##Force/Torque
        X = tau[0]
        Y = tau[1]
        Z = tau[2]
        K = tau[3]
        M = tau[4]
        N = tau[5]
        # Previous accelerations feed the m*Zg coupling terms below.
        u_dot = acc[0]
        v_dot = acc[1]
        w_dot = acc[2]
        p_dot = acc[3]
        q_dot = acc[4]
        r_dot = acc[5]
        c1 = np.cos(phi)
        c2 = np.cos(theta)
        c3 = np.cos(psi)
        s1 = np.sin(phi)
        s2 = np.sin(theta)
        s3 = np.sin(psi)
        t2 = np.tan(theta)
        ##Resulting acceleration
        u_dot = ((Xu + Xuu*np.absolute(u))*u -(Z_dw + m)*q*w -(W-B)*s2 - m*Zg*q_dot + X)/(m-X_du)
        v_dot = ((Yv + Yvv*np.absolute(v))*v + (Z_dw + m)*p*w + X_du*u*r + (W-B)*c2*s1 + m*Zg*p_dot + Y)/(m - Y_dv)
        w_dot = ((Zw + Zww*np.absolute(w))*w - (X_du - m)*q*u + (Y_dv - m)*p*v + (W-B)*c2*c1 + Z)/(m - Z_dw)
        # NOTE(review): the divisor (m - Z_dw) below looks copy-pasted from
        # w_dot; by analogy with q_dot/(Iy-M_dq) and r_dot/(Iz-N_dr), the roll
        # equation would be expected to divide by (Ix - K_dp) — confirm.
        p_dot = ((Kp + Kpp*np.absolute(p))*p - (Y_dv - Z_dw)*w*v - (M_dq - N_dr)*r*q -(Iz - Iy)*r*q - Zg*W*c2*s1 + m*Zg*v_dot + K)/(m - Z_dw)
        q_dot = ((Mq + Mqq*np.absolute(q))*q - (Z_dw - X_du)*u*w - (N_dr - K_dp)*p*r - (Ix-Iz)*p*r - Zg*W*s2 - m*Zg*u_dot + M)/(Iy-M_dq) #Iy-M_dq = 0.28
        r_dot = ((Nr + Nrr*np.absolute(r))*r - (X_du - Y_dv)*u*v - (K_dp - M_dq)*p*q - (Iy - Ix)*p*q + N)/(Iz - N_dr)
        acc_vec = np.array([u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])
        return acc_vec
    def thruster_system(self, u):
        """Map per-thruster commands u (length 8) to the body wrench tau (length 6)."""
        F = np.dot(self.K_, u)
        tau = np.dot(self.T, F)
        #print(tau)
        return tau
    def control_allocation(self, tau):
        """Inverse of thruster_system: desired wrench -> per-thruster commands."""
        temp = np.dot(self.T_plus, tau)
        u = np.dot(self.K_inv, temp)
        return u
    def kinematics(self, v, p) :
        """ Given the current body velocity and the previous position, compute p_dot """
        roll = p[3]
        pitch = p[4]
        yaw = p[5]
        # Body-to-world rotation matrix built from roll/pitch/yaw
        # (flat row-major list, reshaped to 3x3 below).
        rec = [np.cos(yaw)*np.cos(pitch), -np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll), np.sin(yaw)*np.sin(roll)+np.cos(yaw)*np.cos(roll)*np.sin(pitch),
               np.sin(yaw)*np.cos(pitch), np.cos(yaw)*np.cos(roll)+np.sin(roll)*np.sin(pitch)*np.sin(yaw), -np.cos(yaw)*np.sin(roll)+np.sin(pitch)*np.sin(yaw)*np.cos(roll),
               -np.sin(pitch), np.cos(pitch)*np.sin(roll), np.cos(pitch)*np.cos(roll)]
        rec = np.array(rec).reshape(3,3)
        # Angular-velocity transform (divides by cos(pitch): singular at +/-90 deg pitch).
        to = [1.0, np.sin(roll)*np.tan(pitch), np.cos(roll)*np.tan(pitch),
              0.0, np.cos(roll), -np.sin(roll),
              0.0, np.sin(roll)/np.cos(pitch), np.cos(roll)/np.cos(pitch)]
        to = np.array(to).reshape(3,3)
        p_dot = np.zeros(6)
        p_dot[0:3] = np.dot(rec, v[0:3])
        p_dot[3:6] = np.dot(to, v[3:6])
        return p_dot
    # Sensor simulation
    def position_sensing(self, p_real):
        """Return p_real with simulated measurement noise added."""
        # NOTE(review): np.random.rand() is in [0, 1), so the noise below is
        # one-sided (always additive), not zero-mean — confirm this is intended.
        pos_uncert_amplitude = 0.02 # up to 0.02 of uniform position noise (old '10m max error' note was stale)
        orient_uncert_amplitude = 0.5 # up to 0.5 deg of orientation noise (old '5degr' note was stale)
        # No noise settings
        #pos_uncert_amplitude = 0.0
        #orient_uncert_amplitude = 0.0
        p_noisy = np.zeros_like(p_real)
        p_noisy[0:3] = p_real[0:3] + np.array([np.random.rand()*pos_uncert_amplitude,
                                               np.random.rand()*pos_uncert_amplitude,
                                               np.random.rand()*pos_uncert_amplitude])
        # Slice end 7 is clipped to the vector length (6) by numpy.
        p_noisy[3:7] = p_real[3:7] + np.array([np.random.rand()*np.pi/180.0*orient_uncert_amplitude,
                                               np.random.rand()*np.pi/180.0*orient_uncert_amplitude,
                                               np.random.rand()*np.pi/180.0*orient_uncert_amplitude])
        return p_noisy
    def velocity_sensing(self, v_real):
        """Return v_real with simulated measurement noise added (same scheme as position_sensing)."""
        lin_vel_uncert_amplitude = 0.02 # up to 0.02 of linear-velocity noise (old '0.1 m/s' note was stale)
        ang_vel_uncert_amplitude = 0.5 # up to 0.5 deg/s of angular-velocity noise (old '1 deg/s' note was stale)
        # No noise settings
        #lin_vel_uncert_amplitude = 0.0
        #ang_vel_uncert_amplitude = 0.0
        v_noisy = np.zeros_like(v_real)
        v_noisy[0:3] = v_real[0:3] + np.array([np.random.rand()*lin_vel_uncert_amplitude,
                                               np.random.rand()*lin_vel_uncert_amplitude,
                                               np.random.rand()*lin_vel_uncert_amplitude])
        v_noisy[3:7] = v_real[3:7] + np.array([np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude,
                                               np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude,
                                               np.random.rand()*np.pi/180.0*ang_vel_uncert_amplitude])
        return v_noisy
    def integral(self, x_dot, x, t) :
        """ Computes the integral of x_dot dt: one explicit Euler step from x """
        x_ = (x_dot * t) + x
        #print(x_)
        return x_
# +
## The main loop of the simulation
num_actuators = 8
# Settings of the simulation time in seconds
period = 0.01
t_max = 100.0
t_max_ = t_max/2.0
# Each trial gives a start depth 'p_a' and a desired depth 'd_a' (metres).
initcond = [{'p_a':0.0,'d_a':5.0,},
            {'p_a':0.0,'d_a':10.0,},
            {'p_a':0.0,'d_a':15.0,},
            {'p_a':0.0,'d_a':20.0,},
            {'p_a':10.0,'d_a':25.0,},
            {'p_a':10.0,'d_a':35.0,},
            {'p_a':15.0,'d_a':50.0,},
            {'p_a':50.0,'d_a':45.0,},
            {'p_a':50.0,'d_a':40.0,},
            {'p_a':50.0,'d_a':35.0,},
            {'p_a':50.0,'d_a':30.0,},
            {'p_a':50.0,'d_a':25.0,},
            {'p_a':50.0,'d_a':20.0,},
            {'p_a':50.0,'d_a':0.0,},]
pltnames = []
for trial in initcond:
    # Input on initial conditions:
    prev_altitude = trial['p_a']
    #desired_altitud = -34
    desired_altitud = trial['d_a']
    # Setting initial conditions:
    #p = np.array(np.zeros(6))
    p = np.array([0.0,0.0,prev_altitude,0.0,0.0,0.0])
    v = np.array(np.zeros(6))
    acc = np.array(np.zeros(6))
    # Not used?
    time = np.arange(0.0,t_max,period)
    time2 = np.arange(0.0,t_max_,period)
    # Initialise the position/velocity storage variable to the initial values
    p_log = p
    v_log = v
    # NOTE(review): logs are seeded with 0.0, not prev_altitude, so the first
    # plotted sample is always 0 — confirm this is intended.
    altitud_log = 0.0
    w_velocity_log = 0.0
    #Here you can change the resulting force for each degree of freedom
    tau = np.array([[0], [0], [0], [0], [0], [0]]) # Force vector
    bluerov = Bluerov(p, v, acc, tau)
    #u = bluerov.control_allocation(tau) #This function return the thruster's control input
    # Cascade PID setup: z_controller is the inner (velocity) loop,
    # z_head_controller the outer (altitude) loop.
    z_controller = Controller(15, 0.0, 0.1, 0)
    z_head_controller = Controller(5.0, 0.0, 5.3, 0)
    z_controller.setWindup(20)
    z_head_controller.setWindup(20)
    count = 0
    w_ref = 0.0
    w_error = 0.0
    altitude = prev_altitude
    w_velocity = 0.0
    p_sens = np.array(np.zeros(6))
    v_sens = np.array(np.zeros(6))
    # Fixed-step simulation loop at 1/period = 100 Hz.
    for tstep in time[1:]:
        # Sensors
        #p_sens = bluerov.position_sensing(p)
        #v_sens = bluerov.velocity_sensing(v)
        #altitude = p_sens[2]
        #w_velocity = v_sens[2]
        count = count + 1
        # Every second step (50 Hz): sample sensors, run the outer altitude
        # loop, and integrate the vehicle state.
        if (count % 2) == 0:
            # Sensors
            p_sens = bluerov.position_sensing(p)
            v_sens = bluerov.velocity_sensing(v)
            altitude = p_sens[2]
            w_velocity = v_sens[2]
            ## External loop Cascade Controller 50Hz
            altitude_error = altitude - desired_altitud
            w_ref = z_head_controller.Update(altitude_error, tstep)
            # NOTE(review): v_dot here is the value computed on the previous
            # 100 Hz step — and on the first pass of every trial after the
            # first it is left over from the previous trial. Confirm intended.
            v_dot = np.squeeze(np.asarray(v_dot))
            v = bluerov.integral(v_dot, v, period)
            p_dot = bluerov.kinematics(v,p)
            p = bluerov.integral(p_dot, p, period)
        ## Internal loop controller (Nonlinear decoupling) 100Hz
        w_error = w_ref - w_velocity
        linear_z_tau = z_controller.altitud_controller(w_error, tstep, p_sens, v_sens)
        linear_tau = np.array([[0], [0], [linear_z_tau], [0], [0], [0]]) # Force vector
        ##This function return the thruster's control input
        u = bluerov.control_allocation(linear_tau)
        ##Force/Torque input
        tau = bluerov.thruster_system(u)
        v_dot = bluerov.model(p, v, acc, tau)
        acc = v_dot
        #v_dot = np.squeeze(np.asarray(v_dot))
        #v = bluerov.integral(v_dot, v, period)
        #p_dot = bluerov.kinematics(v,p)
        #p = bluerov.integral(p_dot, p, period)
        # Debug print of the final ~10 samples for the d_a == 35 trials only.
        if tstep >= (t_max - period*10):
            if desired_altitud == 35.0:
                print("% 6.2f " % (altitude))
        # Saving the simulated values for later plotting
        altitud_log = np.vstack((altitud_log, altitude))
        w_velocity_log = np.vstack((w_velocity_log, w_velocity))
    # The plotting: depth trace (with desired depth as a dashed line) on top,
    # vertical velocity below; one PNG per trial.
    fig, axs = plt.subplots(2,1, sharex='col')
    axs[0].plot(time, altitud_log)
    axs[0].plot([time[0], time[-1]], [trial['d_a'], trial['d_a']], color='k', linestyle='--', linewidth=1)
    axs[0].set_ylim([-10.0,60.0])
    axs[0].set_title('Position, start depth=%.2f, desir. depth=%.2f'%(trial['p_a'],trial['d_a']))
    axs[0].legend(['z',], loc="center left")
    axs[1].plot(time, w_velocity_log)
    axs[1].set_ylim([-2.5,2.5])
    axs[1].set_title('Speed start depth=%.2f, desir. depth=%.2f'%(trial['p_a'],trial['d_a']))
    axs[1].legend(['w'], loc="center left")
    plt.savefig('plot-p_a=%f,d_a=%f.png'%(trial['p_a'],trial['d_a']), dpi=150)
    pltnames.append('plot-p_a=%f,d_a=%f.png'%(trial['p_a'],trial['d_a']))
# -
| fossen_depth_controller/bluerov_model_v1.6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exploration of olympic history data
# - Download the zip all CSVs from here: [120 years of Olympic history: athletes and results](https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results).
# - Read the [dataset overview](https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results/home)
# - Use pandas.ipynb from the videos as a reference - https://github.com/justmarkham/pandas-videos/
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load athlete_events.csv into a datatrame called `df`
df = pd.read_csv('athlete_events.csv')
# Note, in this case the CSV file is in the same folder as the Jupyter notebook.
# You can use the !dir command to see the path to this notebook and other files in the same dir.
# !dir
df.head()
df.describe()
# ### Youngest and oldest participants
# Note that the youngest participant was 10 and the oldest 97! Find who they are and what sports they competed in.
# - hint: read about idxmin() function
# Youngest participant
# Note that We must use `loc` to access the row, otherwise df[123] is trying to find a column called 123 and fails
df.loc[df.Age.idxmin()]
# Oldest participant
# Note that df.Age and df['Age'] are the same thing
df.loc[df['Age'].idxmax()]
# ### How many unique athletes are listed in the dataset?
df.ID.nunique()
# Note that number of unique names is slightly lower. A bonus question, what's the most popular full name in the dataset?
df.Name.nunique()
# ### How many unique athletes got any medals?
df[df.Medal.notnull()].ID.nunique()
# ### How many medals of each type were awarded?
df.groupby('Medal').ID.count()
# Note that we could replace ID with any field that has no missing values, the count would be the same.
# Checking that ID indeed has no missing values.
df.ID.isnull().any()
# ### What's the newest olympic sport in this dataset?
year_sport_first_appeared = df.groupby('Sport').Year.min()
year_sport_first_appeared.head()
# Now we can either sort by the year and list several newest sports
year_sport_first_appeared.sort_values(ascending=False).head()
# Or just get the sport where the year of first appearance is the highest using idxmax()
# Note, that after groupby('Sports').min() the index of the resulting DataFrame is Sports - the field groupped by,
# therefore idxmax returns the sports name.
year_sport_first_appeared.idxmax()
# ### What sport had the most participants in the latest summer games?
# When were the last summer games
df[df.Season=='Summer'].Year.max()
# Let's keep the subset of data about the last summer games in a separate DataFrame
last_summer = df[(df.Year==2016) & (df.Season=='Summer')]
# The check for season==summer is redundant because nowadays the summer and winter
# games don't happen in the same year, but was not always the case.
last_summer.head()
last_summer.groupby('Sport').ID.count().sort_values(ascending=False)
# Again, we could use idxmax(), but this way we get a better feel for relative popularity of
# different sports and what sports there are.
# ### List all the cities that hosted any olympic games more than once
games_count = df.groupby('City').Year.nunique()
games_count.head()
games_count[games_count>=2]
# ### Plot histograms of Age, Height and Weight of athletes
df.hist(['Age', 'Height'], bins=30)
# We can first select the column(s) and then call .hist() on the resulting pd.Series object, both ways work ok
df.Weight.hist()
# ### Plot the histograms again, but separately for men and women
df_f = df[df.Sex=='F']
df_m = df[df.Sex=='M']
df_f.Height.hist(alpha=0.5, label='F')
df_m.Height.hist(alpha=0.5, label='M')
plt.legend()
# There is also the `by` keyword, but in this case it's more informative to have both histograms on the same axes.
df.hist('Age', by='Sex')
# In the next lectures we will talk a bit more about Seaborn library which provides a slightly
# better looking version of such histograms via sns.distplot()
# Displot is normalized to the total number of points, this way we don't get the large difference
# between the total heights of the histograms which makes it easier to concentrate on the difference
# in distributions of athlete weight.
sns.distplot(df_f.Weight.dropna(), label='F')
sns.distplot(df_m.Weight.dropna(), label='M')
plt.legend()
# The pink text is a deprecation warning from Matplotlib which should be fixed in future versions of Seaborn.
# ### Here is an attempt to plot the number of participants as a function of year. What happened in the mid 1990s that makes this graph look so weird? Plot a more useful graph (or several)
df.groupby(['Year'])['ID'].count().plot()
plt.grid()
# The summer and winter games were held in the same year until 1994.
# Until 1994 we get the total number of participants from both winter and summer games of that year.
# After 1994 we see the line jumping between the alternating number for summer and winter games.
# Here is a plot of summer and winter games separately
for season in ['Summer', 'Winter']:
df[df.Season==season].groupby('Year').ID.count().plot(label=season, marker='.')
plt.legend()
plt.grid()
# ### Plot the average height of participants as a function of year - did they grow taller during the last century? (population on average did)
# Average athlete height per games year
df.groupby('Year').Height.mean().plot()
plt.ylabel('Average height of participants')  # fixed label typo ("Averabe")
# Looks like they didn't grow
# There seems to be a notable dip in 1960
# And we can see again the alternating values after 1994 for the winter and summer games
# But the variance of heights did grow quite a lot. Except it was interestingly high in the first games.
# Something that might be interesting to investigate (could be due to the much lower number of participants in the first years)
df[df.Season=='Summer'].groupby('Year').Height.std().plot(marker='.')
# An illustration of the growing variance of athlete heights
# This is a fairly poor illustration, though (suggest your improvements)
df.plot(x='Year', y='Height', marker='.', linestyle='', alpha=0.1)
# We will talk about box and violin plots later. This box plot clearly shows
# that the variance of height in summer games is much larger compared to winter games.
sns.boxplot(data=df[df.Year>1990], x='Year', y='Height')
# Height in different sports since 1980
heights = df[df.Height.notnull()].groupby('Sport').Height.mean().sort_values()
heights
| ML1/exercises/olympic_history_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
###
# THIS IS REQUIRED IN A NOTEBOOK TO RESET THE WORKING DIRECTORY
# FROM THE DEFAULT WORKING DIRECTORY ESTABLISHED FOR THE NOTEBOOK
#
# NOTE: BEFORE RUNNING PYTHON CODE GENERATED FROM THIS NOTEBOOK,
# THIS CODE CELL SHOULD BE DELETED.
###
import os
os.chdir('../..') # change to ROOT_DIR specified in the configuration file
#%%
from msw.model_stacking import ModelTrainer, ModelPerformanceTracker
from sklearn.ensemble import RandomForestClassifier as ThisModel
# +
#%%
#
# Set up model for training
#
this_model = ModelTrainer(
ModelClass=ThisModel, #Model algorithm
model_params=dict(n_estimators=20,n_jobs=-1), #hyper-parameters
test_prediction_method = 'all_data_model',
model_id='L0RF2_NB', # Model Identifier
feature_set='KFSBSLN' # feature set to use
)
model_tracker = ModelPerformanceTracker(model_trainer=this_model)
# -
#%%
#
# clear out old results
#
this_model.cleanPriorResults()
#%%
#
# train model on all the data
#
this_model.trainModel()
#%%
# create Test predictions
this_model.createTestPredictions()
#%%
#
# create Kaggle submission
#
this_model.createKaggleSubmission()
# +
#%%
#
# record model performance metrics
#
model_tracker.recordModelPerformance()
#%%
# -
| samples/classification/models/L0RF2_NB/train_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + dc={"key": "3"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 1. Of cats and cookies
# <p><a href="https://www.facebook.com/cookiecatsgame">Cookie Cats</a> is a hugely popular mobile puzzle game developed by <a href="http://tactile.dk">Tactile Entertainment</a>. It's a classic "connect three"-style puzzle game where the player must connect tiles of the same color to clear the board and win the level. It also features singing cats. We're not kidding! Check out this short demo:</p>
# <p><a href="https://youtu.be/GaP5f0jVTWE"><img src="https://assets.datacamp.com/production/project_184/img/cookie_cats_video.jpeg" style="width: 500px"></a></p>
# <p>As players progress through the levels of the game, they will occasionally encounter gates that force them to wait a non-trivial amount of time or make an in-app purchase to progress. In addition to driving in-app purchases, these gates serve the important purpose of giving players an enforced break from playing the game, hopefully resulting in that the player's enjoyment of the game being increased and prolonged.</p>
# <p><img src="https://assets.datacamp.com/production/project_184/img/cc_gates.png" alt></p>
# <p>But where should the gates be placed? Initially the first gate was placed at level 30, but in this notebook we're going to analyze an AB-test where we moved the first gate in Cookie Cats from level 30 to level 40. In particular, we will look at the impact on player retention. But before we get to that, a key step before undertaking any analysis is understanding the data. So let's load it in and take a look!</p>
# + dc={"key": "3"} tags=["sample_code"]
# Importing pandas
import pandas as pd
# Reading in the data
df = pd.read_csv('datasets/cookie_cats.csv')
# Showing the first few rows
df.head()
# + dc={"key": "10"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 2. The AB-test data
# <p>The data we have is from 90,189 players that installed the game while the AB-test was running. The variables are:</p>
# <ul>
# <li><code>userid</code> - a unique number that identifies each player.</li>
# <li><code>version</code> - whether the player was put in the control group (<code>gate_30</code> - a gate at level 30) or the group with the moved gate (<code>gate_40</code> - a gate at level 40).</li>
# <li><code>sum_gamerounds</code> - the number of game rounds played by the player during the first 14 days after install.</li>
# <li><code>retention_1</code> - did the player come back and play <strong>1 day</strong> after installing?</li>
# <li><code>retention_7</code> - did the player come back and play <strong>7 days</strong> after installing?</li>
# </ul>
# <p>When a player installed the game, he or she was randomly assigned to either <code>gate_30</code> or <code>gate_40</code>. As a sanity check, let's see if there are roughly the same number of players in each AB group. </p>
# + dc={"key": "10"} tags=["sample_code"]
# Counting the number of players in each AB group.
df.groupby('version')['userid'].count()
# + dc={"key": "17"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 3. The distribution of game rounds
# <p><img src="https://assets.datacamp.com/production/project_184/img/mr_waffles_smiling.png" style="width:200px; float:left"> </p>
# <p>It looks like there is roughly the same number of players in each group, nice!</p>
# <p>The focus of this analysis will be on how the gate placement affects player retention, but just for fun: Let's plot the distribution of the number of game rounds players played during their first week playing the game.</p>
# + dc={"key": "17"} tags=["sample_code"]
# This command makes plots appear in the notebook
# %matplotlib inline
# Counting the number of players for each number of game rounds
plot_df = df.groupby('sum_gamerounds')['userid'].count()
# Plotting the distribution of players that played 0 to 100 game rounds
ax = plot_df.head(n=100).plot(x="sum_gamerounds", y="userid", kind="hist")
ax.set_xlabel("Game Rounds")
ax.set_ylabel("User Count")
# + dc={"key": "24"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 4. Overall 1-day retention
# <p>In the plot above we can see that some players install the game but then never play it (0 game rounds), some players just play a couple of game rounds in their first week, and some get really hooked!</p>
# <p>What we want is for players to like the game and to get hooked. A common metric in the video gaming industry for how fun and engaging a game is <em>1-day retention</em>: The percentage of players that comes back and plays the game <em>one day</em> after they have installed it. The higher 1-day retention is, the easier it is to retain players and build a large player base. </p>
# <p>As a first step, let's look at what 1-day retention is overall.</p>
# + dc={"key": "24"} tags=["sample_code"]
# The % of users that came back the day after they installed
df['retention_1'].sum() / df['retention_1'].count()
# + dc={"key": "31"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 5. 1-day retention by AB-group
# <p><img src="https://assets.datacamp.com/production/project_184/img/belle_cookie.png" style="width:200px; float:right"> </p>
# <p>So, a little less than half of the players come back one day after installing the game. Now that we have a benchmark, let's look at how 1-day retention differs between the two AB-groups.</p>
# + dc={"key": "31"} tags=["sample_code"]
# Calculating 1-day retention for each AB-group
df.groupby('version')['retention_1'].sum() / df.groupby('version')['userid'].count()
# + dc={"key": "38"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 6. Should we be confident in the difference?
# <p>It appears that there was a slight decrease in 1-day retention when the gate was moved to level 40 (44.2%) compared to the control when it was at level 30 (44.8%). It's a small change, but even small changes in retention can have a large impact. But while we are certain of the difference in the data, how certain should we be that a gate at level 40 will be worse in the future?</p>
# <p>There are a couple of ways we can get at the certainty of these retention numbers. Here we will use bootstrapping: We will repeatedly re-sample our dataset (with replacement) and calculate 1-day retention for those samples. The variation in 1-day retention will give us an indication of how uncertain the retention numbers are.</p>
# + dc={"key": "38"} tags=["sample_code"]
# Creating an list with bootstrapped means for each AB-group
boot_1d = []
for i in range(500):
boot_mean = df.sample(frac=1, replace=True).groupby('version')['retention_1'].mean()
boot_1d.append(boot_mean)
# Transforming the list to a DataFrame
boot_1d = pd.DataFrame(boot_1d)
# A Kernel Density Estimate plot of the bootstrap distributions
boot_1d.plot(kind='kde')
# + dc={"key": "46"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 7. Zooming in on the difference
# <p>These two distributions above represent the bootstrap uncertainty over what the underlying 1-day retention could be for the two AB-groups. Just eyeballing this plot, we can see that there seems to be some evidence of a difference, albeit small. Let's zoom in on the difference in 1-day retention</p>
# <p>(<em>Note that in this notebook we have limited the number of bootstrap replication to 500 to keep the calculations quick. In "production" we would likely increase this to a much larger number, say, 10 000.</em>)</p>
# + dc={"key": "46"} tags=["sample_code"]
# Adding a column with the % difference between the two AB-groups
boot_1d['diff'] = (boot_1d['gate_30'] - boot_1d['gate_40']) / boot_1d['gate_40'] * 100
# Ploting the bootstrap % difference
ax = boot_1d['diff'].plot(kind = 'kde')
ax.set_xlabel("% difference in means")
# + dc={"key": "53"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 8. The probability of a difference
# <p><img src="https://assets.datacamp.com/production/project_184/img/ziggy_smiling.png" style="width:200px; float:left"> </p>
# <p>From this chart, we can see that the most likely % difference is around 1% - 2%, and that most of the distribution is above 0%, in favor of a gate at level 30. But what is the <em>probability</em> that the difference is above 0%? Let's calculate that as well.</p>
# + dc={"key": "53"} tags=["sample_code"]
# Calculating the probability that 1-day retention
# is greater when the gate is at level 30.
prob = (boot_1d['diff'] > 0).sum() / len(boot_1d)
# Pretty printing the probability
'{:.1%}'.format(prob)
# + dc={"key": "60"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 9. 7-day retention by AB-group
# <p>The bootstrap analysis tells us that there is a high probability that 1-day retention is better when the gate is at level 30. However, since players have only been playing the game for one day, it is likely that most players haven't reached level 30 yet. That is, many players won't have been affected by the gate, even if it's as early as level 30. </p>
# <p>But after having played for a week, more players should have reached level 40, and therefore it makes sense to also look at 7-day retention. That is: What percentage of the people that installed the game also showed up a week later to play the game again.</p>
# <p>Let's start by calculating 7-day retention for the two AB-groups.</p>
# + dc={"key": "60"} tags=["sample_code"]
# Calculating 7-day retention for both AB-groups
df.groupby('version')['retention_7'].sum() / df.groupby('version')['userid'].count()
# + dc={"key": "67"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 10. Bootstrapping the difference again
# <p>Like with 1-day retention, we see that 7-day retention is slightly lower (18.2%) when the gate is at level 40 than when the gate is at level 30 (19.0%). This difference is also larger than for 1-day retention, presumably because more players have had time to hit the first gate. We also see that the <em>overall</em> 7-day retention is lower than the <em>overall</em> 1-day retention; fewer people play a game a week after installing than a day after installing.</p>
# <p>But as before, let's use bootstrap analysis to figure out how certain we should be of the difference between the AB-groups.</p>
# + dc={"key": "67"} tags=["sample_code"]
# Creating a list with bootstrapped means for each AB-group.
# Same procedure as for 1-day retention above, but on retention_7.
boot_7d = []
for i in range(500):
    boot_mean = df.sample(frac=1, replace=True).groupby('version')['retention_7'].mean()
    boot_7d.append(boot_mean)
# Transforming the list to a DataFrame
boot_7d = pd.DataFrame(boot_7d)
# Adding a column with the % difference between the two AB-groups
boot_7d['diff'] = (boot_7d['gate_30'] - boot_7d['gate_40']) / boot_7d['gate_40'] * 100
# Ploting the bootstrap % difference
ax = boot_7d['diff'].plot(kind='kde')
ax.set_xlabel("% difference in means")
# Calculating the probability that 7-day retention is greater when the gate is at level 30
prob = (boot_7d['diff'] > 0).sum() / len(boot_7d)
# Pretty printing the probability
'{:.1%}'.format(prob)
# + dc={"key": "74"} deletable=false editable=false run_control={"frozen": true} tags=["context"]
# ## 11. The conclusion
# <p>The bootstrap result tells us that there is strong evidence that 7-day retention is higher when the gate is at level 30 than when it is at level 40. The conclusion is: If we want to keep retention high — both 1-day and 7-day retention — we should <strong>not</strong> move the gate from level 30 to level 40. There are, of course, other metrics we could look at, like the number of game rounds played or how much in-game purchases are made by the two AB-groups. But retention <em>is</em> one of the most important metrics. If we don't retain our player base, it doesn't matter how much money they spend in-game.</p>
# <p><img src="https://assets.datacamp.com/production/project_184/img/cookie_yellow.png" style="width:100px; float:center"> </p>
# <p>So, why is retention higher when the gate is positioned earlier? One could expect the opposite: The later the obstacle, the longer people are going to engage with the game. But this is not what the data tells us. The theory of <em>hedonic adaptation</em> can give one explanation for this. In short, hedonic adaptation is the tendency for people to get less and less enjoyment out of a fun activity over time if that activity is undertaken continuously. By forcing players to take a break when they reach a gate, their enjoyment of the game is prolonged. But when the gate is moved to level 40, fewer players make it far enough, and they are more likely to quit the game because they simply got bored of it. </p>
# + dc={"key": "74"} tags=["sample_code"]
# So, given the data and the bootstrap analysis
# Should we move the gate from level 30 to level 40 ?
# Both 1-day and 7-day retention are higher with the gate at level 30,
# so the gate should NOT be moved.
move_to_level_40 = False
| Mobile Games A/B Testing with Cookie Cats/notebook.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0-DEV
# language: julia
# name: julia-1.6
# ---
# # Algebraic Manipulation
#
#
# Here is a small sample of algebraic manipulation functions in `Symata`.
using Symata # load Symata and enter symata mode
OutputStyle(JupyterForm)
# ### Common subexpression elimination
ex = (x-y)*(z-y) + Sqrt((x-y)*(z-y))
# `Cse(expr)` recursively replaces subexpressions that occur more than once in `expr` with names. The transformed expression is returned with a list of rules that can be used to recover `expr`.
Cse(ex)
# Applying in order the replacement rules in the second list to the expression in the first list results in the original expression.
#
# To apply the rules, we will use `Splat`, which works like this,
f(a,b,Splat([c,d]))
# and `Fold`, which works like this,
Fold(f, [x,a,b,c])
# Apply the replacement rules like this,
Fold(ReplaceAll, Splat(Cse(ex)))[1]
# Check that the reconstructed expression is equal to the original expression.
Fold(ReplaceAll, Splat(Cse(ex)))[1] == ex
ClearAll(ex)
# ### `Together` and `Apart`
# `Together` rewrites rational expressions as a single fraction.
Together(1/x + 1/y + 1/z)
Together(1/(x*y) + 1/y^2)
Together(1/(1 + 1/x) + 1/(1 + 1/y))
# By default, `Together` only works at the topmost level.
Together(Exp(1/x + 1/y))
# `Together` is applied at all levels if the option `Deep` is true.
Together(Exp(1/x + 1/y), Deep => True)
# `Apart` gives the partial fraction decomposition of a rational expression
Apart(y/(x + 2)/(x + 1), x)
# If the denominator has non-rational roots, the option ``Full => True`` must be given.
Apart(y/(x^2 + x + 1), x, Full=>True)
# ### `Collect`
#
# Collect coefficients of powers of `x`.
Collect(a*x^2 + b*x^2 + a*x - b*x + c, x)
# Collect coefficients of an expression.
Collect(a*x*Log(x) + (b+a)*(x*Log(x)), x*Log(x))
# ### Version and date
VersionInfo()
InputForm(Now())
# ### Original Version and date
# VersionInfo()
#
# symata version 0.3.0-dev.7
# julia version 0.6.0-dev.435
# python version 2.7.12
# sympy version 1.0
# Now()
# 2016−11−28T22:34:33.713
| TutorialNotebooks/AlgebraicTransformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
In this notebook, the bell state circuit and deutsch algorithm will be modeled using both IBM Qiskit and pgmpy.
The results are compared at the end.
"""
# +
# Imports (some imports are probably not necessary)
import numpy as np
from qiskit import IBMQ, Aer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, assemble, transpile
from qiskit.visualization import plot_histogram
import qiskit.quantum_info as qi
# Here, we construct the bell state circuit using Qiskit
# The bit input string allows us to choose an initialization of either |00>, |01>, |10>, or |11>
inputString = "10"
circ = QuantumCircuit(2,2)
if(inputString == "01"):
circ.x(1)
elif(inputString == "10"):
circ.x(0)
elif(inputString == "11"):
circ.x(0)
circ.x(1)
# The rest of the circuit transforms the qubits into a bell state pair
circ.barrier()
circ.h(0)
circ.cx(0,1)
circ.barrier()
circ.draw()
# +
# Here, we obtain the state vector for the system after the cx gate
BellState = qi.Statevector.from_instruction(circ)
BellStateVec = BellState.__array__()
BellStateDM = (qi.DensityMatrix.from_instruction(circ)).__array__()
# The format of the state vector is [|00>, |10>, |01>, |11>]
print(BellStateVec)
print(BellStateDM)
# +
# Here, we model the bell state circuit using pgmpy
# NOTE: machine-specific sys.path hack pointing at a local pgmpy checkout —
# remove or comment out the two lines below if pgmpy is installed as a package.
import sys
sys.path.insert(0, 'C:\\Users\\masch\\QuantumComputing\\QCompMAS\\pgmpy')
# Imports
from pgmpy.models import BayesianNetwork
from pgmpy.factors.discrete.CPD import TabularCPD
import numpy as np
from pgmpy.inference import VariableElimination
# Initialize Bayesian Network representing bell state circuit
# Each random variable is labeled qXmY, which represents the state of qubit qX at moment Y.
# The moments match with the circuit diagram in cell 2, starting with moment 0, 1, 2, etc.
# Links between random variables are present if there is a dependence between the qubit states.
bellState = BayesianNetwork([('q0m0', 'q0m1'), ('q0m1', 'q0m2'), ('q1m0', 'q1m1'), ('q1m1', 'q1m2'), ('q0m1', 'q1m2')])
"""
Conditional Amplitude Distribution (CAD) for each qubit state
For this circuit, each CAD consists of:
variable name (qubit state, see above). For instance, q0m1
variable cardinality (always 2, representing amplitudes for |0> and |1> for the qubit state of interest)
values - consists of a list of two lists [[...],[...]], the first list corresponds to |0>, the second to |1>
For the first list, the entries correspond to the resulting state of the qubit state of interest, given
a certain input state of evidence variables (in this case, previous qubit states).
(for q0m1): If the evidence is one qubit state q0m0, the values entry is:
[[A(|q0m1> = |0> given |q0m0> = |0>, A(|q0m1> = |0> given |q0m0> = |1>)], [A(|q0m1> = |1> given |q0m0> = |0>, A(|q0m1> = |1> given |q0m0> = |1>)]
evidence (list of qubit states for which the current qubit state depends on)
evidence cardinality (list of 2s, based upon how many evidence qubits there are)
"""
# Initialization
# Each initial CAD is a column of amplitudes [[A(|0>)], [A(|1>)]] matching
# the chosen computational-basis input string (left character = qubit 0,
# right character = qubit 1, consistent with the Qiskit cell above).
if(inputString == "00"):
    cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[1], [0]])
    cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[1], [0]])
elif(inputString == "01"):
    cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[1], [0]])
    cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[0], [1]])
elif(inputString == "10"):
    cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[0], [1]])
    cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[1], [0]])
elif(inputString == "11"):
    cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[0], [1]])
    cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[0], [1]])
elif(inputString == "custom"):
    # Arbitrary single-qubit amplitudes — demonstrates complex entries.
    cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[1j], [0]])
    cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[1/np.sqrt(2)], [-1j/np.sqrt(2)]])
# q0m1: Hadamard on qubit 0 — the value columns are the columns of H.
cpd_q0m1 = TabularCPD(variable='q0m1', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)]], evidence = ['q0m0'], evidence_card = [2])
# q1m1: identity — qubit 1 is untouched at this moment.
cpd_q1m1 = TabularCPD(variable = 'q1m1', variable_card = 2, values = [[1, 0], [0,1]], evidence = ['q1m0'], evidence_card = [2])
# q1m2: CNOT truth table, q0m1 acting as control and q1m1 as target.
cpd_q1m2 = TabularCPD(variable='q1m2', variable_card = 2, values = [[1,0,0,1], [0,1,1,0]], evidence = ['q0m1', 'q1m1'], evidence_card = [2,2])
# q0m2: identity — the control qubit passes through the CNOT unchanged.
cpd_q0m2 = TabularCPD(variable='q0m2', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q0m1'], evidence_card = [2])
# Add the CADs to the Bayesian Network, and perform variable elimination to simulate the circuit.
bellState.add_cpds(cpd_q0m0, cpd_q1m0, cpd_q0m1, cpd_q0m2, cpd_q1m2, cpd_q1m1)
bellStateInfer = VariableElimination(bellState)
q1 = bellStateInfer.query(['q1m2', 'q0m2'])
# Printing the results will display the state of the qubit system at the end of the circuit
# Note that the ordering may not be desirable. This is dealt with below.
print(q1)
# +
# Obtain the ordering of the variables in the display above, as well as their values
q1Vars = q1.variables
q1Values = q1.values
print(q1Vars)
print(q1Values)
# +
# Here, we construct the deutsch algorithm using Qiskit
# The bit input string allows us to choose a specific function: "ab" -> f(0) = a, f(1) = b
inputString = "01"
# Circuit setup
circ = QuantumCircuit(2,2)
circ.barrier()
circ.h(0)     # put the query qubit into superposition
circ.x(1)     # prepare the ancilla in |1> ...
circ.h(1)     # ... then Hadamard it into |->
circ.barrier()
# Function implementation
# Each branch builds the oracle for the chosen f; "00" matches no branch,
# leaving the oracle as the identity.
if(inputString == "01"):
    circ.cx(0,1)
elif(inputString == "10"):
    circ.cx(0,1)
    circ.x(1)
elif(inputString == "11"):
    circ.cx(0,1)
    circ.x(1)
    circ.cx(0,1)
# End of Circuit
circ.barrier()
circ.h(0)     # final Hadamard on the query qubit
circ.draw()
# +
# Here, we obtain the state vector for the system after the hadamard gate
dj = qi.Statevector.from_instruction(circ)
djVec = dj.__array__()
djDM = (qi.DensityMatrix.from_instruction(circ)).__array__()
# The format of the state vector is [|00>, |10>, |01>, |11>]
print(djVec)
print(djDM)
# +
# Deutsch Algorithm using pgmpy
# In this case, q1m2 = q1m3, so q1m3 is not incorporated here
dj = BayesianNetwork([('q0m0', 'q0m1'), ('q1m0', 'q1m1'), ('q0m1', 'q1m2'), ('q0m1', 'q0m2'), ('q0m2', 'q0m3'), ('q1m1', 'q1m2')])
# Function determined by inputString (see above)
# Each table is the oracle's action on the target qubit with evidence
# (q0m1, q1m1); presumably the four columns enumerate the evidence states
# 00,01,10,11 in pgmpy's convention — TODO confirm against TabularCPD docs.
if(inputString == "00"):
    cad = [[1,0,1,0],[0,1,0,1]]
elif(inputString == "01"):
    cad = [[1,0,0,1],[0,1,1,0]]
elif(inputString == "10"):
    cad = [[0,1,1,0],[1,0,0,1]]
elif(inputString == "11"):
    cad = [[0,1,0,1],[1,0,1,0]]
# Fixed inputs: q0 = |0> (query qubit), q1 = |1> (ancilla).
cpd_q0m0 = TabularCPD(variable = 'q0m0', variable_card = 2, values = [[1], [0]])
cpd_q1m0 = TabularCPD(variable = 'q1m0', variable_card = 2, values = [[0], [1]])
# Hadamards on both qubits at moment 1, and on qubit 0 again at moment 3.
cpd_q0m1 = TabularCPD(variable='q0m1', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), 1/-np.sqrt(2)]], evidence = ['q0m0'], evidence_card = [2])
cpd_q1m1 = TabularCPD(variable='q1m1', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), 1/-np.sqrt(2)]], evidence = ['q1m0'], evidence_card = [2])
# Identity wire for the query qubit through the oracle.
cpd_q0m2 = TabularCPD(variable='q0m2', variable_card = 2, values = [[1, 0], [0, 1]], evidence = ['q0m1'], evidence_card = [2])
cpd_q1m2 = TabularCPD(variable='q1m2', variable_card = 2, values = cad, evidence = ['q0m1', 'q1m1'], evidence_card = [2,2])
cpd_q0m3 = TabularCPD(variable = 'q0m3', variable_card = 2, values = [[1/np.sqrt(2), 1/np.sqrt(2)], [1/np.sqrt(2), 1/-np.sqrt(2)]], evidence = ['q0m2'], evidence_card = [2])
dj.add_cpds(cpd_q0m0, cpd_q1m0, cpd_q0m1, cpd_q0m2, cpd_q1m2, cpd_q1m1, cpd_q0m3)
djInfer = VariableElimination(dj)
q2 = djInfer.query(['q0m3','q1m2'])
print(q2)
# +
# Obtain the ordering of the variables in the display above, as well as their values
q2Vars = q2.variables
q2Values = q2.values
print(q2Vars)
print(q2Values)
# +
def bitListBack(n):
    """Return the 2**n binary strings of width n, each bit-reversed.

    Index i maps to format(i, '0<n>b') read right-to-left; the reversal
    matches Qiskit's little-endian ordering of basis-state labels.
    """
    width = "0" + str(n) + "b"
    return [format(i, width)[::-1] for i in range(2 ** n)]
def QiskitDict(stateVec, n):
    """Map each bit-reversed basis label to its amplitude in stateVec,
    rounded to 4 decimal places."""
    labels = bitListBack(n)
    return {label: np.round(stateVec[k], 4) for k, label in enumerate(labels)}
print("BellStateCircuit")
print(QiskitDict(BellStateVec,2))
# -
print("Deutsch Algorithm")
print(QiskitDict(djVec,2))
# +
# Obtain the ordering of the variables in the display above, as well as their values
valArr = q1.variables
valuesArr = q1.values
def create_var_order(orderArr):
    """Extract the qubit index (second character) of each 4-character
    variable name such as 'q0m2'.

    Names of any other length repeat the most recently seen index
    (the integer 0 before any 4-character name has appeared), matching
    the original carry-forward behavior.
    """
    indices = []
    latest = 0
    for name in orderArr:
        if len(name) == 4:
            latest = name[1]
        indices.append(latest)
    return indices
def bitList(n):
    """Return all 2**n zero-padded binary strings of width n, ascending."""
    width = "0" + str(n) + "b"
    return [format(i, width) for i in range(2 ** n)]
def columnize(listOfBits):
    """Transpose a list of equal-length bit strings into one list of
    characters per string position (column)."""
    width = len(listOfBits[0])
    return [[bits[pos] for bits in listOfBits] for pos in range(width)]
def reform():
    """Rebuild basis-state labels for the flattened query values.

    Reorders the bit columns of the natural binary enumeration according
    to the qubit indices extracted from the module-level ``valArr`` (the
    pgmpy query's variable ordering), then reassembles one label string
    per row.
    """
    varOrderArr = create_var_order(valArr)   # qubit index (as a char) per query variable
    listOfBits = bitList(len(varOrderArr))   # '00', '01', ... in ascending order
    columns = columnize(listOfBits)          # one column of bits per string position
    rearrangedColumns = [None]*len(columns)
    # Read the bit column of qubit `order` into output slot `index`.
    # NOTE(review): confirm this is the intended direction of the
    # permutation once more than two variables are queried.
    for index, order in enumerate(varOrderArr):
        rearrangedColumns[index] = columns[int(order)]
    numOfCols = len(rearrangedColumns)
    bitStr = ""
    finalBitArr = []
    # Concatenate the rearranged columns row by row into label strings.
    for bitIndex in range(len(rearrangedColumns[0])):
        for num in range(numOfCols):
            bitStr+=str(rearrangedColumns[num][bitIndex])
        finalBitArr.append(bitStr)
        bitStr = ""
    return finalBitArr
def createHashTable():
    """Build {basis-state label: rounded amplitude} for the module-level
    query result, pairing reform()'s label ordering with the flattened
    ``valuesArr`` entries (rounded to 4 decimal places)."""
    labels = reform()
    flat_values = valuesArr.flatten()
    return {label: np.round(flat_values[pos], 4) for pos, label in enumerate(labels)}
q1PgmpyHash = createHashTable()
valArr = q2.variables
valuesArr = q2.values
q2PgmpyHash = createHashTable()
print(q1PgmpyHash == QiskitDict(BellStateVec,2))
print(q2PgmpyHash == QiskitDict(djVec,2))
print(q2PgmpyHash)
print(QiskitDict(djVec,2))
print(reform())
# -
def cpd_2_dm(obj,rvs,var):
    """Convert a pgmpy query result into a density matrix.

    Parameters:
        obj: pgmpy factor-like object exposing ``.variables`` and
             ``.values`` (the flattened amplitude table).
        rvs: variable names treated as classical random variables — the
             result is a sum of one outer product per joint assignment.
        var: variable names treated as qubits of the output matrix.
    Returns:
        (2**len(var)) x (2**len(var)) complex numpy array.
    """
    numQubits = len(var)
    numRVs = len(rvs)
    varOrder = obj.variables
    numVars = len(varOrder)
    qubitOrdering = []
    rvsOrdering = []
    # For each qubit name, locate its position j in obj.variables and
    # record the stride 2**(numVars-j-1) of that axis in the flattened
    # (row-major, cardinality-2 per axis) values array.
    for i in range(numQubits):
        v = var[i]
        j = 0
        while(j < numVars and v != varOrder[j]):
            j += 1
        qubitOrdering.append(2**(numVars - j - 1))
    # Same stride computation for the classical random variables.
    for i in range(numRVs):
        v = rvs[i]
        j = 0
        while(j < numVars and v != varOrder[j]):
            j += 1
        rvsOrdering.append(2**(numVars - j - 1))
    vals = (obj.values).flatten()
    dm = np.zeros((2**numQubits,2**numQubits),dtype="complex_")
    numEvents = 2**numRVs
    numPermutations = 2**numQubits
    # Accumulate one |psi><psi| outer product per joint rv assignment.
    for i in range(numEvents):
        # val1: flattened base offset selecting this rv assignment.
        val1 = 0
        for j in range(numRVs):
            val1 += ((i//(2**j))%2)*rvsOrdering[numRVs - j - 1]
        arr1 = np.zeros((numPermutations,1),dtype="complex_")   # column vector psi
        arr2 = np.zeros((1,numPermutations),dtype="complex_")   # row vector psi-dagger
        for j in range(numPermutations):
            # val2: offset of the j-th qubit basis state under this assignment.
            val2 = val1
            for k in range(numQubits):
                val2 += ((j//(2**k))%2)*qubitOrdering[numQubits - k - 1]
            arr1[j][0] = vals[val2]
            arr2[0][j] = np.conj(vals[val2])
        dm += np.matmul(arr1,arr2)
    return dm
X = cpd_2_dm(q1,[],['q1m2', 'q0m2'])
print(X)
X = cpd_2_dm(q2,[],['q1m2', 'q0m3'])
print(X)
| Notebooks/Variable Elimination.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EXERCÍCIOS SINGLETON
#
# [](https://colab.research.google.com/github/catolicasc-joinville/lp1-notebooks/blob/master/3-padroes-de-projeto/2.1-exercicios.ipynb) [launch](https://colab.research.google.com/github/catolicasc-joinville/lp1-notebooks/blob/master/3-padroes-de-projeto/2.1-exercicios.ipynb)
# 1) A classe `Incremental` apresentada abaixo deve incrementar em 1 toda vez que é instanciada. Converta ela para uma classe do tipo Singleton para que este comportamento seja executado como esperado:
# +
class Incremental:
    """Counter implemented as a Singleton.

    Every instantiation returns the same shared object, so increments
    accumulate across apparently independent instances — producing the
    exercise's expected output 'Current value 1' then 'Current value 2'.
    (The original version created a fresh object each time, so both
    prints showed 'Current value 1'.)
    """
    _instance = None  # the one shared instance
    count = 0
    def __new__(cls):
        # Create the instance on first use only; afterwards hand back the
        # cached object so every caller shares a single counter.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    def increment(self):
        """Advance the shared counter by one."""
        self.count += 1
    def __str__(self):
        return f"Current value {self.count}"
i1 = Incremental()
i1.increment()
print(i1)
i2 = Incremental()
i2.increment()
print(i2)
# -
# A saída deveria ser:
#
# ```
# Current value 1
# Current value 2
# ```
#
# Implemente a classe Incremental usando o padrão Singleton:
# 2) Implemente uma classe chamada `BancoDeDados` que possui uma única instância e nos permite adicionar e consultar elementos. A classe `BancoDeDados` deve ser implementada como um Singleton:
| python/padroes-de-projeto/singleton-exercicios.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! pip install tweepy
# ! pip install numpy
# ! pip install pandas
# ! pip install torch
# ! pip install detoxify
import os
import re
import tweepy
import numpy as np
import pandas as pd
from detoxify import Detoxify
TWITTER_API_KEY = os.environ.get('TWITTER_API_KEY')
TWITTER_API_SECRET = os.environ.get('TWITTER_API_SECRET')
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
api = tweepy.API(auth)
tp = api.get_user('franciscojarceo')
# + tags=[]
tp._json['name']
# -
def get_all_tweets(screen_name):
    """Download a user's full available timeline via tweepy max_id paging.

    Parameters:
        screen_name: Twitter handle to fetch (without '@').
    Returns:
        list of tweepy Status objects, most recent first (empty list if
        the account has no tweets).
    """
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name, count=200)
    # Guard: an account with zero tweets would otherwise raise IndexError
    # on alltweets[-1] below.
    if not new_tweets:
        return alltweets
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest)
        # save most recent tweets (no-op on the final empty page, and
        # alltweets is guaranteed non-empty here so oldest stays valid)
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    return alltweets
sheels = get_all_tweets('pitdesi')
type(sheels)
sres = []
for i in sheels:
cleaned = clean_tweet(i._json['text'])
sres.append((i._json['id'], i._json['text'], cleaned))
sdf = pd.DataFrame(sres, columns=['tweet_id', 'tweet', 'cleaned_tweet'])
for t in sdf[sdf['tweet'].str.contains('tweet')]['tweet']:
print(t)
mytweets = get_all_tweets('franciscojarceo')
mytweets[2]._json['in_reply_to_status_id']
mytweets[1]._json['in_reply_to_status_id']
mytweets[1]._json['id'] #['text']
'in_reply_to_status_id' in mytweets[1]._json
def clean_tweet(x):
    """Strip @mentions from a tweet's text; return None for truncated
    reply stubs (text collapsing to '… https://t.co...').

    On any failure (e.g. non-string input) the error is printed and x is
    returned unchanged, exactly as before.
    """
    try:
        stripped = re.sub("@[A-Za-z0-9_]+", "", x).strip()
        truncated = re.match('… https://t.co/*', stripped)
        if truncated is not None and truncated.end() > 0:
            return None
        return stripped
    except Exception as err:
        print(err)
        return x
# +
# %%time
# Sentinel scores used when a tweet could not be cleaned: -1.0 in every
# toxicity dimension marks "no prediction".
dv = -1.0
dvd = {
    'toxicity': dv,
    'severe_toxicity': dv,
    'obscene': dv,
    'threat': dv,
    'insult': dv,
    'identity_hate': dv
}
res = []
tweeters = []
preds = []  # NOTE(review): never populated in this cell — dead variable?
# Score every reply tweet (in_reply_to_status_id set) with Detoxify.
for i in mytweets:
    if i._json['in_reply_to_status_id'] is not None:
        cleaned = clean_tweet(i._json['text'])
        res.append((i._json['id'], i._json['text'], cleaned))
        # NOTE(review): Detoxify('original') constructs the model anew on
        # every iteration — hoisting a single instance out of the loop (or
        # using the batched predict call a few cells below) would be far
        # faster.
        prediction = [dvd if cleaned is None else Detoxify('original').predict(cleaned)]
        tweeters.append(prediction)
# -
len(predictions2['toxicity'])
res = []
for i in mytweets:
if i._json['in_reply_to_status_id'] is not None:
cleaned = clean_tweet(i._json['text'])
res.append((i._json['id'], i._json['text'], cleaned))
cdf = pd.DataFrame(res, columns=['tweet_id', 'tweet', 'cleaned_tweet'])
# %%time
predictions2 = Detoxify('original').predict(cdf[cdf['cleaned_tweet'].isnull()==False]['cleaned_tweet'].tolist())
tdfp.reset_index(drop=True).index
tdfp = pd.concat([pd.DataFrame(t) for t in tweeters], axis=0)
tdfp
tdf = pd.DataFrame(res, columns=['tweet_id', 'tweet'])
tdfp = pd.concat([pd.DataFrame(t) for t in tweeters], axis=0).reset_index(drop=True)
xvars = [
'toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult','identity_hate'
]
for x in xvars:
tdf[x] = tdfp[x]
tdf.shape, tdfp.shape
tdf.head()
tdf['risk'] = tdf[xvars].max(axis=1)
tdf['risk'].describe()
tdf.sort_values(by='risk', ascending=False)[['tweet_id', 'tweet', 'risk']]
# # END
| twitter-scan/backend/app/Twitter-Mining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import imageio
# %matplotlib inline
# ### __init__
tfd = tfp.distributions
# +
prior_mean = [0, 0]
prior_cov = 1/2 * np.eye(2)
multiDist = tfd.MultivariateNormalFullCovariance(loc=prior_mean, covariance_matrix=prior_cov)
# +
# multiDist.mean().numpy()
# multiDist.covariance().numpy()
# returns the probability density function at that point
# multiDist.prob([-1., 0]).numpy()
# -
# ### linear_function
# +
x = tf.constant([1.0, 2.0,4.0], dtype=tf.float32)
noise = tfd.Normal(loc=0, scale=1 / np.sqrt(25))
x.numpy()
y = (5 + x).numpy()
y
# -
Noise = tf.cast(noise.sample(len(x)), tf.float32).numpy()
y + Noise
# ### get_design_matrix
# +
N = 2
M = 2
matrix = np.ones((N,M)) + [[1,0],[2,4]]
matrix
# matrix[:,1]
# -
# ### update_prior
# ## Posterior
#
# The posterior is proportional to the product of the likelihood function and the prior:
#
# $$p(\mathbf{\theta}|\mathcal{D})=\mathcal{N}(\mathbf{\theta}| \mu',\Sigma') \propto p(\mathcal{D}|\mathbf{\theta}) p(\mathbf{\theta})$$
# and the moments are:
# $$\mu' = \Sigma'(\Sigma^{-1}\mu + \beta X^T\mathbf{y})$$
# $$\Sigma' = (\Sigma^{-1} + \beta XX^T)^{-1}$$
#
# which are used in the function $\verb|update_prior|$.
matrix.T.dot(matrix)
np.matmul(matrix.T,matrix)
# ### plot_prior
x = np.linspace(-1,1,5)
x
x = np.linspace(-1, 1, 5)
y = np.linspace(-1, 1, 5)
xx,yy = np.meshgrid(x, y)
# np.meshgrid(x, y)[0][1][1]
# np.meshgrid(x, y)[1][1][1]
print('y coordinates in order:')
print(yy)
print('')
print('x coordinates in order:')
print(xx)
plt.plot(xx, yy, marker = 'x', c = 'orange')
# +
# help(np.dstack)
print(np.dstack((xx,yy)).shape)
print('')
print(np.dstack((xx,yy)))
# multiDist.prob(np.dstack((xx, yy))).numpy()
# -
z = multiDist.prob(np.dstack((xx,yy)))
z
# +
# ax = plt.axes(projection='3d')
p = np.ones(2)
p[0] = 0.75
p[1] = 0.75
plt.contourf(x, y, z,cmap='plasma')
plt.plot(p[0], p[1], marker = 'x', c = 'orange')
plt.title("My countour plot")
plt.xlabel("x")
plt.ylabel("y")
ax = plt.axes()
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
# plt.clf()
plt.show()
# -
# ### plot_likelihood
# We visualize the likelihood distribution which identifies which parameters are likely generated by the given batch of data. The log likelihood function:
#
# $$\ln p(\mathcal{D}|\mathbf{\theta})= \frac{N}{2}\ln\beta - \frac{N}{2}\ln(2\pi) - \frac{\beta}{2}\sum^N_{n=1}\{y_n-\mathbf{\theta}^T\mathbf{x}_n\}^2$$
# viewed as a function of $\mathbf{\theta}$ is
# $$p(\mathcal{D}|\mathbf{\theta})\propto \exp(-\sum^N_{n=1}\{y_n-\mathbf{\theta}^T\mathbf{x}_n\}^2)$$.
# +
a = tf.constant([0,0])
b = tf.constant([0,0])
x = np.linspace(-1, 1, 5)
y = np.linspace(-1, 1, 5)
theta_0, theta_1 = np.meshgrid(x, y)
least_squares_sum = 0
for point, target in zip(a, b):
least_squares_sum += (target - (theta_0 + theta_1 * point))**2
# print(target)
# z = np.exp(-0.2*least_squares_sum)
z = least_squares_sum
print(z)
# plt.contourf(x, y, z, cmap='plasma')
# theta_0
# -
# ### prediction_mean_std
#
#
# We can write that the density of the new predicted value $y'$ given the previous data y as the expected value of the likelihood of the new data under the posterior density $p(\theta | y)$'
#
# Finally, the predictive distribution is estimated from the formula:
# $$p(y|X, \mathcal{D}) = \int p(y|X,\mathbf{\theta})p(\mathbf{\theta}|\mathcal{D})d\mathbf{\theta}$$
# which is obtained by convolving two Gaussians:
#
# $$p(y|X, \mathcal{D})=\mathcal{N}(y|\mu_*,\Sigma_*)$$
# with
# $$\mu_* = X^T\mu'$$
# $$\Sigma_* = \beta^{-1} + X^T\Sigma'X$$
#
# +
no_samples = 2
design_mtx = np.ones((2,2))
design_mtx[:,1] = [2,3]
print(design_mtx)
print('')
prediction = []
for index in range(no_samples):
x = design_mtx[index, :]
print(x)
predictive_std = np.sqrt(1/2 + x.T.dot(prior_cov.dot(x)))
predictive_mean = np.array(prior_mean).dot(x)
prediction.append((predictive_mean, predictive_std))
prediction
# -
# ### plot_data_space
prediction_means = [x[0] for x in prediction]
print(prediction_means)
y_upper = [x[0] + 1 * x[1] for x in prediction]
print(y_upper)
y_lower = [x[0] - 1 * x[1] for x in prediction]
print(y_lower)
# plt.scatter(0.5,0.5,marker='o')
X = tf.constant([0.5,0.75])
T = tf.constant([0.5,-0.5])
for point, target in zip(X,T):
print([point.numpy(),target.numpy()])
# +
X = tf.constant([2.5,1.75])
T = tf.constant([1.5,-0.5])
# plt.title('Data Space (iteration {})'.format(1))
# plt.xlabel('$x$')
# plt.ylabel('$y$')
# ax = plt.axes()
# ax.set_xlim(-1, 1)
# ax.set_ylim(-1, 1)
# plot generated data points
for point, target in zip(X, T):
plt.scatter(x=point.numpy(), y=target.numpy(), marker ='o', c='blue', alpha=0.7)
# plot confidence bounds
print(x)
plt.fill_between(x, y_upper, y_lower, where=y_upper >= y_lower, facecolor='orange', alpha=0.3)
# # plot prediction mean
# plt.plot(x, prediction_means, '-r', label='Prediction mean', c='orange', linewidth=2.0, alpha=0.8)
# # plot real function
# plt.plot(x, tf.constant([1.0, 2.0], dtype=tf.float32), '-r', label='Target function', c='red', linewidth=2.0, alpha=0.8)
# -
# ## Overview
# Our goal is create a parametrized linear regression model under additive Gaussian noise:
#
# $$f(x)=\theta_0 + \theta_1x$$ so that $y=f(x) + \epsilon$. The parameters $\mathbf{\theta}$ can be found by sequentially collecting samples $\mathcal{D}:=(X,\mathbf{y})=\{(x_n,y_n)\}_{n=1}^N$ from the target function and using this data in the Bayesian framework to approach the true value of parameters. However, finding the parameter distribution is merely an intermediate goal. Once we have established the distribution of coefficients through Bayesian treatment, we are able to predict y for every new input $\mathbf{x}$:
#
# $$p(y|X, \mathcal{D}) = \int p(y|X,\mathbf{\theta})p(\mathbf{\theta}|\mathcal{D})d\mathbf{\theta}$$
#
#
# The Bayesian model assigns this probability to each value of y for a given x to arrive at a probability distribution over parameters and not only point estimates. Additionally, the Bayesian approach does not suffer from over-fitting.
# The prior is chosen to be a Gaussian distribution,
#
# $$p(\mathbf{\theta})=\mathcal{N}(\mathbf{\theta}|\mu, \Sigma)$$
#
# with mean $\mu=\mathbf{0}$ and the variance $\Sigma=\alpha^{-1}\mathbf{I}$ is governed by a precision parameter $\alpha$ so that the prior is isotrophic. $\beta$ is the precision of the Gaussian additive noise $\epsilon$:
#
# $$p(y|X, \mathbf{\theta}) = \mathcal{N}(y| f(X), \beta^{-1}).$$
#
#
# The constructor shows the experimental setup. We define precision parameters $\alpha=2$ and $\beta=25$, as well as the coefficients $\mathbf{\theta}$ of the “unknown” linear function:
#
# $$f(\mathbf{x}) = \theta_0+\theta_1x = -0.3 + 0.5x$$
#
# We visualize the likelihood distribution which identifies which parameters are likely generated by the given batch of data. The log likelihood function:
#
# $$\ln p(\mathcal{D}|\mathbf{\theta})= \frac{N}{2}\ln\beta - \frac{N}{2}\ln(2\pi) - \frac{\beta}{2}\sum^N_{n=1}\{y_n-\mathbf{\theta}^T\mathbf{x}_n\}^2$$
# viewed as a function of $\mathbf{\theta}$ is
# $$p(\mathcal{D}|\mathbf{\theta})\propto \exp(-\sum^N_{n=1}\{y_n-\mathbf{\theta}^T\mathbf{x}_n\}^2)$$.
#
#
# ## Posterior
#
# The posterior is proportional to the product of the likelihood function and the prior:
#
# $$p(\mathbf{\theta}|\mathcal{D})=\mathcal{N}(\mathbf{\theta}| \mu',\Sigma') \propto p(\mathcal{D}|\mathbf{\theta}) p(\mathbf{\theta})$$
# and the moments are:
# $$\mu' = \Sigma'(\Sigma^{-1}\mu + \beta X^T\mathbf{y})$$
# $$\Sigma' = (\Sigma^{-1} + \beta XX^T)^{-1}$$
#
# which are used in the function $\verb|update_prior|$.
#
#
# ## Predictive Distribution
#
# Finally, the predictive distribution is estimated from the formula:
# $$p(y|X, \mathcal{D}) = \int p(y|X,\mathbf{\theta})p(\mathbf{\theta}|\mathcal{D})d\mathbf{\theta}$$
# which is obtained by convolving two Gaussians:
#
# $$p(y|X, \mathcal{D})=\mathcal{N}(y|\mu_*,\Sigma_*)$$
# with
# $$\mu_* = X^T\mu'$$
# $$\Sigma_* = \beta^{-1} + X^T\Sigma'X$$
#
#
x = np.linspace(-1, 1, 100)
y = np.linspace(-1, 1, 100)
theta_0, theta_1 = np.meshgrid(x, y)
np.dstack((theta_0,theta_1))
| Chapter3-GPs/present3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy
import toyplot.layout
# **Shortest Paths**
# Exercise the (private) Floyd-Warshall all-pairs shortest-path helper on
# tiny graphs: a single edge, a two-edge path, and a 3-cycle.
# NOTE(review): these call underscore-prefixed toyplot internals, which may
# change without notice between releases. Python 2 print statements — this
# notebook targets a Python 2 kernel.
toyplot.layout._floyd_warshall_shortest_path(2, numpy.array([[0, 1]]))
toyplot.layout._floyd_warshall_shortest_path(3, numpy.array([[0, 1], [1, 2]]))
toyplot.layout._floyd_warshall_shortest_path(3, numpy.array([[0, 1], [1, 2], [2, 0]]))
# **Adjacency Lists**
# Render each graph, then print its adjacency-list representation.
edges = numpy.array([[0, 1]])
toyplot.graph(edges, width=200);
print toyplot.layout._adjacency_list(2, edges)
edges = numpy.array([[0, 1], [1, 2]])
toyplot.graph(edges, width=200);
print toyplot.layout._adjacency_list(3, edges)
edges = numpy.array([[0, 1], [1, 2], [0, 2]])
toyplot.graph(edges, width=200);
print toyplot.layout._adjacency_list(3, edges)
# **Tree Detection**
# _require_tree — presumably validates that the adjacency list forms a
# tree (name suggests it raises otherwise) — TODO confirm against the
# toyplot.layout source.
edges = numpy.array([[0, 1]])
toyplot.graph(edges, width=200);
print toyplot.layout._require_tree(toyplot.layout._adjacency_list(2, edges))
edges = numpy.array([[0, 1], [0, 2]])
toyplot.graph(edges, width=200);
print toyplot.layout._require_tree(toyplot.layout._adjacency_list(3, edges))
edges = numpy.array([[0, 1], [1, 2]])
toyplot.graph(edges, width=200);
print toyplot.layout._require_tree(toyplot.layout._adjacency_list(3, edges))
# The last example contains a cycle (1 -> 2 -> 1).
edges = numpy.array([[0, 1], [1, 2], [2, 1]])
toyplot.graph(edges, width=200, layout=toyplot.layout.FruchtermanReingold(toyplot.layout.CurvedEdges()));
print toyplot.layout._require_tree(toyplot.layout._adjacency_list(3, edges))
| sandbox/graph-algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Classification of classy Shakespeare plays
# #### Data Source:
# https://www.kaggle.com/kingburrito666/shakespeare-plays
# #### Step 1: Data Extraction
import pandas as pd
# Loading data into data frame
players_df = pd.read_csv("../data/external/Shakespeare_data.csv")
players_df.head(3)
# Removing not required columns
players_df = players_df.drop(['PlayerLinenumber'], axis=1)
players_df = players_df.drop(['ActSceneLine'], axis=1)
players_df.head(3)
# Removing null values from Player column
players_df = players_df.dropna(axis=0, subset=['Player'])
players_df.head()
# Checking data types in dataframe
players_df.dtypes
# #### Step 2: Data Tranformation
# Using Encoders for transforming the object data types
# #### One hot encoding for transforming Play column
players_df = pd.get_dummies(players_df, columns=['Play'])
# #### Label Encoding for transforming Player and PlayerLine columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
players_df['Player'] = le.fit_transform(players_df['Player'].astype('str'))
players_df['PlayerLine'] = le.fit_transform(players_df['PlayerLine'].astype('str'))
players_df.head(3)
# #### Step 3: Model Training and Testing data sets
from sklearn.model_selection import train_test_split
# Defining Features
X=players_df.drop(['Player'], axis=1)
# Defining Targets
y=players_df['Player']
# Splitting the training and testing data sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# #### Step 4: Building Models
# 1. Using Decision Tree to find the accuracy
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier().fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# 2. Using Logistic Regression to find the accuracy
# +
# 2. Using Logistic Regression to find the accuracy
from sklearn.linear_model import LogisticRegression

logreg = LogisticRegression()
# Fit on the training split only. The original called logreg.fit(X, y),
# which trains on the FULL dataset and leaks the held-out rows into the
# model, inflating the reported test accuracy. Fitting on X_train/y_train
# matches every other classifier in this notebook.
logreg.fit(X_train, y_train)
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
      .format(logreg.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
      .format(logreg.score(X_test, y_test)))
# -
# 3. Using K-Nearest Neighbours to find the accuracy
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
print('Accuracy of K-NN classifier on training set: {:.2f}'
.format(knn.score(X_train, y_train)))
print('Accuracy of K-NN classifier on test set: {:.2f}'
.format(knn.score(X_test, y_test)))
# 4. Using Gaussian Naive Bayes to find the accuracy
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
print('Accuracy of GNB classifier on training set: {:.2f}'
.format(gnb.score(X_train, y_train)))
print('Accuracy of GNB classifier on test set: {:.2f}'
.format(gnb.score(X_test, y_test)))
# __Conclusion__: Accuracy of 69% is predicted for Decision Tree classifier on the test data set, which is much better than the other classifiers.
| notebooks/YY-IGNORE-ToBeOrNot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome
#
# Welcome to the course material for _Advanced Python3: Object oriented programming, databases and visualisation_. This course is developed with <NAME> and covers the following schedule over two days, where each lecture covers both theory as well as hands-on practice:
#
# ### Day 1
#
# 09:00-09:30 L00 Welcome and practicalities
#
# 09:30-15:00 L01 Object-oriented programming (OOP) in Python
#
# - Introduction to OOP in Python
# - Objects, classes, methods, attributes
# - Inheritance and polymorphism
# - Modules and packaging
#
# 15:00-16:00 L02 Databases, part I
#
# - Introduction to relational databases and (Postgre)SQL
#
# ### Day 2
#
# 09:00-11:30 L02 Databases, part II
#
# - Object Relational Mapping: SQLAlchemy
# - PostgreSQL support using Psycopg2
# - Django
#
# 12:30-16:00 L03 Advanced Visualisation
#
# - Production-ready figures with Matplotlib and Plotly
# - Interactive visualisation in the browser using Bokeh
#
#
# ## Requirements and installation instructions
#
# The course is based on Python 3 and covers django, sqlalchemy, psycopg, Matplotlib, Plotly, and Bokeh libraries. The course material is provided as Jupyter Notebooks.
#
# We strongly recommend using [Anaconda](https://www.anaconda.com/distribution/) to set up a dedicated virtual environment for the course material. This avoids potential conflicts between the libraries required for the practical parts of the course and any previously installed Python libraries. To set up a new conda environment and activate it run
# ```
# conda create -n python-course
# conda activate python-course
# ```
#
# To exit a conda environment run
# ```
# conda deactivate
# ```
#
# Once the environment is activated you can install all required packages:
#
# ```
# conda install -c conda-forge jupyterlab
# ```
#
# To install the visualisation tools:
#
# ```
# conda install -c conda-forge matplotlib
# conda install -c plotly plotly
# conda install -c bokeh bokeh
# ```
#
# Even though we will not explicitly cover it in this course, some of the Plotly libraries require pandas:
#
# ```
# conda install -c conda-forge pandas
# ```
#
# To be able to display Plotly plots in JupyterLab a few other things [need to be installed and set up](https://plot.ly/python/getting-started/#jupyterlab-support-python-35):
#
# ```
# conda install -c conda-forge nodejs
# conda install -c conda-forge ipywidgets
#
# # Avoid "JavaScript heap out of memory" errors during extension installation
# # (OS X/Linux)
# export NODE_OPTIONS=--max-old-space-size=4096
# # (Windows)
# set NODE_OPTIONS=--max-old-space-size=4096
#
# # Jupyter widgets extension
# jupyter labextension install @jupyter-widgets/jupyterlab-manager
#
# # jupyterlab renderer support
# jupyter labextension install jupyterlab-plotly
#
# # FigureWidget support
# jupyter labextension install plotlywidget
#
# # Unset NODE_OPTIONS environment variable
# # (OS X/Linux)
# unset NODE_OPTIONS
# # (Windows)
# set NODE_OPTIONS=
# ```
#
# The database tools require their own environments to avoid conflicts:
#
# ```
# conda create --name python-course-db
# conda activate python-course-db
# conda install -c conda-forge django
# conda install -c conda-forge sqlalchemy
# conda install -c conda-forge psycopg2
# ```
#
# If you are familiar with ```pandas``` and would like to convert databases to Dataframes you can also install it in the db environment:
#
# ```
# conda install -c conda-forge pandas
# ```
#
# Since we work in Jupyter notebooks we need to be able to serve conda environments to Jupyter Lab. This is done by installing the iPython kernel in each conda environment:
#
# ```
# conda install -c conda-forge ipykernel
# ipython kernel install --user --name=<any_name_for_kernel>
# ```
#
#
# ## Getting started with JupyterLab
#
# The course material is provided as Jupyter Notebooks and is best accessed via [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/getting_started/installation.html) (available in conda). To start a JupyterLab session run
#
# ```
# jupyter lab
# ```
#
# This should automatically open a window in your browser. If not, copy the URL from the commands output. It should be of the format http://localhost:8888/?token=xxxxx or http://127.0.0.1:8888/?token=xxxxx.
| L00_welcome.ipynb |
/ -*- coding: utf-8 -*-
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: MariaDB
/ language: SQL
/ name: mariadb_kernel
/ ---
/ # Prudence When Altering Tables
#mysqldump --user='sergior' -p \;
#The line above runs the dump as the given user;
#rookery birds > /tmp/birds.sql;
#The line above saves the birds table into /tmp as a backup in case a change has to be undone;
#mysqldump --user='sergior' -p \;
#Same as in the previous cell;
#rookery > rookery.sql;
#This time the whole rookery database is saved;
#mysql --user='sergior' -p \;
#Now the plain mysql client is used (not mysqldump);
#rookery < rookery-ch2-end.sql;
#The command above restores the database from an earlier dump; all current data will be lost;
/ # Essential Changes
SHOW databases;
USE rookery;
ALTER TABLE bird_families
ADD COLUMN order_id INT;
#The statement above adds an INT column named order_id to the bird_families table;
CREATE TABLE test.birds_new LIKE birds;
#The statement above creates a backup table in the test database with the same structure as birds;
use test;
show tables;
USE rookery;
USE test;
DESCRIBE birds_new;
#The line above shows the structure of the birds_new table in the test database;
#LIKE only copied the table structure; the INSERT below copies the rows into the backup table;
INSERT INTO birds_new
SELECT * FROM rookery.birds;
CREATE TABLE birds_new_alternative
SELECT * FROM rookery.birds;
#The statement above creates an alternative copy of birds and fills it with data in a single step;
DESCRIBE birds_new_alternative;
#CREATE TABLE ... SELECT does not copy the primary key or its AUTO_INCREMENT attribute;
#Because of that, the first method (CREATE ... LIKE followed by INSERT ... SELECT) is preferable;
DROP TABLE birds_new_alternative;
show tables;
DESCRIBE birds_new;
#Now alter the table by adding a column;
ALTER TABLE birds_new
ADD COLUMN wing_id CHAR(2);
DESCRIBE birds_new;
ALTER TABLE birds_new
DROP COLUMN wing_id;
#Now re-create the column, this time at a chosen position;
#NOTE(review): "wing_ig" below looks like a typo for "wing_id"; kept as-is because later statements reference it;
ALTER TABLE birds_new
ADD COLUMN wing_ig CHAR(2) AFTER family_id;
DESCRIBE birds_new;
#To place a column first, use FIRST instead of AFTER;
ALTER TABLE birds_new
ADD COLUMN body_id CHAR(2) AFTER wing_ig,
ADD COLUMN bill_id CHAR(2) AFTER body_id,
ADD COLUMN endangered BIT DEFAULT b'1' AFTER bill_id,
CHANGE COLUMN common_name common_name VARCHAR(255);
DESCRIBE birds_new;
UPDATE birds_new SET endangered =0
WHERE bird_id IN(1,2,4,5);
SELECT bird_id, scientific_name, common_name,endangered
FROM birds_new
WHERE endangered;
#"WHERE endangered" alone is enough here;
#With the BIT type it selects exactly the rows where endangered=1;
SELECT * FROM birds_new
WHERE NOT endangered;
#The query above selects the rows where endangered is 0;
UPDATE birds_new SET endangered=1;
#The statement above resets endangered back to 1 for every row;
ALTER TABLE birds_new
MODIFY COLUMN endangered
ENUM('Extinct',
'Extinct in Wild',
'Threatened - Critically Endangered',
'Threatened - Endangered',
'Threatened - Vulnerable',
'Lower Risk - Conservation Dependent',
'Lower Risk - Near Threatened',
'Lower Risk - Least Concern')
AFTER family_id;
SHOW COLUMNS FROM birds_new LIKE 'endangered';
UPDATE birds_new
SET endangered=8;
#Sets endangered by its ENUM index: 8 is 'Lower Risk - Least Concern';
DESCRIBE birds_new;
SELECT * FROM birds_new;
/ # Dynamic Columns
SHOW TABLES;
USE rookery;
SHow tables;
USE birdwatchers;
show tables;
USE birdwatchers;
#Surveys table: one row per survey;
CREATE TABLE surveys
(survey_id INT AUTO_INCREMENT KEY,
survey_name VARCHAR(255));
#Questions table: choices is a BLOB holding MariaDB dynamic columns;
CREATE TABLE survey_questions
(question_id INT AUTO_INCREMENT KEY,
survey_id INT,
question VARCHAR(255),
choices BLOB);
CREATE TABLE survey_answers
(answer_id INT AUTO_INCREMENT KEY,
human_id INT,
question_id INT,
date_answered DATETIME,
answer VARCHAR(255));
SHOW TABLES;
#Now insert data; COLUMN_CREATE builds the dynamic-columns blob of answer choices;
/ +
INSERT INTO surveys (survey_name)
VALUES("Favorite Birding Location");
INSERT INTO survey_questions
(survey_id, question, choices)
VALUES(LAST_INSERT_ID(),
"What's your favorite setting for bird-watching?",
COLUMN_CREATE('1', 'forest', '2', 'shore', '3', 'backyard') );
INSERT INTO surveys (survey_name)
VALUES("Preferred Birds");
INSERT INTO survey_questions
(survey_id, question, choices)
VALUES(LAST_INSERT_ID(),
"Which type of birds do you like best?",
COLUMN_CREATE('1', 'perching', '2', 'shore', '3', 'fowl', '4', 'rapture') );
/ -
DESCRIBE survey_questions;
SELECT * FROM surveys;
#COLUMN_GET extracts a single dynamic column (here choice 3) from the blob;
SELECT COLUMN_GET(choices, 3 AS CHAR)
AS 'Location'
FROM survey_questions
WHERE survey_id = 1;
INSERT INTO survey_answers
(human_id, question_id, date_answered, answer)
VALUES
(29, 1, NOW(), 2),
(29, 2, NOW(), 2),
(35, 1, NOW(), 1),
(35, 2, NOW(), 1),
(26, 1, NOW(), 2),
(26, 2, NOW(), 1),
(27, 1, NOW(), 2),
(27, 2, NOW(), 4),
(16, 1, NOW(), 3),
(3, 1, NOW(), 1),
(3, 2, NOW(), 1);
#Tally votes per choice; WITH ROLLUP adds a summary row, which IFNULL labels 'total';
SELECT IFNULL(COLUMN_GET(choices, answer AS CHAR), 'total')
AS 'Birding Site', COUNT(*) AS 'Votes'
FROM survey_answers
JOIN survey_questions USING(question_id)
WHERE survey_id = 1
AND question_id = 1
GROUP BY answer WITH ROLLUP;
DESCRIBE survey_answers;
SELECT * FROM survey_answers;
SELECT * FROM surveys;
/ # Setting a Column's Default Value
#Lookup table of conservation categories/states (mirrors the ENUM used earlier);
CREATE TABLE rookery.conservation_status
(status_id INT AUTO_INCREMENT PRIMARY KEY,
conservation_category CHAR(10),
conservation_state CHAR(25) );
INSERT INTO rookery.conservation_status
(conservation_category, conservation_state)
VALUES('Extinct','Extinct'),
('Extinct','Extinct in Wild'),
('Threatened','Critically Endangered'),
('Threatened','Endangered'),
('Threatened','Vulnerable'),
('Lower Risk','Conservation Dependent'),
('Lower Risk','Near Threatened'),
('Lower Risk','Least Concern');
SELECT * FROM rookery.conservation_status;
show tables;
Use rookery;
show tables;
use test;
show tables;
#Replace the ENUM column with an id referencing the lookup table; default 8 = 'Least Concern';
ALTER TABLE birds_new
CHANGE COLUMN endangered conservation_status_id INT DEFAULT 8;
ALTER TABLE birds_new
ALTER conservation_status_id SET DEFAULT 7;
#ALTER ... SET/DROP DEFAULT changes only the column default, not existing rows;
ALTER TABLE birds_new
ALTER conservation_status_id DROP DEFAULT;
/ # Setting the Value of AUTO_INCREMENT
#Read the next AUTO_INCREMENT value for the birds table from the data dictionary;
SELECT auto_increment
FROM information_schema.tables
WHERE table_name = 'birds';
#NOTE(review): the USE statement below is missing its ';' terminator;
USE rookery
ALTER TABLE birds
AUTO_INCREMENT = 10;
/ # Another Method to Alter and Create a Table
CREATE TABLE birds_new LIKE birds;
DESCRIBE birds;
DESCRIBE birds_new;
SELECT * FROM birds_new;
SHOW CREATE TABLE birds;
ALTER TABLE birds_new
AUTO_INCREMENT = 6;
#Move the long description text out into its own table keyed by bird_id;
CREATE TABLE birds_details
SELECT bird_id, description
FROM birds;
DESCRIBE birds_details;
ALTER TABLE birds
DROP COLUMN description;
#Swap the rebuilt table into place, keeping the old one as birds_old;
RENAME TABLE rookery.birds TO rookery.birds_old,
test.birds_new TO rookery.birds;
SHOW TABLES IN rookery LIKE 'birds%';
DROP TABLE birds_old;
/ # Reordering a Table
#NOTE(review): the DROP targets country_codes but the CREATE below names the table
#country_countries; one of the two names is likely a transcription slip;
DROP TABLE IF EXISTS country_codes;
CREATE TABLE country_countries(
code1 char(2) DEFAULT NULL,
code2 char(2) DEFAULT NULL,
code3 char(3) DEFAULT NULL,
country_name varchar(255) DEFAULT NULL,
country_name_italian varchar(255) DEFAULT NULL,
KEY code1 (code1),
KEY code2 (code2)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
#Take a write lock for the bulk insert that follows;
LOCK TABLES country_countries WRITE;
INSERT INTO country_countries VALUES ('af','AF','AFG','Afghanistan','Afghanistan'),('ax','AX','ALA','Åland',NULL),('al','AL','ALB','Albania','Albania'),('dz','DZ','DZA','Algeria','Algeria'),('as','AS','ASM','American Samoa','Samoa Americane'),('ad','AD','AND','Andorra','Andorra'),('ao','AO','AGO','Angola','Angola'),('ai','AI','AIA','Anguilla','Anguilla'),('aq',NULL,NULL,'Antarctica',NULL),('ag','AG','ATG','Antigua and Barbuda','Antigua e Barbuda'),('ar','AR','ARG','Argentina','Argentina'),('am','AM','ARM','Armenia','Armenia'),('aw','AW','ABW','Aruba','Aruba'),('ac',NULL,NULL,'Ascension Island',NULL),('au','AU','AUS','Australia','Australia'),('at','AT','AUT','Austria','Austria'),('az','AZ','AZE','Azerbaijan','Azerbaijan'),('bs','BS','BHS','Bahamas','Bahamas'),('bh','BH','BHR','Bahrain','Bahrain'),('bd','BD','BGD','Bangladesh','Bangladesh'),('bb','BB','BRB','Barbados','Barbados'),('by','BY','BLR','Belarus','Bielorussia'),('be','BE','BEL','Belgium','Belgio'),('bz','BZ','BLZ','Belize','Belize'),('bj','BJ','BEN','Benin','Benin'),('bm','BM','BMU','Bermuda','Bermuda'),('bt','BT','BTN','Bhutan','Bhutan'),('bo','BO','BOL','Bolivia','Bolivia'),('ba','BA','BIH','Bosnia and Herzegovina','Bosnia-Erzegovina'),('bw','BW','BWA','Botswana','Botswana'),('bv',NULL,NULL,'Bouvet Island',NULL),('br','BR','BRA','Brazil','Brasile'),('vg','IO','VGB','British Virgin Islands','Isole Vergini Britanniche'),('io',NULL,NULL,'British Indian Ocean Territory',NULL),('bn','BN','BRN','Brunei Darussalam','Brunei Darussalam'),('bg','BG','BGR','Bulgaria','Bulgaria'),('bf','BF','BFA','Burkina Faso','Burkina Faso'),('bi','BI','BDI','Burundi','Burundi'),('kh','KH','KHM','Cambodia','Cambogia'),('cm','CM','CMR','Cameroon','Camerun'),('ca','CA','CAN','Canada','Canada'),('cv','CV','CPV','Cape Verde','Capo Verde'),('ky','KY','CYM','Cayman Islands','Isole Cayman'),('cf','CF','CAF','Central African Republic','Repubblica 
Centroafricana'),('td','TD','TCD','Chad','Ciad'),('cl','CL','CHL','Chile','Cile'),('cn','CN','CHN','China','Cina'),('cx',NULL,NULL,'Christmas Island',NULL),('cc',NULL,NULL,'Cocos (Keeling) Island',NULL),('co','CO','COL','Colombia','Colombia'),('km','KM','COM','Comoros','Comore'),('cg','CG','COG','Republic of Congo','Repubblica del Congo'),('cd','CD','COD','Democratic Republic of Congo','Repubblica Democratica del Congo'),('ck','CK','COK','Cook Islands','Isole Cook'),('cr','CR','CRI','Costa Rica','Costa Rica'),('ci','CI','CIV','Cote d\'Ivoire','Costa d\'Avorio'),('hr','HR','HRV','Croatia','Croazia'),('cu','CU','CUB','Cuba','Cuba'),('cy','CY','CYP','Cyprus','Cipro'),('cz','CZ','CZE','Czech Republic','Repubblica Ceca'),('dk','DK','DNK','Denmark','Danimarca'),('dj','DJ','DJI','Djibouti','Gibuti'),('dm','DM','DMA','Dominica','Dominica'),('do','DO','DOM','Dominican Republic','Repubblica Dominicana'),('ec','EC','ECU','Ecuador','Ecuador'),('eg','EG','EGY','Egypt','Egitto'),('sv','SV','SLV','El Salvador','El Salvador'),('gq','GQ','GNQ','Equatorial Guinea','Guinea Equatoriale'),('er','ER','ERI','Eritrea','Eritrea'),('ee','EE','EST','Estonia','Estonia'),('et','ET','ETH','Ethiopia','Etiopia'),('fk','FK','FLK','Falkland Islands (Malvinas)','Isole Falkland'),('fo','FO','FRO','Faroe Islands','Isole Faroe'),('fj','FJ','FJI','Fiji','Fiji'),('fi','FI','FIN','Finland','Finlandia'),('fr','FR','FRA','France','Francia'),('gf','GF','GUF','French Guiana','Guyana Francese'),('pf','PF','PYF','French Polynesia','Polinesia Francese'),('tf',NULL,NULL,'French Southern 
Territories',NULL),('ga','GA','GAB','Gabon','Gabon'),('gm','GM','GMB','Gambia','Gambia'),('ge','GE','GEO','Georgia','Georgia'),('de','DE','DEU','Germany','Germania'),('gh','GH','GHA','Ghana','Ghana'),('gi','GI','GIB','Gibraltar','Gibilterra'),('gr','GR','GRC','Greece','Grecia'),('gl','GL','GRL','Greenland','Groenlandia'),('gd','GD','GRD','Grenada','Grenada'),('gp','GP','GLP','Guadeloupe','Guadalupa'),('gu','GU','GUM','Guam','Guam'),('gt','GT','GTM','Guatemala','Guatemala'),('gg','GG','GGY','Guernsey',NULL),('gn','GN','GIN','Guinea','Guinea'),('gw','GW','GNB','Guinea-Bissau','Guinea-Bissau'),('gy','GY','GUY','Guyana','Guyana'),('ht','HT','HTI','Haiti','Haiti'),('hm',NULL,NULL,'Heard and McDonald Islands',NULL),('hn','HN','HND','Honduras','Honduras'),('hk','HK','HKG','Hong Kong','Hong Kong'),('hu','HU','HUN','Hungary','Ungheria'),('is','IS','ISL','Iceland','Islanda'),('in','IN','IND','India','India'),('id','ID','IDN','Indonesia','Indonesia'),('ir','IR','IRN','Iran','Iran'),('iq','IQ','IRQ','Iraq','Iraq'),('ie','IE','IRL','Ireland','Eire'),('im','IM','IMN','Isle of Man','Isola di Man'),('il','IL','ISR','Israel','Israele'),('it','IT','ITA','Italy','Italia'),('jm','JM','JAM','Jamaica','Giamaica'),('jp','JP','JPN','Japan','Giappone'),('je','JE','JEY','Jersey',NULL),('jo','JO','JOR','Jordan','Giordania'),('kz','KZ','KAZ','Kazakhstan','Kazakistan'),('ke','KE','KEN','Kenya','Kenya'),('ki','KI','KIR','Kiribati','Kiribati'),('kp','KP','PRK','North Korea','Corea del Nord'),('kr','KR','KOR','South Korea','Corea del 
Sud'),('kw','KW','KWT','Kuwait','Kuwait'),('kg','KG','KGZ','Kyrgyzstan','Kirghizistan'),('la','LA','LAO','Laos','Laos'),('lv','LV','LVA','Latvia','Lettonia'),('lb','LB','LBN','Lebanon','Libano'),('ls','LS','LSO','Lesotho','Lesotho'),('lr','LR','LBR','Liberia','Liberia'),('ly','LY','LBY','Libya','Libia'),('li','LI','LIE','Liechtenstein','Liechtenstein'),('lt','LT','LTU','Lithuania','Lituania'),('lu','LU','LUX','Luxembourg','Lussemburgo'),('mo','MO','MAC','Macau','Macao'),('mk','MK','MKD','Macedonia','Macedonia'),('mg','MG','MDG','Madagascar','Madagascar'),('mw','MW','MWI','Malawi','Malawi'),('my','MY','MYS','Malaysia','Malesia'),('mv','MV','MDV','Maldives','Maldive'),('ml','ML','MLI','Mali','Mali'),('mt','MT','MLT','Malta','Malta'),('mh','MH','MHL','Marshall Islands','Isole Marshall'),('mq','MQ','MTQ','Martinique','Martinica'),('mr','MR','MRT','Mauritania','Mauritania'),('mu','MU','MUS','Mauritius','Mauritius'),('yt','YT','MYT','Mayotte','Mayotte'),('mx','MX','MEX','Mexico','Messico'),('fm','FM','FSM','Micronesia','Stati Federati della Micronesia'),('md','MD','MDA','Moldova','Moldavia'),('mc','MC','MCO','Monaco','Monaco'),('mn','MN','MNG','Mongolia','Mongolia'),('ms','MS','MSR','Montserrat','Montserrat'),('ma','MA','MAR','Morocco','Marocco'),('mz','MZ','MOZ','Mozambique','Mozambico'),('mm','MM','MMR','Myanmar','Myanmar'),('na','NA','NAM','Namibia','Namibia'),('nr','NR','NRU','Nauru','Nauru'),('np','NP','NPL','Nepal','Nepal'),('nl','NL','NLD','Netherlands','Paesi Bassi'),('an','AN','ANT','Netherlands Antilles','Antille Olandesi'),('nc','NC','NCL','New Caledonia','Nuova Caledonia'),('nz','NZ','NZL','New Zealand','Nuova Zelanda'),('ni','NI','NIC','Nicaragua','Nicaragua'),('nu','NU','NIU','Niue','Niue'),('ne','NE','NER','Niger','Niger'),('ng','NG','NGA','Nigeria','Nigeria'),('nf','NF','NFK','Norfolk Island','Isola Norfolk'),('mp','MP','MNP','Northern Mariana Islands','Isole Marianne 
Settentrionali'),('no','NO','NOR','Norway','Norvegia'),('om','OM','OMN','Oman','Oman'),('pk','PK','PAK','Pakistan','Pakistan'),('pw','PW','PLW','Palau','Palau'),('ps','PS','PSE','Occupied Palestinian Territory','Territori Palestinesi Occupati'),('pa','PA','PAN','Panama','Panamá'),('pg','PG','PNG','Papua New Guinea','Papua Nuova Guinea'),('py','PY','PRY','Paraguay','Paraguay'),('pe','PE','PER','Peru','Perù'),('ph','PH','PHL','Philippines','Filippine'),('pn','PN','PCN','Pitcairn Island','Pitcairn'),('pl','PL','POL','Poland','Polonia'),('pt','PT','PRT','Portugal','Portogallo'),('pr','PR','PRI','Puerto Rico','Porto Rico'),('qa','QA','QAT','Qatar','Qatar'),('re','RE','REU','Reunion','Reunion'),('ro','RO','ROU','Romania','Romania'),('ru','RU','RUS','Russia','Federazione Russa'),('rw','RW','RWA','Rwanda','Ruanda'),('sh','SH','SHN','Saint Helena','Sant\'Elena'),('kn','KN','KNA','Saint Kitts and Nevis','Saint Kitts e Nevis'),('lc','LC','LCA','Saint Lucia','Santa Lucia'),('pm','PM','SPM','Saint Pierre and Miquelon','Saint Pierre e Miquelon'),('vc','VC','VCT','Saint Vincent and the Grenadines','Saint Vincent e Grenadine'),('ws','WS','WSM','Samoa','Samoa'),('sm','SM','SMR','San Marino','San Marino'),('st','ST','STP','Sao Tome and Principe','Sao Tome e Principe'),('sa','SA','SAU','Saudia Arabia','Arabia Saudita'),('sn','SN','SEN','Senegal','Senegal'),('cs','CS','SCG','Serbia',NULL),('sc','SC','SYC','Seychelles','Seychelles'),('sl','SL','SLE','Sierra Leone','Sierra Leone'),('sg','SG','SGP','Singapore','Singapore'),('sk','SK','SVK','Slovakia','Slovacchia'),('si','SI','SVN','Slovenia','Slovenia'),('sb','SB','SLB','Solomon Islands','Isole Solomon'),('so','SO','SOM','Somalia','Somalia'),('za','ZA','ZAF','South Africa','Sudafrica'),('gs',NULL,NULL,'South Georgia and the South Sandwich Islands',NULL),('es','ES','ESP','Spain','Spagna'),('lk','LK','LKA','Sri Lanka','Sri Lanka'),('sd','SD','SDN','Sudan','Sudan'),('sr','SR','SUR','Suriname','Suriname'),('sj','SJ','SJM','Svalbard and Jan 
Mayen Islands','Svalbard e Jan Mayen'),('sz','SZ','SWZ','Swaziland','Swaziland'),('se','SE','SWE','Sweden','Svezia'),('ch','CH','CHE','Switzerland','Svizzera'),('sy','SY','SYR','Syria','Siria'),('tw','TW','TWN','Taiwan','Taiwan'),('tj','TJ','TJK','Tajikistan','Tagikistan'),('tz','TZ','TZA','Tanzania','Tanzania'),('th','TH','THA','Thailand','Thailandia'),('tp','TL','TLS','Timor-Leste','Timor Est'),('tg','TG','TGO','Togo','Togo'),('tk',NULL,NULL,'Tokelau',NULL),('to','TO','TON','Tonga','Tonga'),('tt','TT','TTO','Trinidad and Tobago','Trinidad e Tobago'),('tn','TN','TUN','Tunisia','Tunisia'),('tr','TR','TUR','Turkey','Turchia'),('tm','TM','TKM','Turkmenistan','Turkmenistan'),('tc','TC','TCA','Turks and Caicos Islands','Isole Turks e Caicos'),('tv','TV','TUV','Tuvalu','Tuvalu'),('ug','UG','UGA','Uganda','Uganda'),('ua','UA','UKR','Ukraine','Ucraina'),('ae','AE','ARE','United Arab Emirates','Emirati Arabi Uniti'),('uk','GB','GBR','United Kingdom','Regno Unito'),('us','US','USA','United States of America','Stati Uniti d\'America'),('vi','VI','VIR','United States Virgin Islands','Isole Vergini Americane'),('uy','UY','URY','Uruguay','Uruguay'),('um',NULL,NULL,'US Minor Outlying Islands',NULL),('su','SU','SUN','USSR',NULL),('uz','UZ','UZB','Uzbekistan','Uzbekistan'),('vu','VU','VUT','Vanuatu','Vanuatu'),('va','VA','VAT','Vatican City State (Holy See)','Città del Vaticano'),('ve','VE','VEN','Venezuela','Venezuela'),('vn','VN','VNM','Vietnam','Vietnam'),('wf','WF','WLF','Wallis and Futuna Islands','Wallis e Futuna'),('eh','EH','ESH','Western Sahara','Sahara Occidentale'),('ye','YE','YEM','Yemen','Yemen'),('yu','YU','YUG','Yugoslavia',NULL),('zm','ZM','ZMB','Zambia','Zambia'),('zw','ZW','ZWE','Zimbabwe','Zimbabwe');
UNLOCK TABLES;
SELECT * FROM country_countries
LIMIT 5;
#ALTER ... ORDER BY physically rewrites the table rows in code1 order;
ALTER TABLE country_countries
ORDER BY code1;
SELECT * FROM country_countries
LIMIT 5;
/ # Indexes
#Rename the primary key column to match the naming used in birds_new;
ALTER TABLE conservation_status
CHANGE status_id conservation_status_id INT AUTO_INCREMENT PRIMARY KEY;
SHOW INDEX FROM birdwatchers.humans;
#EXPLAIN before adding the index: the query has to scan the whole table;
EXPLAIN SELECT * FROM birdwatchers.humans
WHERE name_last = 'Hollar';
ALTER TABLE birdwatchers.humans
ADD INDEX human_names (name_last, name_first);
SHOW CREATE TABLE birdwatchers.humans;
SHOW INDEX FROM birdwatchers.humans
WHERE Key_name = 'human_names';
#EXPLAIN after adding the index: the query can now use human_names;
EXPLAIN SELECT * FROM birdwatchers.humans
WHERE name_last = 'Hollar';
#Changing a PRIMARY KEY column requires dropping the key first, in the same ALTER;
ALTER TABLE conservation_status
DROP PRIMARY KEY,
CHANGE status_id conservation_status_id INT PRIMARY KEY AUTO_INCREMENT;
| LearningMySQLMariaDB/CH5AlteringTables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import cv2
import tensorflow as tf
# Haar cascade for face detection (XML file expected in the working directory).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Pre-trained emotion-classification model (Keras HDF5 checkpoint).
model = tf.keras.models.load_model("/home/arjun/DM/face.h5")
# EMOTIONS = ["angry" ,"disgust","scared", "happy", "sad", "surprised","neutral"]
# Class labels in the index order the model predicts — presumably the training
# generator's alphabetical class order; verify against the training code.
EMOTIONS=["angry",
          "disgust",
          "fear",
          "happy",
          "neutral",
          "sad",
          "surprise"]
# EMOTIONS = ["afraid","angry","disgust","happy","neutral","sad","surprised"]
# -
def prepare(ima):
    """Convert a BGR image into the model's input tensor.

    Grayscale, scale to [0, 1], resize to 48x48, and reshape to
    (1, 48, 48, 1) — the batch shape the emotion model expects.
    """
    side = 48  # the model was trained on 48x48 grayscale crops
    gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
    scaled = gray / 255.0
    resized = cv2.resize(scaled, (side, side))
    return resized.reshape(-1, side, side, 1)
# # Static Test
image=cv2.imread("images.jpeg")
# faces = face_cascade.detectMultiScale(image, 1.3, 5)
# faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]
# (x,y,w,h)=faces
# roi = image[y-40:y+h+40, x:x+w]
# Classify the whole image (face cropping above is disabled).
prediction = model.predict([prepare(image)])
preds = prediction[0]
# Pick the label with the highest predicted score.
label = EMOTIONS[preds.argmax()]
print(label)
# image = cv2.rectangle(image,(x,y-40),(x+w,y+h+40),(255,0,0),2)
cv2.imshow("image",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# # Live Test
# Run the emotion classifier on every frame of a video, drawing the detected
# face, the predicted label, and a per-class probability bar chart.
cap = cv2.VideoCapture("/home/arjun/DM/1.mp4")
# cap=cv2.VideoCapture(0)
result = cv2.VideoWriter('1testface.avi', cv2.VideoWriter_fourcc(*'MJPG'), 30, (540, 960))
while True:
    ret, img = cap.read()
    if not ret:
        # End of stream: cap.read() returns (False, None); the original
        # crashed here because it passed None on to cv2.resize.
        break
    img = cv2.resize(img, (540, 960))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    canvas = np.zeros((256, 256, 3), dtype="uint8")
    try:
        # Keep only the highest-ranked detection.
        # NOTE(review): (f[2]-f[0])*(f[3]-f[1]) mixes (x, y, w, h) fields and is
        # not the face area (that would be f[2]*f[3]); kept to preserve ranking.
        faces = sorted(faces, reverse=True, key=lambda f: (f[2] - f[0]) * (f[3] - f[1]))[0]
        (x, y, w, h) = faces
        img = cv2.rectangle(img, (x, y - 20), (x + w, y + h), (255, 0, 0), 2)
        roi = img[y - 20:y + h, x:x + w]
        cv2.imshow('img2', roi)
        prediction = model.predict([prepare(roi)])
        preds = prediction[0]
        label = EMOTIONS[preds.argmax()]
        # Draw one probability bar per emotion on the side canvas.
        for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
            text = "{}: {:.2f}%".format(emotion, prob * 100)
            bar_w = int(prob * 300)  # renamed from `w`, which shadowed the face width
            cv2.rectangle(canvas, (7, (i * 35) + 5), (bar_w, (i * 35) + 35), (0, 0, 255), -1)
            cv2.putText(canvas, text, (10, (i * 35) + 23), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)
        cv2.imshow("Probabilities", canvas)
        cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        result.write(img)
    except Exception:
        # No face found (or ROI outside the frame): show the raw frame and move on.
        pass
    cv2.imshow('img', img)
    # The original read the key three times and compared with wrong precedence
    # (`k1 & k2 == ord('q')`); read once and mask the low byte instead.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# # Test on static Validation data
# +
# Accuracy check over the validation set: one folder per emotion
# (folder name = class index j), images named 1.jpg .. 49.jpg.
for j in range(0,7):
    right_count=0
    wrong_count=0
    for i in range(1,50):
#         try:
        img=cv2.imread("/home/arjun/DM/Face/validation/"+str(j)+"/"+str(i)+".jpg")
#         cv2.imshow("image",img)
#         cv2.waitKey(0)
#         cv2.destroyAllWindows()
#         faces = face_cascade.detectMultiScale(img, 1.3, 5)
#         print(faces)
#         faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]
#         (x,y,w,h)=faces
#         roi = image[y-20:y+h, x:x+w]
        pr=model.predict([prepare(img)])
        preds=pr[0]
        label = EMOTIONS[preds.argmax()]
        # Count a hit when the prediction matches the folder's class label.
        if(label==EMOTIONS[j]):
            right_count+=1
        else:
            wrong_count+=1
#     except:
#         pass
    print(EMOTIONS[j])
    print("Right "+str(right_count)+" Wrong "+str(wrong_count))
# -
#
#
# +
# NOTE(review): this cell held pasted *output* of the validation run above,
# not code — bare lines like "Right 20 Wrong 29" are a syntax error when the
# notebook script is executed. Preserved as a comment instead:
#
# angry
# Right 20 Wrong 29
# disgust
# Right 30 Wrong 19
# fear
# Right 23 Wrong 26
# happy
# Right 40 Wrong 9
# neutral
# Right 26 Wrong 23
# sad
# Right 32 Wrong 17
# surprise
# Right 34 Wrong 15
# -
| ARJUN/Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Práctica 0: Python
# ---
# ### Autores:
# <NAME> - 5º Doble Grado en Ingeniería Informática - Matemáticas
# <NAME> - 4º Grado en Ingeniería Informática
#
# ---
# **Fecha de entrega:** 4 de octubre de 2018, 18.00h
#
# #### Descripción de la práctica
# En esta primera práctica has de implementar un algoritmo de integración numérica basado en el método de Monte Carlo.
# Dada una función real e integrable de una sola variable $f(x)$, y su integral $F(x)$, la integral definida de $f(x)$ entre $a$ y $b$ viene dada por la expresión $$I = \int_a^b f(x) dx = F(b) - F(a)$$ como el cálculo simbólico de la integral $F(x)$ puede ser muy difícil, se utilizan métodos numéricos que aproximan su valor utilizando la interpretación geométrica de la integral definida que se corresponde con el área bajo la curva $f(x)$ entre $a$ y $b$.
# Dada una función $f(x)$ positiva en el intervalo $x \in [a;b]$ cuyo valor máximo es $M$ dentro de ese intervalo, podemos definir un rectángulo de área $(b-a)\times M$ como el que se muestra en la figura para el intervalo $[0;2]$. El método de Monte Carlo para el cálculo de la integral consiste en generar aleatoriamente puntos (en rojo en la figura) dentro de ese rectángulo y aproximar el valor de la integral por el porcentaje de puntos que caen por debajo de la función en cuestión:
# $$I\approx \frac{N_{debajo}}{N_{total}}(b-a)M$$ donde $N_{debajo}$ es el número de puntos $(x, y)$ generados aleatoriamente cuya coordenada $y$ es menor que el valor de la función $f(x)$ para ese valor de $x$ y $N_{total}$ es el número total de puntos generados aleatoriamente dentro del rectángulo.
#
# #### Implementación de la solución
# Antes de mostrar el código desarrollado para el correcto funcionamiento de la práctica, debemos importar los módulos necesarios para el desarrollo de esta práctica:
#
# - **Time**: para calcular el tiempo de ejecución del método de Monte Carlo.
# - **Numpy**: para realizar operaciones con vectores.
# - **Pyplot**, de la librería **Matplotlib**: para dibujar gráficas que reflejan los resultados de la práctica.
# - **Integrate**, de la librería **Scipy**: para calcular el valor de la integral de una función en un intervalo.
#
# %matplotlib notebook
import time
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
# 1) Versión iterativa
# +
def integra_mc_it(fun, a, b, num_puntos=10000):
    """Monte Carlo estimate of the integral of `fun` over [a, b], one random
    point at a time (iterative version).

    Returns a tuple (estimate, elapsed_process_seconds).
    """
    start = time.process_time()
    # Upper bound M of the function, sampled on a uniform grid of the interval.
    grid = np.linspace(a, b, num_puntos)
    M = np.max(fun(grid))
    # Draw random points inside the bounding box (b-a) x M and count how many
    # fall under the curve.
    hits = 0
    for _ in range(num_puntos):
        px = np.random.uniform(a, b)
        py = np.random.uniform(0, M)
        if py < fun(px):
            hits += 1
    estimate = hits / num_puntos * (b - a) * M
    end = time.process_time()
    return (estimate, end - start)
# I = integra_mc_it(np.sin, 0.0, np.pi)
# print('The integral under the curve is ' + str(I))
# -
# 2) Versión paralela
# +
def integra_mc_fast(fun, a, b, num_puntos=10000):
    """Vectorized Monte Carlo estimate of the integral of `fun` over [a, b].

    Args:
        fun: vectorized callable, assumed positive on [a, b].
        a, b: integration limits.
        num_puntos: number of grid samples / random points.

    Returns:
        (estimate, elapsed_process_seconds)
    """
    tic = time.process_time()
    # Upper bound M of the function, sampled on a uniform grid of the interval.
    interval = np.linspace(a, b, num_puntos)
    M = np.max(fun(interval))
    # Generate all random points of the bounding box at once.
    xrand = np.random.uniform(a, b, num_puntos)
    yrand = np.random.uniform(0, M, num_puntos)
    # Count points under the curve. np.count_nonzero runs in C; the builtin
    # sum() used before iterated the boolean array element by element.
    N_under = np.count_nonzero(fun(xrand) > yrand)
    sol = N_under / num_puntos * (b - a) * M
    toc = time.process_time()
    return (sol, toc - tic)
# I = integra_mc(np.cos, 0.0, np.pi)
# print('The integral under the curve is ' + str(I))
# -
# 3) Pruebas de ejecución
#
# Para poner a prueba la eficacia del método de Monte Carlo utilizaremos una función cuya integral sea conocida. Por ejemplo, sabemos que la integral de la función $sin$ en el intervalo $[0,\pi]$ es igual a 2 (aunque lo comprobaremos con la función `integrate.quad` de Python).
# +
# Compare the iterative and vectorized estimators on sin over [0, pi]
# (exact value 2), then plot the curve and the random point cloud.
N = 5000
print('* Iterative case')
sol_it = integra_mc_it(np.sin, 0.0, np.pi, N)
print('The integral under the curve is ' + str(sol_it[0]))
print('Elapsed time (iterative): ' + str(sol_it[1]))
print ('-'*80)
print('* Vectorized case')
sol_vec = integra_mc_fast(np.sin, 0.0, np.pi, N)
print('The integral under the curve is ' + str(sol_vec[0]))
print('Elapsed time (vector): ' + str(sol_vec[1]))
# Visualize the bounding box, the curve, and a fresh random sample.
x = np.linspace(0.0, np.pi, N)
plt.figure()
ejes = plt.gca()
m = np.max(np.sin(x))
prop_ejes = [0.0, np.pi, 0, m]
ejes.axis(prop_ejes)
plt.plot(x, np.sin(x), '-', c='blue')
x_aleatorios = np.random.uniform(0.0, np.pi, N)
y_aleatorios = np.random.uniform(0, m, N)
plt.scatter(x_aleatorios, y_aleatorios, marker='x', c='red')
plt.show()
# Reference value from scipy's adaptive quadrature.
print('Aplying a Python function, the integral obtained is ' + str(integrate.quad(np.sin, 0.0, np.pi)[0]))
# -
# #### Conclusión
# Los resultados obtenidos muestran con evidencia aquello que ya sabíamos de antemano: aplicar funciones y algoritmos de manera vectorizada es mucho más eficiente que hacerlo de manera iterativa. Además el algoritmo de Monte Carlo es bastante eficiente para el cálculo de integrales, pues los errores obtenidos son entorno a la tercera cifra decimal.
| P0.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit (conda)
# language: python
# name: python_defaultSpec_1599503179152
# ---
# + tags=[]
from collections import Counter
import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
# +
# Load the sample image (BGR channel order, as OpenCV reads it) and split it.
pwd=os.getcwd()
img_path = pwd+ "/data/Images/Original Images/img_001.jpg"
img = cv2.imread(img_path, 1)  # 1 = cv2.IMREAD_COLOR
blue, green, red = cv2.split(img)
# + active=""
# [[chapter_collab]]
# +
# Convert BGR image into RGB
def bgr2rgb(img):
    """Return `img` with its channels swapped from OpenCV's BGR order to RGB."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def normalize(seq, input_start, input_end, output_start, output_end):
    """Linearly map `seq` from [input_start, input_end] onto
    [output_start, output_end], truncating the result to uint8."""
    scale = (output_end - output_start) / (input_end - input_start)
    shifted = seq - input_start
    return (output_start + scale * shifted).astype(np.uint8)
# -
# Plot the original image next to its three colour channels, then save the figure.
layout = (2, 3)
plt.subplot2grid(layout, (0, 1), colspan=1)
plt.imshow(bgr2rgb(img)), plt.title("Original")
plt.subplot2grid(layout, (1, 0))
plt.imshow(blue), plt.title("Blue")
plt.colorbar()
plt.subplot2grid(layout, (1, 1))
plt.imshow(green), plt.title("Green")
plt.colorbar()
plt.subplot2grid(layout, (1, 2))
plt.imshow(red), plt.title("Red")
plt.colorbar()
plt.savefig("T1-Plots/RGB_Plots.png")
plt.show()
# Convert BGR image into HSV
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Split HSV into its channels
h, s, v = cv2.split(hsv_img)
# +
# Map Hue to range (0, 255)
# (OpenCV stores 8-bit hue as 0-179, so rescale it for display alongside S and V.)
h = normalize(h, 0, 179, 0, 255)
layout = (2, 3)
plt.subplot2grid(layout, (0, 1), colspan=1)
plt.imshow(hsv_img), plt.title("HSV")
plt.subplot2grid(layout, (1, 0))
plt.imshow(h), plt.title("HUE")
plt.colorbar()
plt.subplot2grid(layout, (1, 1))
plt.imshow(s), plt.title("Saturation")
plt.colorbar()
plt.subplot2grid(layout, (1, 2))
plt.imshow(v), plt.title("Value")
plt.colorbar()
plt.savefig("T1-Plots/HSV_Plots.png")
plt.show()
# +
def find_bin(num, bins):
    """Return the index of the half-open bin [bins[i], bins[i+1]) containing
    `num`, or None when `num` lies outside every bin (the right edge of the
    last bin is excluded, matching the original behaviour)."""
    for i in range(len(bins) - 1):
        if bins[i] <= num < bins[i + 1]:
            return i
    return None

def bin_numbers(seq, bins):
    """Map each value of `seq` to its bin index (None when out of range)."""
    return np.array([find_bin(value, bins) for value in seq])
def histogram(seq, n_bins=256, range=(0, 256)):
    """Histogram of `seq` over `n_bins` equal-width bins spanning `range`.

    Mirrors np.histogram's binning: each bin is half-open [lo, hi) except the
    last, which also includes the right edge. Values outside `range` are
    ignored. (Parameter name `range` shadows the builtin; kept for
    backward compatibility with existing callers.)

    Returns:
        (hist, bins): counts of length n_bins and bin edges of length n_bins + 1.
    """
    seq = np.asarray(seq)
    bins = np.linspace(range[0], range[1], n_bins + 1)
    # Drop out-of-range values, as np.histogram does.
    seq = seq[(seq >= bins[0]) & (seq <= bins[-1])]
    # side='right' followed by -1 gives the bin a value falls in. The original
    # used side='left', which only works for data sitting exactly on the left
    # edges (e.g. integers with unit bins); 0.5 landed in bin 1 instead of 0.
    bin_indices = np.searchsorted(bins, seq, side='right') - 1
    # The last bin is closed on the right: fold range[1] into bin n_bins - 1.
    bin_indices[bin_indices == n_bins] = n_bins - 1
    hist = np.bincount(bin_indices, minlength=n_bins)
    # Return x axis and height values to plot
    return hist, bins
def counter(seq):
    """Per-value counts of `seq` (values assumed in 0..255), computed with
    collections.Counter.

    Returns:
        (hist, bins): counts array of length 256 and the values 0..255.
    """
    counts = Counter(seq.tolist())
    hist = np.zeros(256)
    for value, freq in counts.items():
        hist[value] = freq
    bins = np.arange(0, 256)
    return hist, bins
# Channel name -> 2-D channel array, used by the histogram comparison below.
images = {
    'RED': red,
    'GREEN': green,
    'BLUE': blue,
    'HUE': h,
    'SATURATION': s,
    'Value': v
}
# -
# For every channel: plot my histogram, then verify it against both
# np.histogram and np.bincount on the same (integer-valued) data.
for title in images:
    hist_m, bins_m = histogram(images[title].ravel(), 256, [0, 256])
    plt.title(title) # + " mine")
    plt.bar(bins_m[:-1], height=hist_m)
    plt.savefig("T1-Plots/histogram_{}".format(title))
    plt.show()
    hist, bins = np.histogram(images[title].ravel(), 256, [0, 256])
    assert np.equal(hist, hist_m).all(), "My histogram function is not equal to numpy histogram"
    hist_b = np.bincount(images[title].flatten(), minlength=256)
    assert np.equal(hist, hist_b).all(), "My histogram function is not equal to bincount"
| 1/Task1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import random
import math
import numpy as np
import gym
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch import nn
from torch.autograd import Variable
from utils import grid_from_state
from atari_wrappers import wrap_deepmind
# -
# ## Utils
def process_state(state):
    """Turn an (H, W, C) uint8 frame stack into a float32 torch batch of
    shape (1, C, H, W) with values scaled to [0, 1]."""
    arr = np.asarray(state, dtype='float32')
    arr = arr.transpose((2, 0, 1))  # HWC -> CHW, the layout torch convs expect
    arr = arr / 255.  # rescale pixel values to [0, 1]
    return torch.from_numpy(arr).unsqueeze(0)  # prepend a batch dimension
# ## Environment
# - deepmind wrapper keeps track of a frame stack, resizes to 84x84, and converts to grayscale
#
env = gym.make('Pong-v0')
# wrap_deepmind applies the standard Atari preprocessing; frame_stack=True
# makes observations a stack of frames (see the atari_wrappers module).
env = wrap_deepmind(env, frame_stack=True)
# ### Sample state
# - this is what the q-network will see at every step
state = env.reset()
plt.imshow(grid_from_state(state), cmap='gray');
# ## Q-Network
# this network will approximate the Q function. It takes a state (4 stacked frames) and outputs a Q value for each possible action (Pong has 6 possible actions). Q values are estimates of the amount of reward (score) we expect to get at the end of the game.
# 
class DQN(nn.Module):
    """
    deepmind architecture from "Human-level control through deep reinforcement learning"
    https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf

    Maps a stack of 4 grayscale 84x84 frames to one Q-value per action.
    """
    def __init__(self, num_actions):
        super(DQN, self).__init__()
        # Convolutional trunk: 84x84x4 -> 20x20x32 -> 9x9x64 -> 7x7x64.
        self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # Fully connected head producing one Q-value per action.
        self.fc4 = nn.Linear(7 * 7 * 64, 512)
        self.fc5 = nn.Linear(512, num_actions)
    def forward(self, state):
        """Return a (batch, num_actions) tensor of Q-value estimates."""
        hidden = F.relu(self.conv1(state))
        hidden = F.relu(self.conv2(hidden))
        hidden = F.relu(self.conv3(hidden))
        hidden = hidden.view(hidden.size(0), -1)  # flatten to (batch, 7*7*64)
        hidden = F.relu(self.fc4(hidden))
        return self.fc5(hidden)  # raw Q-values, one per action
# ## Replay Memory
# - we want constant time insertion and sampling as this holds 1 million transitions in original paper and must be sampled from on every step.
class ReplayMemory:
    """
    Fixed-capacity ring buffer of transitions (state, action, reward, next_state, done).

    Insertion and sampling are both constant time, which matters because the
    original paper holds 1 million transitions and samples on every step.
    """
    def __init__(self, capacity):
        # Maximum number of transitions kept before old ones are overwritten.
        self.capacity = capacity
        self.samples = []
        # Slot index the next transition will be written to.
        self.insert_location = 0
    def add(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once at capacity."""
        entry = (state, action, reward, next_state, done)
        if self.insert_location < len(self.samples):
            # Buffer already reached this slot before: overwrite in place, O(1).
            self.samples[self.insert_location] = entry
        else:
            self.samples.append(entry)
        # Advance the write cursor, wrapping around at capacity.
        self.insert_location = (self.insert_location + 1) % self.capacity
    def sample(self, batch_size):
        """Draw a random batch (clipped to the buffer size) of prepared tensors."""
        count = min(batch_size, len(self.samples))
        return self.prepare_batch(random.sample(self.samples, count))
    def prepare_batch(self, batch):
        """
        Transposes and pre-processes batch of transitions into batches of torch tensors
        Args:
            batch: list of transitions [[s, a, r, s2, done],
                                        [s, a, r, s2, done]]
        Returns: [s], [a], [r], [s2], [done_mask]
        """
        states = torch.cat([process_state(s) for s, _, _, _, _ in batch])
        actions = [a for _, a, _, _, _ in batch]
        rewards = torch.FloatTensor([r for _, _, r, _, _ in batch])
        next_states = torch.cat([process_state(s2) for _, _, _, s2, _ in batch])
        # done=True becomes 0 so terminal next-states contribute no future value.
        done_mask = torch.FloatTensor([1 - d for _, _, _, _, d in batch])
        return states, actions, rewards, next_states, done_mask
    def __len__(self):
        return len(self.samples)
# ## Action Selection Policy
# with probability epsilon, select a random action. otherwise selects action corresponding to highest predicted Q value argmax(Q(S, A)
def select_action(q_network, state, env, epsilon):
    """
    epsilon greedy policy.
    With probability epsilon a uniformly random action is taken; otherwise the
    action with the highest predicted Q value is chosen.
    Args:
        state: current state of the environment (4 stack of image frames)
        epsilon: probability of random action (1.0 - 0.0)
    Returns:(int) action to perform
    """
    # Explore: sample uniformly from the action space.
    if random.random() < epsilon:
        return env.action_space.sample()
    # Exploit: greedy action under the current Q network.
    # volatile=True disables gradient tracking (legacy pre-0.4 pytorch API).
    inputs = Variable(process_state(state), volatile=True).cuda()
    return int(q_network(inputs).data.max(1)[1])
def calculate_epsilon(current_step, epsilon_max=0.9, epsilon_min=0.05, decay_rate=1e-5):
"""
calculates epsilon value given steps done and speed of decay
"""
epsilon = epsilon_min + (epsilon_max - epsilon_min) * \
math.exp(-decay_rate * current_step)
return epsilon
# 
# ## deep q learning (1 episode)
# +
# hyper params
batch_size = 32  # transitions per SGD update
gamma = 0.99  # discount factor for future rewards
learning_rate =1e-4
memory = ReplayMemory(capacity=10000) # initialize replay memory
q_network = DQN(env.action_space.n).cuda() # initialize action-value function Q with random weights
optimizer = Adam(q_network.parameters(), lr=learning_rate)
criterion = nn.SmoothL1Loss()  # Huber loss: less sensitive to outlier TD errors than MSE
state = env.reset() # observe initial state
current_step = 0
while True:
    env.render() # so we can watch!
    # epsilon decays with current_step, so exploration shrinks over the episode
    action = select_action(q_network, state, env, calculate_epsilon(current_step)) # select action
    next_state, reward, done, info = env.step(action) # carry out action/observe reward
    # store experience s, a, r, s' in replay memory
    memory.add(state, action, reward, next_state, done)
    # sample random transitions
    states, actions, rewards, next_states, done_mask = memory.sample(batch_size)
    # prepare batch
    states = Variable(states).cuda()
    next_states = Variable(next_states).cuda()
    rewards = Variable(rewards).cuda()
    done_mask = Variable(done_mask).cuda()
    # calculate target
    # find next Q values and set Q values for done states to 0
    # (detach so no gradient flows through the bootstrap target)
    next_q_values = q_network(next_states).max(dim=1)[0].detach() * done_mask
    # calculate targets = rewards + (gamma * next_Q_values)
    targets = rewards + (gamma * next_q_values)
    q_values = q_network(states)[range(len(actions)), actions] # select only Q values for actions we took
    # train network
    loss = criterion(q_values, targets) # smooth l1 loss
    optimizer.zero_grad()
    loss.backward()
    # gradient clipping to prevent exploding gradient
    for param in q_network.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    state = next_state # move to next state
    current_step += 1
    if done:
        break
env.close()
| Minimal_DQN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to the yt quickstart!
#
# In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.
#
# Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at http://yt-project.org/doc/. If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.
#
# ## Acquiring the datasets for this tutorial
#
# If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at http://yt-project.org/data/. The datasets necessary for each lesson are noted next to the corresponding tutorial.
#
# ## What's Next?
#
# The Notebooks are meant to be explored in this order:
#
# 1. Introduction
# 2. Data Inspection (IsolatedGalaxy dataset)
# 3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)
# 4. Data Objects and Time Series (IsolatedGalaxy dataset)
# 5. Derived Fields and Profiles (IsolatedGalaxy dataset)
# 6. Volume Rendering (IsolatedGalaxy dataset)
# The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it.
# Flip to True to fetch and unpack the sample datasets used by the tutorial.
download_datasets = False
if download_datasets:
    # !curl -sSO https://yt-project.org/data/enzo_tiny_cosmology.tar.gz
    print ("Got enzo_tiny_cosmology")
    # !tar xzf enzo_tiny_cosmology.tar.gz
    # !curl -sSO https://yt-project.org/data/Enzo_64.tar.gz
    print ("Got Enzo_64")
    # !tar xzf Enzo_64.tar.gz
    # !curl -sSO https://yt-project.org/data/IsolatedGalaxy.tar.gz
    print ("Got IsolatedGalaxy")
    # !tar xzf IsolatedGalaxy.tar.gz
    print ("All done!")
| doc/source/quickstart/1)_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using optimization routines from `scipy` and `statsmodels`
# %matplotlib inline
import scipy.linalg as la
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
import pandas as pd
np.set_printoptions(precision=3, suppress=True)
# Using `scipy.optimize`
# ----
#
# One of the most convenient libraries to use is `scipy.optimize`, since it is already part of the Anaconda installation and it has a fairly intuitive interface.
from scipy import optimize as opt
# #### Minimizing a univariate function $f: \mathbb{R} \rightarrow \mathbb{R}$
def f(x):
    """Quartic test objective x^4 + 3(x-2)^3 - 15x^2 + 1, with several local minima."""
    cubic_term = 3 * (x - 2)**3
    return x**4 + cubic_term - 15 * x**2 + 1
x = np.linspace(-8, 5, 100)
plt.plot(x, f(x));
# The [`minimize_scalar`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar) function will find the minimum, and can also be told to search within given bounds. By default, it uses the Brent algorithm, which combines a bracketing strategy with a parabolic approximation.
opt.minimize_scalar(f, method='Brent')
opt.minimize_scalar(f, method='bounded', bounds=[0, 6])
# ### Local and global minima
def f(x, offset):
    """Negated sinc centered at `offset`; the global minimum of -1 sits at x == offset."""
    shifted = x - offset
    return -np.sinc(shifted)
x = np.linspace(-20, 20, 100)
plt.plot(x, f(x, 5));
# note how additional function arguments are passed in
sol = opt.minimize_scalar(f, args=(5,))
sol
plt.plot(x, f(x, 5))
plt.axvline(sol.x, c='red')
pass
# #### We can try multiple random starts to find the global minimum
lower = np.random.uniform(-20, 20, 100)
upper = lower + 1
sols = [opt.minimize_scalar(f, args=(5,), bracket=(l, u)) for (l, u) in zip(lower, upper)]
idx = np.argmin([sol.fun for sol in sols])
sol = sols[idx]
plt.plot(x, f(x, 5))
plt.axvline(sol.x, c='red');
# #### Using a stochastic algorithm
#
# See documentation for the [`basinhopping`](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.basinhopping.html) algorithm, which also works with multivariate scalar optimization. Note that this is heuristic and not guaranteed to find a global minimum.
# +
from scipy.optimize import basinhopping
x0 = 0
sol = basinhopping(f, x0, stepsize=1, minimizer_kwargs={'args': (5,)})
sol
# -
plt.plot(x, f(x, 5))
plt.axvline(sol.x, c='red');
# ### Constrained optimization with `scipy.optimize`
#
# Many real-world optimization problems have constraints - for example, a set of parameters may have to sum to 1.0 (equality constraint), or some parameters may have to be non-negative (inequality constraint). Sometimes, the constraints can be incorporated into the function to be minimized, for example, the non-negativity constraint $p \gt 0$ can be removed by substituting $p = e^q$ and optimizing for $q$. Using such workarounds, it may be possible to convert a constrained optimization problem into an unconstrained one, and use the methods discussed above to solve the problem.
#
# Alternatively, we can use optimization methods that allow the specification of constraints directly in the problem statement as shown in this section. Internally, constraint violation penalties, barriers and Lagrange multipliers are some of the methods used to handle these constraints. We use the example provided in the Scipy [tutorial](http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html) to illustrate how to set constraints.
#
# We will optimize:
#
# $$
# f(x) = -(2xy + 2x - x^2 -2y^2)
# $$
# subject to the constraint
# $$
# x^3 - y = 0 \\
# y - (x-1)^4 - 2 \ge 0
# $$
# and the bounds
# $$
# 0.5 \le x \le 1.5 \\
# 1.5 \le y \le 2.5
# $$
def f(x):
    """Objective to minimize: -(2xy + 2x - x^2 - 2y^2), with x = (x[0], x[1])."""
    u, v = x[0], x[1]
    return -(2*u*v + 2*u - u**2 - 2*v**2)
x = np.linspace(0, 3, 100)
y = np.linspace(0, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(np.vstack([X.ravel(), Y.ravel()])).reshape((100,100))
plt.contour(X, Y, Z, np.arange(-1.99,10, 1), cmap='jet');
plt.plot(x, x**3, 'k:', linewidth=1)
plt.plot(x, (x-1)**4+2, 'k:', linewidth=1)
plt.fill([0.5,0.5,1.5,1.5], [2.5,1.5,1.5,2.5], alpha=0.3)
plt.axis([0,3,0,3])
# To set constraints, we pass in a dictionary with keys `type`, `fun` and `jac`. Note that the inequality constraint assumes a $C_j x \ge 0$ form. As usual, the `jac` is optional and will be numerically estimated if not provided.
# +
cons = ({'type': 'eq',
'fun' : lambda x: np.array([x[0]**3 - x[1]]),
'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
{'type': 'ineq',
'fun' : lambda x: np.array([x[1] - (x[0]-1)**4 - 2])})
bnds = ((0.5, 1.5), (1.5, 2.5))
# -
x0 = [0, 2.5]
# Unconstrained optimization
ux = opt.minimize(f, x0, constraints=None)
ux
# Constrained optimization
cx = opt.minimize(f, x0, bounds=bnds, constraints=cons)
cx
x = np.linspace(0, 3, 100)
y = np.linspace(0, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(np.vstack([X.ravel(), Y.ravel()])).reshape((100,100))
plt.contour(X, Y, Z, np.arange(-1.99,10, 1), cmap='jet');
plt.plot(x, x**3, 'k:', linewidth=1)
plt.plot(x, (x-1)**4+2, 'k:', linewidth=1)
plt.text(ux['x'][0], ux['x'][1], 'x', va='center', ha='center', size=20, color='blue')
plt.text(cx['x'][0], cx['x'][1], 'x', va='center', ha='center', size=20, color='red')
plt.fill([0.5,0.5,1.5,1.5], [2.5,1.5,1.5,2.5], alpha=0.3)
plt.axis([0,3,0,3]);
# ## Some applications of optimization
# ### Finding parameters for ODE models
#
# This is a specialized application of `curve_fit`, in which the curve to be fitted is defined implicitly by an ordinary differential equation
# $$
# \frac{dx}{dt} = -kx
# $$
# and we want to use observed data to estimate the parameters $k$ and the initial value $x_0$. Of course this can be explicitly solved but the same approach can be used to find multiple parameters for $n$-dimensional systems of ODEs.
#
# [A more elaborate example for fitting a system of ODEs to model the zombie apocalypse](http://adventuresinpython.blogspot.com/2012/08/fitting-differential-equation-system-to.html)
# +
from scipy.integrate import odeint
def f(x, t, k):
    """Simple exponential decay: dx/dt = -k*x (t unused, required by odeint)."""
    return -(k * x)
def x(t, k, x0):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    # Integrate numerically and flatten to 1-D so curve_fit can consume it.
    trajectory = odeint(f, x0, t, args=(k,))
    return trajectory.ravel()
# +
# True parameter values
x0_ = 10
k_ = 0.1*np.pi
# Some random data genererated from closed form solution plus Gaussian noise
ts = np.sort(np.random.uniform(0, 10, 200))
xs = x0_*np.exp(-k_*ts) + np.random.normal(0,0.1,200)
popt, cov = opt.curve_fit(x, ts, xs)
k_opt, x0_opt = popt
print("k = %g" % k_opt)
print("x0 = %g" % x0_opt)
# -
import matplotlib.pyplot as plt
t = np.linspace(0, 10, 100)
plt.plot(ts, xs, 'r.', t, x(t, k_opt, x0_opt), '-');
# ### Another example of fitting a system of ODEs using the `lmfit` package
#
# You may have to install the [`lmfit`](http://cars9.uchicago.edu/software/python/lmfit/index.html) package using `pip` and restart your kernel. The `lmfit` algorithm is another wrapper around `scipy.optimize.leastsq` but allows for richer model specification and more diagnostics.
# ! pip install lmfit
from lmfit import minimize, Parameters, Parameter, report_fit
import warnings
# +
def f(xs, t, ps):
    """Lotka-Volterra predator-prey model.

    ps may be an lmfit Parameters object (values read via .value) or any plain
    sequence unpacking to (a, b, c, d).
    """
    try:
        a, b, c, d = (ps[name].value for name in 'abcd')
    except:
        a, b, c, d = ps
    x, y = xs
    # Prey grows at rate a and is eaten at rate b; predators grow from
    # prey at rate c and die off at rate d.
    return [a*x - b*x*y, c*x*y - d*y]
def g(t, x0, ps):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    # Returns a (len(t), 2) trajectory of prey/predator populations.
    return odeint(f, x0, t, args=(ps,))
def residual(ps, ts, data):
    """Flattened difference between the model trajectory and the observations."""
    start = ps['x0'].value, ps['y0'].value
    return (g(ts, start, ps) - data).ravel()
t = np.linspace(0, 10, 100)
x0 = np.array([1,1])  # initial prey/predator populations
a, b, c, d = 3,1,1,1
true_params = np.array((a, b, c, d))
np.random.seed(123)  # reproducible noise
# synthetic observations: true trajectory plus unit Gaussian noise
data = g(t, x0, true_params)
data += np.random.normal(size=data.shape)
# set parameters including bounds
params = Parameters()
params.add('x0', value= float(data[0, 0]), min=0, max=10)
params.add('y0', value=float(data[0, 1]), min=0, max=10)
params.add('a', value=2.0, min=0, max=10)
params.add('b', value=2.0, min=0, max=10)
params.add('c', value=2.0, min=0, max=10)
params.add('d', value=2.0, min=0, max=10)
# fit model and find predicted values
result = minimize(residual, params, args=(t, data), method='leastsq')
final = data + result.residual.reshape(data.shape)
# plot data and fitted curves
plt.plot(t, data, 'o')
plt.plot(t, final, '-', linewidth=2);
# display fitted statistics
report_fit(result)
# -
# #### Optimization of graph node placement
#
# To show the many different applications of optimization, here is an example using optimization to change the layout of nodes of a graph. We use a physical analogy - nodes are connected by springs, and the springs resist deformation from their natural length $l_{ij}$. Some nodes are pinned to their initial locations while others are free to move. Because the initial configuration of nodes does not have springs at their natural length, there is tension resulting in a high potential energy $U$, given by the physics formula shown below. Optimization finds the configuration of lowest potential energy given that some nodes are fixed (set up as boundary constraints on the positions of the nodes).
#
# $$
# U = \frac{1}{2}\sum_{i,j=1}^n ka_{ij}\left(||p_i - p_j||-l_{ij}\right)^2
# $$
#
# Note that the ordination algorithm Multi-Dimensional Scaling (MDS) works on a very similar idea - take a high dimensional data set in $\mathbb{R}^n$, and project down to a lower dimension ($\mathbb{R}^k$) such that the sum of distances $d_n(x_i, x_j) - d_k(x_i, x_j)$, where $d_n$ and $d_k$ are some measure of distance between two points $x_i$ and $x_j$ in $n$ and $d$ dimension respectively, is minimized. MDS is often used in exploratory analysis of high-dimensional data to get some intuitive understanding of its "structure".
from scipy.spatial.distance import pdist, squareform
# - P0 is the initial location of nodes
# - P is the minimal energy location of nodes given constraints
# - A is a connectivity matrix - there is a spring between $i$ and $j$ if $A_{ij} = 1$
# - $L_{ij}$ is the resting length of the spring connecting $i$ and $j$
# - In addition, there are a number of `fixed` nodes whose positions are pinned.
n = 20  # number of graph nodes
k = 1 # spring stiffness
P0 = np.random.uniform(0, 5, (n,2))  # random initial node positions in a 5x5 box
A = np.ones((n, n))
A[np.tril_indices_from(A)] = 0  # keep only the upper triangle so each spring is counted once
L = A.copy()  # resting length 1 for every connected pair
# NOTE(review): astype returns a copy; this statement has no effect on L
L.astype('int')
def energy(P):
    """Total spring potential 0.5 * sum(k*A*(|pi - pj| - L)^2) for flattened layout P.

    Reads the module-level stiffness k, adjacency A and resting lengths L.
    """
    points = P.reshape((-1, 2))
    distances = squareform(pdist(points))
    deviation = distances - L
    return 0.5 * (k * A * deviation**2).sum()
D0 = squareform(pdist(P0))
E0 = 0.5* k * A * (D0 - L)**2
D0[:5, :5]
E0[:5, :5]
energy(P0.ravel())
# fix the position of the first few nodes just to show constraints
fixed = 4
bounds = (np.repeat(P0[:fixed,:].ravel(), 2).reshape((-1,2)).tolist() +
[[None, None]] * (2*(n-fixed)))
bounds[:fixed*2+4]
sol = opt.minimize(energy, P0.ravel(), bounds=bounds)
# #### Visualization
#
# Original placement is BLUE
# Optimized arrangement is RED.
plt.scatter(P0[:, 0], P0[:, 1], s=25)
P = sol.x.reshape((-1,2))
plt.scatter(P[:, 0], P[:, 1], edgecolors='red', facecolors='none', s=30, linewidth=2);
# Optimization of standard statistical models
# ---
#
# When we solve standard statistical problems, an optimization procedure similar to the ones discussed here is performed. For example, consider multivariate logistic regression - typically, a Newton-like algorithm known as iteratively reweighted least squares (IRLS) is used to find the maximum likelihood estimate for the generalized linear model family. However, using one of the multivariate scalar minimization methods shown above will also work, for example, the BFGS minimization algorithm.
#
# The take home message is that there is nothing magic going on when Python or R fits a statistical model using a formula - all that is happening is that the objective function is set to be the negative of the log likelihood, and the minimum found using some first or second order optimization algorithm.
import statsmodels.api as sm
# ### Logistic regression as optimization
#
# Suppose we have a binary outcome measure $Y \in {0,1}$ that is conditional on some input variable (vector) $x \in (-\infty, +\infty)$. Let the conditional probability be $p(x) = P(Y=y | X=x)$. Given some data, one simple probability model is $p(x) = \beta_0 + x\cdot\beta$ - i.e. linear regression. This doesn't really work for the obvious reason that $p(x)$ must be between 0 and 1 as $x$ ranges across the real line. One simple way to fix this is to use the transformation $g(x) = \frac{p(x)}{1 - p(x)} = \beta_0 + x.\beta$. Solving for $p$, we get
# $$
# p(x) = \frac{1}{1 + e^{-(\beta_0 + x\cdot\beta)}}
# $$
# As you all know very well, this is logistic regression.
#
# Suppose we have $n$ data points $(x_i, y_i)$ where $x_i$ is a vector of features and $y_i$ is an observed class (0 or 1). For each event, we either have "success" ($y = 1$) or "failure" ($Y = 0$), so the likelihood looks like the product of Bernoulli random variables. According to the logistic model, the probability of success is $p(x_i)$ if $y_i = 1$ and $1-p(x_i)$ if $y_i = 0$. So the likelihood is
# $$
# L(\beta_0, \beta) = \prod_{i=1}^n p(x_i)^y(1-p(x_i))^{1-y}
# $$
# and the log-likelihood is
# \begin{align}
# l(\beta_0, \beta) &= \sum_{i=1}^{n} y_i \log{p(x_i)} + (1-y_i)\log{1-p(x_i)} \\
# &= \sum_{i=1}^{n} \log{1-p(x_i)} + \sum_{i=1}^{n} y_i \log{\frac{p(x_i)}{1-p(x_i)}} \\
# &= \sum_{i=1}^{n} -\log 1 + e^{\beta_0 + x_i\cdot\beta} + \sum_{i=1}^{n} y_i(\beta_0 + x_i\cdot\beta)
# \end{align}
#
# Using the standard 'trick', if we augment the matrix $X$ with a column of 1s, we can write $\beta_0 + x_i\cdot\beta$ as just $X\beta$.
df_ = pd.read_csv("binary.csv")
df_.columns = df_.columns.str.lower()
df_.head()
# +
# We will ignore the rank categorical value
cols_to_keep = ['admit', 'gre', 'gpa']
df = df_[cols_to_keep]
df.insert(1, 'dummy', 1)
df.head()
# -
# ### Solving as a GLM with IRLS
#
# This is very similar to what you would do in R, only using Python's `statsmodels` package. The GLM solver uses a special variant of Newton's method known as iteratively reweighted least squares (IRLS), which will be further described in the lecture on multivariate and constrained optimization.
model = sm.GLM.from_formula('admit ~ gre + gpa',
data=df, family=sm.families.Binomial())
fit = model.fit()
fit.summary()
# ### Or use R
# %load_ext rpy2.ipython
# + magic_args="-i df" language="R"
# m <- glm(admit ~ gre + gpa, data=df, family="binomial")
# summary(m)
# -
# ### Home-brew logistic regression using a generic minimization function
#
# This is to show that there is no magic going on - you can write the function to minimize directly from the log-likelihood equation and run a minimizer. It will be more accurate if you also provide the derivative (+/- the Hessian for second order methods), but using just the function and numerical approximations to the derivative will also work. As usual, this is for illustration so you understand what is going on - when there is a library function available, you should probably use that instead.
def f(beta, y, x):
    """Minus log likelihood function for logistic regression."""
    xb = np.dot(x, beta)
    # log-likelihood = -sum(log(1 + e^{Xb})) + sum(y * Xb); negate for minimization
    log_lik = (-np.log(1 + np.exp(xb))).sum() + (y * xb).sum()
    return -log_lik
beta0 = np.zeros(3)
opt.minimize(f, beta0, args=(df['admit'], df.loc[:, 'dummy':]), method='BFGS', options={'gtol':1e-2})
# ### Optimization with `sklearn`
#
# There are also many optimization routines in the `scikit-learn` package, as you already know from the previous lectures. Many machine learning problems essentially boil down to the minimization of some appropriate loss function.
# ### Resources
#
# - [Scipy Optimize reference](http://docs.scipy.org/doc/scipy/reference/optimize.html)
# - [Scipy Optimize tutorial](http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html)
# - [LMFit - a modeling interface for nonlinear least squares problems](http://cars9.uchicago.edu/software/python/lmfit/index.html)
# - [CVXpy- a modeling interface for convex optimization problems](https://github.com/cvxgrp/cvxpy)
# - [Quasi-Newton methods](http://en.wikipedia.org/wiki/Quasi-Newton_method)
# - [Convex optimization book by Boyd & Vandenberghe](http://stanford.edu/~boyd/cvxbook/)
# - [Nocedal and Wright textbook](http://www.springer.com/us/book/9780387303031)
| notebooks/T07D_Optimization_Examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Modeling timeseries data in MXNet: using LSTM to predict AWS spot instance price
#
# @sunilmallya
#
# @jrhunt
#
# MXNet cheat sheet: https://s3.amazonaws.com/aws-bigdata-blog/artifacts/apache_mxnet/apache-mxnet-cheat.pdf
# Dataset: https://raw.githubusercontent.com/sunilmallya/mxnet-notebooks/master/python/tutorials/data/p2-east-1b.csv
# ### Recurrent Neural Networks (RNN)
#
# <img src="https://camo.githubusercontent.com/89a1cc7342d324ca30e45025bb278572f3f114d2/687474703a2f2f6b617270617468792e6769746875622e696f2f6173736574732f726e6e2f64696167732e6a706567"/>
# <p style="text-align: center; font-size: 22px;"><em>h<sub>t</sub> = activation(X<sub>t</sub>W<sub>x</sub> + h<sub>t-1</sub>W<sub>h</sub> )</em></p>
# LSTMs -- Long Short Term Memory
# <img src="https://camo.githubusercontent.com/5a4faf272952795d0011b147dc8bb0a3a7095cb3/687474703a2f2f646565706c6561726e696e672e6e65742f7475746f7269616c2f5f696d616765732f6c73746d5f6d656d6f727963656c6c2e706e67">
#
# UnRolling LSTMs
#
# <img src="https://camo.githubusercontent.com/a90144b7e3d10d3b5a267cb617bb90b938cb048a/687474703a2f2f636f6c61682e6769746875622e696f2f706f7374732f323031352d30382d556e6465727374616e64696e672d4c53544d732f696d672f524e4e2d756e726f6c6c65642e706e67"?>
#
# Install dependencies from inside the notebook. The leading "!" runs them as
# shell commands — a bare "pip install ..." line is a Python SyntaxError in a
# code cell (same jupytext escaping as the "# !wget" cell below).
# !pip install mxnet
# !pip install numpy
# !pip install matplotlib
# !pip install pandas
# +
# load the
# !wget https://raw.githubusercontent.com/sunilmallya/mxnet-notebooks/master/python/tutorials/data/p2-east-1b.csv
# +
import pandas
import numpy as np
import mxnet as mx
import matplotlib.pyplot as plt
# %matplotlib inline
df = pandas.read_csv('p2-east-1b.csv', usecols=[0,4], names=['date', 'cost'])
df.head()
# +
import dateutil.parser
values = df.values[::-1]
ticks = map(dateutil.parser.parse, values[:,0])
dataset = values[:,1]
dataset.shape
# -
plt.plot(ticks, dataset)
# +
# Tx = Tx-1, Tx-2 ; Window size = 2
from sklearn.preprocessing import MinMaxScaler
dataset = np.reshape(dataset, (len(dataset), 1))
scaler = MinMaxScaler(feature_range=(0,1))
scaled_dataset = scaler.fit_transform(dataset)
dataset[:5], scaled_dataset[:5]
# +
# align the data
seq_len = 2  # window size: each target is predicted from the previous 2 observations
x = scaled_dataset
y = scaled_dataset[: ,[-1]]
x[:5], y[:5]
# slide a length-seq_len window over the series; the observation immediately
# after each window becomes that window's regression target
dataX = []; dataY = []
for i in range(0, len(y)-seq_len):
    _x = x[i: i+seq_len]
    _y = y[i+seq_len]
    dataX.append(_x)
    dataY.append(_y)
dataX[0], dataY[0]
# Tx0, Tx1 => Tx2
# Tx1, Tx2 => Tx3
# -
dataX[1], dataY[2]
# +
# Define Itertors
#split the data
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
batch_size = 32
trainX, testX = np.array(dataX[:train_size]), np.array(dataX[train_size:])
trainY, testY = np.array(dataY[:train_size]), np.array(dataY[train_size:])
train_iter = mx.io.NDArrayIter(data=trainX, label=trainY,
batch_size=batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(data=testX, label=testY,
batch_size=batch_size, shuffle=False)
trainX.shape
# -
# +
# Lets build the network
data = mx.sym.var("data")
# swap batch and time axes: the iterators yield NTC but the cells are unrolled TNC
data = mx.sym.transpose(data, axes=(1,0,2))
# T N C -- Time Steps/ Seq len; N - Batch Size, C - dimensions in the hidden state
'''
Long-Short Term Memory (LSTM) network cell.
Parameters:
num_hidden (int) – Number of units in output symbol.
prefix (str, default ‘lstm_‘) – Prefix for name of layers (and name of weight if params is None).
params (RNNParams, default None) – Container for weight sharing between cells. Created if None.
forget_bias (bias added to forget gate, default 1.0.) – Jozefowicz et al. 2015 recommends setting this to 1.0
'''
# two stacked LSTM layers; the second consumes the first's per-step outputs
lstm1 = mx.rnn.LSTMCell(num_hidden=5, prefix='lstm1')
lstm2 = mx.rnn.LSTMCell(num_hidden=10, prefix='lstm2')
L1, states = lstm1.unroll(length=seq_len, inputs=data, merge_outputs=True, layout="TNC")
L2, L2_states = lstm2.unroll(length=seq_len, inputs=L1, merge_outputs=True, layout="TNC")
# final hidden state -> one regression output per sample
L2_reshape = mx.sym.reshape(L2_states[0], shape=(-1, 0), reverse=True) # (T*N, 10 -- num_hidden lstm2)
fc = mx.sym.FullyConnected(L2_reshape, num_hidden=1, name='fc')
net = mx.sym.LinearRegressionOutput(data=fc, name="softmax")
#mx.viz.plot_network(net) #, shape=(1,2,2))
# +
# Training
import logging
logging.getLogger().setLevel(logging.DEBUG)
num_epochs = 2
model = mx.mod.Module(symbol=net, context=mx.cpu(0))
model.fit(train_data=train_iter, eval_data=val_iter,
optimizer="adam",
optimizer_params={'learning_rate': 1E-3},
eval_metric="mse",
num_epoch=num_epochs
)
# +
test_pred = model.predict(val_iter).asnumpy()
#type(test_pred)
print np.mean((test_pred - testY)**2)
test_plot = scaler.inverse_transform(test_pred)
test_plot[:5], testY[:5]
# -
plt.plot(ticks[train_size+seq_len:], test_plot)
# +
t_plot = np.empty_like(dataset)
t_plot[:] = np.nan
t_plot[len(trainY): -seq_len] = test_plot
plt.plot(ticks, dataset, label="real data")
plt.plot(ticks, t_plot, label= "pred")
plt.legend()
# -
| E5_timeseries-modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import json
# ## Configuration
auth = ('your username','your password')
headers = {'Content-type': 'application/json', 'accept': 'application/json'}
# ## collections - Collections are groupings of datasets
# ### create collection
def create_collection(name, description='', space_id=''):
    """Create a new Clowder collection and print the server response.

    Args:
        name: display name for the collection.
        description: optional free-text description.
        space_id: id of the ONE space the collection belongs to — unlike a
            dataset, a collection cannot be added to multiple spaces.
    """
    body = {'name': name, 'description': description, 'space': space_id}
    response = requests.post('https://clowderframework.org/clowder/api/collections',
                             data=json.dumps(body),
                             headers=headers,
                             auth=auth)
    print(response.status_code)
    print(response.text)
create_collection('new_collection', '...', '5a8f375d4f0cfe889c135091')
# ### list collections
def list_collections():
    """Fetch every collection visible to the configured user and print the raw response."""
    url = 'https://clowderframework.org/clowder/api/collections/allCollections'
    response = requests.get(url, auth=auth)
    print(response.status_code)
    print(response.text)
list_collections()
# ### add dataset to collection
def add_dataset_to_collection(collection_id, dataset_id):
    """Attach an existing dataset to an existing collection.

    Sends a POST with an empty JSON body to the collection/dataset
    endpoint and prints the HTTP status code and response text.
    """
    url = f'https://clowderframework.org/clowder/api/collections/{collection_id}/datasets/{dataset_id}'
    response = requests.post(url,
                             data=json.dumps({}),
                             headers=headers,
                             auth=auth)
    print(response.status_code)
    print(response.text)
add_dataset_to_collection('5aac33b74f0cc56d89e1418f', '5aabd0354f0cc56d89e13d39')
| scripts/jupyter-notebook/collections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <ol>
# <li>Experiment with multiplying strings or lists by integers and see what happens. Having seen that, create a variable which holds the value ‘xyzxyzxyzxyzxyzxyzxyzxyzxyzxyz’</li>
#
# <br></br>
# <li>Using list comprehensions, list-related functions, or otherwise, create the following list:
# <br></br>
# [[1, 2, 3, 4, 5, 6, 7], [1, 4, 9, 16, 25, 36, 49], [1, 8, 27, 64, 125, 216,343]]</li>
#
#
# <br></br>
# <li>Write a function which takes a list as input and returns a list containing a sequence of 10 equally spaced numbers between the min and max of the list. If the minimum and maximum values of the list are the same, it returns a list of 10 zeroes.
# <br></br>
# example:
# <br></br>
# input list : [20, 2, 6, 7, 10]
# <br></br>
# output list : [2,4,6,8,10,12,14,16,18,20]
# </li>
# <br></br>
# <li>Show by example how to sort a dictionary by its values</li>
# <br></br>
# <li>Write a function which takes input 3 dictionaries and returns a new dictionary which has all key value pairs from those three dictionaries
# </li>
# <br></br>
# <li>Write a function which takes input a string and returns reversed string example
# <br></br>
# input : “string”
# <br></br>
# output : “gnirts”
#
# </li>
# <br></br>
# <li>Consider this dictionary of addresses :
# <br></br>
# {“home” : [“Hyderabad”,“Lingampally”,“Ph:1234567890”],
# <br></br>
# “office”:[“Maharashtra”,“Mumbai”,“Ghatkopar”,“Ph : 5432167809”,“Pin :400043”],
# <br></br>
# “OOI” : [“Singapore”,“Ph : 09876345”]}
# <br></br>
# Write a program to extract phone numbers for each location
# </li>
# <br></br>
# <li>Write a Python function to find the greatest common divisor (gcd) of two integers.</li>
# <br></br>
# <li>Write a function which takes input n and q and returns sum of qth powers of first n natural numbers</li>
# <br></br>
# <li>Find out all numbers between 1 to 100 which are divisible by either 3 or 5 but not by both.[Hint : use sets]</li>
# <br></br>
# <li>Write a function which takes input n and produces first n elements of a fibonacci series</li>
#
# </ol>
#
# Prepared by: <NAME> and <NAME>
| intro-to-python-ml/week1/excercise/python_intro_Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/claytonchagas/intpy_prod/blob/main/1_3_automatic_evaluation_fibonacci_recursive_ast_only_DB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="6-qqZu4hiiIV" outputId="1df6f3c4-6c0f-455d-8dc0-a77febc57420"
# !sudo apt-get update
# + colab={"base_uri": "https://localhost:8080/"} id="hDiUpQCOjv6C" outputId="f90a5f72-75ca-4786-bd4b-7a0511feaa50"
# !sudo apt-get install python3.9
# + colab={"base_uri": "https://localhost:8080/"} id="R5t3mOroj17M" outputId="bbe99081-ec9b-4fbe-cff9-461a0fc55328"
# !python3.9 -V
# + colab={"base_uri": "https://localhost:8080/"} id="JtqzTIk0mm4e" outputId="d1645a13-5277-44af-968a-1ef48fa286c2"
# !which python3.9
# + [markdown] id="ihQdCPFZubcr"
# #**i. Colab hardware and software specs:**
# - n1-highmem-2 instance
#
# - 2vCPU @ 2.3GHz
#
# - 13GB RAM
#
# - 100GB Free Space
#
# - idle cut-off 90 minutes
#
# - maximum lifetime 12 hours
# + colab={"base_uri": "https://localhost:8080/"} id="CWA0_ajEup39" outputId="11a46753-7120-4586-9745-aea5bcd00f35"
# Colab hardware info (processor and memory):
# # !cat /proc/cpuinfo
# # !cat /proc/memoinfo
# # !lscpu
# !lscpu | egrep 'Model name|Socket|Thread|NUMA|CPU\(s\)'
print("---------------------------------")
# !free -m
# + colab={"base_uri": "https://localhost:8080/"} id="HHAOaF_Ousot" outputId="bf6582eb-952e-4710-9319-b0f2772f40bf"
# Colab SO structure and version
# !ls -a
print("---------------------------------")
# !ls -l /
print("---------------------------------")
# !lsb_release -a
# + [markdown] id="cx7938P5ayDl"
# #**ii. Cloning IntPy repository:**
# - https://github.com/claytonchagas/intpy_dev.git
# + colab={"base_uri": "https://localhost:8080/"} id="7UC9Br0IbcAl" outputId="f818408a-78b0-4f91-bf13-8420fb7fb178"
# !git clone https://github.com/claytonchagas/intpy_dev.git
# + id="EPv3-WsR7FOh"
# + colab={"base_uri": "https://localhost:8080/"} id="DuD5xDCpewJ8" outputId="8ce53066-c107-4fd8-edca-f6f1dbfabdb3"
# !ls -a
print("---------------------------------")
# %cd intpy_dev/
# !git checkout c27b261
# !ls -a
print("---------------------------------")
# !git branch
print("---------------------------------")
# #!git log --pretty=oneline --abbrev-commit
# #!git log --all --decorate --oneline --graph
# + [markdown] id="bn8snbMWLJGt"
# #**iii. Fibonacci's evolutions and cutoff by approach**
#
# - Evaluating recursive fibonacci code and its cutoff by approach
# + colab={"base_uri": "https://localhost:8080/"} id="Xylnb3sSMN8i" outputId="bbbc7e4c-04ce-4226-ee9a-c3308a9dd049"
# !ls -a
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
# !rm -rf output_iii.dat
print("--no-cache execution")
# !for i in {1..37}; do python3.9 fibonacci_recursive.py $i --no-cache >> output_iii.dat; rm -rf .intpy; done
print("done!")
print("only intra cache")
# !for i in {1..37}; do python3.9 fibonacci_recursive.py $i -v v01x >> output_iii.dat; rm -rf .intpy; done
print("done!")
print("full cache")
# !for i in {1..37}; do python3.9 fibonacci_recursive.py $i -v v01x >> output_iii.dat; done
print("done!")
# + colab={"base_uri": "https://localhost:8080/", "height": 445} id="xXAckgUsPqiW" outputId="c0ad9385-8c76-4dcd-9939-2c86ba76bce5"
import matplotlib.pyplot as plt
import numpy as np

def _load_timings(path, rows):
    """Read one run's per-input wall-clock times from *path*.

    The log stores 4 lines per Fibonacci input; *rows* is the slice that
    selects the timing line of each 4-line group.  Values are rounded to
    3 decimal places.
    """
    with open(path, "r") as fh:
        return [round(float(line), 3) for line in fh.readlines()[rows]]

# One series per caching strategy; the three runs were appended to the
# same log file in order: no-cache, intra-only, intra+inter.
dataf1 = _load_timings("output_iii.dat", slice(3, 148, 4))    # --no-cache
print(dataf1)
dataf2 = _load_timings("output_iii.dat", slice(151, 296, 4))  # intra cache only
print(dataf2)
dataf3 = _load_timings("output_iii.dat", slice(299, 444, 4))  # intra + inter cache
print(dataf3)

x = np.arange(1, 38)  # Fibonacci inputs 1..37
#plt.style.use('classic')
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_figheight(5)
fig.set_figwidth(14)
fig.suptitle("Fibonacci's evolutions and cutoff by approach", fontweight='bold')
# Left panel: all three strategies; right panel: cached runs only, so the
# far cheaper cached curves are readable on their own scale.
ax1.plot(x, dataf1, "tab:blue", label="no-cache")
ax1.plot(x, dataf2, "tab:orange", label="intra cache")
ax1.plot(x, dataf3, "tab:green", label="full cache")
#ax1.set_title("Fibonacci's evolutions and cutoff by approach")
ax1.set_xlabel("Fibonacci's Series Value")
ax1.set_ylabel("Time in seconds")
ax1.grid()
lex = ax1.legend()
ax2.plot(x, dataf2, "tab:orange", label="intra cache")
ax2.plot(x, dataf3, "tab:green", label="full cache")
#ax2.set_title("Quicksort's random evolutions and cutoff by approach")
ax2.set_xlabel("Fibonacci's Series Value")
ax2.set_ylabel("Time in seconds")
ax2.grid()
lex = ax2.legend()
plt.show()
# + [markdown] id="ZqkmGmC5M7tF"
# #**iv. Fibonacci 200, 100 and 50 recursive, three mixed trials**
# - Evaluating recursive fibonacci code, input 200, 100, and 50, three trials and plot.
# - First trial: input 200, 100, and 50, no inter-cache (baseline).
# - Second trial: input 200, 100, and 50, with intra and inter-cache, analyzing the cache's behavior with different inputs.
# - Third trial: input 50, 100, and 200, with intra and inter-cache, analyzing the cache's behavior with different inputs, in a different order of the previous running.
# + id="vKTYph90Nd_x" colab={"base_uri": "https://localhost:8080/"} outputId="d1aa0db1-b21a-46cf-f1b4-4fc7b0df6eef"
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
# !rm -rf output_iv.dat
print("First running, Fibonacci 200: value and time in sec")
# !python3.9 fibonacci_recursive.py 200 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
print("Second running, Fibonacci 100: value and time in sec")
# !python3.9 fibonacci_recursive.py 100 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
print("Third running, Fibonacci 50: value and time in sec")
# !python3.9 fibonacci_recursive.py 50 -v v01x | tee -a output_iv.dat
print("---------------------------------")
# + [markdown] id="AExxxmgrNxAR"
# - Second trial: with inter and intra-cache, inputs: 200, 100 and 50.
# + id="IrNtsoJ3Nzlk" colab={"base_uri": "https://localhost:8080/"} outputId="4e273328-5872-4b05-81e4-4da345b5e2a9"
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
print("First running, Fibonacci 200: value and time in sec")
# !python3.9 fibonacci_recursive.py 200 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Second running, Fibonacci 100: value and time in sec")
# !python3.9 fibonacci_recursive.py 100 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Third running, Fibonacci 50: value and time in sec")
# !python3.9 fibonacci_recursive.py 50 -v v01x | tee -a output_iv.dat
print("---------------------------------")
# + [markdown] id="tMAaUbQCN8RT"
# - Third trial: with inter and intra-cache, inputs: 50, 100 and 200.
# + id="IEpHZCb-N-tT" colab={"base_uri": "https://localhost:8080/"} outputId="95dd5cbe-d07e-4658-d9c0-60edd8a332cc"
print("---------------------------------")
print("Cleaning up cache")
# !rm -rf .intpy
print("First running, Fibonacci 50: value and time in sec")
# !python3.9 fibonacci_recursive.py 50 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Second running, Fibonacci 100: value and time in sec")
# !python3.9 fibonacci_recursive.py 100 -v v01x | tee -a output_iv.dat
print("---------------------------------")
print("Third running, Fibonacci 200: value and time in sec")
# !python3.9 fibonacci_recursive.py 200 -v v01x | tee -a output_iv.dat
print("---------------------------------")
# + [markdown] id="pmHxle56OQFE"
# - Plotting the comparison: first graph.
# + id="oLsIl-B2OR_V" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="2ce3b6af-f249-4a62-fd92-1885bbdb6e87"
import numpy as np
import matplotlib.pyplot as plt  # previously inherited from an earlier cell; import explicitly

# Parse every timing value (the log stores 4 lines per execution; the
# timing is line index 3 of each group), rounded to 6 decimals.
with open("output_iv.dat", "r") as f4:
    dataf4 = [round(float(line), 6) for line in f4.readlines()[3::4]]
print(dataf4)
# Regroup the 9 timings by Fibonacci input across the three trials.
# (Dead `fib* = []` initializers and the unused `running3to5`/`y` variables
# from the original cell were removed.)
fib200 = [dataf4[0], dataf4[3], dataf4[8]]
print(fib200)
fib100 = [dataf4[1], dataf4[4], dataf4[7]]
print(fib100)
fib50 = [dataf4[2], dataf4[5], dataf4[6]]
print(fib50)
width = 0.40
z = ['Fib 200', 'Fib 100', 'Fib 50']
list_color_z = ['blue', 'orange', 'green']
zr = ['Fib 50', 'Fib 100', 'Fib 200']  # the third trial ran inputs in ascending order
list_color_zr = ['green', 'orange', 'blue']
t1 = [dataf4[0], dataf4[1], dataf4[2]]  # 1st trial: cache intra only
t2 = [dataf4[3], dataf4[4], dataf4[5]]  # 2nd trial: inter+intra, descending inputs
t3 = [dataf4[6], dataf4[7], dataf4[8]]  # 3rd trial: inter+intra, ascending inputs
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(11,5))
ax1.bar(z, t1, width, label='1st trial', color=list_color_z)
ax2.bar(z, t2, width, label='2nd trial', color=list_color_z)
ax3.bar(zr, t3, width, label='3rd trial', color=list_color_zr)
ax1.set_ylabel('Time in seconds', fontweight='bold')
ax1.set_xlabel('1st trial: cache intra', fontweight='bold')
ax2.set_xlabel('2nd trial: cache inter-intra/desc', fontweight='bold')
ax3.set_xlabel('3rd trial: cache inter-intra/asc', fontweight='bold')
ax2.set_title('Fibonacci recursive 200, 100 and 50 v0.1.x', fontweight='bold')
# Annotate each bar with its timing value.
for index, datas in enumerate(t1):
    ax1.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
for index, datas in enumerate(t2):
    ax2.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
for index, datas in enumerate(t3):
    ax3.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
ax1.grid(axis='y')
ax2.grid(axis='y')
ax3.grid(axis='y')
fig.tight_layout()
plt.savefig('chart_iv_fib_50_100_200_v01x.png')
plt.show()
# + [markdown] id="bSpHczl1YQrQ"
# #**1. Fast execution, all versions (v0.1.x and from v0.2.1.x to v0.2.7.x)**
# + [markdown] id="yraAw5HRtVlZ"
# ##**1.1 Fast execution: only intra-cache**
# + [markdown] id="5Mb3bnrnWSnm"
# ###**1.1.1 Fast execution: only intra-cache => experiment's executions**
# + id="Fapp-EvmcyXZ" colab={"base_uri": "https://localhost:8080/"} outputId="d6c153bf-1221-45be-c604-45c2eb1d2404"
# !rm -rf .intpy;\
# rm -rf stats_intra.dat;\
# echo "IntPy only intra-cache";\
# experimento=fibonacci_recursive.py;\
# param=200;\
# echo "Experiment: $experimento";\
# echo "Params: $param";\
# for i in v01x v021x v022x v023x v024x v025x v027x;\
# do rm -rf output_intra_$i.dat;\
# rm -rf .intpy;\
# echo "---------------------------------";\
# echo "IntPy version $i";\
# for j in {1..5};\
# do echo "Execution $j";\
# rm -rf .intpy;\
# python3.9 $experimento $param -v $i >> output_intra_$i.dat;\
# echo "Done execution $j";\
# done;\
# echo "Done IntPy version $i";\
# done;\
# echo "---------------------------------";\
# echo "---------------------------------";\
# echo "Statistics evaluation:";\
# for k in v01x v021x v022x v023x v024x v025x v027x;\
# do echo "Statistics version $k" >> stats_intra.dat;\
# echo "Statistics version $k";\
# python3.9 stats_colab.py output_intra_$k.dat;\
# python3.9 stats_colab.py output_intra_$k.dat >> stats_intra.dat;\
# echo "---------------------------------";\
# done;\
# + [markdown] id="BT1wVMNdXtP5"
# ###**1.1.2 Fast execution: only intra-cache => charts generation**
#
# + id="wwbrnOubY4pe" colab={"base_uri": "https://localhost:8080/"} outputId="147983bd-de4e-49ac-917f-cccce69369af"
# %matplotlib inline
import matplotlib.pyplot as plt
#versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x']
versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:brown']
# One bar chart per IntPy version: 5 intra-cache-only executions each.
# (The original cell's unused `filev`/`data`/`dataf` prefix variables were removed.)
for i, j in zip(versions, colors):
    # Timing value is every 4th log line, starting at line index 3.
    with open("output_intra_"+i+".dat", "r") as file_intra:
        dataf_intra = [round(float(x), 5) for x in file_intra.readlines()[3::4]]
    print(i+": ", dataf_intra)
    running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
    plt.figure(figsize=(10, 5))
    plt.bar(running1_1, dataf_intra, color=j, width=0.4)
    plt.grid(axis='y')
    for index, datas in enumerate(dataf_intra):
        plt.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
    plt.xlabel("Running only with intra cache "+i, fontweight='bold')
    plt.ylabel("Time in seconds", fontweight='bold')
    plt.title("Chart "+i+" intra - Fibonacci 200 recursive - with intra cache, no inter cache - IntPy "+i+" version", fontweight='bold')
    plt.savefig("chart_intra_"+i+".png")
    plt.close()  # charts are displayed later, in section 1.3.1
    #plt.show()
# + id="yRtSKKEqBwp7" colab={"base_uri": "https://localhost:8080/"} outputId="bf3c14a9-c25c-4346-b48a-ff991b127bad"
import matplotlib.pyplot as plt
# Median per version: line index 5 of each 8-line record in stats_intra.dat;
# x[8::] strips the text prefix before the numeric value — TODO confirm against
# the exact stats_colab.py output format.
with open("stats_intra.dat", "r") as file_intra:
    data_intra = [round(float(x[8::]), 5) for x in file_intra.readlines()[5::8]]
print(data_intra)
#versions = ["0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.6.x", "0.2.7.x"]
versions = ["0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.7.x"]
#colors =['royalblue', 'forestgreen', 'orangered', 'purple', 'skyblue', 'lime', 'lightgrey', 'tan']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:brown']
plt.figure(figsize=(10, 5))
plt.bar(versions, data_intra, color=colors, width=0.7)
plt.grid(axis='y')
for index, datas in enumerate(data_intra):
    plt.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
plt.xlabel("Median for 5 executions in each version, intra cache", fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Fibonacci 200 recursive, cache intra-running, comparison of all versions", fontweight='bold')
plt.savefig('compare_median_intra.png')
plt.close()
#plt.show()
# + [markdown] id="zbpUjTlnWZay"
# ##**1.2 Fast execution: full cache -> intra and inter-cache**
# + [markdown] id="pJvM7s-ct1E_"
# ###**1.2.1 Fast execution: full cache -> intra and inter-cache => experiment's executions**
# + id="WUQiTFqnWqBD" colab={"base_uri": "https://localhost:8080/"} outputId="7a24675e-022a-4897-e9fc-cd92466ce30f"
# !rm -rf .intpy;\
# rm -rf stats_full.dat;\
# echo "IntPy full cache -> intra and inter-cache";\
# experimento=fibonacci_recursive.py;\
# param=200;\
# echo "Experiment: $experimento";\
# echo "Params: $param";\
# for i in v01x v021x v022x v023x v024x v025x v027x;\
# do rm -rf output_full_$i.dat;\
# rm -rf .intpy;\
# echo "---------------------------------";\
# echo "IntPy version $i";\
# for j in {1..5};\
# do echo "Execution $j";\
# python3.9 $experimento $param -v $i >> output_full_$i.dat;\
# echo "Done execution $j";\
# done;\
# echo "Done IntPy version $i";\
# done;\
# echo "---------------------------------";\
# echo "---------------------------------";\
# echo "Statistics evaluation:";\
# for k in v01x v021x v022x v023x v024x v025x v027x;\
# do echo "Statistics version $k" >> stats_full.dat;\
# echo "Statistics version $k";\
# python3.9 stats_colab.py output_full_$k.dat;\
# python3.9 stats_colab.py output_full_$k.dat >> stats_full.dat;\
# echo "---------------------------------";\
# done;\
# + [markdown] id="8BMFq3oZqqZh"
# ###**1.2.2 Fast execution: full cache -> intra and inter-cache => charts generation**
# + id="F736YE-3qxiy" colab={"base_uri": "https://localhost:8080/"} outputId="82ebfe16-3c22-45e0-f79d-6f33af835299"
# %matplotlib inline
import matplotlib.pyplot as plt
#versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x']
versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
# (The original cell assigned an 8-entry palette and immediately overwrote it;
# only the effective 7-entry palette is kept.)
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:brown']
# One bar chart per IntPy version: 5 full-cache (intra + inter) executions each.
for i, j in zip(versions, colors):
    # Timing value is every 4th log line, starting at line index 3.
    with open("output_full_"+i+".dat", "r") as file_full:
        dataf_full = [round(float(x), 5) for x in file_full.readlines()[3::4]]
    print(i+": ", dataf_full)
    running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
    plt.figure(figsize=(10, 5))
    plt.bar(running1_1, dataf_full, color=j, width=0.4)
    plt.grid(axis='y')
    for index, datas in enumerate(dataf_full):
        plt.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
    plt.xlabel("Running full cache "+i, fontweight='bold')
    plt.ylabel("Time in seconds", fontweight='bold')
    plt.title("Chart "+i+" full - Fibonacci 200 recursive - with intra and inter cache - IntPy "+i+" version", fontweight='bold')
    plt.savefig("chart_full_"+i+".png")
    plt.close()  # charts are displayed later, in section 1.3.2
    #plt.show()
# + id="R9SBjV5kB_iF" colab={"base_uri": "https://localhost:8080/"} outputId="c99165f5-d15d-4297-96bb-7e0ad174cde3"
import matplotlib.pyplot as plt
# Median per version: line index 5 of each 8-line record in stats_full.dat;
# x[8::] strips the text prefix before the numeric value — TODO confirm against
# the exact stats_colab.py output format.
with open("stats_full.dat", "r") as file_full:
    data_full = [round(float(x[8::]), 5) for x in file_full.readlines()[5::8]]
print(data_full)
#versions = ["0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.6.x", "0.2.7.x"]
versions = ["0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.7.x"]
#colors =['royalblue', 'forestgreen', 'orangered', 'purple', 'skyblue', 'lime', 'lightgrey', 'tan']
# (A duplicate 8-entry palette assignment was removed; this is the effective one.)
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:brown']
plt.figure(figsize=(10, 5))
plt.bar(versions, data_full, color=colors, width=0.7)
plt.grid(axis='y')
for index, datas in enumerate(data_full):
    plt.text(x=index, y=datas, s=datas, ha='center', va='bottom', fontweight='bold')
plt.xlabel("Median for 5 executions in each version, full cache", fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Fibonacci 200 recursive, cache intra and inter-running, comparison of all versions", fontweight='bold')
plt.savefig('compare_median_full.png')
plt.close()
#plt.show()
# + [markdown] id="5fR3sOQtv4KI"
# ##**1.3 Displaying charts to all versions**
# + [markdown] id="7XlKxf6cwYHD"
# ###**1.3.1 Only intra-cache charts**
# + id="UMHhuKAkwBM8" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2cae0d5d-d1bc-47f7-ec96-469c2009e211"
#versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x']
versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']  # v026x omitted from this run
from IPython.display import Image, display
# Render the per-version intra-cache charts saved by section 1.1.2.
for i in versions:
    display(Image("chart_intra_"+i+".png"))
    print("=====================================================================================")
# + [markdown] id="H-t1YcyPyyQY"
# ###**1.3.2 Full cache charts -> intra and inter-cache**
# + id="rECLruWzy-P0" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ce82d4fa-cb62-40c4-954b-76829e02ecb3"
#versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v026x', 'v027x']
versions = ['v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']  # v026x omitted from this run
from IPython.display import Image, display
# Render the per-version full-cache (intra + inter) charts saved by section 1.2.2.
for i in versions:
    display(Image("chart_full_"+i+".png"))
    print("=====================================================================================")
# + [markdown] id="MLufhYPK6wMf"
# ###**1.3.3 Only intra-cache: median comparison chart of all versions**
# + id="SS3WMo3Q7GgQ" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="e3900c6f-9194-4077-de6f-da2c6a71953d"
from IPython.display import Image, display
display(Image("compare_median_intra.png"))
# + [markdown] id="G-voI26Q7H7a"
# ###**1.3.4 Full cache -> intra and inter-cache: median comparison chart of all versions**
# + id="dhBn4j7e7N61" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="a97fc0c6-8832-4c8d-c756-5e4e7026cb54"
from IPython.display import Image, display
display(Image("compare_median_full.png"))
# + [markdown] id="F0zA_HtPGlqe"
# ###**1.3.5 IntPy Fibonacci 50 - raw execution OK (no cache): 1h31min15sec**
# + id="-EIkSBpiG0XA" colab={"base_uri": "https://localhost:8080/", "height": 692} outputId="be4ff106-6413-42bb-8567-51e7b8abab95"
# !wget -nv https://github.com/claytonchagas/intpy_prod/raw/main/intpy_raw_50_1h31m15s_ok.jpg
from IPython.display import Image, display
display(Image("intpy_raw_50_1h31m15s_ok.jpg", width=720))
# + [markdown] id="PI7S4hjsG2f_"
# ###**1.3.6 IntPy Fibonacci 100 - raw execution NO OK (no cache): 14h43min30sec**
# + id="r69K6OA3JDg0" colab={"base_uri": "https://localhost:8080/", "height": 693} outputId="0e6fcd25-008e-4d82-ef02-6a9ecb9f5fbe"
# !wget -nv https://github.com/claytonchagas/intpy_prod/raw/main/intpy_raw_100_14h43m30s_NO_ok.jpg
from IPython.display import Image, display
display(Image("intpy_raw_100_14h43m30s_NO_ok.jpg", width=720))
# + [markdown] id="O-fbYfJ_HAgJ"
# ###**1.3.6 IntPy Fibonacci 200 - no execution (no cache): inf**
# + id="DX9qdDwiJEIu" colab={"base_uri": "https://localhost:8080/", "height": 720} outputId="94db4422-147e-44a8-c640-33881a246e36"
# !wget -nv https://github.com/claytonchagas/intpy_prod/raw/main/intpy_raw_200_NO_exec_inf.jpg
from IPython.display import Image, display
display(Image("intpy_raw_200_NO_exec_inf.jpg", width=720))
| 1_3_automatic_evaluation_fibonacci_recursive_ast_only_DB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pdb
import csv
import time
# +
def getData(collection, key, default):
    """Return collection[key] if the key is present, otherwise *default*."""
    return collection[key] if key in collection else default
start_time = time.time()
rating_file = "user_ratings.csv"
result = {}   # user id -> {rating name: value}
ids = []      # distinct user ids (sorted below)
headers = []  # distinct rating names (sorted below)
# Read the source file (id,name,value rows), then store it in memory.
with open(rating_file) as csvfile:
    csv_reader = csv.reader(csvfile)
    rating_header = next(csv_reader)  # skip the input's header row
    for row in csv_reader:
        id, name, value = row
        if id in result:
            user_data = result[id]
        else:
            user_data = {}
            result[id] = user_data
            # BUG FIX: record each user only once. The append previously ran for
            # every input row, so a user with N ratings appeared N times in `ids`
            # and was written N times to the output CSV.
            ids.append(id)
        if name in user_data:
            # Duplicate (user, rating) pair in the source: warn and debug.
            pdb.set_trace()
        else:
            user_data[name] = value
        if name not in headers:
            headers.append(name)
# Sort the rows and columns.
ids.sort()
headers.sort()
# -
# Print result to csv: one row per user, one column per (sorted) rating name;
# missing ratings are written as empty strings.
with open('user_data.csv', "w", newline='') as f:
    writer = csv.writer(f)
    # Header row: USER_ID followed by the sorted rating names.
    header_row = ['USER_ID']
    header_row.extend(headers)
    writer.writerow(header_row)  # was writerows([row]) — writerow is the single-row API
    for user_id in ids:
        csv_data = [user_id]
        user_data = result[user_id]
        for key in headers:
            csv_data.append(getData(user_data, key, ''))
        writer.writerow(csv_data)
# The `with` block closes the file; the original's trailing f.close() was redundant.
| reformatting rating.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lesson 5 - Printing, Strings, Numbers
#
# ### Readings
#
# * Shaw: [Exercises 1-10](https://learnpythonthehardway.org/python3/ex1.html)
# * Lutz: Chapters 1-7
#
# ### Table of Contents
#
# * [Strings](#strings)
# * [Numbers and Math](#numbers)
# * [Help](#help)
# <a id="strings"></a>
#
# ### Strings
# With IPython/Jupyter notebooks, we don't have to type `print()` as much as Shaw does in _LPTHW_, but we will use it for this first section.
print('This is a sentence.')
'This is a sentence.'
print("Now the sentence is in double quotes.")
print("Have you heard the word? 'Bird' is the word.")
print('Have you heard the word? "Bird" is the word.')
print("You can \"escape\" a quote marking.")
print("Here are some more escape characters: # \" \\")
print('Have you heard the word? ' + 'Bird is the word.')
print("""
Here is a block
of text that we
are going to print.
""")
print("\tOne\n\tTwo\n\tThree")
# #### Strings are sequences of characters, and they have properties like lists
# ##### S is a variable (with a string assigned to it)
S = 'Spam'
len(S)
S[0]
S[0], S[1], S[2], S[3]
# ##### We can use comments (starting with a #) to add context to what we are typing
# Last letter
S[-1]
# Negative indexing the hard way
S[len(S)-1]
S[-1], S[-2], S[-3], S[-4]
# Slice from 1 through 2 (not 3)
S[1:3]
# Everything but first letter
S[1:]
# S is not changed
S
# Everything but last letter
S[0:-1]
# Everything but last letter
S[:-1]
# All of S as a top-level copy
S[:]
# ##### To review, we can index a string from the start or from the end:
# ```
# S p a m
# index from start: 0 1 2 3
# index from end: -4 -3 -2 -1
# ```
#
# This Python-style indexing may seem strange at first, but it has the benefit of not requiring you to know the length of the string you are slicing:
#
# * `S[:2]` gives the first 2 characters (regardless of string length)
# * `S[2:]` gives everything except the first 2 characters (regardless of string length)
# * `S[-2:]` gives the last 2 characters (regardless of string length)
# * `S[:-2]` gives everything except the last 2 characters (regardless of string length)
S[:2], S[2:], S[-2:], S[:-2]
T = 'Supercalifragilisticexpialidocious'
T[:2], T[2:], T[-2:], T[:-2]
# ##### Repetition and concatenation
# Repetition
S * 8
# Concatenation
S + 'xyz'
# Storing a new value for S
S = 'z' + S[1:]
# S is now changed
S
# #### String methods
S = 'Spam'
S
S.find('pa')
S.find('spa')
S.replace('pa', 'XYZ')
S
line = 'aaa,bbb,ccc,ddd'
line
line.split(',')
S.upper()
line.upper()
S.isalnum()
line.isalnum()
line2 = 'aaa,bbb,ccc,ddd\n'
line2
line2.split(',')
line2 = line2.rstrip()
line2.split(',')
line3 = 'aaa,bbb,ccc,ddd \t \n'
line3.rstrip()
# #### String formatters
a = "Bird"
b = 'Word'
"Have you heard? %s is the %s." % (a, b)
"If we only want one: %s" % a
"If we only want one: %r" % a
# we can combine strings with commas to form a tuple
"Let's add 2 + 2. It is", 2 + 2, "."
# wrapping the tuple in a print command prints it together
print("Let's add 2 + 2. It is", 2 + 2, ".")
# but the modulo operator is better
'It works better if we write %s.' % (2 + 2)
# #### But wait, there's more!
#
# There are now three main ways to format strings. See [Real Python](https://realpython.com/python-f-strings/) for more information.
# **Option 1: %-formatting** — Original Python syntax.
"Have you heard? %s is the %s." % (a, b)
# **Option 2: str.format()** – Introduced in Python 2.6.
"Have you heard? {} is the {}.".format(a, b)
s = "Have you heard? {} is the {}."
s.format(a, b)
"Have you heard? {1} is the {0}.".format(a, b)
"Have you heard? {0} is the {1}.".format(a.upper(), b.upper())
"Have you heard? {animal} is the {thing}.".format(animal=a, thing=b)
"Have you heard? {animal} is the {thing}.".format(thing=b, animal=a)
# **Option 3: f-strings** – Introduced in Python 3.6.
f"Have you heard? {a} is the {b}."
f"Have you heard? {a.upper()} is the {b.upper()}."
# <a id="numbers"></a>
#
# ### Numbers and Math
2 / 4 + 0.1
(1 + 1) * (2 + 2)
# ##### a, b, c, d, and e are variables (with numbers assigned to them)
a = 123 + 222
b = 1.5 * 4
c = 2 ** 100
d = 1.0
e = 4
print("a = {}\nb = {}\nc = {}".format(a, b, c))
print("1.0 / 4 = %d" % (d/e))
print("1.0 / 4 = %f" % (d/e))
print("1.0 / 4 = %.3f" % (d/e))
print("1.0 / 4 = %r" % (d/e))
print("1.0 / 4 = %s" % (d/e))
# ##### Let's import the `math` module
import math
math.floor(4.22)
math.ceil(4.22)
math.factorial(5)
math.pi
math.sqrt(85)
pi = math.pi
f = math.sqrt(85)
print("pi = {}\nsqrt(85) = {}".format(pi, f))
# ##### Now let's import the `random` module
import random
random.seed(42)
random.random()
random.choice([1, 2, 3, 4])
random.choice(range(10))
g = random.random()
h = random.choice([1, 2, 3, 4])
i = random.choice(range(10))
print(f"random from 0-1: {g}\nrandom from 1,2,3,4: {h}\nrandom from 1-10: {i}")
magic_8_ball = [
'It is certain',
'It is decidedly so',
'Without a doubt',
'Yes, definitely',
'You may rely on it',
'As I see it, yes',
'Most likely',
'Outlook good',
'Yes',
'Signs point to yes',
'Reply hazy try again',
'Ask again later',
'Better not tell you now',
'Cannot predict now',
'Concentrate and ask again',
'Don\'t count on it',
'My reply is no',
'My sources say no',
'Outlook not so good',
'Very doubtful']
random.choice(magic_8_ball)
# <a id="help"></a>
#
# ### Getting help (or use IPython and tab-complete!)
# +
# get more information about the string variable S
# S?
# +
# get more information about the string find method
# S.find?
# +
# get more information about the random seed function
# random.seed?
# +
# try typing `S.` then Tab below to see which methods this string has
# -
# +
# try typing `math.` then Tab below to see what methods/functions math has
# -
# list all the variables in the workspace
# %whos
| lessons/lesson05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ** This notebook assumes ... **
#
# 1) Basic familiarity with gwsurrogate as covered in basics.ipynb
#
# 2) Basic familiarity with multi-modal and fast-spline (linear) surrogates as covered in numerical_relativity.ipynb
# +
### setup paths used throughout this notebook ###
import sys
# local checkout of gwsurrogate; appended to sys.path so the import below
# resolves without installing the package
path_to_gws = '/home/balzani57/Repo/GitRepos/Codes/gwsurrogate/'
sys.path.append(path_to_gws)
# ##%matplotlib inline
import numpy as np, matplotlib.pyplot as plt
import gwsurrogate as gws
import gwtools
# -
# # Setup
#
# First we need to download the time-domain 4d2s precessing model. It's about 9 GB, so start the download and go grab a coffee.
# downloads the model data into <path_to_gws>/surrogate_downloads/
gws.catalog.pull("NRSur4d2s_TDROM_grid12")
# create a surrogate from the newly downloaded h5 file
path_to_surrogate = path_to_gws+'surrogate_downloads/NRSur4d2s_TDROM_grid12.h5'
sur_grid12 = gws.EvaluateSurrogate(path_to_surrogate,use_orbital_plane_symmetry=False)
# # Simple evaluations
#
# The 4d2s surrogate model is described [here](https://arxiv.org/pdf/1701.00550.pdf), and on page 12 the parameterization is given by (Note: $\phi_{\chi_1}$ had its interval shifted by $\pi$)
#
#
# $$q \in [1,2] \quad \left| \vec{\chi}_1 \right| \in [0,.8] \quad \Theta_{\chi_1} \in [0,\pi] \quad \phi_{\chi_1} \in [0, 2 \pi] \quad \chi_2^z \in [-.8, .8]$$
#
# In what follows, we pass a numpy array into the surrogate evaluation with 5 numbers, whose ordering matches the parameters above.
# +
# Evaluate all modes, and plot a few
x = np.array([1.50, 0.40, 2.0, 2.0, 0.5]) # precessing
# NOTE(review): the precessing parameter set above is immediately overwritten
# by the spin-aligned one below; comment one out depending on the case wanted.
x = np.array([1.50, 0.40, 0.0, 0.0, 0.5]) # spin aligned
lm_modes, times, hre, him = sur_grid12(x, mode_sum=False, fake_neg_modes=False)
print('You have evaluated the modes',lm_modes)
# combine the real and imaginary parts into one complex strain array h(t)
h = hre + 1.0j * him
gwtools.plot_pretty(times, [h[:,4].real, h[:,4].imag],fignum=1)
plt.plot(times,gwtools.amp(h[:,4]),'b')  # overlay the amplitude envelope
plt.title('The (%i,%i) mode'%(lm_modes[4][0],lm_modes[4][1]))
plt.xlabel('t/M ')
gwtools.plot_pretty(times, [h[:,3].real, h[:,3].imag],fignum=2)
plt.plot(times,gwtools.amp(h[:,3]),'b')  # overlay the amplitude envelope
plt.title('The (%i,%i) mode'%(lm_modes[3][0],lm_modes[3][1]))
plt.xlabel('t/M ')
plt.show()
# -
| tutorial/notebooks/precessing_nr_surrogates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import pymannkendall as mk
import matplotlib as mpl
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatch
import matplotlib.patches as patches
from matplotlib.transforms import offset_copy
import matplotlib.colors as colors
from matplotlib.lines import Line2D
import matplotlib.lines as mlines
import statsmodels.api as sm
import xarray as xr
import scipy.stats as sstats
from cmcrameri import cm
import cartopy.crs as ccrs
from cartopy.io import shapereader
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from itertools import count
import itertools
import rioxarray
from shapely.geometry import mapping
import geopandas as gpd
import cmaps
# Load the GRDC subregions file; row 512 is the Connecticut River basin
# polygon used for clipping the GloFAS grid below.
ctr_basin_sph0 = gpd.read_file("grdc_connecticut/subregions.geojson").iloc[[512]] # CT river basin shapefile
ctr_basin_sph1 = ctr_basin_sph0.reset_index()
# +
# Load GloFAS daily river discharge (GRIB) and clip it to the CT river basin.
glofas = xr.open_dataset('glofas_ct_river_basin.grib', engine='cfgrib')
#ds2 = ds.sel(time=slice("2000-01-01", "2020-01-01"))
#ds2 = ds2.sel(longitude=dss2.geo_x+360, latitude=dss2.geo_y, method='nearest')
# shift longitudes from the 0..360 convention to -180..180 so they match the
# basin shapefile's CRS
glofas['longitude'] = glofas['longitude'] - 360.0
glofas.rio.set_spatial_dims(x_dim="longitude", y_dim="latitude", inplace=True)
glofas.rio.write_crs("epsg:4326", inplace=True)
#Africa_Shape = gpd.read_file('grdc_connecticut/subregions.geojson', crs="epsg:4326")
#clipped = MSWEP_monthly2.rio.clip(Africa_Shape.geometry.apply(mapping), Africa_Shape.crs, drop=False)
glofas = glofas.rio.clip(ctr_basin_sph1.geometry.apply(mapping), crs="epsg:4326", drop=False)
# BUG FIX: subsequent cells refer to `clipped_glofas`, which was never
# defined (the clipped dataset was stored back into `glofas`), raising a
# NameError. Define the alias so both names point at the clipped dataset.
clipped_glofas = glofas
# -
clipped_glofas
notnull = pd.notnull(glofas.dis24[0])
glofas.dis24[0].where(notnull)
# +
# extreme river discharge: annual 99th percentile of daily discharge
glofas_extreme = clipped_glofas.groupby("time.year").quantile(0.99)
glofas_extreme
# +
# do the same but now seasonally: subset to each meteorological season,
# then take the 99th-percentile discharge within every year
glofas_winter = glofas.sel(time=glofas.time.dt.season=="DJF")
glofas_winter_extreme = glofas_winter.groupby("time.year").quantile(0.99)
glofas_spring = glofas.sel(time=glofas.time.dt.season=="MAM")
glofas_spring_extreme = glofas_spring.groupby("time.year").quantile(0.99)
glofas_summer = glofas.sel(time=glofas.time.dt.season=="JJA")
glofas_summer_extreme = glofas_summer.groupby("time.year").quantile(0.99)
glofas_fall = glofas.sel(time=glofas.time.dt.season=="SON")
glofas_fall_extreme = glofas_fall.groupby("time.year").quantile(0.99)
# +
# define a function to compute a linear trend of a timeseries
def linear_trend(x):
    """Return the per-decade linear trend of a yearly timeseries *x*.

    *x* must be 1-D with a ``year`` coordinate.  The degree-1 polyfit slope
    is per-year, so it is multiplied by 10 to express it per decade.
    """
    pf = np.polyfit(x.year, x, 1)
    # need to return an xr.DataArray for groupby
    return xr.DataArray(pf[0]*10)


def _gridpoint_trend(extreme_ds):
    """Per-decade dis24 trend at every grid point of a yearly dataset.

    Refactored from four copy-pasted stack/apply/unstack sequences (one per
    season) into one helper; results are identical to the originals.
    """
    # stack lat and lon into a single dimension called allpoints
    stacked = extreme_ds.dis24.stack(allpoints=['latitude', 'longitude'])
    # NaNs (cells outside the basin) become 0 so polyfit does not choke;
    # the exactly-zero trends are masked out again below
    stacked = stacked.fillna(0)
    # apply the function over allpoints to calculate the trend at each point
    trend = stacked.groupby('allpoints').apply(linear_trend)
    # unstack back to lat lon coordinates
    unstacked = trend.unstack('allpoints')
    unstacked = unstacked.rename({'dim_0': 'trend',
                                  'allpoints_level_0': 'lat',
                                  'allpoints_level_1': 'lon'})
    return unstacked.where(unstacked[0] != 0.)


trend_unstacked_winter = _gridpoint_trend(glofas_winter_extreme)
trend_unstacked_spring = _gridpoint_trend(glofas_spring_extreme)
trend_unstacked_summer = _gridpoint_trend(glofas_summer_extreme)
trend_unstacked_fall = _gridpoint_trend(glofas_fall_extreme)
# -
trend_unstacked_spring.min(), trend_unstacked_summer.max()
trend_unstacked_spring
# +
# 2x2 map grid: seasonal (DJF/MAM/JJA/SON) trends in extreme river discharge
fig, ax_lst = plt.subplots(2, 2, figsize=(9,12.5), subplot_kw=dict(projection=ccrs.PlateCarree()),
                           gridspec_kw={'hspace': 0.0, 'wspace': 0.15})
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['font.size'] = 13
def label_subplots(ax_lst, *, upper_case=False,
                   offset_points=(-5, -5)):
    """Annotate each axes with "(a)", "(b)", ... in reading order."""
    start_ord = 65 if upper_case else 97
    for ax, lab in zip(np.ravel(ax_lst), ('('+chr(j)+')' for j in count(start_ord))):
        ax.annotate(lab, (1., 1.),
                    xytext=offset_points,
                    xycoords='axes fraction',
                    textcoords='offset points',
                    ha='right', va='top', fontsize=14, fontweight='regular')
label_subplots(ax_lst, upper_case=False)
def custom_div_cmap(numcolors=25, name='custom_div_cmap',
                    mincol='blue', midcol='white', maxcol='red'):
    """ Create a custom diverging colormap with three colors
    Default is blue to white to red with 11 colors. Colors can be specified
    in any way understandable by matplotlib.colors.ColorConverter.to_rgb()
    """
    from matplotlib.colors import LinearSegmentedColormap
    cmap = LinearSegmentedColormap.from_list(name=name,
                                             colors =[mincol, midcol, maxcol],
                                             N=numcolors)
    return cmap
cmap = cm.broc
cmap = cmap.reversed()
#cmap = cm.batlow
#bounds = np.array([-5.,-4.,-3.,-2.,-1.,0.,1.,2.,3.,4.,5.])
# map window around the Connecticut River basin
min_lon = -74.5
max_lon = -69.5
min_lat = 40.5
max_lat = 45.5
lon_ticks = [-74, -73, -72, -71, -70]
lat_ticks = [41, 42, 43, 44, 45]
#cmap = cmaps.GMT_drywet
blevels = [-150.,-100.,-50.,0.,50.,100.,150]
# NOTE(review): `ticks` is unused in this cell (the colorbar below uses
# `blevels`) and its values are out of order/duplicated — looks like a typo
# for [-150., -100., -50., 0., 50., 100., 150.]; confirm before reusing.
ticks = np.array([-150.,-100.,50.,0.,100.,50.,150.])
N = len(blevels)-1
#cmap2 = custom_div_cmap(N, mincol='DarkBlue', midcol='CornflowerBlue' ,maxcol='w')
#cmap2 = custom_div_cmap(N, mincol=cmap(0.9), midcol=cmap(0.5) ,maxcol=cmap(0.1))
cmap2 = custom_div_cmap(N, mincol=cmap(1.), midcol=cmap(0.5) ,maxcol=cmap(0.))
cmap2.set_over('1.') # light gray
from matplotlib.colors import BoundaryNorm
bnorm = BoundaryNorm(blevels, ncolors=N, clip=False)
# Figure 1
# panel (a): winter (DJF)
ax_lst[0,0].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[0,0].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[0,0].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[0,0].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[0,0].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[0,0].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[0,0].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[0,0].tick_params(labelleft=True,labelbottom=False,labeltop=True,labelright=False)
ax_lst[0,0].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
ax_lst[0,0].coastlines(resolution='10m',linewidth=0.5)
#ax_lst[0,0].add_feature(cfeature.STATES.with_scale('10m'),linewidth=0.5,alpha=0.8)
ax_lst[0,0].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[0,0].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(trend_unstacked_winter.lon,trend_unstacked_winter.lat)
#notnull = pd.notnull(ds_unweighted['dis24'][0]) .where(notnull)
cs2 = ax_lst[0,0].pcolormesh(lon, lat, trend_unstacked_winter[0], vmin=-100.,vmax=100., cmap=cmap)
#.where(notnull)
# one shared horizontal colorbar for all four panels
cax,kw = mpl.colorbar.make_axes(ax_lst,location='bottom',pad=0.06,aspect=30, shrink=0.8)
cmap.set_over(cmap(1.0))
out=fig.colorbar(cs2, cax=cax, ticks=blevels,extend='both',**kw)
out.set_label('Extreme River Discharge Trend ($m^3 \cdot y^{-1}$)')
out.ax.tick_params(direction='in',size=2., width=1)
# legend swatch used as a season tag in the panel corner
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Winter',
                       markerfacecolor=cmap(0.7),markeredgecolor=cmap(0.7), markersize=5)
ax_lst[0,0].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
# Figure 2
# panel (b): spring (MAM)
ax_lst[0,1].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[0,1].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[0,1].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[0,1].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[0,1].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[0,1].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[0,1].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[0,1].tick_params(labelleft=False,labelbottom=False,labeltop=True,labelright=True)
ax_lst[0,1].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
ax_lst[0,1].coastlines(resolution='10m',linewidth=0.5)
ax_lst[0,1].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[0,1].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(trend_unstacked_spring.lon,trend_unstacked_spring.lat)
cs2 = ax_lst[0,1].pcolormesh(lon, lat, trend_unstacked_spring[0], vmin=-100.,vmax=100.,
                             cmap=cmap)
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Spring',
                       markerfacecolor=cmap(0.7),markeredgecolor=cmap(0.7), markersize=5)
ax_lst[0,1].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
# Figure 3
# panel (c): summer (JJA)
ax_lst[1,0].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[1,0].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[1,0].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[1,0].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[1,0].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[1,0].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[1,0].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[1,0].tick_params(labelleft=True,labelbottom=True,labeltop=False,labelright=False)
ax_lst[1,0].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
ax_lst[1,0].coastlines(resolution='10m',linewidth=0.5)
ax_lst[1,0].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[1,0].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(trend_unstacked_summer.lon,trend_unstacked_summer.lat)
cs2 = ax_lst[1,0].pcolormesh(lon, lat, trend_unstacked_summer[0], vmin=-100.,vmax=100.,
                             cmap=cmap)
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Summer',
                       markerfacecolor=cmap(0.7),markeredgecolor=cmap(0.7), markersize=5)
ax_lst[1,0].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
# Figure 4
# panel (d): fall (SON)
ax_lst[1,1].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[1,1].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[1,1].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[1,1].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[1,1].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[1,1].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[1,1].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[1,1].tick_params(labelleft=False,labelbottom=True,labeltop=False,labelright=True)
ax_lst[1,1].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
ax_lst[1,1].coastlines(resolution='10m',linewidth=0.5)
ax_lst[1,1].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[1,1].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(trend_unstacked_fall.lon,trend_unstacked_fall.lat)
cs2 = ax_lst[1,1].pcolormesh(lon, lat, trend_unstacked_fall[0], vmin=-100.,vmax=100.,
                             cmap=cmap)
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Fall',
                       markerfacecolor=cmap(0.7),markeredgecolor=cmap(0.7), markersize=5)
ax_lst[1,1].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
plt.savefig('seasonal_extreme_rd_allyears.png', format='png', transparent=False, dpi=600, bbox_inches='tight')
# -
import seaborn as sns
# distribution of winter trend values across all grid cells
sns.histplot(data=trend_unstacked_winter[0].values.flatten())
trend_unstacked_winter[0].values.flatten().max()
# total annual discharge: sum within each year, then average across years
glofas_yearly_sum = glofas.groupby("time.year").sum('time')
glofas_yearly_sum_year = glofas_yearly_sum.mean('year')
# mask exactly-zero cells (presumably cells outside the clipped basin,
# where summing all-NaN data yields 0 — confirm against the clip step)
glofas_yearly_sum_year = glofas_yearly_sum_year.where(glofas_yearly_sum_year.dis24 != 0.)
glofas_yearly_sum_year.dis24.plot()
glofas_yearly_sum_year
glofas_yearly_sum_year.dis24.min(), glofas_yearly_sum_year.dis24.max()
# +
# trend of total annual river discharge (dis24), expressed per decade
glofas_yearly_sum = glofas.groupby("time.year").sum('time')
# define a function to compute a linear trend of a timeseries
def linear_trend(x):
    # degree-1 fit slope is per-year; *10 converts it to per-decade
    pf = np.polyfit(x.year, x, 1)
    # need to return an xr.DataArray for groupby
    return xr.DataArray(pf[0]*10)
# stack lat and lon into a single dimension called allpoints
stacked = glofas_yearly_sum.dis24.stack(allpoints=['latitude','longitude'])
# apply the function over allpoints to calculate the trend at each point
trend = stacked.groupby('allpoints').apply(linear_trend)
# unstack back to lat lon coordinates
trend_unstacked = trend.unstack('allpoints')
trend_unstacked = trend_unstacked.rename({'dim_0': 'trend', 'allpoints_level_0': 'lat','allpoints_level_1': 'lon'})
# mask grid cells with an exactly-zero trend (outside the basin)
trend_unstacked = trend_unstacked.where(trend_unstacked[0] != 0.)
# -
trend_unstacked.max().values, trend_unstacked.min().values
trend_unstacked.plot()
# +
# two-panel map: (a) mean total annual discharge, (b) its 1979-2021 trend
fig, ax_lst = plt.subplots(1, 2,figsize=(11., 6.5), subplot_kw=dict(projection=ccrs.PlateCarree()),
                           gridspec_kw={'hspace': 0.2, 'wspace': 0.1})
fig.tight_layout()
def label_subplots(ax_lst, *, upper_case=False,
                   offset_points=(-5, -5)):
    """Annotate each axes with "(a)", "(b)", ... in reading order."""
    start_ord = 65 if upper_case else 97
    for ax, lab in zip(np.ravel(ax_lst), ('('+chr(j)+')' for j in count(start_ord))):
        ax.annotate(lab, (0.07, 1),
                    xytext=offset_points,
                    xycoords='axes fraction',
                    textcoords='offset points',
                    ha='right', va='top', fontsize=14, fontweight='regular')
label_subplots(ax_lst, upper_case=False)
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['font.size'] = 14
#cmap = cm.batlow
cmap3 = cm.broc
cmap3 = cmap3.reversed()  # diverging map for the trend panel
def custom_div_cmap(numcolors=25, name='custom_div_cmap',
                    mincol='blue', midcol='white', maxcol='red'):
    """ Create a custom diverging colormap with three colors
    Default is blue to white to red with 11 colors. Colors can be specified
    in any way understandable by matplotlib.colors.ColorConverter.to_rgb()
    """
    from matplotlib.colors import LinearSegmentedColormap
    cmap = LinearSegmentedColormap.from_list(name=name,
                                             colors =[mincol, midcol, maxcol],
                                             N=numcolors)
    return cmap
#cmap=cmaps.cmocean_tempo
#cmap = cm.devon
cmap = cmaps.GMT_drywet
# color levels for total annual discharge (left panel)
blevels = [0.,5.,10.,15.,20.,25.,50.,75.,100.,150.,200.]
N = len(blevels)-1
#cmap2 = custom_div_cmap(N, mincol='DarkBlue', midcol='CornflowerBlue' ,maxcol='w')
#cmap2 = custom_div_cmap(N, mincol=cmap(0.9), midcol=cmap(0.5) ,maxcol=cmap(0.1))
cmap2 = custom_div_cmap(N, mincol=cmap(0.), midcol=cmap(0.5) ,maxcol=cmap(0.9))
cmap2.set_over('1.') # light gray
from matplotlib.colors import BoundaryNorm
bnorm = BoundaryNorm(blevels, ncolors=N, clip=False)
bounds = np.array([0.,5.,10.,15.,20.,25.,50.,75.,100.,200.])
ticks = np.array([0.,10.,20.,50.,100.,200.])
#bounds = np.array([1000, 1100, 1200, 1300, 1400., 1500.])
bounds2 = np.array([-2.,-1.,0,1.,2.])  # trend colorbar ticks (right panel)
lon_ticks = [-74, -73, -72, -71, -70]
lat_ticks = [41, 42, 43, 44, 45]
# Figure 1
# left panel: climatological mean of total annual river discharge
ax_lst[0].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[0].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[0].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[0].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[0].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[0].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[0].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[0].tick_params(labelleft=True,labelbottom=False,labeltop=True,labelright=False)
ax_lst[0].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
#ax_lst[0].add_feature(cfeature.STATES, linewidth=0.4)
ax_lst[0].coastlines(resolution='10m',linewidth=0.5)
#ax_lst[0,0].add_feature(cfeature.STATES.with_scale('10m'),linewidth=0.5,alpha=0.8)
ax_lst[0].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[0].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(glofas_yearly_sum_year.longitude,glofas_yearly_sum_year.latitude)
# discharge divided by 1000 so the colorbar reads in thousands of m^3
cs2 = ax_lst[0].pcolormesh(lon, lat, glofas_yearly_sum_year.dis24/1000, norm=bnorm,
                           cmap=cmap2)
#.where(notnull)
cax,kw = mpl.colorbar.make_axes(ax_lst[0],location='bottom',pad=0.06,aspect=30, shrink=0.95)
out=fig.colorbar(cs2, cax=cax, ticks=ticks,**kw)
out.set_label('Total Annual River Discharge ($m^3 \cdot yr^{-1} \cdot 10^{3}$)')
out.ax.tick_params(direction='in',size=2., width=1)
"""
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Winter',
                    markerfacecolor=cmap(0.5),markeredgecolor=cmap(0.5), markersize=5)
ax_lst[0].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
"""
# Figure 2
# right panel: per-decade trend of total annual discharge
ax_lst[1].set_extent([-74.5, -69.5, 40.5, 45.5])
ax_lst[1].xaxis.set_major_formatter(LongitudeFormatter())
ax_lst[1].yaxis.set_major_formatter(LatitudeFormatter())
#lon_ticks = np.arange(min_lon, max_lon, 1)
#lat_ticks = np.arange(min_lat, max_lat, 1)
ax_lst[1].set_xticks(lon_ticks, crs=ccrs.PlateCarree())
ax_lst[1].set_yticks(lat_ticks, crs=ccrs.PlateCarree())
ax_lst[1].xaxis.set_tick_params(which='major', size=2., width=1, direction='in', top='on', pad=7)
ax_lst[1].yaxis.set_tick_params(which='major', size=2., width=1, direction='in', right='on',pad=7)
ax_lst[1].tick_params(labelleft=False,labelbottom=False,labeltop=True,labelright=True)
ax_lst[1].add_feature(cfeature.LAND.with_scale('10m'), color='white', alpha=0.4)
ax_lst[1].coastlines(resolution='10m',linewidth=0.5)
ax_lst[1].add_feature(cfeature.OCEAN, alpha=0.7)
ax_lst[1].add_feature(cfeature.LAND, alpha=0.4)
lon, lat= np.meshgrid(trend_unstacked.lon,trend_unstacked.lat)
cs2 = ax_lst[1].pcolormesh(lon, lat, trend_unstacked[0]/1000, vmin=-2.,vmax=2.,
                           cmap=cmap3)
"""
precipitation = Line2D([0], [0], marker='s', color='white', linestyle="", label='Spring',
                    markerfacecolor=cmap(0.5),markeredgecolor=cmap(0.5), markersize=5)
ax_lst[1].legend(handles=[precipitation], loc='upper left',fontsize=13, facecolor='lightgrey')
"""
cax,kw = mpl.colorbar.make_axes(ax_lst[1],location='bottom',pad=0.06,aspect=30, shrink=0.95)
out=fig.colorbar(cs2, cax=cax, ticks=bounds2,extend='both',**kw)
out.set_label('1979-2021 Trend ($m^3 \cdot decade^{-1} \cdot 10^3$)')
out.ax.tick_params(direction='in',size=2., width=1)
plt.savefig('enve5810_total_annual_rd_allyears.png', format='png', transparent=False, dpi=600, bbox_inches='tight')
# -
| ENVE5810_Trends_RD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="O4DSXpF-sEsE"
# **TASK 1**
# + id="3lzPIHlLj4bi"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/"} id="kjVkOJgvkB-1" outputId="8a3de590-1059-48c6-b018-b3390eebcdac"
def explicit_func(G):
    """Print the Euclidean (L2) norm of vector *G*.

    Generalized from the original hard-coded five-term sum of squares: now
    works for a 1-D numpy array of any length, with identical output for
    length-5 input.  Also avoids shadowing the parameter with the result.
    """
    norm = np.sqrt(np.sum(np.square(G)))
    print(norm)
# Build the six test vectors once, then exercise both the hand-written
# norm and numpy's implementation on each, printing identical output.
vectors = [
    np.array([3, 4, 7, 4, 1]),
    np.array([3, 3, 5, 1, 2]),
    np.array([1, 8, 0, 8, 4]),
    np.array([5, 3, 4, 1, 0]),
    np.array([1, 2, 3, 4, 4]),
    np.array([5, 6, 7, 8, 9]),
]
A, B, C, D, X, Y = vectors
print("Explicit function: ")
for vec in vectors:
    explicit_func(vec)
print("\n")
print("np.linalg.norm() function: ")
for vec in vectors:
    print(np.linalg.norm(vec))
# + [markdown] id="HeGMOjnZsI25"
# **Task 2**
# + colab={"base_uri": "https://localhost:8080/"} id="1NrSHtIasTmo" outputId="b539dd9d-dc68-47fe-eabd-76f9a23a5e7d"
def exp_func(X, Y):
    """Print the inner (dot) product of vectors *X* and *Y*.

    Generalized from the original hard-coded five-term expansion: now works
    for equal-length 1-D vectors of any size, with identical output for
    length-5 input.
    """
    print(np.dot(X, Y))
# Pair up the ten test vectors, then compare the hand-written inner
# product against numpy's, printing identical output.
pairs = [
    (np.array([5, 4, 3, 2, 1]), np.array([6, 4, 5, 5, 4])),
    (np.array([4, 8, 2, 1, 5]), np.array([5, 6, 4, 0, 5])),
    (np.array([2, 5, 1, 6, 1]), np.array([7, 3, 2, 1, 1])),
    (np.array([8, 4, 2, 3, 5]), np.array([6, 5, 5, 1, 6])),
    (np.array([5, 5, 3, 4, 1]), np.array([1, 1, 4, 2, 3])),
]
(A, B), (C, D), (E, F), (G, H), (I, J) = pairs
print("Explicit function: ")
for left, right in pairs:
    exp_func(left, right)
print("\n")
print("np.inner() function: ")
for left, right in pairs:
    print(np.inner(left, right))
# + [markdown] id="NzxCG8qwu8Ui"
# **Task 3**
# + id="g6g10Dw-u_OI" colab={"base_uri": "https://localhost:8080/"} outputId="653af044-9b23-438c-fe1b-84a715be793d"
def formula(G,T,A):
Total = ((G@G + T@T + A@A) * (G * (T + G * T) / A)) * np.linalg.norm(G+T+A)
print(Total)
# sample inputs for the Task-3 formula
G = np.array([-0.4, 0.3, -0.6])
T = np.array([-0.2, 0.2, 1])
A = np.array([0.2, 0.1, -1.5])
print("My Output: ")
formula(G,T,A)
# + colab={"base_uri": "https://localhost:8080/", "height": 248} id="GZrYoT988DnG" outputId="24babac6-7224-4827-975c-fbf595f03ead"
# Plot the resulting vector from the origin in a 3-D axes.
fig = plt.figure()
# BUG FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
# removed in 3.6; add_subplot is the supported way to create a 3-D axes.
plt1 = fig.add_subplot(projection='3d')
plt1.set_xlim([-2, 3])
plt1.set_ylim([-2, 3])
plt1.set_zlim([-2, 3])
# arrow from the origin to the hard-coded result vector
plt1.quiver(0, 0, 0, 1.25952615, 4.09345999, 0.8396841, colors='red')
plt.show()
# + id="EEseig3J_7MU"
| Lab4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Errors and Debugging
# Code development and data analysis always require a bit of trial and error, and Jupyter Notebook contains tools to streamline this process.
# This section will briefly cover some options for controlling Python's exception reporting, followed by exploring tools for debugging errors in code.
# ## Controlling Exceptions: ``%xmode``
#
# Most of the time when a Python script fails, it will raise an Exception.
# When the interpreter hits one of these exceptions, information about the cause of the error can be found in the *traceback*, which can be accessed from within Python.
# With the ``%xmode`` magic function, IPython allows you to control the amount of information printed when the exception is raised.
# Consider the following code:
# +
def func1(a, b):
    """Divide *a* by *b* (raises ZeroDivisionError when b == 0)."""
    return a / b


def func2(x):
    """Call func1 on values derived from *x*.

    Deliberately fails for x == 1 (denominator becomes 0) so the notebook
    can demonstrate IPython's traceback and debugging tools.
    """
    numerator = x
    denominator = x - 1
    return func1(numerator, denominator)
# -
func2(1)  # deliberately raises ZeroDivisionError to show the default traceback
# Calling ``func2`` results in an error, and reading the printed trace lets us see exactly what happened.
# By default, this trace includes several lines showing the context of each step that led to the error.
# Using the ``%xmode`` magic function (short for *Exception mode*), we can change what information is printed.
#
# ``%xmode`` takes a single argument, the mode, and there are three possibilities: ``Plain``, ``Context``, and ``Verbose``.
# The default is ``Context``, and gives output like that just shown before.
# ``Plain`` is more compact and gives less information:
# %xmode Plain
func2(1)  # same deliberate error, shown with the Plain exception mode
# The ``Verbose`` mode adds some extra information, including the arguments to any functions that are called:
# %xmode Verbose
func2(1)  # same deliberate error, shown with the Verbose exception mode
# This extra information can help narrow-in on why the exception is being raised.
# So why not use the ``Verbose`` mode all the time?
# As code gets complicated, this kind of traceback can get extremely long.
# Depending on the context, sometimes the brevity of ``Plain`` mode is easier to work with.
# ## Debugging: When Reading Tracebacks Is Not Enough
#
# The standard Python tool for interactive debugging is ``pdb``, the Python debugger.
# This debugger lets the user step through the code line by line in order to see what might be causing a more difficult error.
# The IPython-enhanced version of this is ``ipdb``, the IPython debugger.
#
# There are many ways to launch and use both these debuggers; we won't cover them fully here.
# Refer to the online documentation of these two utilities to learn more.
#
# In IPython, perhaps the most convenient interface to debugging is the ``%debug`` magic command.
# If you call it after hitting an exception, it will automatically open an interactive debugging prompt at the point of the exception.
# The ``ipdb`` prompt lets you explore the current state of the stack, explore the available variables, and even run Python commands!
#
# Let's look at the most recent exception, then do some basic tasks–print the values of ``a`` and ``b``, and type ``quit`` to quit the debugging session:
# %debug
# The interactive debugger allows much more than this, though–we can even step up and down through the stack and explore the values of variables there:
# %debug
# This allows you to quickly find out not only what caused the error, but what function calls led up to the error.
#
# If you'd like the debugger to launch automatically whenever an exception is raised, you can use the ``%pdb`` magic function to turn on this automatic behavior:
# %xmode Plain
# %pdb on
func2(1)  # with %pdb on, this exception drops straight into the debugger
# Finally, if you have a script that you'd like to run from the beginning in interactive mode, you can run it with the command ``%run -d``, and use the ``next`` command to step through the lines of code interactively.
# ### Partial list of debugging commands
#
# There are many more available commands for interactive debugging than we've listed here; the following table contains a description of some of the more common and useful ones:
#
# | Command | Description |
# |-----------------|-------------------------------------------------------------|
# | ``list`` | Show the current location in the file |
# | ``h(elp)`` | Show a list of commands, or find help on a specific command |
# | ``q(uit)`` | Quit the debugger and the program |
# | ``c(ontinue)`` | Quit the debugger, continue in the program |
# | ``n(ext)`` | Go to the next step of the program |
# | ``<enter>`` | Repeat the previous command |
# | ``p(rint)`` | Print variables |
# | ``s(tep)`` | Step into a subroutine |
# | ``r(eturn)`` | Return out of a subroutine |
#
# For more information, use the ``help`` command in the debugger, or take a look at ``ipdb``'s [online documentation](https://github.com/gotcha/ipdb).
| #0-1 Introduction and Jupyter Notebook/#1.7 - Errors and Debugging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies
import tweepy
import numpy as np
import json
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
# Import and Initialize Sentiment Analyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
# Twitter API Keys
# SECURITY NOTE(review): credentials are hard-coded in the notebook. They
# should be loaded from environment variables or a gitignored config file,
# and any key that was ever committed should be rotated.
consumer_key = "sz5wb5oRXgH9bnTIZFSgvhv3b"
consumer_secret = "<KEY>"
access_token = "979169790735470592-YZh7iRKW87dk7Ce7IAYlzGITHNC66hI"
access_token_secret = "<KEY>"
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# JSONParser makes api.search return plain dicts instead of model objects
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# -
# Pull a small sample of recent tweets mentioning the target handle.
target1 = "@cnn"
tweet_test = api.search(target1,count=3,result_type="recent")
# BUG FIX: the search result is stored in `tweet_test`, but the undefined
# name `tweet` was being printed, raising a NameError.
print(json.dumps(tweet_test,sort_keys=True,indent=4))
# +
# create list of twitter handles for news sources to target
target_handles = ("@BBCWorld", "@CBS", "@CNN",
                  "@FoxNews", "@nytimes")

# set filters for "real tweets"
min_tweets = 5
max_tweets = 10000
max_followers = 2500
max_following = 2500
lang = "en"

# Accumulator lists shared by all handles.
# BUG FIX: these were previously re-initialized inside the target loop, so
# only the last handle's results survived to the DataFrame.
compound_list = []
positive_list = []
negative_list = []
neutral_list = []
target_list = []
tweet_count = []

# loop through all target users
for target in target_handles:

    # variable for oldest tweet (None = start from the most recent page)
    oldest_tweet = None

    # "tweets ago" counter restarts for each handle
    counter = 1

    # loop through 5x (up to 100 tweets per page)
    for x in range(5):

        # run search for the next page of results
        public_tweets = api.search(target, count=100, result_type="recent",
                                   max_id=oldest_tweet)

        # loop through all tweets
        for tweet in public_tweets["statuses"]:

            # Use filters to check if user meets conditions
            if (tweet["user"]["followers_count"] < max_followers
                    and tweet["user"]["statuses_count"] > min_tweets
                    and tweet["user"]["statuses_count"] < max_tweets
                    and tweet["user"]["friends_count"] < max_following
                    and tweet["user"]["lang"] == lang):

                # Run Vader Analysis on each tweet
                results = analyzer.polarity_scores(tweet["text"])
                compound = results["compound"]
                pos = results["pos"]
                neu = results["neu"]
                neg = results["neg"]

                # BUG FIX: record the tweet author's screen name (a string)
                # instead of `target = tweet["user"]` (the whole user dict),
                # which also clobbered the loop variable used by the next
                # api.search call.  The string form is what the later
                # results_df["User"] == "BBCWorld" filters expect.
                screen_name = tweet["user"]["screen_name"]
                tweets_ago = counter

                # Add each value to the appropriate list
                compound_list.append(compound)
                positive_list.append(pos)
                negative_list.append(neg)
                neutral_list.append(neu)
                target_list.append(screen_name)
                tweet_count.append(tweets_ago)

                # Set the new oldest_tweet value and counter of tweet
                oldest_tweet = tweet["id"] - 1
                counter = counter + 1
# +
# Collect the looped data into a dataframe.
# BUG FIX: "Tweets Ago" previously used the scalar `tweets_ago` (just the
# last counter value) instead of the per-tweet `tweet_count` list, so every
# row carried the same value.
results_df = pd.DataFrame({"User": target_list,
                           "Compound": compound_list,
                           "Neutral": neutral_list,
                           "Positive": positive_list,
                           "Negative": negative_list,
                           "Tweets Ago": tweet_count})
results_df
# +
# Sentiment Over Last 100 Tweets
# One scatter series per news source.
# BUG FIXES: `ply.ylabel` was a NameError (should be `plt`); the original
# passed two-column DataFrames (User + value) to plt.scatter instead of
# the numeric columns; legend mode "Expanded" is not valid ("expand" is).
source_styles = [("BBCWorld", "red", "BBC"),
                 ("CBS", "orange", "CBS"),
                 ("CNN", "yellow", "CNN"),
                 ("FoxNews", "green", "Fox"),
                 ("nytimes", "blue", "NY Times")]
for handle, color, label in source_styles:
    mask = results_df["User"] == handle
    plt.scatter(results_df.loc[mask, "Tweets Ago"],
                results_df.loc[mask, "Compound"],
                c=color, edgecolor="black", linewidths=1,
                marker="o", alpha=0.8, label=label)
# add graph elements
plt.title("Sentiment Analysis of Media Tweets")
plt.ylabel("Tweet Polarity")
plt.xlabel("Number of Tweets Ago")
plt.xlim(0, 100)
plt.ylim(-1, 1)
plt.grid(True)
# create a legend
legend = plt.legend(fontsize="small", mode="expand", numpoints=1,
                    scatterpoints=1, loc="best", title="Media Sources",
                    labelspacing=0.5)
plt.savefig("sentiment.png")
# show plot
plt.show()
# +
# Overall sentiment of media sources: mean compound score per handle.
bar_df = results_df[["User", "Compound"]]
bar_df = bar_df.groupby(["User"]).mean()["Compound"]
# groupby sorts alphabetically, matching this label order
users = ["@BBCWorld", "@CBS", "@CNN", "@FoxNews", "@nytimes"]
# BUG FIXES: `x_axis` was left unassigned (SyntaxError), `plt.bar(users,,...)`
# and `plt.xticks(tick_locations.[...])` were invalid syntax, and the
# y-limits were inverted (0 to -1), which flipped the axis.
x_axis = list(range(len(users)))
plt.bar(x_axis, bar_df, alpha=0.5, align="edge")
tick_locations = [value + 0.4 for value in x_axis]
plt.xticks(tick_locations, users)
plt.xlim(-0.25, len(x_axis))
plt.ylim(-1, 1)
# add chart elements
plt.title("Polarity of Tweets")
plt.xlabel("Media Source")
plt.ylabel("Overall Media Sentiment based on Twitter")
plt.savefig("sentiment_overall.png")
plt.show()
| Tweepy_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import json
import numpy as np
import matplotlib.pyplot as plt
def transformDf(df, key):
    """Parse column *key* of *df* as datetimes (in place) and return *df* re-indexed on it."""
    converted = pd.to_datetime(df[key])
    df[key] = converted
    return df.set_index(key)
def setIndexToDate(df, avg):
    """Index *df* by the datetime-converted "Key" column of *avg* (converted in place)."""
    avg["Key"] = pd.to_datetime(avg["Key"])
    return df.set_index(avg["Key"])
# +
import pandas as pd
import matplotlib.pyplot as plt
def transformDf(df, key):
    """Convert *df*'s *key* column to datetimes (mutating *df*) and return *df* indexed on it."""
    df[key] = pd.to_datetime(df[key])
    indexed = df.set_index(key)
    return indexed
# Reach
dataAvg = pd.read_json('maintainerReach/averageMaintainerReach.json')
averageMaintainerReach = transformDf(dataAvg, "Key")
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(8,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Maintainer Reach')
plt.plot(averageMaintainerReach["Value"])
plt.savefig("maintainerReach/averageMaintainerReach.png")
# +
def setIndexToDate(df, avg):
    """Align *df*'s index to the datetime "Key" column of *avg*; *avg* is modified in place."""
    key_dates = pd.to_datetime(avg["Key"])
    avg["Key"] = key_dates
    return df.set_index(key_dates)
# Top 5 Reach
isaacs = setIndexToDate(pd.read_json('maintainerReach/isaacs.json'), dataAvg)
mathias = setIndexToDate(pd.read_json('maintainerReach/mathias.json'), dataAvg)
sindresorhus = setIndexToDate(pd.read_json('maintainerReach/sindresorhus.json'), dataAvg)
substack = setIndexToDate(pd.read_json('maintainerReach/substack.json'), dataAvg)
tootallnate = setIndexToDate(pd.read_json('maintainerReach/tootallnate.json'), dataAvg)
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(9,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Maintainer Reach')
plt.plot(isaacs["Y"], label = "isaacs")
plt.plot(mathias["Y"], label = "mathias")
plt.plot(sindresorhus["Y"], label = "sindresorhus")
plt.plot(substack["Y"], label = "substack")
plt.plot(tootallnate["Y"], label = "tootallnate")
plt.legend(loc='best', frameon=False)
plt.savefig("maintainerReach/top_5_reach.png")
# +
def setIndexToDate(df, avg):
    """Index *df* by the datetime-parsed "Key" column of *avg* (converted in place)."""
    avg["Key"] = pd.to_datetime(avg["Key"])
    reindexed = df.set_index(avg["Key"])
    return reindexed
dataAvg = pd.read_json('average_package_reach.json')
# Top 5 Reach
inherits = setIndexToDate(pd.read_json('inherits.json'), dataAvg)
safeBuffer = setIndexToDate(pd.read_json('safe-buffer.json'), dataAvg)
lodash = setIndexToDate(pd.read_json('lodash.json'), dataAvg)
coreUtilIs = setIndexToDate(pd.read_json('core-util-is.json'), dataAvg)
ms = setIndexToDate(pd.read_json('ms.json'), dataAvg)
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(10,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Package Reach')
plt.plot(inherits["Y"], label = "inherits")
plt.plot(safeBuffer["Y"], label = "safe-buffer")
plt.plot(lodash["Y"], label = "lodash")
plt.plot(coreUtilIs["Y"], label = "core-util-is")
plt.plot(ms["Y"], label = "ms")
plt.legend(loc='best', frameon=False)
plt.savefig("top_5_reach.png")
# -
# Reach of vvilhonen for case study
vvilhonen = setIndexToDate(pd.read_json('maintainerReach/vvilhonen.json'), dataAvg)
plt.figure(figsize=(8,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Maintainer Reach')
plt.plot(vvilhonen["Y"])
plt.savefig("maintainerReach/vvilhonen.png")
# +
# Maintainer Package Count
dataAvg = pd.read_json('maintainerPackages/averageMaintainerPackageCount.json')
averageMaintainerPackageCount = transformDf(dataAvg, "Key")
plt.figure(figsize=(8,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Maintainer Package Count')
plt.plot(averageMaintainerPackageCount["Value"], label = "Average")
plt.savefig("maintainerPackages/averageMaintainerPackages.png")
# +
def setIndexToDate(df, avg):
    """Re-index *df* using the "Key" column of *avg* parsed as datetimes.

    Note: *avg* is mutated — its "Key" column is replaced by the parsed dates.
    """
    dates = pd.to_datetime(avg["Key"])
    avg["Key"] = dates
    return df.set_index(dates)
# Top 5 Maintainer Packages Count
types = setIndexToDate(pd.read_json('maintainerPackages/types.json'), dataAvg)
isaacs = setIndexToDate(pd.read_json('maintainerPackages/isaacs.json'), dataAvg)
ehsalazar = setIndexToDate(pd.read_json('maintainerPackages/ehsalazar.json'), dataAvg)
jonschlinkert = setIndexToDate(pd.read_json('maintainerPackages/jonschlinkert.json'), dataAvg)
sindresorhus = setIndexToDate(pd.read_json('maintainerPackages/sindresorhus.json'), dataAvg)
kylemathews = setIndexToDate(pd.read_json('maintainerPackages/kylemathews.json'), dataAvg)
plt.figure(figsize=(9,6), dpi=90)
plt.xlabel('Time')
plt.ylabel('Average Maintainer Package Count')
plt.plot(types["Y"], label = "types")
plt.plot(isaacs["Y"], label = "isaacs")
plt.plot(ehsalazar["Y"], label = "ehsalazar")
plt.plot(jonschlinkert["Y"], label = "jonschlinkert")
plt.plot(sindresorhus["Y"], label = "sindresorhus")
plt.plot(kylemathews["Y"], label = "kylemathews")
plt.legend(loc='best', frameon=False)
plt.savefig("maintainerPackages/top_5_packages.png")
# +
# Sorted maintainer package count
import math
import pandas as pd
import matplotlib.pyplot as plt
sortedPackageCount = pd.read_json(
'sorted_maintainer_package_count.json', typ='series')
sortedPackageCount
plt.figure(figsize=(8, 6), dpi=90)
plt.rcParams.update({'font.size': 14})
plt.xlabel('Maintainer')
plt.ylabel('Package Count (log10)')
plt.plot(sortedPackageCount[2011], label="2011")
plt.plot(sortedPackageCount[2012], label="2012")
plt.plot(sortedPackageCount[2013], label="2013")
plt.plot(sortedPackageCount[2014], label="2014")
plt.plot(sortedPackageCount[2015], label="2015")
plt.plot(sortedPackageCount[2016], label="2016")
plt.plot(sortedPackageCount[2017], label="2017")
plt.plot(sortedPackageCount[2018], label="2018")
plt.gca().set_yticklabels(['{:.0f}'.format(math.pow(10, x))
for x in plt.gca().get_yticks()])
plt.legend(loc='best', frameon=False)
plt.savefig("sorted_maintainer_package_count.png")
| jupyterlab/graphs/EvolutionGraphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment
# ## Programming for Data Analysis
# ### numpy.random package
# #### *<NAME>*
#
#
# https://www.markdownguide.org/basic-syntax/
#
# ---
import numpy as np
import matplotlib.pyplot as plt
# #### Task 1
# ***
#
# Overall purpose of the package
# ---
# Definition
# ***
# This module is part of the numpy library. In its simplest form, it contains functions that are used to randomly generate numbers. The module contains some simple random data generation methods, some permutation and distribution functions, and random generator functions. https://www.javatpoint.com/numpy-random.
#
#
# The package's(numpy.random) number routines produce pseudo random numbers using combinations of a BitGenerator to create sequences and a Generator to use those sequences.
# https://numpy.org/doc/stable/reference/random/index.html#:~:text=Random%20sampling%20(%20numpy.-,random%20),Objects%20that%20generate%20random%20numbers.
#
#
#
#
#
#
#
#
#
#
#
# https://chrisalbon.com/python/basics/generating_random_numbers_with_numpy/
#
# https://www.geeksforgeeks.org/numpy-random-rand-python/
# #### Task 2
# ***
# 1. Simple Random Package
# A. integers
# B. random
# c. choice
# d. bytes
#
# 2. Permutations
# a. shuffle
# b. permutation
# ---
# # Simple Random Data
#
# ### Integers
# Return random integers
#
# Parameters
#
# low: lowest integers to be drawn from the distribution. if no high value, parameter is 0 and value is used as high
#
# high: if provided, one above the largest integer to be drawn from the distribution
#
# size: output shape. single integer or array
#
# dtype: desired type of the result
#
# endpoint: if true, sample from low to high, default = False
#
# Returns: size-shaped array of random integers, or a single integer if size is not provided.
# +
# integers
# example 1
rng = np.random.default_rng()
# high point = 2
# size = 10
rng.integers(3, size=10) # random
# returns an array of 10 integers ranging between 0 -> 2
# -
# Generate a 2 x 4 array of ints between 0 and 4, inclusive:
rng.integers(5, size=(2, 4))
# # Simple Random Data
#
# ### B. Random
# Random function = Outputs return between (0.0 -> 1.0)
#
# Results generated from this function are from the "continuous uniform" distribution
#
# (b -a) * random() + a
#
# Parameters:
#
# size: integer or tuple of integers (if nothing is input, a single value will be returned)
#
# dtype: refers to the desired dtype. Default value is np.float64
#
# out: alternative output array.
#
# Returns: array of random floating numbers or single if no size is input.
#
#
# https://numpy.org/doc/stable/reference/random/generated/numpy.random.Generator.random.html#numpy.random.Generator.random
#
# https://www.javatpoint.com/numpy-random
#
#
# +
#Example 1
# Example below is the use of the new generator PCG64
# PCG64 replaced the previous default generator MT19937.
rng = np.random.default_rng()
rng.random()
# +
# Exmaple 2
# six by two array
# betweens numbers [-2, 0]
2 * rng.random((6, 2)) -2
# -
# +
# Random
#Example 1
# np.random.rand = used to generate random numbers
# in the below example the '3' refers to the rows of output and
#the '5' refers to the number of random numbers randomly gererated
a = np.random.rand(3, 5)
# Returns 3 rows each containing 5 randomly generated numbers between 0.0 -> 1.0
a
# -
# +
# Example 2
# Randomly generating a single number
# Re-run to show the randomly generated number each time
c = np.random.random()
# Returns random number between 0.0 -> 1.0
c
# +
#type of number returned
type(c)
# -
#
# # Simple Random Data
#
# ### Choice
# This function generates a random sample from a given 1-D array
#
# Parameters:
# a: array: if ndarray = random sample generated, if integer = random sample generated from np.arange(a)
#
# size: Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn from the 1-d a. Default is None, and a single value is returned.
#
# replace: Whether the sample is with or without replacement
#
# p: The probabilities associated with each entry in a. If not given the sample assumes a uniform distribution over all entries in a.
#
# axis: The axis along which the selection is performed. The default, 0, selects by row.
#
# shuffle: bool, optional
# Whether the sample is shuffled when sampling without replacement. Default is True, False
#
#
# Returns: generates a single or random ndarray
# +
# Choice
# example 1
#Similar to rng.integers
rng = np.random.default_rng()
# generate a uniform random sample from np.arange(7) of size 5
rng.choice(7, 5) # output is random
# +
# example 2
# generate a uniform random sample from np.arange(5) of size 3 without replacement:
rng.choice(5, 3, replace=False)
#This is equivalent to rng.permutation(np.arange(5))[:3]
# -
# # Simple Random Data
#
# ### Bytes
# generator.bytes(length)
#
# Parameters: length (in the form of an integer)
#
# Returns: string of length.
# +
# numpy function.generator.bytes
np.random.default_rng().bytes(10)
# -
# # Permutation
#
# ### Shuffle
# Modify a sequence in-place by shuffling its contents.
#
# order of sub arrays is changed but contents remain the same
#
# Parameters:
#
# x: array/list to be shuffled
#
# axis: axis which x is tobe shuffled. Default is 0.
#
# Returns: none
# A.Shuffle
# +
# Example 1
# shuffle array given the range np.arange(10) = 0 -> 11 (not including 11)
rng = np.random.default_rng()
arr = np.arange(12)
rng.shuffle(arr)
arr
# +
# Example 2
# shuffle array given the range np.arange(10) = 0 -> 9 (not including 10)
# in 5 rows containg 2 integers from the array
arr = np.arange(10).reshape((5, 2))
rng.shuffle(arr)
arr
# -
# # Permutation
#
# ### Shuffle
# Randomly permute a sequence, or return a permuted range.
#
# Parameters:
#
# x: if x is an integer, randomly permute np.arange(x). If x is array, make a copy and shuffle elements randomly
#
# axis: axis which x is shuffled
#
# Returns: permuted sequence or array range
#
# +
# Example 1
# shuffle elements in array - np.arange(10)
rng = np.random.default_rng()
rng.permutation(10)
# +
# Example 2
# shuffle elements in the array provided
a = np.random.permutation([1025, 56, 4, 17, 100])
a
# -
# #### Task 3
#
# Five Distributions with the numpy.random package
# ---
#
# 1) Uniform
# 2) Standard normal
# 3) hypergeometric
# 4) triangular
# 5) power
# Uniform Distribution
#
# Draw samples from a uniform Distribution
#
# samples are uniformly distributed over the half-open interval
#
# Parameters:
#
# low: lower boundary of the output interval. Default is 0
#
# high: upper boundary of the output interval. Default is 1.0
#
# size: output shape.
#
# Returns: drawn samples from the parameterized uniform distribution
s = np.random.default_rng().uniform(-1,0,1000)
# +
np.all(s >= -1)
np.all(s < 0)
# -
count, bins, ignored = plt.hist(s, 15, density=True)
plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
plt.show()
# Standard Normal Distribution
#
# Draw samples from a normal standard distribution
#
# Parameters:
#
# size: output shape. if given ( a, b, c) then a * b * c samples are drawn. Default is none, single value returned.
#
# dtype: desired dtype of the result. Default is np.float64.
#
# out: alternative array in which to place a result.
#
# Returns: a floating poin array of shape size of drawn samples.
# +
#standard normal (2)
rng = np.random.default_rng()
rng.standard_normal(8000)
# -
# Hypergeometric Distribution
#
# Samples are drawn from a hypergeometric distribution with specified parameters, ngood (ways to make a good selection), nbad (ways to make a bad selection), and nsample (number of items sampled, which is less than or equal to the sum ngood + nbad).
#
# Parameters:
#
# ngood:
# Number of ways to make a good selection
#
# nbad:
# Number of ways to make a bad selection
#
# nsample:
# Number of items sampled.
#
# size:
# Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. If size is None (default), a single value is returned if ngood, nbad, and nsample are all scalars. Otherwise, np.broadcast(ngood, nbad, nsample).size samples are drawn
#
#
# Returns:
# Drawn samples from the parameterized hypergeometric distribution. Each sample is the number of good items within a randomly selected subset of size nsample taken from a set of ngood good items and nbad bad items.
# hypergeometric (3)
# Draw from a hypergeometric distribution and visualise the samples.
rng = np.random.default_rng()
ngood, nbad, nsamp = 100, 2, 10
# BUG FIX: the original passed the undefined names `good, bad, samp`
# instead of the variables defined above.
s1 = rng.hypergeometric(ngood, nbad, nsamp, 1000)
plt.hist(s1)
plt.show()
# probability-style sanity check on a symmetric 15/15 draw of 15 items
s = rng.hypergeometric(15, 15, 15, 100000)
sum(s>=7)/100000. + sum(s<=8)/100000.
# Triangular Distribution
#
# triangular distribution is a continuous probability distribution with lower limit left, peak at mode, and upper limit right. Unlike the other distributions, these parameters directly define the shape of the pdf.
#
# Parameters:
# left: Lower limit.
#
# mode: The value where the peak of the distribution occurs. The value must fulfill the condition left <= mode <= right.
#
# right: Upper limit, must be larger than left.
#
# size: Output shape.
#
# Returns:
# Drawn samples from the parameterized triangular distribution.
# +
# triangular (4)
h = plt.hist(np.random.default_rng().triangular(-3, 0, 8, 1000000), bins=200,
density=True)
plt.show()
# -
# +
# power (5)
x = 5. # shape
samples = 1000
s1 = np.random.power(x, samples)
import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s1, bins=30)
a = np.linspace(0, 1, 100)
b = x*a**(x-1.)
density_b = samples*np.diff(bins)[0]*b
plt.plot(a, density_b)
plt.show()
# -
# #### Task 4
#
# Explain the use of seeds in generating pseudorandom numbers.
#
# https://yourbasic.org/algorithms/random-number-generator-seed/
#
# What is a seed?
#
# The seed is a starting point for a sequence of pseudorandom numbers. If you start at the same seed you get the same sequence.
#
# Are they random?
#
# No, to us humans they appear random but in actual fact, they're computed using a fixed fixed determined algorthim.
#
# https://yourbasic.org/algorithms/random-number-generator-seed/
#
#
#
#
#
#
#
# Why are seeds needed?
#
# Computers don't generate random numbers - they are determined by a set of rules. Generally by the time on the computer.
#
# A simple example of a non-random generator.
#
# E.g
#
# (X + 325) = y
# (y - 50) = z
#
# And the next "random" number would apply the same logic
#
# (z + 325) = p
# (p -50) = r
#
# and so on.
#
# when you apply numbers you can see the appearance of a random number each time.
#
# Obviously this example is simplistic and the actual algorithm would be much more complex and appear to be even more random.
#
# https://www.statisticshowto.com/random-seed-definition/
#
#
#
# Numpy - 1.19 package
#
# *PCG64*
#
# Previous numpy versions
#
# *MT19937*
#
#
#
#
from numpy.random import Generator, PCG64
import numpy.random
rg = Generator(PCG64())
# %timeit -n 1 rg.standard_normal(100000)
# %timeit -n 1 numpy.random.standard_normal(100000)
| Programming-for-Data-Analysis-Assignments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''PythonData'': conda)'
# language: python
# name: python361064bitpythondataconda29aaf85289304b3d90fb2f723b3eead0
# ---
#dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import pymongo
import time
import requests
#urls
nasa_news_url = 'https://mars.nasa.gov/news/'
space_image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
space_fact_url = "https://space-facts.com/mars/"
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
#Windows
# Set Executable Path & Initialize Chrome Browser
executable_path = {"executable_path": "./chromedriver.exe"}
browser = Browser("chrome", **executable_path)
# +
#visit nasa news url in splinter
browser.visit(nasa_news_url)
#create response, to be used in soup object
response = requests.get(nasa_news_url)
#create soup object
soup = bs(response.text, 'html.parser')
#display html
#nasa_news_html = print(soup.prettify())
# -
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.mars_db
collection = db.nasa
# +
# Collect every article-title container from the already-parsed NASA page.
results = soup.find_all('div', class_="content_title")
# loop over results to get article data
for result in results:
    # scrape the article header
    # NOTE(review): soup.find(...) always returns the FIRST matching element
    # regardless of `result`, so every iteration re-inserts the same
    # title/paragraph pair.  `result.find('a')` (and the sibling description
    # relative to `result`) was probably intended — confirm against the
    # live page structure before changing.
    news_title = soup.find('div', class_= "content_title").find('a').text.strip()
    news_paragraph = soup.find('div', class_= "rollover_description_inner").text.strip()
    # print article data
    print('-----------------')
    print(news_title)
    print(news_paragraph)
    # Dictionary to be inserted into MongoDB
    post = {
        'news_title': news_title,
        'news_paragraph': news_paragraph,
    }
    # Insert dictionary into MongoDB as a document
    collection.insert_one(post)
# +
#visit space url in splinter
browser.visit(space_image_url)
#create response, to be used in soup object
response = requests.get(space_image_url, stream=True)
#create soup object
soup = bs(response.content, 'html5lib')
featured_image = soup.find("img", class_="thumb")["src"]
#space_image_html = print(soup.prettify())
featured_image_url = f"https://www.jpl.nasa.gov{featured_image}"
print(featured_image_url)
# +
# Define database and collection
db = client.mars_db
collection = db.space_images
# BUG FIX: the original assigned the "src" attribute STRING to `results`
# and then looped `for result in results`, which iterates the string's
# characters and inserts one identical document per character.  A single
# insert of the featured image URL is intended.
post = {
    'featured_image_url': featured_image_url,
}
# Insert dictionary into MongoDB as a document
collection.insert_one(post)
# -
| .ipynb_checkpoints/mission_to_mars0903-2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Aula 05 - Strings!
#
# Strings são estruturas de dados para trabalhar com palavras e textos.
message = 'Oi, mundo'
# É possível utilizar a sintaxe de listas para acessar strings, pois elas são listas de caracteres
message[0]
message[0:2]
# Slicing s[inicio:fim:passo]
message[-1]
message[0:3]
# Temos diversos métodos de strings.
#
# - capitalize(): faz o primeiro caractere ficar maiúsculo
# - title(): transforma o texto Como Todas As Primeiras Letras Maiúsculas
# - lower(): deixa tudo minúsculo
# - upper(): deixa tudo MAIÚSCULO
message = 'OI, MUNDO!'
message.capitalize()
message.lower()
message.upper()
opcao = input('Escolha: a) LoL, b) Dota2: ').lower()
if opcao == 'a':
print('BOA')
elif opcao.lower() == 'b':
print('poxa vida')
else:
print('nem sei')
acao = input('digita ae')
v = 10 if acao == 'a' else 20
print(v)
message.split(',')
cadastro = 'guilherme#sep#36812312#sep#30#sep#1988'
cadastro.split('#sep#')
m = ' Oi '
m.strip()
message = 'oi, mundo oi'
message = message.replace('oi', 'olá')
message
'apple' == 'apple'
'Apple' > 'apple'
print('O unicode de A é ', ord('A'))
print('O unicode de a é ', ord('a'))
#palavra = input('Digite uma palavra: ')
palavra = 'x-salada'
for controle in [1, 2, 3, 4, 5]:
print(controle * 2)
palavra1 = 'Oi'
palavra2 = 'Mundo'
palavra1 + palavra2
c = ''
c = c + 'a'
c = c + 'b'
c
numeros = [1, 2, 3, 4, 5]
numeros_dobrados = ['par' if n % 2 == 0 else 'ímpar' for n in numeros]
numeros_dobrados
v = []
for n in numeros:
v.append(str(n))
v
'#'.join([str(n) for n in numeros])
# Faça um programa que pede para o usuário digitar uma palavra e cria umanova string igual, copiando letra por letra, depois imprima a nova string.
palavra = input('Digite uma palavra: ')
nova_string = ''
i = 0
for letra in palavra:
nova_string += letra
i += 1
print('lendo letra {}, nova_string é: {}'.format(letra, nova_string))
palavra = input('Digite uma palavra: ')
nova_string = ''
i = 0
for letra in palavra:
if i % 2 == 0:
nova_string += letra.upper()
else:
nova_string += letra.lower()
print('lendo letra {}, nova_string é: {}'.format(letra, nova_string))
i += 1
| Aula_05/Aula 05 - Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PART 1
a = 10
a=b
try:
## code block where exception can occure
a=b
except:
print('Some proble occure')
# main class for exception
Exception
try:
a=b
except Exception as e:
print(e)
a=b
try:
a=b
except NameError as n:
print("User hasen't defined")
except Exception as e:
print(e)
try:
a=1
b='hello'
c=a+b
except NameError as n:
print("User hasen't defined")
except Exception as e:
print(e)
a=1
b='hello'
c=a+b
try:
a=1
b='hello'
c=a+b
except NameError as n:
print("User hasen't defined")
except TypeError as t:
print("Try to make same data type")
except Exception as e:
print(e)
try:
a=int(input('Ente Number 1:'))
b=int(input('enter Number 2:'))
c=a+b
e=a/b
d=a*b
print(c)
print(e)
print(d)
except NameError as n:
print("User hasen't defined")
except TypeError as t:
print("Try to make same data type")
except ZeroDivisionError as z:
print("Don't Divide by ZERO\nProvide Number Greater Then 0")
except Exception as e:
print(e)
# +
# try else block
try:
a=int(input('Ente Number 1:'))
b=int(input('enter Number 2:'))
c=a+b
e=a/b
d=a*b
except NameError as n:
print("User hasen't defined")
except TypeError as t:
print("Try to make same data type")
except ZeroDivisionError as z:
print("Don't Divide by ZERO\nProvide Number Greater Then 0")
except Exception as e:
print(e)
else:
print(c)
print(e)
print(d)
print('Done...')
# -
# +
try:
a=int(input('Ente Number 1:'))
b=int(input('enter Number 2:'))
e=a/b
except NameError as n:
print("User hasen't defined")
except TypeError as t:
print("Try to make same data type")
except ZeroDivisionError as z:
print("Don't Divide by ZERO\nProvide Number Greater Then 0")
except Exception as e:
print(e)
else:
print(e)
# print('Done...')
finally:
print('The execution Done...')
# -
# # PART 2 Custom Exception
class Error(Exception):
    """Base class for this notebook's custom exceptions."""


class dobException(Error):
    """Raised when a date of birth puts the applicant outside the allowed age range."""


class customgeneric(Error):
    """Generic custom error (declared for later use)."""
# Ask for the year of birth and validate the derived age.
year = int(input('Ente Year Of Birth:'))
# NOTE(review): the current year is hard-coded, so computed ages drift
# after 2021 — consider datetime.date.today().year.
age = 2021-year
try:
    # acceptable application window: 20 to 30 years old, inclusive
    if age <=30 and age>=20:
        print('Valid Age apply for job')
    else:
        # out of range -> signal via the custom exception defined above
        raise dobException
except dobException:
    print('The age is not with in range . you can not apply for job')
| Prerequisite/Advance Python Exception Handling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="WJDQLGIicAfp" executionInfo={"status": "ok", "timestamp": 1629710998509, "user_tz": 420, "elapsed": 21699, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="9c3e9c77-cc17-427d-beb7-1c987f0b15d5"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="Bop6v8W5esBX"
# ## Librerías
# + colab={"base_uri": "https://localhost:8080/"} id="Dx4nN_OGcNB-" executionInfo={"status": "ok", "timestamp": 1629711134769, "user_tz": 420, "elapsed": 136266, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="4f3e4ff0-147e-46db-8ab6-ae8dccc310ff"
# !pip install textacy
# !pip install scikit-learn
# !pip install -U spacy
# !python -m spacy download es
# !python -m spacy download es_core_news_lg
# !sudo apt install tesseract-ocr
# !apt install libtesseract-dev
# !pip install pytesseract
# !apt-get install tesseract-ocr-spa
# !mkdir tessdata
# !wget -O ./tessdata/spa.traineddata https://github.com/tesseract-ocr/tessdata_best/blob/master/spa.traineddata?raw=true
# !pip install easyocr
# + id="uOO0yc9QmJzz"
import pytesseract
import spacy
import numpy as np
import cv2
from google.colab.patches import cv2_imshow
import PIL
from PIL import Image
from matplotlib import pyplot as plt
from pytesseract import Output
from easyocr import Reader
from PIL import ImageFont, ImageDraw, Image
import re
import pandas as pd
import math
from spacy import displacy
import json
from IPython.core.display import display,HTML
import glob
# + [markdown] id="wfhFcxL5vXvw"
# ## Configuraciones
# + colab={"base_uri": "https://localhost:8080/"} id="vPoBghfr8DQW" executionInfo={"status": "ok", "timestamp": 1629711162727, "user_tz": 420, "elapsed": 23993, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="bfe700a7-369d-4c7c-8f07-d484258b9375"
pd.set_option('display.max_rows', None)
lista_idiomas = 'es'
idiomas = lista_idiomas.split(',')
gpu = True #@param {type: 'boolean'}
reader = Reader(idiomas, gpu)
config_tesseract = '--tessdata-dir tessdata --psm 6'
# + id="8mCGlROP_azb"
meses= '(enero|ene|ne|febrero|feb|fev|marzo|mar|abril|abr|mayo|may|junio|jun|julio|jul|agosto|ago|ag0|septiembre|sep|octubre|oct|ct|noviembre|nov|diciembre|dic)'
variaciones_exp = '(Exp|Expe|Expo|Ep.|EX|E:p|5Xp|xp.|ExXpe|xP.,Ex)'
# + id="E9sywWQ8sbI9"
#Build upon the spaCy Small Model
nlp = spacy.load('es_core_news_lg')
#Create the Ruler and Add it
ruler = nlp.add_pipe("entity_ruler",before="tok2vec")
#List of Entities and Patterns (source: https://spacy.io/usage/rule-based-matching)
patterns = [
{
"label": "FECHA", "pattern": [{"TEXT": {"REGEX": "F@"}}
]
},{
"label": "EXPEDIENTE", "pattern": [{"TEXT": {"REGEX": "E@"}}
]
}
]
#add patterns to ruler
ruler.add_patterns(patterns)
# + [markdown] id="_KnurXX4g0IP"
# ## Funciones
# + id="6BxmcuN_qOJS"
def orient(image):
    """Return *image* rotated 90 degrees counter-clockwise when it is portrait; otherwise unchanged."""
    height, width = image.shape[0], image.shape[1]
    if height <= width:
        return image
    return cv2.rotate(image, rotateCode=cv2.ROTATE_90_COUNTERCLOCKWISE)
# + id="snF5P28anD2E"
def find_textbox_coords(image):
    """Find one bounding box enclosing all word-like text detected in *image*.

    Uses the global EasyOCR ``reader``.  Only detections whose text starts
    with two consecutive letters are kept (filters stray symbols/digits).
    Draws the box onto *image* (mutating it), saves it to 'boxed.jpg', and
    returns (min_x, min_y, max_x, max_y).
    """
    resultados = reader.readtext(image)
    # start the running min at the far image corner and the max near the
    # origin so any in-bounds detection tightens them
    min_x = image.shape[1]-1
    min_y = image.shape[0]-1
    max_x = 1
    max_y = 1
    for coordenadas, texto, confianza in resultados:
        if (re.search("^[a-z]{2}",texto.lower())): # keep only text starting with two consecutive letters
            # each detection supplies four corner points; fold them into
            # the running bounding box, ignoring non-positive coordinates
            for punto in coordenadas:
                x = int(punto[0])
                y = int(punto[1])
                if (x < min_x and x > 0):
                    min_x = x
                if (y < min_y and y > 0):
                    min_y = y
                if (x > max_x and x > 0):
                    max_x = x
                if (y > max_y and y > 0):
                    max_y = y
    # draw the combined box (BGR color) and persist a debug copy
    color= (255, 100, 0)
    boxed_image = cv2.rectangle(image, (min_x, min_y), (max_x, max_y), color, 2)
    cv2.imwrite('boxed.jpg',boxed_image)
    return min_x,min_y,max_x,max_y
# + id="K12jHLe965-Y"
def crop(image, min_x, min_y, max_x, max_y):
    """Return the sub-image covering rows min_y:max_y and columns min_x:max_x (a view, not a copy)."""
    return image[min_y:max_y, min_x:max_x]
# + id="PtKNvgOJe5nt"
def gray(image):
    """Convert a BGR color image to a single-channel grayscale image."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# + id="fUxdR6kHfNpq"
def binarize_otsu(image):
    """Binarize a grayscale image using Otsu's automatically chosen threshold."""
    threshold_used, binary = cv2.threshold(
        image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return binary
# + id="Ihdycy4xNSyT"
def binarize_gauss(image):
    """Binarize a grayscale image with an adaptive local threshold.

    NOTE(review): despite the name, this uses ADAPTIVE_THRESH_MEAN_C
    (local mean), not the Gaussian variant — confirm which was intended.
    """
    return cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, 11, 9)
# + id="smv3ip94GA_C"
def otsu_plus(image):
    """Binarize an image region by region.

    Runs the global EasyOCR ``reader`` to locate text regions, applies Otsu
    thresholding to each detected region independently, and pastes the
    binarized patches onto an all-white copy of the image.  Per-region Otsu
    adapts to uneven lighting better than one global threshold.
    """
    resultados = reader.readtext(image)
    # white canvas the same shape as the input; only text regions get content
    mask = image.copy()
    mask[:] = 255
    for coordenadas, texto, confianza in resultados:
        # compute this detection's axis-aligned bounding box from its
        # four corner points
        min_x = image.shape[1]-1
        min_y = image.shape[0]-1
        max_x = 0
        max_y = 0
        for punto in coordenadas:
            x = int(punto[0])
            y = int(punto[1])
            if (x < min_x):
                min_x = x
            if (y < min_y):
                min_y = y
            if (x > max_x):
                max_x = x
            if (y > max_y):
                max_y = y
        # Otsu-binarize just this patch and paste it into the canvas
        pedazon = image[min_y: max_y,min_x: max_x]
        val, pedazon_bin = cv2.threshold(pedazon, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        transformacion_bin = pedazon_bin
        mask[min_y: max_y,min_x: max_x] = transformacion_bin
    return mask
# + id="Kq-qvQZcjx8Q"
def denoise(image):
    """Remove small specks with dilate/erode passes, a closing, and a median blur.

    The number of dilate/erode iterations scales with image height and is
    capped at 4 so very tall scans are not over-processed.
    """
    iterations = min(int(image.shape[0] / 500), 4)
    kernel = np.ones((2, 2), np.uint8)
    dilatada = cv2.dilate(image, kernel, iterations=iterations)
    erosionada = cv2.erode(dilatada, kernel, iterations=iterations)
    cerrada = cv2.morphologyEx(erosionada, cv2.MORPH_CLOSE, kernel)
    return cv2.medianBlur(cerrada, 3)
# + id="DoSFMYzAmIv6"
def thick_font(image, iterations=1):
    """Thicken dark text strokes by dilating on an inverted copy of the image."""
    invertida = cv2.bitwise_not(image)
    dilatada = cv2.dilate(invertida, np.ones((3, 3), np.uint8), iterations=iterations)
    return cv2.bitwise_not(dilatada)
# + id="AknucayolRfn"
def thin_font(image, iterations=1):
    """Thin dark text strokes by eroding on an inverted copy of the image."""
    invertida = cv2.bitwise_not(image)
    erosionada = cv2.erode(invertida, np.ones((2, 2), np.uint8), iterations=iterations)
    return cv2.bitwise_not(erosionada)
# + id="FRn2sts_hDw4"
def put_margins(image):
    """Add a 50-pixel white border on every side of the image."""
    margen = 50
    blanco = [255, 255, 255]
    return cv2.copyMakeBorder(image, margen, margen, margen, margen,
                              cv2.BORDER_CONSTANT, value=blanco)
# + id="DdQqHFXV3QLC"
def remove_lines(image, show=False):
    """Erase long horizontal and vertical ruling lines from a binarized image.

    Lines are found with wide/tall morphological openings on the inverted
    image, dilated slightly so their full thickness is covered, and then
    OR-ed into the white-background image so they disappear.

    Parameters
    ----------
    image : binarized image, dark text on white background.
    show : if True, display the dilated lines and the result (debug aid;
        requires cv2_imshow to be available, e.g. in Colab).
    """
    thresh = cv2.bitwise_not(image)
    # A long, flat structuring element keeps only horizontal strokes ...
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (int(image.shape[1]/15), 1))
    horizontal_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    # ... and a tall, thin one keeps only vertical strokes.
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, int(image.shape[0]/15)))
    vertical_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    lines = cv2.bitwise_or(horizontal_lines, vertical_lines)
    # Dilate the detected lines so slightly thicker strokes are fully removed.
    # (The original computed an intermediate, un-dilated result here that was
    # immediately overwritten — that dead code has been removed.)
    kernel = np.ones((5, 5), 'uint8')
    dilate_img = cv2.dilate(lines, kernel, iterations=1)
    result = cv2.bitwise_or(image, dilate_img)
    if (show):
        cv2_imshow(dilate_img)
        cv2_imshow(result)
    return result
# + id="EdIZUBanGQcs"
def reorient(image):
    """Return (text, image) for whichever of 0 / 180 degree rotation OCRs better.

    Both orientations are run through Tesseract and scored with get_score;
    a tie favours the 180-degree rotation, matching the original behaviour.
    """
    derecha = image.copy()
    texto_derecha = pytesseract.image_to_string(derecha, lang='spa', config=config_tesseract)
    invertida = cv2.rotate(image.copy(), rotateCode=cv2.ROTATE_180)
    texto_invertida = pytesseract.image_to_string(invertida, lang='spa', config=config_tesseract)
    if get_score(texto_derecha) > get_score(texto_invertida):
        return texto_derecha, derecha
    return texto_invertida, invertida
# + id="B1gd_W29e1_p"
def rotateImage(cvImage, angle: float):
    """Rotate an image about its centre by `angle` degrees.

    Cubic interpolation preserves quality; replicating border pixels avoids
    black wedges at the corners.  The input image is not modified
    (cv2.warpAffine allocates a fresh output array).
    """
    alto, ancho = cvImage.shape[:2]
    centro = (ancho // 2, alto // 2)
    matriz = cv2.getRotationMatrix2D(centro, angle, 1.0)
    return cv2.warpAffine(cvImage, matriz, (ancho, alto),
                          flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
# + id="wQ-8qThXVMYY"
def deskew(texto1, image):
    """Estimate page skew from the bottom text line and rotate to correct it.

    The angle is taken from the baseline through the first and last words of
    the lowest line reported by Tesseract.  The deskewed image is kept only
    if it OCRs at least as well as the input (scored via get_score).

    Returns (texto, image): the better-scoring text and its image.
    """
    datos = pytesseract.image_to_data(image, lang='spa', config=config_tesseract, output_type=Output.DATAFRAME)
    # First word of the lowest line, then the last word on that same line.
    primer_palabra_ultimo_renglon = datos[datos['word_num']==1]['top'].idxmax()
    ultima_palabra_ultimo_renglon = datos.iloc[primer_palabra_ultimo_renglon:,:]['word_num'].idxmax()
    x1 = datos.iloc[primer_palabra_ultimo_renglon]['left']
    x2 = datos.iloc[ultima_palabra_ultimo_renglon]['left']
    y1 = datos.iloc[primer_palabra_ultimo_renglon]['top'] + datos.iloc[primer_palabra_ultimo_renglon]['height']
    y2 = datos.iloc[ultima_palabra_ultimo_renglon]['top'] + datos.iloc[ultima_palabra_ultimo_renglon]['height']
    # Guard: a single-word last line gives x1 == x2 and would divide by zero.
    if x2 == x1:
        return texto1, image
    angle = math.degrees(math.atan((y2-y1)/(x2-x1)))
    deskewed = rotateImage(image, angle)
    texto2 = pytesseract.image_to_string(deskewed, lang='spa', config=config_tesseract)
    # Keep the rotation only when it does not hurt the OCR score.
    if (get_score(texto1) > get_score(texto2)):
        return texto1, image
    return texto2, deskewed
# + id="rbUDtBAsGkOL"
def get_score(texto):
    """Score OCR output quality as (alphabetic chars) / (strange chars).

    More letters and fewer OCR-artifact symbols mean a higher score.  A
    count of zero strange characters is treated as one so the ratio is
    always defined (and an empty string scores 0.0).

    Bug fix: the original list contained the two-character string
    backslash + '“', which can never equal a single character; the
    intended entry is just '“'.
    """
    strange_characters = {'»', '*', '«', '£', '$', '€', 'º', '%', '“', '>', '<', '[', ']'}
    letters = sum(c.isalpha() for c in texto)
    strange = sum(c in strange_characters for c in texto)
    return letters / max(strange, 1)
# + id="je3rOUrCNbj4"
def cleanText_recursion(texto):
    """Repeatedly strip dash/hyphen artifacts before newlines until stable.

    OCR line-wrapping leaves '—', '-' or '-.' glued to the end of lines;
    iterate the replacements to a fixed point so newly-exposed artifacts
    are also removed.
    """
    while True:
        limpio = texto.replace('—\n', '\n').replace('-\n', '\n').replace('-.\n', '\n')
        if limpio == texto:
            return limpio
        texto = limpio
# + id="54o1-LejI9qU"
def cleanText (texto):
    """Remove OCR artifact symbols and normalize dashes, line breaks, spacing."""
    # Characters that are almost always OCR noise in these documents.
    strange_characters=['»','*','«','£','$',"€",'º',"\"",'“']
    clean = texto
    for character in strange_characters:
        clean = clean.replace(character,'')
    clean = clean.replace('=','-')  # '=' is usually a misread dash
    clean = cleanText_recursion(clean)
    # NOTE(review): both replace() arguments render as a single space —
    # possibly a non-breaking space being normalized to a plain space;
    # verify the original bytes before changing this line.
    clean = clean.replace(' ',' ')
    return clean
# + id="9mxVeLC3wo6x"
def acomodarRenglones(texto):
    """Re-flow the text so that only sentence-ending '.\\n' starts a new line.

    Line breaks after a period are protected with a sentinel, all remaining
    newlines are flattened to spaces, and the sentinel is turned back into a
    newline (note: the protected period itself is dropped, as in the
    original implementation).
    """
    centinela = '*****'
    protegido = texto.replace('.\n', centinela)
    aplanado = protegido.replace('\n', ' ')
    return aplanado.replace(centinela, '\n')
# + id="6Or4olODYYat"
def ocr(image):
    """Run the full OCR preprocessing pipeline on an image, return (text, image).

    Stages: orientation -> text-box crop -> grayscale -> per-region Otsu
    binarization -> denoise -> white margins -> ruling-line removal ->
    180-degree reorientation check -> deskew.  Each intermediate image is
    written to the working directory as a debugging aid.

    NOTE(review): appends the recognized text to "<nombre_del_archivo>.txt",
    where `nombre_del_archivo` is a module-level global set by the calling
    loop — confirm it is defined before calling this function.
    """
    oriented = orient(image) # rotate to landscape orientation
    cv2.imwrite('oriented.jpg',oriented)
    min_x,min_y,max_x,max_y = find_textbox_coords(oriented) # find text-box corners
    cropped = crop(oriented,min_x,min_y,max_x,max_y)
    cv2.imwrite('cropped.jpg',cropped)
    grayed = gray(cropped)
    cv2.imwrite('grayed.jpg',grayed)
    binarized = otsu_plus(grayed) # segmented (per-region) binarization
    cv2.imwrite('binarized.jpg',binarized)
    denoised = denoise(binarized)
    cv2.imwrite('denoised.jpg',denoised)
    #thicked = thick_font(denoised, iterations = 2)
    #cv2.imwrite('thicked.jpg',thicked)
    #thinned = thin_font(thicked, iterations = 2)
    #cv2.imwrite('thinned.jpg',thinned)
    margined = put_margins(denoised)
    cv2.imwrite('margined.jpg',margined)
    delined = remove_lines(margined)
    cv2.imwrite('delined.jpg',delined)
    text, reoriented = reorient(delined) # check if the 180-rotated version reads better
    cv2.imwrite('reoriented.jpg',reoriented)
    texto, deskewed = deskew(text,reoriented)
    cv2.imwrite('deskewed.jpg',deskewed)
    f = open(nombre_del_archivo + ".txt", "a")
    f.write(texto)
    f.close()
    return texto, deskewed
# + id="TG7np_n94ARR"
def get_expedientes(texto):
    """Replace every case-file ("expediente") number in `texto` with a placeholder.

    Each match is swapped for "E@<n> " and collected in a list, so the NER
    model sees a clean token and the real value can be restored afterwards.

    Returns (texto_ciclado, expedientes): the rewritten text and the list of
    extracted expediente strings, in order of appearance.

    NOTE: relies on the module-level regex fragment `variaciones_exp`.
    """
    texto_ciclado = texto
    expedientes = []
    last_index = 0
    indice = 0
    while True:
        # Raw string avoids invalid-escape warnings on modern Python.
        x = re.search(variaciones_exp + r"[0-9\-\s\.,]*[0-9]+[0-9\-\s\.,]*", texto_ciclado[last_index:])
        # Explicit end-of-matches check; the original bare `except` also hid
        # unrelated errors such as an undefined `variaciones_exp`.
        if x is None:
            break
        expedientes.append(x.group().split('\n')[0])
        texto_ciclado = (texto_ciclado[: last_index + x.span()[0]] + "E@" + str(indice) + " "
                         + texto_ciclado[last_index + x.span()[1]:])
        # Resume the search just past the inserted "E@<n> " placeholder.
        last_index = last_index + x.span()[0] + 3 + len(str(indice))
        indice = indice + 1
    return texto_ciclado, expedientes
# + id="DHuCJkj4siC4"
def get_fechas(texto):
    """Replace every date expression in `texto` with an "F@<n> " placeholder.

    Dates are matched case-insensitively around the month names in the
    module-level `meses` fragment; matched strings are collected so they can
    be restored after NER.

    Returns (texto_ciclado, fechas): the rewritten text and the list of
    extracted date strings, in order of appearance.

    Bug fix: the original pattern embedded `(?i)` mid-expression, which
    Python 3.11+ rejects (global inline flags must appear at the start of
    the pattern) — and the resulting re.error was silently swallowed by a
    bare `except`, so no dates were ever found.  The flag is now passed
    explicitly and end-of-matches is detected with an explicit None check.
    """
    texto_ciclado = texto
    fechas = []
    last_index = 0
    indice = 0
    condicion = (r'\d{1,2}[a-z\s\-\.,]{0,10}?' + meses + r".{0,18}?[5-8][0-9]|"
                 + meses + r"[a-z\s\-\.,]{0,2}[0-3]*[0-9]+[a-z\s\-\.,]{0,2}[5-8][0-9]")
    patron = re.compile(condicion, re.IGNORECASE)  # compiled once, outside the loop
    while True:
        x = patron.search(texto_ciclado[last_index:])
        if x is None:
            break
        fechas.append(x.group())
        texto_ciclado = (texto_ciclado[: last_index + x.span()[0]] + "F@" + str(indice) + " "
                         + texto_ciclado[last_index + x.span()[1]:])
        # Resume the search just past the inserted "F@<n> " placeholder.
        last_index = last_index + x.span()[0] + 3 + len(str(indice))
        indice = indice + 1
    return texto_ciclado, fechas
# + id="TUXMdaeXcsJ0"
def procesamiento_de_texto(nombre_del_archivo,texto):
    """Clean OCR text, mask expedientes/dates, run NER, and tabulate entities.

    Returns a DataFrame with columns (filename, label, class) — one row per
    recognized entity — sorted by filename/class/label.

    NOTE(review): relies on the module-level spaCy pipeline `nlp`, which is
    expected to emit custom EXPEDIENTE and FECHA labels for the E@n / F@n
    placeholders inserted below — confirm against the model configuration.
    """
    texto_ciclado = cleanText(texto)
    # Replace case-file numbers and dates with E@n / F@n placeholders so the
    # NER model sees clean tokens; originals are kept for later restoration.
    texto_ciclado, expedientes = get_expedientes(texto_ciclado)
    texto_ciclado = acomodarRenglones(texto_ciclado)
    texto_ciclado, fechas = get_fechas(texto_ciclado)
    doc = nlp(texto_ciclado)
    #html = displacy.render(doc, style="ent")
    #display(HTML(html))
    d = {'filename': [], 'label': [],'class':[]}
    df = pd.DataFrame(data=d)
    for ent in doc.ents:
        entidad = ent.text
        clase = ent.label_
        if (clase == 'MISC'):
            continue
        # Placeholder entities are mapped back to the stored original strings
        # using the index that follows the '@'.
        if (clase == 'EXPEDIENTE'):
            x = re.search('@',entidad)
            entidad = expedientes[int(entidad[x.span()[1]:])]
            clase = "Expediente"
        elif (clase == 'FECHA'):
            x = re.search('@',entidad)
            entidad = fechas[int(entidad[x.span()[1]:])]
            clase = "Fecha"
        elif (clase == 'LOC'):
            clase = "Lugar"
        elif (clase == 'PER'):
            clase = "Persona"
        elif (clase == 'ORG'):
            clase = "Organización"
        df.loc[len(df)] = [nombre_del_archivo,entidad,clase]
    df.sort_values(by=['filename','class','label'],inplace=True)
    df.reset_index(drop=True,inplace=True)
    return df
# + [markdown] id="goNVi7D4n0ok"
# ##Procesamiento
# + colab={"base_uri": "https://localhost:8080/"} id="iskjGLKugnAi" executionInfo={"status": "ok", "timestamp": 1629712698252, "user_tz": 420, "elapsed": 1138018, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="f94b5c67-7ad3-4068-9055-460f9249bdf1"
in_dir = '/content/drive/MyDrive/Datos - Hackathon JusticIA/Evaluacion/Reto2/*'
# Accumulators: one row per extracted entity, and one row of full OCR text per file.
d = {'filename': [], 'label': [],'class':[]}
resultados_entidades = pd.DataFrame(data=d)
d2 = {'filename': [], 'text': []}
resultados_textos = pd.DataFrame(data=d2)
for file in glob.glob(in_dir):
    # Last path component without its 3-character extension.
    nombre_del_archivo = re.search("/(?:.(?!/))+$",file).group()[1:-4]
    print(nombre_del_archivo)
    img = cv2.imread(file)
    texto, final_image = ocr(img)
    resultados_textos.loc[len(resultados_textos)] = [nombre_del_archivo,texto]
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported way.
    resultados_entidades = pd.concat(
        [resultados_entidades, procesamiento_de_texto(nombre_del_archivo, texto)])
resultados_entidades.sort_values(by=['filename','class','label'],inplace=True)
resultados_entidades.reset_index(drop=True,inplace=True)
# + id="EeQdudQqsPRE"
# Persist the raw OCR texts and the extracted entities for submission.
resultados_textos.to_csv('Reto2A.csv')
resultados_entidades.to_csv('Reto2B.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="RoGAEJZB6Cs5" executionInfo={"status": "ok", "timestamp": 1629712698253, "user_tz": 420, "elapsed": 13, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="608cc95c-deac-4203-c5e2-e71cab3e6f4e"
resultados_textos  # display the collected texts (notebook cell output)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="LSbuidFK5IIq" executionInfo={"status": "ok", "timestamp": 1629712698255, "user_tz": 420, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12205748060130771295"}} outputId="42fc438b-f194-4912-ef75-510a34dd3e2a"
resultados_entidades  # display the extracted entities (notebook cell output)
# + id="DMOMEP43Td7p"
| Reto_2_UTTeam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Procesar el txt con spacy-stanza y guardar en un fichero binario: A_Procesar_Guardar.ipynb
# Este es el primer *notebook* para procesar el censo de linajes asturianos de <NAME>. Si dispone del censo en txt, escriba la dirección al mismo en la variable `direcciontxt`. De lo contrario, continúe usando el pequeño extracto del censo guardado en la dirección `./Documentos/PadronOCRPrueba.txt`.
#
# En este *notebook* de *Python* vamos a realizar las siguientes tareas:
# - Cargamos un documento txt. El `txt` se encuentra en la dirección `direcciontxt`.
# - Cargamos los módulos `spacy` y `spacy-stanza`. El primero nos ofrece toda la funcionalidad y facilidad de tratamiento de Spacy. El segundo brinda toda la exactitud en los resultados del procesamiento con Stanza. Recuerde que debe tener instalado las librerías de Stanza, desarrollado por la Universidad de Stanford.
# - Guardamos el documento procesado como un fichero de datos (documento binario). El binario lo guardaremos en la dirección `direccionbin`. De esta forma no tenemos que procesar el documento cada vez que abrimos *Jupyter Lab*.
# ## Importamos las librerías a usar
# Lo haremos siempre al comienzo de cada *notebook*.
# +
import spacy
import spacy_stanza
from spacy.tokens import Doc
from spacy.tokens import DocBin
# Ejecutamos nuestras funciones creadas para la ocasión. Están en el notebook Z_Funciones.ipynb.
# %run './Z_Funciones.ipynb'
# -
# ## Especificamos las direcciones y configuración del pipeline
# Esta sección es la que podemos personalizar o modificar a conveniencia según el archivo que queramos procesar, dónde lo queremos guardar, etc. Contiene los siguientes datos:
# - `direcciontxt`: es la dirección del archivo de txt que vamos a procesar con Spacy-Stanza. Nota: cada línea del txt debe ser una frase completa limitada por un punto.
# - `direcciontxtprueba`: es la dirección de una pequeña parte archivo de txt que vamos a procesar con Spacy-Stanza. Es un archivo más pequeño que podemos usar para pruebas. Nota: cada línea del txt debe ser una frase completa limitada por un punto.
# - `direccionbin`: es la dirección donde vamos a guardar el archivo procesado por Spacy-Stanza. Es un archivo binario.
# - `nlpconfig`: es la variable que determina las características del pipeline de procesamiento de spacy-stanza. Por ejemplo, contiene el idioma, cuántas frases procesamos de una sola vez, etc.
# Paths: the small sample extract shipped with the repo.  Swap in the
# commented lines below to process the full (private) census instead.
direcciontxt = './Documentos/PadronOCRPrueba.txt'
direccionbin = './Documentos/Padronbin'
# direcciontxt = './Privado/PadronOCR.txt'
# direccionbin = './Privado/PadronbinTOTAL'
nlpconfig = {
    'name': 'es', # Language code for the language to build the Pipeline in
    'tokenize_batch_size': 32, # Placeholder: replaced below by the longest sentence length in the document.
    'ner_batch_size': 32 # Placeholder: replaced below by the longest sentence length in the document.
}
# ## Cargamos el archivo de texto
# Vamos a convertir el archivo de texto a una variable de Python que Spacy-Stanza pueda procesar. La variable se llamará `lineas` porque es una lista que contiene cada una de las lineas del txt.
# +
# Load the text file: one list entry per line, surrounding whitespace stripped.
with open(direcciontxt, encoding='utf-8') as f:
    lineas = [linea.strip() for linea in f]
# Show the first 15 lines as a sanity check.
print(lineas[0:15])
# -
# Vamos a corregir el número máximo de palabras que usa el pipeline para resolver el NER. Vamos a hacer que sea igual al número máximo de palabras que tienen las frases de nuestro documento, pero aumentado un 25% para tener en cuenta los tokens de puntuación. Usaremos la función `cuenta_palabras_max`.
# +
# Count the maximum sentence length and adjust the nlpconfig batch sizes.
numero1 = cuenta_palabras_max(direcciontxt)
# Add 25% headroom to account for punctuation tokens.
numero2 = int(numero1 * 1.25)
nlpconfig['ner_batch_size'] = numero2
nlpconfig['tokenize_batch_size'] = numero2
print("El número de palabras máximas es", str(numero1) + ".", "\nAñadiendo un 25%, tomaremos el número", str(numero2) + ".")
# -
# ## Cargamos el pipeline de procesamiento de spacy-stanza
#
# Creamos el pipeline (la función que procesa nuestro archivo de análisis).
# Load the spacy-stanza pipeline (Spanish) used to solve the NER problem.
nlp = spacy_stanza.load_pipeline(**nlpconfig)
# ## Aplicamos el pipeline de spacy-stanza
# Aplicamos el pipeline a nuestro modelo:
# - En primer lugar, `nlp.pipe` crea un iterable de frases a ser procesadas una a una.
# - Al ejecutar la orden `list`, no solo estamos convirtiendo el iterable en una lista, sino que también se aplica, frase a frase, la función `nlp`.
# - Finalmente, unimos cada frase en un solo documento de SpaCy llamado `doc`.
# Process every line with the pipeline, then merge all results into one spaCy Doc.
doc = Doc.from_docs(list(nlp.pipe(lineas)))
# # Serializamos
# La palabra serializar se refiere a guardar nuestro documento procesado `doc` a un archivo binario que se pueda alojar en un disco duro. Así no tenemos que procesar el documento de texto cada vez que abrimos *Jupyter Lab*. Guardamos el binario en la dirección `direccionbin`. Para ello usaremos la función `serializacion` definida en el archivo *funciones_Spacy.ipynb*.
# Serialize: write the processed Doc to a binary file (helper from Z_Funciones.ipynb).
serializacion(doc, direccionbin)
| A_Procesar_Guardar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# # pyflightdata examples
#
# This document lists a few examples to show the basic usage of pyflightdata. This does not show all the potential uses of the data we get from the API.
#
# Please note that this is not an official API and we do not endorse or recommend any commercial usage of this API to make large scale mass requests.
#
# Also the API may break from time to time as the pages and their structure change at the underlying websites. For now this is only flightradar24.com but we might add more sites soon.
from pyflightdata import FlightData
api=FlightData()
# Reference lists: first five countries/airlines, a slice of Indian airports.
api.get_countries()[:5]
api.get_airlines()[:5]
api.get_airports('India')[10:15]
#pass the airline-code from get_airlines
api.get_fleet('emirates-ek-uae')
#pass airline-code from get_airlines to see all current live flights
api.get_flights('AI1')[:10]
# Flight history by flight number / aircraft registration (last five records).
api.get_history_by_flight_number('AI101')[-5:]
api.get_history_by_tail_number('9V-SMA')[-5:]
api.get_info_by_tail_number('9V-SMA')
# Airport arrival/departure boards and details ('sin' = Singapore Changi).
api.get_airport_arrivals('sin')
api.get_airport_departures('sin')
api.get_airport_details('sin')
| pyflightdata examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BreadFeet/DS_Machine_Learning/blob/master/Day32_linux_command.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Mzax3vZ8gTgQ"
# # Linux Command
# Google colab의 OS는 linux
# + id="2BqEwZGVhF4e" colab={"base_uri": "https://localhost:8080/"} outputId="bac8a4ce-bc97-4b14-ad4e-8457446ff5cf"
# 현재 디렉토리 보기(Print Working Directory)
# !pwd
# + colab={"base_uri": "https://localhost:8080/"} id="5xozOBk8e7-9" outputId="fbb3998c-5dde-409c-b457-b414a99458af"
# pwd로 찾은 디렉토리에 있는 파일 확인(list 명령어)
# Colab > folder에 있는 내용을 보여줌
# Jupyter에서 !dir과 같은 기능
# !ls
# + id="iySbGVsuhFxT" colab={"base_uri": "https://localhost:8080/"} outputId="e6eff918-986d-46fe-eda8-0e0b30c23879"
# 디렉토리 자세히 보기
# !ls -l
# + colab={"base_uri": "https://localhost:8080/"} id="ZkzKKSUsihXi" outputId="0dbba181-1bbb-485a-9f16-2e64c54590cf"
# !ls -l ./ # 위와 동일
# + [markdown] id="d8jFdDU0jt16"
# Run 출력 결과에서 **d: directory(폴더)**, **-: 파일**을 의미한다
# + id="_lkTrLmThGBj" colab={"base_uri": "https://localhost:8080/"} outputId="b2e1b8ea-cc39-468b-c5f5-6a1880af117e"
# 하위 경로에 있는 파일 확인
# !ls -l ./sample_data
# + [markdown] id="qgK6BQY5jMYH"
# # Wholesale_customers_data 분석
# + id="ldfg8bmUjLeU"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="Wk4HQWkqjgw4" outputId="c1e34a46-45d3-41a2-a21d-05591c78322a"
# Load the wholesale customers dataset from the current working directory.
ws = pd.read_csv('./Wholesale_customers_data.csv')
ws.head()  # preview the first five rows
| Day32_linux_command.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
x = [2, 4, 6]
y = [3,5,6]
# Element-wise sum of two plain Python lists via zip.
for xi, yi in zip(x,y):
    print(xi+yi)
z = []
for xi, yi in zip(x,y):
    z.append (xi + yi)
z
[ xi+yi for xi,yi in zip(x,y)] # list comprehension: same element-wise sum in one expression
[xi**2 for xi in x]
import numpy as np
# +
# NumPy arrays add element-wise, unlike Python lists.
x = np.array([2, 1, 3])
y = np.array([4, 6, 7])
print (x+y)
print(type(x))
# -
x * 4000
# Scalar * list repeats the list; scalar * ndarray multiplies element-wise.
lista = [2,3,4,5,6]
print(lista*5)
print(x*5)
x = np.array([2, 1, 3]) # dot product: several equivalent spellings (x*y alone is element-wise)
y = np.array([4, 6, 7])
np.sum(x*y), x.dot(y), y.dot(x), x@y, x*y
# +
# Matrix addition and multiplication: @ is matrix product, + is element-wise.
A = np.array([[1,3,4],[1,0,0],[1,2,2]])
B = np.array([[1,0,5],
[7,5,0],
[2,1,1]])
print(A)
print(B)
# -
A + B
B@A
A@B
A.size
np.arange(1,10)
np.arange(1,10).reshape(3,3)
I = np.eye(2)
I
# +
# np.eye?
# -
# Solve the linear system A X = B via the matrix inverse.
B = np.array([[1],
[11]])
A = np.array([[1,-2],
[3,2]])
A_inv = np.linalg.inv(A)
X = A_inv@B
X
# NOTE(review): this 3x3 matrix is dead code — A is immediately reassigned below.
A = np.array([[1,2,3],
[2,5,2],
[6, -3, 1]])
A = np.array([[3],[2],[4]])
B = np.array([[5],[3],[2]])
C = np.array([[6],[6],[6]])
| Vectores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a>
# # Quantifying river channel evolution with Landlab
# These exercises are based on a project originally designed by <NAME> at Arizona State University. This notebook was created by <NAME> at Tulane University.
# <hr>
# <small>For tutorials on learning Landlab, click here: <a href="https://github.com/landlab/landlab/wiki/Tutorials">https://github.com/landlab/landlab/wiki/Tutorials</a></small>
# <hr>
# **What is this notebook?**
#
# This notebook illustrates the evolution of detachment-limited channels in an actively uplifting landscape. The landscape evolves according to the equation:
#
# \begin{equation}
# \frac{d z}{d t} = -K_\text{sp} A^{m_{sp}} S^{n_{sp}} + U
# \end{equation}
# Here, $K_{sp}$ is the erodibility coefficient on fluvial incision, which is thought to be positively correlated with climate wetness, or storminess (this is hard to quantify) and to be negatively correlated with rock strength (again, rock strength is hard to quantify). $m_{sp}$ and $n_{sp}$ are positive exponents, usually thought to have a ratio, $m_{sp}/n_{sp} \approx 0.5$. $A$ is drainage area and $S$ is the slope of steepest descent ($-\frac{dz}{dx}$) where $x$ is horizontal distance (positive in the downslope direction) and $z$ is elevation. (If slope is negative there is no fluvial erosion.) $U$ is an externally-applied rock uplift field.
#
# The fluvial erosion term is also known as the stream power equation. Before using this notebook you should be familiar with this equation from class lectures and reading.
#
# For a great overview of the stream power equation, see:
#
# - Whipple and Tucker, 1999, Dynamics of the stream-power river incision model: Implications for height limits of mountain ranges, landscape response timescales, and research needs, Journal of Geophysical Research.
#
# For some great illustrations of modeling with the stream power equation, see:
#
# - Tucker and Whipple, 2002, Topographic outcomes predicted by stream erosion models: Sensitivity analysis and intermodel comparison, Journal of Geophysical Research.
#
# Helpful background on landscape sensitivity to rock uplift rates and patterns can be found here:
#
# - <NAME> Whipple, 2012, Expression of active tectonics in erosional landscapes, Journal of Structural Geology.
#
# **What will you do?**
#
# In this exercise you will modify the code to get a better understanding of how rock uplift rates and patterns and the erodibility coefficient control fluvial channel form.
#
# Start at the top by reading each block of text and sequentially running each code block (shift - enter OR got to the _Cell_ pulldown menu at the top and choose _Run Cells_).
#
# If you just change one code block and rerun only that code block, only the parts of the code in that code block will be updated. (E.g. if you change parameters but don't reset the code blocks that initialize run time or topography, then these values will not be reset.)
#
# **STUDENTS: Questions to answer before starting this assignment.**
#
# Answer these questions before running the notebook.
#
# 1. What do you think will happen to total relief (defined as the maximum minus the minimum elevation, here area is fixed) and channel slope at steady state if $K_{sp}$ is uniformly increased?
# 2. What do you think will happen to total relief and channel slope at steady state if $U$ is uniformly increased?
# 3. How do you think a steady-state landscape with a uniform low rock uplift rate will respond if rock uplift is uniformly increased (relative to a steady base level)? How will channel slopes change through time?
# **Now on to the code...**
#
# First we have to import the parts of Python and Landlab that are needed to run this code. You should not have to change this first code block.
# +
# Code block 1
import copy
import numpy as np
from matplotlib import pyplot as plt
from landlab import RasterModelGrid, imshow_grid
from landlab.components import (
ChannelProfiler,
ChiFinder,
FlowAccumulator,
SteepnessFinder,
StreamPowerEroder,
)
from landlab.io import write_esri_ascii
# -
# Make a grid and set boundary conditions.
# +
# Code Block 2
# Grid geometry: 50 x 100 raster cells of 200 m -> a 10 km x 20 km domain.
number_of_rows = 50 # number of raster cells in vertical direction (y)
number_of_columns = 100 # number of raster cells in horizontal direction (x)
dxy = 200 # side length of a raster model cell, or resolution [m]
# Below is a raster (square cells) grid, with equal width and height
mg1 = RasterModelGrid((number_of_rows, number_of_columns), dxy)
# Set boundary conditions - only the south side of the grid is open.
# Boolean parameters are sent to function in order of
# east, north, west, south.
mg1.set_closed_boundaries_at_grid_edges(True, True, True, False)
# -
# Here we make the initial grid of elevation of zeros with a very small amount of noise to make a more pleasing network.
# +
# Code Block 3
np.random.seed(35) # seed set so our figures are reproducible
mg1_noise = (np.random.rand(mg1.number_of_nodes) / 1000.0
) # initial noise on the elevation grid (< 1 mm)
# set up the elevation on the grid
z1 = mg1.add_zeros("topographic__elevation", at="node")
z1 += mg1_noise
# -
# Set parameters related to time.
# +
# Code Block 4
tmax = 5e5 # time for the model to run [yr] (Original value was 5E5 yr)
dt = 1000 # time step [yr] (Original value was 100 yr)
total_time = 0 # amount of time the landscape has evolved [yr]
# total_time will increase as you keep running the code.
t = np.arange(0, tmax, dt) # each of the time steps that the code will run
# -
# Set parameters for incision and initializing all of the process components that do the work. We also initialize tools for quantifying the landscape.
# +
# Code Block 5
# Original K_sp value is 1e-5
K_sp = 1.0e-5 # units vary depending on m_sp and n_sp
m_sp = 0.5 # exponent on drainage area in stream power equation
n_sp = 1.0 # exponent on slope in stream power equation
frr = FlowAccumulator(mg1, flow_director='FlowDirectorD8') # initializing flow routing
spr = StreamPowerEroder(mg1, K_sp=K_sp, m_sp=m_sp, n_sp=n_sp,
                        threshold_sp=0.0) # initializing stream power incision
theta = m_sp / n_sp
# initialize the component that will calculate channel steepness
sf = SteepnessFinder(mg1, reference_concavity=theta, min_drainage_area=1000.0)
# initialize the component that will calculate the chi index
cf = ChiFinder(mg1,
               min_drainage_area=1000.0,
               reference_concavity=theta,
               use_true_dx=True)
# -
# Initialize rock uplift rate. This will need to be changed later.
# +
# Code Block 6
# uplift_rate [m/yr] (Original value is 0.0001 m/yr)
# Uniform uplift everywhere; only core nodes receive it in Code Block 7.
uplift_rate = np.ones(mg1.number_of_nodes) * 0.0001
# -
# Now for the code loop.
#
# Note that you can rerun Code Block 7 many times, and as long as you don't reset the elevation field (Code Block 3), it will take the already evolved landscape and evolve it even more. If you want to change parameters in other code blocks (e.g. Code Block 5 or 6), you can do that too, and as long as you don't reset the elevation field (Code Block 3) the new parameters will apply on the already evolved topography.
# +
# Code Block 7
# Main time loop: uplift the core nodes, route flow, and erode — once per dt.
for ti in t:
    z1[mg1.core_nodes] += uplift_rate[mg1.core_nodes] * dt  # uplift the landscape
    frr.run_one_step()  # route flow across the landscape
    spr.run_one_step(dt)  # fluvial incision via stream power
    total_time += dt  # update time keeper
print(total_time)
# -
# Plot the topography.
# +
# Code Block 8
# Map-view plot of the current topography.
imshow_grid(mg1,
            "topographic__elevation",
            grid_units=("m", "m"),
            var_name="Elevation (m)")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
max_elev = np.max(z1)
print("Maximum elevation is ", np.max(z1))
# -
# Plot the slope and area data at each point on the landscape (in log-log space). We will only plot the core nodes because the boundary nodes have slopes that are influenced by the boundary conditions.
# +
# Code Block 9
# Log-log slope-area plot for all core nodes (boundary nodes are excluded
# because their slopes reflect the boundary conditions, not the dynamics).
plt.loglog(
    mg1.at_node["drainage_area"][mg1.core_nodes],
    mg1.at_node["topographic__steepest_slope"][mg1.core_nodes],
    "b.",
)
plt.ylabel("Topographic slope")
plt.xlabel("Drainage area (m^2)")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
# -
# It is slightly easier to interpret slope-area data when we look at a single channel, rather than the entire landscape. Below we plot the profile and slope-area data for the three largest channels on the landscape.
# +
# Code Block 10
# profile the largest channels, set initially to find the mainstem channel in the three biggest watersheds
# you can change the number of watersheds, or choose to plot all the channel segments in the watershed that
# have drainage area below the threshold (here we have set the threshold to the area of a grid cell).
prf = ChannelProfiler(mg1,
                      number_of_watersheds=3,
                      main_channel_only=True,
                      minimum_channel_threshold=dxy**2)
prf.run_one_step()
# plot the elevation as a function of distance upstream
plt.figure(1)
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
prf.plot_profiles(xlabel='distance upstream (m)',
                  ylabel='elevation (m)',
                  title=title_text)
# plot the location of the channels in map view
plt.figure(2)
prf.plot_profiles_in_map_view()
# slope-area data in just the profiled channels
plt.figure(3)
for i, outlet_id in enumerate(prf.data_structure):
    for j, segment_id in enumerate(prf.data_structure[outlet_id]):
        # Label only the first segment of each channel so the legend
        # gets exactly one entry per channel.
        if j == 0:
            label = "channel {i}".format(i=i + 1)
        else:
            label = '_nolegend_'
        segment = prf.data_structure[outlet_id][segment_id]
        profile_ids = segment["ids"]
        color = segment["color"]
        plt.loglog(
            mg1.at_node["drainage_area"][profile_ids],
            mg1.at_node["topographic__steepest_slope"][profile_ids],
            '.',
            color=color,
            label=label,
        )
plt.legend(loc="lower left")
plt.xlabel("drainage area (m^2)")
plt.ylabel("channel slope [m/m]")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m"
plt.title(title_text)
# -
# The chi index is a useful way to quantitatively interpret fluvial channels. Below we plot the chi index in the three largest channels and also a chi map across the entire landscape.
# +
# Code Block 11
# calculate the chi index
cf.calculate_chi()
# chi-elevation plots in the profiled channels
plt.figure(4)
for i, outlet_id in enumerate(prf.data_structure):
    for j, segment_id in enumerate(prf.data_structure[outlet_id]):
        # Label only the first segment of each channel so the legend
        # gets exactly one entry per channel.
        if j == 0:
            label = "channel {i}".format(i=i + 1)
        else:
            label = '_nolegend_'
        segment = prf.data_structure[outlet_id][segment_id]
        profile_ids = segment["ids"]
        color = segment["color"]
        plt.plot(
            mg1.at_node["channel__chi_index"][profile_ids],
            mg1.at_node["topographic__elevation"][profile_ids],
            color=color,
            label=label,
        )
plt.xlabel("chi index (m)")
plt.ylabel("elevation (m)")
plt.legend(loc="lower right")
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
plt.title(title_text)
# chi map across the whole grid
plt.figure(5)
imshow_grid(
    mg1,
    "channel__chi_index",
    grid_units=("m", "m"),
    var_name="Chi index (m)",
    cmap="jet",
)
title_text = f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
plt.title(title_text)
# -
# The channel steepness index is another useful index to quantify fluvial channels. Below we plot the steepness index in the same three largest channels, and also plot steepness index across the grid.
# +
# Code Block 12
# calculate channel steepness
sf.calculate_steepnesses()
# plots of steepness vs. distance upstream in the profiled channels
plt.figure(6)
for i, outlet_id in enumerate(prf.data_structure):
    for j, segment_id in enumerate(prf.data_structure[outlet_id]):
        # Label only the first segment of each channel so the legend
        # gets exactly one entry per channel.
        if j == 0:
            label = "channel {i}".format(i=i + 1)
        else:
            label = '_nolegend_'
        segment = prf.data_structure[outlet_id][segment_id]
        profile_ids = segment["ids"]
        distance_upstream = segment["distances"]
        color = segment["color"]
        plt.plot(
            distance_upstream,
            mg1.at_node["channel__steepness_index"][profile_ids],
            'x',
            color=color,
            label=label,
        )
plt.xlabel("distance upstream (m)")
plt.ylabel("steepness index")
plt.legend(loc="upper left")
plt.title(
    f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
)
# channel steepness map across the whole grid
plt.figure(7)
imshow_grid(
    mg1,
    "channel__steepness_index",
    grid_units=("m", "m"),
    var_name="Steepness index ",
    cmap="jet",
)
# (A leftover string-concatenation title_text assignment was removed here —
# it was dead code since the f-string below is passed to plt.title directly.)
plt.title(
    f"$K_{{sp}}$={K_sp}; $time$={total_time} yr; $dx$={dxy} m; concavity={theta}"
)
# -
# If you have a grid that you want to export, uncomment and edit the appropriate lines below and run the code block.
# +
# Code Block 13
## Below has the name of the file that data will be written to.
## You need to change the name of the file every time that you want
## to write data, otherwise you will get an error.
## This will write to the directory that you are running the code in.
# write_file_name = 'data_file.txt'
## Below is writing elevation data in the ESRI ascii format so that it can
## easily be read into Arc GIS or back into Landlab.
# write_esri_ascii(write_file_name, mg1, 'topographic__elevation')
# -
# After running every code block once, has the landscape reached steady state? Answer: NO! How do you know? After you think about this, you are ready to complete this project.
#
# Answer the following questions using the code above and below. All answers should be typed, and supporting figures (produced using the code) should be embedded in one document that you hand in. Code Blocks 8-12 and 18-21 produce different figures that you may find useful. You can use any or all of these different figures to help you with the questions below. (Download or screenshot the figures.)
#
# Anything with a question mark should be answered in the document that you hand in. Make sure you write in full sentences and proofread the document that you hand in.
#
# 1. **Steady state with low uplift rate. ** Using the parameters provided in the initial notebook, run the landscape to steady state. (Note that you can keep running the main evolution loop - Code Block 7 - and the different plotting blocks without running the code blocks above them. You may also want to change $tmax$ in Code Block 4.) How did you know that the landscape reached steady state? Note the approximate time that it took to reach steady state for your own reference. (This will be useful for later questions.) Include appropriate plots. (If you want to analyze these landscapes outside of Landlab or save for later, make sure you save the elevation data to a text file (Code Block 13).)
#
# ** NOTE, For the rest of the questions you should use Code Blocks 14 - 21. These will allow you to use the steady-state landscape created for question 1 - referred to here as the 'base landscape' - as the initial condition. Start by editing what you need to in Code Blocks 14 - 16. Run these each once, sequentially. You can run Code Block 17, the time loop, as many times as you need to, along with Code Blocks 18-21, which produce plots.**
#
# 2. **Transient landscape responding to an increase in rock uplift. ** Use the base landscape and increase rock uplift uniformly by a factor of 4 to 0.0004 m/yr. Make sure you update the rock uplift rate (Code Block 16) and ensure that $tmax$ is 1e5 yrs and $dt$ is 500 yrs (Code Block 15). Run this until the maximum elevation in the grid is ~ 170 m and observe how the landscape gets to this elevation, i.e. plot intermediate steps. What patterns do you see in the supporting plots that illustrate this type of transient? Which patterns, if any, are diagnostic of a landscape response to uniform increase in rock uplift rate? (You may need to answer this after completing all of the questions.)
#
# 3. ** Steady-state landscape with increased rock uplift. ** Now run the landscape from question 2 until it reaches steady state. (I.e. run the time loop, Code Block 17, a bunch of times. You can increase $tmax$ and $dt$ to make this run faster.) Provide a plot that illustrates that the landscape is in steady state. What aspects of the landscape have changed in comparison with the base landscape from question 1?
#
# 4. ** Increase erodibility. ** Start again from the base landscape, but this time increase $K_{sp}$ to 2E-5 (Code Block 14). Make sure rock uplift rate is set to the original value of 0.0001 m/yr (Code Block 16). Set $tmax$ to 1e5 yrs (Code Block 15). Run for 1e5 yrs and save the plots that you think are diagnostic. Run for another 1e5 yrs and save plots again. Now run for 5e5 yrs and save plots again. Quantitatively describe how the landscape evolves in response to the increase in erodibility and provide supporting plots. What could cause a uniform increase in erodibility?
#
# 5. ** Spatially variable uplift - discrete, massive earthquake. ** Start again from the base landscape, and make sure that $K_{sp}$ = 1E-5 (Code Block 14). Now add a seismic event to this steady state landscape - a fault that runs horizontally across the landscape at y = 4000 m, and instantaneously uplifts half the landscape by 10 meters (Code Block 16). In this case, we will keep background uplift uniform at 0.0001 m/yr. Set $tmax$ to 1e5 yrs and $dt$ to 500 yrs (Code Block 15) before evolving the landscape after the fault. Now run the time loop four times and look at the different plots after each loop. How does the landscape respond to this fault? What patterns do you see in the supporting plots that illustrate this type of transient? Which patterns, if any, are diagnostic of a channel response to an earthquake? (You may need to answer this after completing all of the questions.)
#
# 6. ** Spatially Variable Rock Uplift - discrete fault with two different uplift rates. ** Start again from the base landscape, and make sure that $K_{sp}$ = 1E-5 (Code Block 14). Now we will add a fault (at y = 4000 m) to this landscape. In this case the uplift rate on the footwall is higher (0.0004 m/yr) than on the hanging wall (uplift rate = 0.0001 m/yr). (Edit Code Block 16.) Set $tmax$ to 1e5 yrs and $dt$ to 500 yrs (Code Block 15). Now run the time loop four separate times and look at the different plots after each loop. How does the landscape respond to this fault? What patterns do you see in the supporting plots that illustrate this type of transient? Which patterns, if any, are diagnostic of a channel response to this type of gradient in rock uplift rates? (You may need to answer this after completing all of the questions.)
#
# 7. ** Spatially Variable Rock Uplift - gradient in uplift across the range. ** Start again from the base landscape, and make sure that $K_{sp}$ = 1E-5 (Code Block 14). Now we will add a linear gradient in uplift rate across the entire range (edit Code Block 16). The maximum uplift rate will be 0.0004 m/yr at the core of the range, and 0.0001 m/yr at the front of the range. Set $tmax$ to 1e5 yrs and $dt$ to 500 yrs (Code Block 15) before you start running the time loop with the rock uplift gradient. Now run the time loop four separate times and look at the different plots after each loop. How does the landscape respond to this gradient in uplift rate? What patterns do you see in the supporting plots that illustrate this type of transient? Which patterns, if any, are diagnostic of a channel response to this type of gradient in rock uplift rates? (You may need to answer this after completing all of the questions.)
#
# 8. ** Final Reflection. ** Was your initial insight into how parameters would affect the landscape correct? Discuss in 6 sentences or less.
# +
# Code Block 14
# Build a second grid and component set (the *2 names) seeded with the
# steady-state topography z1 from question 1, so the transient experiments
# in questions 2-7 all start from the same "base landscape".
number_of_rows = 50 # number of raster cells in vertical direction (y)
number_of_columns = 100 # number of raster cells in horizontal direction (x)
dxy2 = 200 # side length of a raster model cell, or resolution [m]
# Below is a raster (square cells) grid, with equal width and height
mg2 = RasterModelGrid((number_of_rows, number_of_columns), dxy2)
# Set boundary conditions - only the south side of the grid is open.
# Boolean parameters are sent to function in order of
# east, north, west, south.
mg2.set_closed_boundaries_at_grid_edges(True, True, True, False)
z2 = copy.copy(z1) # initialize the elevations with the steady state
# topography produced for question 1 (copy so edits to z2 leave z1 intact)
z2 = mg2.add_field("topographic__elevation", z2, at="node")
# K_sp value for base landscape is 1e-5; raise it for question 4
K_sp2 = 1e-5 # units vary depending on m_sp and n_sp
m_sp2 = 0.5 # exponent on drainage area in stream power equation
n_sp2 = 1.0 # exponent on slope in stream power equation
frr2 = FlowAccumulator(mg2, flow_director='FlowDirectorD8') # initializing flow routing
spr2 = StreamPowerEroder(
    mg2, K_sp=K_sp2, m_sp=m_sp2, n_sp=n_sp2,
    threshold_sp=0.0) # initializing stream power incision
theta2 = m_sp2 / n_sp2  # reference concavity used by the chi/steepness tools
# initialize the component that will calculate channel steepness
sf2 = SteepnessFinder(mg2,
                      reference_concavity=theta2,
                      min_drainage_area=1000.0)
# initialize the component that will calculate the chi index
cf2 = ChiFinder(mg2,
                min_drainage_area=1000.0,
                reference_concavity=theta2,
                use_true_dx=True)
# +
# Code Block 15
# Clock settings for the transient runs. Re-running Code Block 17 keeps
# evolving the landscape; total_time accumulates across those reruns.
tmax = 1e5 # time for the model to run [yr] (Original value was 5E5 yr)
dt = 500 # time step [yr] (Original value was 500 yr)
total_time = 0 # amount of time the landscape has evolved [yr]
# total_time will increase as you keep running the code.
t = np.arange(0, tmax, dt) # each of the time steps that the code will run
# +
# Code Block 16
# Choose ONE uplift scenario: uniform (default, active below), a one-time
# fault offset, a two-rate fault, or a linear uplift gradient. Only one of
# the commented alternatives should be uncommented at a time.
# uplift_rate [m/yr] (value was 0.0001 m/yr for base landscape)
uplift_rate = np.ones(mg2.number_of_nodes) * 0.0001
## If you want to add a one-time event that uplifts only part of the
## landscape, uncomment the 3 lines below
# fault_location = 4000 # [m]
# uplift_amount = 10 # [m]
# z2[np.nonzero(mg2.node_y>fault_location)] += uplift_amount
## IMPORTANT! To use the below fault generator, comment the one-time
## uplift event above if it isn't already commented out.
## Code below creates a fault horizontally across the grid.
## Uplift rates are greater where y values > fault location.
## To use, uncomment the 5 code lines below and edit to your values
# fault_location = 4000 # [m]
# low_uplift_rate = 0.0001 # [m/yr]
# high_uplift_rate = 0.0004 # [m/yr]
# uplift_rate[np.nonzero(mg2.node_y<fault_location)] = low_uplift_rate
# uplift_rate[np.nonzero(mg2.node_y>fault_location)] = high_uplift_rate
## IMPORTANT! To use below rock uplift gradient, comment the two
## uplift options above if they aren't already commented out.
## If you want a linear gradient in uplift rate
## (increasing uplift into the range),
## uncomment the 4 code lines below and edit to your values.
# low_uplift_rate = 0.0001 # [m/yr]
# high_uplift_rate = 0.0004 # [m/yr]
## below is uplift gradient per node row index, NOT row value in meters
# uplift_rate_gradient = (high_uplift_rate - low_uplift_rate)/(number_of_rows-3)
# uplift_rate = low_uplift_rate + ((mg2.node_y / dxy)-1) * uplift_rate_gradient
## NOTE(review): the line above divides by dxy (the part-1 cell size), not
## dxy2. Both are 200 m in this notebook, but confirm/update if dxy2 changes.
# +
# Code Block 17
# Main evolution loop: each pass uplifts the interior nodes, routes flow,
# applies fluvial incision, and advances the cumulative model clock.
for _step in t:
    # uplift the landscape (boundary nodes stay fixed)
    z2[mg2.core_nodes] += uplift_rate[mg2.core_nodes] * dt
    frr2.run_one_step()    # route flow
    spr2.run_one_step(dt)  # fluvial incision
    total_time += dt       # update time keeper
    print(total_time)
# +
# Code Block 18
# Plot the current topography of the transient grid (mg2) in map view and
# report the maximum elevation (useful for judging steady state; question 2
# asks you to run until this reaches ~170 m).
plt.figure(8)
imshow_grid(mg2,
            "topographic__elevation",
            grid_units=("m", "m"),
            var_name="Elevation (m)")
plt.title(f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m")
max_elev = np.max(z2)
# reuse max_elev instead of recomputing np.max(z2) a second time
print("Maximum elevation is ", max_elev)
# +
# Code Block 19
# Plot Channel Profiles and slope-area data along the channels of the
# transient grid (mg2).
# Consistency fix: this cell operates on mg2, so it should use mg2's cell
# size dxy2 (not the part-1 dxy) for the channel threshold and the title.
# dxy and dxy2 are both 200 m here, so results are unchanged.
prf2 = ChannelProfiler(mg2,
                       number_of_watersheds=3,
                       main_channel_only=True,
                       minimum_channel_threshold=dxy2**2)
prf2.run_one_step()
# plot the elevation as a function of distance upstream
plt.figure(9)
title_text = f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m"
prf2.plot_profiles(xlabel='distance upstream (m)',
                   ylabel='elevation (m)',
                   title=title_text)
# plot the location of the channels in map view
plt.figure(10)
prf2.plot_profiles_in_map_view()
# slope-area data in just the profiled channels
plt.figure(11)
for i, outlet_id in enumerate(prf2.data_structure):
    for j, segment_id in enumerate(prf2.data_structure[outlet_id]):
        # label only the first segment of each channel so the legend has
        # exactly one entry per channel
        if j == 0:
            label = "channel {i}".format(i=i + 1)
        else:
            label = '_nolegend_'
        segment = prf2.data_structure[outlet_id][segment_id]
        profile_ids = segment["ids"]
        color = segment["color"]
        plt.loglog(
            mg2.at_node["drainage_area"][profile_ids],
            mg2.at_node["topographic__steepest_slope"][profile_ids],
            '.',
            color=color,
            label=label,
        )
plt.legend(loc="lower left")
plt.xlabel("drainage area (m^2)")
plt.ylabel("channel slope [m/m]")
title_text = f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m"
plt.title(title_text)
# +
# Code Block 20
# Chi Plots
# Compute the chi index on the transient grid (mg2), then plot chi-elevation
# curves for the profiled channels and a chi map across the landscape.
cf2.calculate_chi()
# chi-elevation plots in the profiled channels
plt.figure(12)
for channel_idx, outlet_id in enumerate(prf2.data_structure):
    segments = prf2.data_structure[outlet_id]
    for seg_idx, segment_id in enumerate(segments):
        segment = segments[segment_id]
        # label only each channel's first segment: one legend entry apiece
        plt.plot(
            mg2.at_node["channel__chi_index"][segment["ids"]],
            mg2.at_node["topographic__elevation"][segment["ids"]],
            color=segment["color"],
            label=f"channel {channel_idx + 1}" if seg_idx == 0 else '_nolegend_',
        )
plt.xlabel("chi index (m)")
plt.ylabel("elevation (m)")
plt.legend(loc="lower right")
title_text = f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
plt.title(title_text)
# chi map
plt.figure(13)
imshow_grid(
    mg2,
    "channel__chi_index",
    grid_units=("m", "m"),
    var_name="Chi index (m)",
    cmap="jet",
)
plt.title(
    f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
# +
# Code Block 21
# Plot channel steepness along profiles and across the landscape
# Compute the steepness index on the transient grid (mg2), then plot
# steepness vs. distance upstream in the profiled channels and a map.
sf2.calculate_steepnesses()
# plots of steepness vs. distance upstream in the profiled channels
plt.figure(14)
for channel_idx, outlet_id in enumerate(prf2.data_structure):
    for seg_idx, segment_id in enumerate(prf2.data_structure[outlet_id]):
        segment = prf2.data_structure[outlet_id][segment_id]
        # label only each channel's first segment: one legend entry apiece
        plt.plot(
            segment["distances"],
            mg2.at_node["channel__steepness_index"][segment["ids"]],
            'x',
            color=segment["color"],
            label=f"channel {channel_idx + 1}" if seg_idx == 0 else '_nolegend_',
        )
plt.xlabel("distance upstream (m)")
plt.ylabel("steepness index")
plt.legend(loc="upper left")
plt.title(
    f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
# channel steepness map
plt.figure(15)
imshow_grid(
    mg2,
    "channel__steepness_index",
    grid_units=("m", "m"),
    var_name="Steepness index ",
    cmap="jet",
)
plt.title(
    f"$K_{{sp}}$={K_sp2}; $time$={total_time} yr; $dx$={dxy2} m; concavity={theta2}"
)
| notebooks/teaching/geomorphology_exercises/channels_streampower_notebooks/stream_power_channels_class_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Chapter 2 Recursive state estimation
#
# ## Ex2.1
# Discrete Bayes filter: start from the prior belief over two states and
# fold in the same measurement ten times (likelihood 1 for state 0, 1/3 for
# state 1), renormalizing after each update.
p = [0.01, 0.99]  # prior belief over the two states
eta = 1  # normalization constant (1 / sum of unnormalized belief)
for i in range(1, 11):
    unnormalized = [p[0] * 1, p[1] * 1 / 3]
    eta = 1 / (unnormalized[0] + unnormalized[1])
    p = [belief * eta for belief in unnormalized]
    print(i, p[0], p[1], eta)
| ch02_recursive_state_estimation/ch02_recursive_state_estimation.ipynb |