code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''venv'': venv)'
# name: python3
# ---
# # Check Axel's calculations.
# # Purpose
# * Axel has made a small script to compare AIS and GPS data.
# * This script will be examined and understood in this notebook.
# ## Results
# * the total sailed distance differs about 6% between SSPA AIS and GPS
# * This is however disregarding missing data and that the SSPA AIS has some data reduction.
# * If the 0-1 kts speeds are disregarded in the comparison, the difference is 2%.
# # Setup
# +
# # %load imports.py
from typing import no_type_check
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pyaisdb.database import DB
# -
# Open a connection to the AIS/GPS PostGIS database via the pyaisdb wrapper.
db = DB()
# Accumulators: total sailed distance per 1-knot speed bin, one series per source.
df_speed_distances = pd.DataFrame(dtype='float64')
s_gps = pd.Series(dtype='float64')
s_ais = pd.Series(dtype='float64')
# ## GPS data
# For each 1-knot speed-over-ground bin [i, i+1), sum the geodesic distance
# between consecutive GPS fixes; /1852 converts metres to nautical miles.
for i in range(30):
    sql = f"""with blue_data as (SELECT time_info, lag(time_info, 1) OVER (ORDER by time_info ASC) as next_time, sog,
ST_Distance(pos::geography, lag(pos::geography, 1) OVER (ORDER by time_info ASC)) as dist
FROM projects._49145341_d2e2f_blue_data_varmdo
where time_info < '2020-07-19 23:59:59+02'
and time_info > '2020-07-10 00:00:00+02'
)
select sum(dist)/1852
from blue_data
where sog >= {i} and sog < {i + 1}"""
    # print(sql)
    # Single scalar: nautical miles sailed in this speed bin (None if no rows).
    distance = db.execute_and_return(sql)[0][0]
    if distance:
        #print(f'{i + 0.5} {round(result, 2)}')
        speed = i + 0.5  # index the bin by its midpoint speed
        s_gps[speed] = distance
# ## AIS data
# Same per-speed-bin distance sum, but from the SSPA AIS segment table for
# the same vessel (MMSI 265520390) and the same ten-day window.
for i in range(30):
    sql = f"""select sum(st_length(segment::geography)) / 1852
from segments_sjfv_2020
where sog>={i} and sog < {i+1}
and mmsi=265520390
and date2 < '2020-07-19 23:59:59+02'
and date1 > '2020-07-10 00:00:00+02' """
    distance = db.execute_and_return(sql)[0][0]
    if distance:
        #print(f'{i + 0.5} {round(result, 2)}')
        speed = i + 0.5  # bin midpoint, matching the GPS series index
        s_ais[speed] = distance
# Assemble the per-speed-bin distance table: one column per source, indexed
# by the bin-midpoint speed [kts].
df_speed_distances['GPS'] = s_gps
df_speed_distances['AIS'] = s_ais
df_speed_distances.index.name='speed'
df_speed_distances.head()
df_speed_distances.describe()
# Totals per source (NM); pct_change of the two totals gives the relative
# GPS-vs-AIS difference (~6%) quoted in the notebook header.
df_speed_distances.sum()
df_speed_distances.sum().pct_change()
fig,ax=plt.subplots()
fig.set_size_inches(17,7)
df_speed_distances.plot(style='.-',ax=ax, kind='bar');
ax.grid(True)
ax.set_ylabel('Distance [NM]')
ax.set_xlabel('Ship speed [kts]')
# Approximate time spent in each speed bin as distance / speed.
# NOTE(review): NM divided by knots gives hours, but the y-label below says
# seconds — confirm the intended unit.
df_speed_time = df_speed_distances.div(df_speed_distances.index, axis=0)
fig,ax=plt.subplots()
fig.set_size_inches(17,7)
df_speed_time.plot(style='.-',ax=ax, kind='bar');
ax.grid(True)
ax.set_ylabel('Time [s]')
ax.set_xlabel('Ship speed [kts]')
# Repeat the totals comparison excluding the 0-1 kts bin (~2% difference).
df_speed_distances.iloc[1:].sum().pct_change()
| notebooks/01.01_check_axels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Run to Failure degradation simulation of NASA Turbo Jet Engine Fans
# # <a id='index'>Index</a>
# + <a href='#data_analysis'>1. Data Analysis</a>
# + <a href='#info_about_data'>1.1 Info about data:</a>
# + <a href='#noise_removal'>2. Noise removal and Normalization</a>
# + <a href='#training_and_validation'>3. Training LSTM Model to predict RUL</a>
# + <a href='#testing_var'>4. Testing VAR</a>
# + <a href='#health_score'>5 Health Score Assignment</a>
# + <a href='#pred_analysis'>6. Analysing Prediction</a>
# +
import numpy as np
import pandas as pd
import seaborn as sns
import math
import matplotlib.pyplot as plt
from matplotlib import cm
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.vector_ar.var_model import VAR
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import QuantileTransformer , PowerTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from keras import optimizers
from keras.models import Sequential
from keras.layers import TimeDistributed, Flatten
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from sklearn.metrics import mean_squared_error
import keras
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
cmap = cm.get_cmap('Spectral') # Colour map (there are many others)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from keras.models import load_model
# -
# # <a id='data_analysis'>1. Data analysis</a>
# <a href='#index'>Go back to Index</a>
# +
# NASA C-MAPSS FD001 files: space-separated, no header row.
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
df = pd.read_csv(train_file,sep=" ",header=None)
df.head()
# -
#columns = ['unit_number','time_in_cycles','setting_1','setting_2','TRA','T2','T24','T30','T50','P2','P15','P30','Nf',
#           'Nc','epr','Ps3 0','phi','NRf','NRc','BPR','farB','htBleed','Nf_dmd','PCNfR_dmd','W31','W32' ]
#delete NaN values
# The trailing space separator yields two all-NaN columns (26, 27); drop them.
df.drop(columns=[26,27],inplace=True)
# Generic names Section-0 .. Section-25 for the remaining 26 columns.
columns = ["Section-{}".format(i) for i in range(26)]
df.columns = columns
df.head()
# #### Dataset statistics for each parameter
df.describe()
# ## <a id='info_about_data'>1.1 Info about data:</a>
# - Section-0 is MachineID
# - Section-1 is time in, Cycles
# - Section-2...4 is Operational Settings
# - Section-5...25 is sensor's data
#
#
# - Data Set: FD001
# - Train trajectories: 100
# - Test trajectories: 100
# - Conditions: ONE (Sea Level)
# - Fault Modes: ONE (HPC Degradation)
# +
# Names
# Names
# Column groupings: machine id, cycle counter, operational settings, sensors.
MachineID_name = ["Section-0"]
RUL_name = ["Section-1"]  # time-in-cycles column; the RUL target is derived from it later
OS_name = ["Section-{}".format(i) for i in range(2,5)]
Sensor_name = ["Section-{}".format(i) for i in range(5,26)]
# Data in pandas DataFrame
MachineID_data = df[MachineID_name]
RUL_data = df[RUL_name]
OS_data = df[OS_name]
Sensor_data = df[Sensor_name]
# Data in pandas Series
MachineID_series = df["Section-0"]
RUL_series = df["Section-1"]
# -
# Per-engine grouping of the cycle column; max cycle = total run length.
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
print("Max Life >> ",max(max_cycles))
print("Mean Life >> ",np.mean(max_cycles))
print("Min Life >> ",min(max_cycles))
# Dump every column for visual inspection of its variance.
for i in range(26):
    print(str(i))
    print(df['Section-'+str(i)])
# From the above visualization it is clear that
# - Section-4 (Oprational Setting-3)
# - Section-5 (Sensor-1)
# - Section-9 (Sensor-5)
# - Section-10 (Sensor-6)
# - Section-14 (Sensor-10)
# - Section-20 (Sensor-16)
# - Section-22 (Sensor-18)
# - Section-23 (Sensor-19)
#
# do not play a vital role in the variation of the data, and their std is almost 0, so these sensor columns are useless for us; hence we can drop them.
#delete columns with constant values that do not carry information about the state of the unit
#data = pd.concat([RUL_data,OS_data,Sensor_data], axis=1)
# Drop the machine id plus the near-constant setting/sensor columns
# identified above — they carry no information about degradation.
df.drop(columns=["Section-0",
                 "Section-4", # Operational Setting
                 "Section-5", # Sensor data
                 "Section-9", # Sensor data
                 "Section-10", # Sensor data
                 "Section-14",# Sensor data
                 "Section-20",# Sensor data
                 "Section-22",# Sensor data
                 "Section-23"] , inplace=True)
df.head()
# # <a id='noise_removal'>2. Noise removal and Normalization</a>
# <a href='#index'>Go back to Index</a>
print(type(df))
# Scale every remaining column to [0, 1], then Gaussianise with a power
# transform. NOTE(review): these transforms are later re-fit on the test set
# instead of being reused — that is train/test inconsistency; confirm intent.
gen = MinMaxScaler(feature_range=(0, 1))
df = gen.fit_transform(df)
df = pd.DataFrame(df)
#df = df.rolling(20).mean()
pt = PowerTransformer()
df = pt.fit_transform(df)  # returns a NumPy array, not a DataFrame
# +
# grouping w.r.t MID (Machine ID)
# col_names = df.columns
# def grouping(datafile, mid_series):
# data = [x for x in datafile.groupby(mid_series)]
# return data
# -
df
# # <a id='training_and_validation'>3. Training LSTM Model to predict RUL</a>
# <a href='#index'>Go back to Index</a>
# +
def RUL_df():
    """Build the remaining-useful-life target column.

    For each engine the cycle column is taken in reverse row order, so the
    values count down toward the final recorded cycle; this descending cycle
    count is used as the RUL proxy. Relies on the module-level ``grp``
    groupby and ``MachineID_series``. Returns a one-column DataFrame "rul".
    """
    rul_values = []
    for machine_id in MachineID_series.unique():
        reversed_cycles = np.array(grp.get_group(machine_id)[::-1]["Section-1"])
        rul_values.extend(reversed_cycles)
    return pd.DataFrame({"rul": rul_values})
RUL_df().head()
# +
# +
# Feature matrix (already scaled + power-transformed) and RUL target vector.
X_train = np.array(df)
y_train = np.array(RUL_df()).reshape(-1,1)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
print(X_train.shape,y_train.shape)
# -
print(max_cycles)
print(sum(max_cycles))
# Force the target to 0 on the first row of each engine's run.
# NOTE(review): with the reversed target, row 0 of each engine holds its
# *largest* RUL value, so zeroing it looks suspicious — confirm intent
# (the rerun section below zeroes 6 rows instead of 1).
count = 0
for cycle_len in max_cycles:
    for i in range(1):
        y_train[count+i] = 0
    count = count + cycle_len
print(count)
print(y_train)
# +
def create_dataset(X, look_back=1):
    """Slice a 2-D feature array into overlapping look-back windows.

    Window ``k`` contains rows ``X[k : k + look_back]``. The result has
    shape ``(len(X) - look_back - 1, look_back, n_features)``; note the
    extra ``-1`` drops one more trailing window than strictly necessary.
    """
    n_windows = len(X) - look_back - 1
    return np.array([X[start:start + look_back] for start in range(n_windows)])
# Apply the look-back windowing; with look_back=1 it drops the last two rows,
# so the target is trimmed by two to match lengths.
# NOTE(review): windows start at row 0 but the target is trimmed from the
# *front* ([2:]) — verify the feature/target alignment is intended.
X_train = create_dataset(X_train)
y_train = y_train[2:]
print(X_train.shape,y_train.shape)
print(y_train)
# +
# +
def build_model(layers):
    """Build the RUL regressor: two stacked LSTMs + two dense layers.

    layers: [n_features, n_timesteps] — e.g. [17, 1] for 17 features and a
    look-back window of 1. Compiled with MSE loss and the Adam optimizer.
    """
    #d = 0.2
    model = Sequential()
    model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
    #model.add(Dropout(d))
    model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
    #model.add(Dropout(d))
    model.add(Dense(16,kernel_initializer='uniform',activation='relu'))
    # ReLU on the single output keeps predictions non-negative (RUL >= 0).
    model.add(Dense(1,kernel_initializer='uniform',activation='relu'))
    model.compile(loss='mean_squared_error',optimizer='adam')
    return model
model = build_model([17,1])
print(model.summary())
# +
# Train for 75 epochs; the last 15% of the (ordered) samples form the
# validation split.
history = model.fit(
    X_train,
    y_train,
    batch_size=512,
    epochs=75,
    validation_split=0.15,
    verbose=1)
print(history.history.keys())
# +
# model.save('LSTM_with_lookback_1.h5')
# -
# Loss Graph: training vs validation loss per epoch.
plt.plot(history.epoch, history.history['loss'] , label = "loss")
plt.plot(history.epoch, history.history['val_loss'] , label = "val_loss")
plt.legend()
plt.show()
# Training-set error metrics (MSE, RMSE, MAE) for the fitted model.
y_train_pred = model.predict(X_train)
print("mean_squared_error >> ", mean_squared_error(y_train,y_train_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_train,y_train_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_train,y_train_pred))
# # LSTM (Lookback = 1) without VAR
# +
import numpy as np
import pandas as pd
import seaborn as sns
import math
import matplotlib.pyplot as plt
from matplotlib import cm
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.vector_ar.var_model import VAR
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import QuantileTransformer , PowerTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from keras import optimizers
from keras.models import Sequential
from keras.layers import TimeDistributed, Flatten
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
cmap = cm.get_cmap('Spectral') # Colour map (there are many others)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from keras.models import load_model
import pickle
# Reload the model trained (and saved) in the previous section.
model = load_model('LSTM_with_lookback_1.h5')
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
columns = ["Section-{}".format(i) for i in range(26)]
df_test = pd.read_csv(test_file, sep=" ",header=None)
df_test.drop(columns=[26,27],inplace=True)  # trailing-separator NaN columns
df_test.columns = columns
df_test.head()
# Ground-truth RUL at each test engine's last recorded cycle.
df_rul = pd.read_csv(RUL_file, names=['rul'])
df_rul.head()
RUL_name = ["Section-1"]
RUL_data = df_test[RUL_name]
MachineID_series = df_test["Section-0"]
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
# Compensate for the two rows dropped by create_dataset's windowing.
# NOTE(review): only engine 0 is adjusted — confirm this is the intent.
max_cycles[0] = max_cycles[0] - 2
# Drop the same constant columns that were removed from the training set.
df_test.drop(df_test[["Section-0",
                      "Section-4", # Operational Setting
                      "Section-5", # Sensor data
                      "Section-9", # Sensor data
                      "Section-10", # Sensor data
                      "Section-14",# Sensor data
                      "Section-20",# Sensor data
                      "Section-22",# Sensor data
                      "Section-23"]], axis=1 , inplace=True)
#df_test = df_test.groupby(["Section-0"])
#print(df_test)
# NOTE(review): the scaler and power transformer are re-fit on the test set
# rather than reusing the training-set fits — this is data leakage /
# train-test inconsistency; confirm intent.
gen = MinMaxScaler(feature_range=(0, 1))
df_test = gen.fit_transform(df_test)
df_test = pd.DataFrame(df_test)
#df_test = df_test.rolling(20).mean()
pt = PowerTransformer()
df_test = pt.fit_transform(df_test)
df_test=np.nan_to_num(df_test)  # replace any NaNs from the transform with 0
X_test = np.array(df_test)
y_test = np.array(df_rul)
def create_dataset(X, look_back=1):
    """Re-definition of the windowing helper for the test pipeline.

    Produces overlapping windows ``X[k : k + look_back]`` stacked into an
    array of shape ``(len(X) - look_back - 1, look_back, n_features)``.
    """
    window_count = len(X) - look_back - 1
    windows = [X[idx:idx + look_back] for idx in range(window_count)]
    return np.array(windows)
# Window the test features and predict a RUL value for every timestep.
X_test = create_dataset(X_test)
#y_test = y_test[6:]
print(X_test.shape,y_test.shape)
pred = model.predict(X_test)  # one prediction per windowed row
pred.shape
def scoring_function(actual, predicted):
    """NASA PHM08 asymmetric prognostics score.

    For each engine, d = predicted - actual. Late predictions (d >= 0) are
    penalised exp(d/10) - 1; early predictions (d < 0) more gently with
    exp(-d/13) - 1. Returns the sum of penalties (lower is better).
    """
    deltas = [predicted[idx] - actual[idx] for idx in range(len(predicted))]
    penalties = []
    for delta in deltas:
        if delta >= 0:
            penalties.append(math.exp(delta / 10) - 1)
        else:
            penalties.append(math.exp((-1 * delta) / 13) - 1)
    return sum(penalties)
# For each of the 100 test engines, take the prediction at its last
# available windowed cycle — that is the engine's RUL estimate.
final_pred = []
count = 0
for i in range(100):
    j = max_cycles[i]
    temp = pred[count+j-1]
    count=count+j
    final_pred.append(int(temp))
print(final_pred)
# Predicted vs ground-truth RUL per engine.
fig = plt.figure(figsize=(18,10))
plt.plot(final_pred,color='red', label='prediction')
plt.plot(y_test,color='blue', label='y_test')
fig.suptitle('RUL Prediction using LSTM with lookack=1', fontsize=35)
plt.xlabel("Engine Number", fontsize=35)
plt.ylabel("Remaining Useful Life", fontsize=35)
plt.legend(loc='upper left')
plt.grid()
plt.show()
# Test-set metrics plus the PHM08 score.
print("mean_squared_error >> ", mean_squared_error(y_test,final_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_test,final_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_test,final_pred))
print("scoring function >>",scoring_function(y_test,final_pred))
# +
# +
# Tidy frame (Actual vs Predicted per engine) for a seaborn line plot.
df=pd.DataFrame(np.arange(1,101))
df['Actual']=y_test
df['Predicted']=final_pred
df=df.drop([0],axis=1)  # drop the helper engine-number column (label 0)
sns.set_theme(style="whitegrid")
a4_dims = (18,10)
fig, ax = plt.subplots(figsize=a4_dims)
sns.lineplot(data = df,markers=True, dashes=False)
fig.suptitle('RUL Prediction using LSTM with lookack=1', fontsize=35)
plt.xlabel("Engine Number", fontsize=35)
plt.ylabel("Remaining Useful Life", fontsize=35)
# -
# +
# For training results
# Re-run the full training-data pipeline (load, drop constant columns,
# scale, power-transform) and evaluate the saved model on the training set.
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
df = pd.read_csv(train_file,sep=" ",header=None)
#columns = ['unit_number','time_in_cycles','setting_1','setting_2','TRA','T2','T24','T30','T50','P2','P15','P30','Nf',
#           'Nc','epr','Ps3 0','phi','NRf','NRc','BPR','farB','htBleed','Nf_dmd','PCNfR_dmd','W31','W32' ]
#delete NaN values
df.drop(columns=[26,27],inplace=True)
columns = ["Section-{}".format(i) for i in range(26)]
df.columns = columns
# Names
MachineID_name = ["Section-0"]
RUL_name = ["Section-1"]
OS_name = ["Section-{}".format(i) for i in range(2,5)]
Sensor_name = ["Section-{}".format(i) for i in range(5,26)]
# Data in pandas DataFrame
MachineID_data = df[MachineID_name]
RUL_data = df[RUL_name]
OS_data = df[OS_name]
Sensor_data = df[Sensor_name]
# Data in pandas Series
MachineID_series = df["Section-0"]
RUL_series = df["Section-1"]
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
print("Max Life >> ",max(max_cycles))
print("Mean Life >> ",np.mean(max_cycles))
print("Min Life >> ",min(max_cycles))
#delete columns with constant values that do not carry information about the state of the unit
#data = pd.concat([RUL_data,OS_data,Sensor_data], axis=1)
df.drop(columns=["Section-0",
                 "Section-4", # Operational Setting
                 "Section-5", # Sensor data
                 "Section-9", # Sensor data
                 "Section-10", # Sensor data
                 "Section-14",# Sensor data
                 "Section-20",# Sensor data
                 "Section-22",# Sensor data
                 "Section-23"] , inplace=True)
gen = MinMaxScaler(feature_range=(0, 1))
df = gen.fit_transform(df)
df = pd.DataFrame(df)
#df = df.rolling(20).mean()
pt = PowerTransformer()
df = pt.fit_transform(df)
df=np.nan_to_num(df)
def RUL_df():
    # Reverse each engine's cycle column so it counts down to the final cycle.
    rul_lst = [j for i in MachineID_series.unique() for j in np.array(grp.get_group(i)[::-1]["Section-1"])]
    rul_col = pd.DataFrame({"rul":rul_lst})
    return rul_col
RUL_df().head()
X_train = np.array(df)
y_train = np.array(RUL_df()).reshape(-1,1)
count = 0
# NOTE(review): zeroes the first 6 targets per engine here, but the training
# section above used range(1) — confirm which is intended.
for cycle_len in max_cycles:
    for i in range(6):
        y_train[count+i] = 0
    count = count + cycle_len
X_train = create_dataset(X_train)
y_train = y_train[2:]  # trim to match the two rows dropped by windowing
y_train_pred = model.predict(X_train)
print("mean_squared_error >> ", mean_squared_error(y_train,y_train_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_train,y_train_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_train,y_train_pred))
# -
| RUL Prediction using LSTM Lookback=1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ART
# ### Version : 5.4
# Import everything from the `art` ASCII-art package (FONT_COUNTER, font_list, ...).
from art import *
# ## Font Counter
FONT_COUNTER  # total number of fonts bundled with this release of `art`
# ⚠️ Some fonts don't support all characters
# ⚠️ From `Version 3.3` Non-ASCII fonts added (These fonts are not compatible with some environments)
# ## Font List (ASCII)
# Render the sample text "TEST123" in every ASCII font.
font_list("TEST123","ascii")
# ## Font List (Non-ASCII)
# Render the same sample in every non-ASCII font.
font_list("TEST123","non-ascii")
| FontList.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Image
from IPython.core.display import HTML
# SymPy symbols used throughout the homework problem.
from sympy import *; x,h,y,t = symbols("x h y t")
Image(url= "https://i.imgur.com/O5TZ9In.png")
# source https://nathancarter.github.io/how2data/site/how-to-find-the-critical-numbers-of-a-function-in-python-using-sympy/
# Critical numbers of f: solve f'(x) = 0.
f = 2*x**3 - 30*x**2 + 96*x -11
d = diff(f)
solve(Eq(d,0))
Image(url= "https://i.imgur.com/qO3a74O.png")
| Calculus_Homework/WWB13.6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 0.163013, "end_time": "2021-01-18T10:42:10.750553", "exception": false, "start_time": "2021-01-18T10:42:10.587540", "status": "completed"} tags=[]
from IPython.display import YouTubeVideo
YouTubeVideo("yCYWCRbbtaM")
# + papermill={"duration": 0.020152, "end_time": "2021-01-18T10:42:10.784373", "exception": false, "start_time": "2021-01-18T10:42:10.764221", "status": "completed"} tags=[]
import numpy as np
import matplotlib.pyplot as plt
# + papermill={"duration": 0.021101, "end_time": "2021-01-18T10:42:10.818079", "exception": false, "start_time": "2021-01-18T10:42:10.796978", "status": "completed"} tags=[]
plt.rcParams.update({'font.size':14})
# + papermill={"duration": 0.024019, "end_time": "2021-01-18T10:42:10.855415", "exception": false, "start_time": "2021-01-18T10:42:10.831396", "status": "completed"} tags=[]
# Shared x values: 1, 1.5, ..., 19.5 (38 points).
x = np.arange(1,20,0.5)
x
# + papermill={"duration": 0.026567, "end_time": "2021-01-18T10:42:10.895531", "exception": false, "start_time": "2021-01-18T10:42:10.868964", "status": "completed"} tags=[]
# y1: sorted uniform noise in [-5, 5].
y1 = np.random.uniform(-5,5,len(x))
y1 = np.sort(y1)
y1
# + papermill={"duration": 0.025406, "end_time": "2021-01-18T10:42:10.934739", "exception": false, "start_time": "2021-01-18T10:42:10.909333", "status": "completed"} tags=[]
# y2: sine of x.
y2 = np.sin(x)
y2
# + papermill={"duration": 0.023665, "end_time": "2021-01-18T10:42:10.974595", "exception": false, "start_time": "2021-01-18T10:42:10.950930", "status": "completed"} tags=[]
# y3: exponential of x.
y3 = np.exp(x)
y3
# + papermill={"duration": 0.025372, "end_time": "2021-01-18T10:42:11.014847", "exception": false, "start_time": "2021-01-18T10:42:10.989475", "status": "completed"} tags=[]
# y4: square of the noisy series y1.
y4 = y1**2
y4
# + papermill={"duration": 0.917202, "end_time": "2021-01-18T10:42:11.947944", "exception": false, "start_time": "2021-01-18T10:42:11.030742", "status": "completed"} tags=[]
# 2x2 scatter grid, one panel per series, shared x-axis.
fig,axs = plt.subplots(2,2,sharex=True,figsize = (10,6))
plt.suptitle('Figura 1')
plt.subplots_adjust(hspace = 0.25,wspace = 0.15)
axs[0,0].set_title('y1')
axs[0,0].scatter(x,y1,c = 'tomato',marker = 's')
axs[0,1].set_title('y2')
axs[0,1].scatter(x,y2,c = 'green', marker = 'x')
axs[1,0].set_title('y3')
axs[1,0].scatter(x,y3,c = 'pink', marker = '*')
axs[1,1].set_title('y4')
axs[1,1].scatter(x,y4,c = 'orange')
plt.show()
# + papermill={"duration": 0.017213, "end_time": "2021-01-18T10:42:11.982384", "exception": false, "start_time": "2021-01-18T10:42:11.965171", "status": "completed"} tags=[]
# + papermill={"duration": 0.016998, "end_time": "2021-01-18T10:42:12.016723", "exception": false, "start_time": "2021-01-18T10:42:11.999725", "status": "completed"} tags=[]
# + papermill={"duration": 0.016028, "end_time": "2021-01-18T10:42:12.049378", "exception": false, "start_time": "2021-01-18T10:42:12.033350", "status": "completed"} tags=[]
# + papermill={"duration": 0.016085, "end_time": "2021-01-18T10:42:12.081852", "exception": false, "start_time": "2021-01-18T10:42:12.065767", "status": "completed"} tags=[]
# + papermill={"duration": 0.015941, "end_time": "2021-01-18T10:42:12.114185", "exception": false, "start_time": "2021-01-18T10:42:12.098244", "status": "completed"} tags=[]
# + papermill={"duration": 0.016793, "end_time": "2021-01-18T10:42:12.148366", "exception": false, "start_time": "2021-01-18T10:42:12.131573", "status": "completed"} tags=[]
# + papermill={"duration": 0.016733, "end_time": "2021-01-18T10:42:12.181933", "exception": false, "start_time": "2021-01-18T10:42:12.165200", "status": "completed"} tags=[]
# + papermill={"duration": 0.016163, "end_time": "2021-01-18T10:42:12.214591", "exception": false, "start_time": "2021-01-18T10:42:12.198428", "status": "completed"} tags=[]
| Aula09/subplots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# # Verifying zeros of the Lorenz system
# We begin by defining the vector field $f$ and its derivative $Df$ for the Lorenz equations, which are functions of a variable $x \in \mathbb{R}^3$ and three parameters $\sigma$, $\rho$, and $\beta$ that we denote by $s$, $r$, and $b$ respectively.
# Lorenz vector field f(x; σ, ρ, β) and its Jacobian Df, with parameters s, r, b.
f(x, s, r, b) = [s * (x[2] - x[1]), r * x[1] - x[2] - x[1] * x[3], - b * x[3] + x[1] * x[2]]
Df(x, s, r, b) = [-s s 0; r - x[3] -1 -x[1]; x[2] x[1] -b]
# Next we set the parameter values and define the approximate solution $\bar{x}$.
# Classical chaotic parameter values; x_bar approximates one of the
# non-trivial equilibria (≈ (±√(β(ρ-1)), ±√(β(ρ-1)), ρ-1)).
s = 10; r = 28; b = 8/3;
x_bar = [8.4853, 8.4853, 27];
# The first step in applying the radii polynomial theorem is to choose $A$.
# A is the (numerical) inverse of the Jacobian at the approximate zero.
A = inv(Df(x_bar, s, r, b))
# Now define the bounds for the radii polynomial theorem.
# Load the LinearAlgebra Library
using LinearAlgebra
# Y0: defect bound; Z0: bound on I - A·Df(x_bar); Z2: second-order bound
# (all in the infinity norm).
Y0 = norm(A * f(x_bar, s, r, b), Inf)
Z0 = norm(I - A * Df(x_bar, s, r, b), Inf)
Z2 = 2 * max(abs(A[1,2]) + abs(A[1,3]), abs(A[2,2]) + abs(A[2,3]), abs(A[3,2]) + abs(A[3,3]))
# The radii polynomial is given by $p(r) = Z_2 r^2 - (1 - Z_0) r + Y_0$.
# Install the Polynomials library
# Just need to do this once
using Pkg
Pkg.add("Polynomials")
using Polynomials
# Coefficients in ascending order: Y0 + (Z0 - 1) r + Z2 r^2.
p = Poly([Y0, -(1 - Z0), Z2])
# Finally we find the roots of the radii polynomial.
roots(p)
| JuliaCode/Chapter2/Example_2.3.8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/https-deeplearning-ai/tensorflow-1-public/blob/adding_C3/C3/W4/ungraded_labs/C3_W4_Lab_2_irish_lyrics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="zX4Kg8DUTKWO"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="ylkgHaJ1hyBw"
# **Note:** This notebook can run using TensorFlow 2.5.0
# + id="bjcGo-3lhyBw"
# #!pip install tensorflow==2.5.0
# + id="BOwsuGQQY9OL"
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
# + id="pylt5qZYsWPh"
# irish-lyrics-eof.txt
# !gdown --id 15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
# + id="PRnDnCW-Z7qv"
tokenizer = Tokenizer()
# One lyric line per corpus entry, lower-cased.
data = open('./irish-lyrics-eof.txt').read()
corpus = data.lower().split("\n")
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1  # +1 for the reserved padding index 0
print(tokenizer.word_index)
print(total_words)
# + id="soPGVheskaQP"
# Build n-gram prefixes: every prefix of length >= 2 of each line becomes a
# training sequence; its last token will be the label to predict.
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        n_gram_sequence = token_list[:i+1]
        input_sequences.append(n_gram_sequence)
# pad sequences (pre-padding so the label stays in the last position)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# create predictors and label: all tokens but the last vs the last token,
# one-hot encoded over the full vocabulary.
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
# + id="pJtwVB2NbOAP"
# Spot-check vocabulary indices for a known lyric line.
print(tokenizer.word_index['in'])
print(tokenizer.word_index['the'])
print(tokenizer.word_index['town'])
print(tokenizer.word_index['of'])
print(tokenizer.word_index['athy'])
print(tokenizer.word_index['one'])
print(tokenizer.word_index['jeremy'])
print(tokenizer.word_index['lanigan'])
# + id="49Cv68JOakwv"
print(xs[6])
# + id="iY-jwvfgbEF8"
print(ys[6])
# + id="wtzlUMYadhKt"
print(xs[5])
print(ys[5])
# + id="H4myRpB1c4Gg"
print(tokenizer.word_index)
# + id="w9vH8Y59ajYL"
# Embedding -> bidirectional LSTM -> softmax over the vocabulary.
model = Sequential()
model.add(Embedding(total_words, 100, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(150)))
model.add(Dense(total_words, activation='softmax'))
# NOTE(review): `lr` is the legacy Keras argument name; newer versions
# expect `learning_rate`.
adam = Adam(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
history = model.fit(xs, ys, epochs=100, verbose=1)
#print model.summary()
print(model)
# + id="3YXGelKThoTT"
import matplotlib.pyplot as plt
def plot_graphs(history, string):
    """Plot one training-history metric (e.g. 'accuracy') against epoch index."""
    metric_values = history.history[string]
    plt.plot(metric_values)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.show()
# + id="poeprYK8h-c7"
plot_graphs(history, 'accuracy')
# + id="6Vc6PHgxa6Hm"
# Greedy text generation: repeatedly predict the most likely next word and
# append it to the seed text.
seed_text = "I've got a bad feeling about this"
next_words = 100
for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    # NOTE(review): Sequential.predict_classes was removed in TF 2.6+;
    # use np.argmax(model.predict(...), axis=-1) on newer versions.
    predicted = model.predict_classes(token_list, verbose=0)
    output_word = ""
    # Reverse-lookup the predicted index in the vocabulary.
    for word, index in tokenizer.word_index.items():
        if index == predicted:
            output_word = word
            break
    seed_text += " " + output_word
print(seed_text)
| C3/W4/ungraded_labs/C3_W4_Lab_2_irish_lyrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !conda install -y -c conda-forge pyarrow python-snappy
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import matplotlib as mpl
# ## Summit cooling plant data
# +
# Summit cooling-plant telemetry stored as parquet on GPFS.
COOLING_PATH = "/gpfs/alpine/proj-shared/stf218/data/lake/summit_cooling_plant"
cooling_file = glob.glob(COOLING_PATH + "/*.parquet")  # list of parquet files
# -
# Load the first file; the index is a timestamp, so sort chronologically.
cooling_df = pd.read_parquet(cooling_file[0], engine="pyarrow").sort_index()
cooling_df.head(5)
cooling_df.tail(5)
# ## Splitting timestamp for groupings
# +
# Derive calendar columns from the DatetimeIndex for weekly grouping.
cooling_df['Year'] = cooling_df.index.year
cooling_df['Month'] = cooling_df.index.month
# NOTE(review): DatetimeIndex.week is deprecated; newer pandas needs
# .isocalendar().week.
cooling_df['Week'] = cooling_df.index.week
cooling_df['Day'] = cooling_df.index.day
cooling_df.head(5)
# -
cooling_df.columns
# ## Calculate maxs and mins for Total Power
# +
# Weekly max/min of total facility power. Non-positive minima (sensor
# dropouts / missing data) are skipped, leaving the initialised 0.
week_list = sorted(cooling_df["Week"].unique())
maxs = np.zeros(len(week_list))
mins = np.zeros(len(week_list))
for idx, wk in enumerate(week_list):
    week_df = cooling_df[cooling_df["Week"] == wk]
    maxs[idx] = week_df["k100_total_power"].max()
    new_min = week_df["k100_total_power"].min()
    if (new_min > 0):
        mins[idx] = new_min
# -
# ## Convert from kW to MW
# +
cooling_df["k100_total_power_mw"] = cooling_df["k100_total_power"] / 1000
maxs = maxs / 1000
mins = mins / 1000
# +
# Publication style sheet for ACM/IEEE LaTeX figures.
plt.style.use("acm_ieee_latex_pubstyle.txt")
# %matplotlib inline
# Initialize style
# Re-apply after %matplotlib inline, which can reset rcParams.
plt.style.use("acm_ieee_latex_pubstyle.txt")
plt.rcParams["axes.labelweight"] = "bold"
# Target figure widths in inches for single- and double-column layouts.
SINGLE_COLUMN = SINGLE_WIDTH = 8.0
DOUBLE_COLUMN = DOUBLE_WIDTH = 16.0
def width_height(width=SINGLE_COLUMN, height=None, columns=1, rows=1):
    """Derive seaborn grid sizing kwargs from a target figure width/height.

    Seaborn's FacetGrid/PairGrid size panels with ``height`` and ``aspect``
    rather than total figure size. This helper converts a desired overall
    width (and optionally height) in inches into those kwargs so a figure
    fits a given column width while keeping font scaling correct. ACM/IEEE
    LaTeX single-column figures default to 8 inches wide here.

    Usage: ``sns.FacetGrid(..., **width_height())``

    :param width: target total width in inches (single column by default)
    :param height: target total height in inches (defaults to width * 3/4)
    :param columns: number of grid columns
    :param rows: number of grid rows
    """
    if height is None:
        height = width * 3 / 4
    aspect_ratio = float(width) / (float(height) / float(rows))
    return {
        "height": float(width) / aspect_ratio,
        "aspect": aspect_ratio / float(columns),
    }
# +
cooling_df.columns
# Week numbers shifted to a 0-based axis position for the scatter overlay.
week_list_np = np.array(week_list) - 1
print(week_list_np)
# +
plt.close()
# Override acm-ieee-latex-pubstyle
plt.figure(figsize=(8,6))
sns.despine(top=False, right=False)
sns.set_context(rc = {'patch.linewidth': 0.0, 'axes.labelsize': 12})
# Layout: 5 stacked panels sharing the weekly x-axis; the power panel is
# twice as tall as the others.
gs = mpl.gridspec.GridSpec(5, 1, height_ratios=[2, 1, 1, 1, 1])
gs.update(hspace=0.2)
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1], sharex=ax0)
ax2 = plt.subplot(gs[2], sharex=ax0)
ax3 = plt.subplot(gs[3], sharex=ax0)
ax4 = plt.subplot(gs[4], sharex=ax0)
# Only the bottom panel keeps its x tick labels.
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax3.get_xticklabels(), visible=False)
# Power plot with maxs and mins
sns.scatterplot(x=(week_list_np), y=maxs, marker="^", color="r", ax=ax0)
sns.boxplot(data=cooling_df, x='Week', y='k100_total_power_mw', showfliers=False, ax=ax0, linewidth=1, color="tab:green")
ax0.set_ylabel("Power (MW)")
ax0.set(xlabel=None)
ax0.set_ylim([2, 13])
ax0.set_yticks([2.5, 5, 7.5, 10, 12.5])
# PUE plot
sns.boxplot(data=cooling_df, x='Week',y='k100_pue', showfliers=False, ax=ax1, linewidth=1, color="tab:olive")
ax1.set_ylabel("PUE")
ax1.set(xlabel=None)
ax1.set_ylim([0.95, 1.6])
ax1.set_yticks([1.0, 1.25, 1.5])
# Wet Bulb Temp plot
sns.boxplot(data=cooling_df, x='Week',y='cep_outside_air_wet_bulb_temp', showfliers=False, ax=ax2, linewidth=1, color="tab:red")
ax2.set_ylabel("Wetbulb\nTemp (F)")
ax2.set(xlabel=None)
ax2.set_ylim([20, 100])
ax2.set_yticks([25, 50, 75, 100])
# CEP Supply Tonage plot
sns.boxplot(data=cooling_df, x='Week',y='cep_mtw_tons', showfliers=False, ax=ax3, linewidth=1, color="tab:cyan")
ax3.set_ylabel("MTW\nCooling\n(TOR)")
ax3.set(xlabel=None)
ax3.set_ylim([1000, 3500])
ax3.set_yticks([1000, 2000, 3000])
# CEP Chilled Water Tonage plot
sns.boxplot(data=cooling_df, x='Week',y='cep_chilled_water_tons', showfliers=False, ax=ax4, linewidth=1, color="tab:blue")
#sns.boxplot(data=cooling_df, x='Week',y='cep_cooling_tower_tons', showfliers=False, ax=ax4)
ax4.set_ylabel("Chiller\n(TOR)")
ax4.set_ylim([0, 2900])
ax4.set_yticks([0, 1000, 2000])
# Prepare x-axis for all 5 plots: label weeks with approximate month starts.
ax4.set_xlim(0, 53, 10)
ax4.set_xlabel("")
my_xticks = [1, 4.5, 8.5, 12, 16, 20.5, 25, 29.5, 34, 38, 42.5, 47]
my_xtick_labels= ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"]
plt.xticks(my_xticks, my_xtick_labels)
for item in ax4.get_xticklabels():
    item.set_rotation(50)
plt.subplots_adjust(hspace=.0)
plt.savefig("Summit_2020_Time_vs_Power_PUE_Cooling_formatted.pdf", format="pdf", bbox_inches='tight', pad_inches=0.01)
plt.show()
# -
| plots_codes/summit-pue-plot-clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd

# (model label, grid-search CSV suffix) in the row order of the summary table.
MODEL_FILES = [
    ("errors", "errors"),
    ("charngrams", "charngrams"),
    ("funcwords", "funcwords"),
    ("posngrams", "posngrams"),
    ("wordngrams", "wordngrams"),
    ("d2v", "doc2vec"),
]
# For each feature set keep the single best (rank 1) grid-search configuration.
best_rows = []
for _, suffix in MODEL_FILES:
    grid = pd.read_csv('task3_grid_search_{}.csv'.format(suffix))
    winners = grid[grid['rank_test_score'] == 1]
    best_rows.append(winners[['mean_train_score', 'mean_test_score', 'params']].iloc[0])
df = pd.DataFrame(best_rows)
df['model'] = pd.Series([label for label, _ in MODEL_FILES], index=df.index)
# Render scores as percentages with two decimals.
df["mean_train_score"] = df["mean_train_score"].map(lambda val: "{0:.2f}%".format(val * 100))
df["mean_test_score"] = df["mean_test_score"].map(lambda val: "{0:.2f}%".format(val * 100))
df = df.reindex(columns=["model", "mean_train_score", "mean_test_score", 'params'])
df.set_index("model", inplace=True)
df.to_csv('task3_grid_search_results.csv')
df
# +
from os import listdir
from os.path import isfile, join
import re
import pandas as pd
# Pivot the per-language classification reports (task3_test_<feature>_<split>.csv)
# into one wide CSV per metric, one row per (feature, split) pair.
files = [f for f in listdir("./") if isfile(join("./", f)) and f.endswith("csv") and f.startswith("task3_test")]
df_language = []
df_precision = []
df_recall = []
df_f1 = []
# NOTE(review): df_support is collected below but never written to a CSV.
df_support = []
df_overall = []
for file in files:
    # `type` shadows the builtin; it holds the dataset split ("train"/"test").
    feature, type = re.match("task3_test_([^_]+)_([^_]+).csv", file).groups()
    if type not in ["train", "test"]:
        continue
    df = pd.read_csv(file)
    # Compact "(precision recall f1)" string with leading zeros stripped.
    df["overall"] = pd.Series(["({} {} {})".format(\
            str(row["precision"]).lstrip('0'),
            str(row["recall"]).lstrip('0'),
            str(row["f1-score"]).lstrip('0')
        ) \
        for idx, row \
        in df.iterrows()], index=df.index)
    # Overwritten on every iteration — assumes all report files list the same
    # languages in the same order; TODO confirm.
    df_language = ["feature", "type"] + df["language"].tolist()
    df_precision.append([feature, type]+df["precision"].tolist())
    df_recall.append([feature, type]+df["recall"].tolist())
    df_f1.append([feature, type]+df["f1-score"].tolist())
    df_support.append([feature, type]+df["support"].tolist())
    df_overall.append([feature, type]+df["overall"].tolist())
pd.DataFrame(df_precision, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_test_df_precision.csv")
pd.DataFrame(df_recall, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_test_df_recall.csv")
pd.DataFrame(df_f1, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_test_df_f1.csv")
pd.DataFrame(df_overall, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_test_df_overall.csv")
# +
from os import listdir
from os.path import isfile, join
import re
import pandas as pd
# Pivot the per-language doc2vec-LSTM reports into one wide CSV per metric,
# one row per (feature, split) pair.
result_files = [f for f in listdir("./")
                if isfile(join("./", f)) and f.endswith("csv") and f.startswith("task3_doc2vec_lstm_results_")]
df_language = []
precision_rows = []
recall_rows = []
f1_rows = []
support_rows = []
overall_rows = []
for result_file in result_files:
    feature, split = re.match("task3_doc2vec_lstm_results_([^_]+)_([^_]+).csv", result_file).groups()
    if split not in ["train", "test"]:
        continue
    report = pd.read_csv(result_file)
    # Compact "(precision recall f1)" string with leading zeros stripped.
    combined = []
    for _, row in report.iterrows():
        precision = str(row["precision"]).lstrip('0')
        recall = str(row["recall"]).lstrip('0')
        f1_score = str(row["f1-score"]).lstrip('0')
        combined.append("({} {} {})".format(precision, recall, f1_score))
    report["overall"] = pd.Series(combined, index=report.index)
    # Header row: two id columns followed by the language names.
    df_language = ["feature", "type"] + report["language"].tolist()
    precision_rows.append([feature, split] + report["precision"].tolist())
    recall_rows.append([feature, split] + report["recall"].tolist())
    f1_rows.append([feature, split] + report["f1-score"].tolist())
    support_rows.append([feature, split] + report["support"].tolist())
    overall_rows.append([feature, split] + report["overall"].tolist())
pd.DataFrame(precision_rows, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_doc2vec_lstm_results_df_precision.csv")
pd.DataFrame(recall_rows, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_doc2vec_lstm_results_df_recall.csv")
pd.DataFrame(f1_rows, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_doc2vec_lstm_results_df_f1.csv")
pd.DataFrame(overall_rows, columns=df_language).sort_values(by=["feature", "type"], ascending=False).to_csv("task3_doc2vec_lstm_results_df_overall.csv")
# -
| experiment_results/task3/stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inherited from 12-3
# Add add `MyRNN` class
# <img src='ex12-5.png'>
# +
import torch
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(99999999)
# -
# Unique characters of the training word.
full_data = 'hihello'
full_data = set(full_data)
full_data # Try minor change in arrangement
# +
# Fixed char -> index vocabulary; the embedding maps each index to a 5-d vector.
word_to_ix = {'e': 0, 'h': 1, 'i': 2, 'l': 3, 'o': 4}
embeds = nn.Embedding(5, 5) # 5 chars, 5 dimensions
x_data = 'hihell'
lookup_tensor = torch.tensor([word_to_ix[ch] for ch in x_data], dtype=torch.long)
inputs = embeds(lookup_tensor)
# -
lookup_tensor
# Detach so autograd tracks our own RNN weights rather than the Embedding.
inputs = inputs.clone().detach()
inputs
# Targets are the input shifted by one character: 'hihell' -> 'ihello'.
labels = torch.tensor([word_to_ix[ch] for ch in 'ihello'], dtype=torch.long)
labels
# # (2) Parameters
num_classes = 5
input_size = 5 # one_hot size
hidden_size = 5 # output from the LSTM. 5 to directly predict one-hot
batch_size = 1 # one sentence
sequence_length = 1 # Let's do one by one
num_layers = 1 # one-layer rnn
# # 1. Model
class MyRNN(nn.Module):
    """Hand-rolled Elman-style RNN cell built from raw weight tensors.

    Implements a = b + W*h + U@x ; h' = tanh(a) ; y = softmax(c + V*h').

    NOTE(review): the weights are plain ``torch.zeros``/``torch.randn``
    tensors, not ``nn.Parameter``s, so ``model.parameters()`` never sees
    them and the optimizer cannot train them — confirm this is intended
    for the exercise.
    """

    def __init__(self, input_size, hidden_size, sequence_length):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        # Square matrices sized (batch_size * sequence_length)^2;
        # batch_size is read from the notebook's globals.
        self.b = torch.zeros(batch_size * sequence_length, batch_size * sequence_length)
        self.W = torch.randn(batch_size * sequence_length, batch_size * sequence_length)
        self.U = torch.randn(batch_size * sequence_length, batch_size * sequence_length)
        self.c = torch.zeros(batch_size * sequence_length, batch_size * sequence_length)
        self.V = torch.randn(batch_size * sequence_length, batch_size * sequence_length)
        self.tanh = torch.nn.Tanh()
        self.softmax = torch.nn.Softmax()

    def forward(self, input, hidden):
        """
        input.size() = (1, 6, 5) = (batch_size, sequence_length, dimension)
        x = (6, 5) = (batch_size * sequence_length, input_size)
        U = (6, 6) = (batch_size * sequence_length, batch_size * sequence_length)
        h = (6, 6) = (batch_size * sequence_length, batch_size * sequence_length)
        W = (6, 6) = (batch_size * sequence_length, batch_size * sequence_length)
        b = (6, 6) = (batch_size * sequence_length, batch_size * sequence_length)
        hidden = (1, 1, 5)
        """
        x = input.view(-1, input_size) # Transpose in here already
        # Fix: removed a leftover `import ipdb; ipdb.set_trace()` debugger break.
        # NOTE(review): self.W is (6, 6) while hidden is documented as (1, 1, 5);
        # the elementwise product below looks shape-incompatible — verify the
        # intended hidden shape before relying on this forward pass.
        a_trans = self.b + self.W * hidden + torch.mm(self.U, x)
        hidden_trans = self.tanh(a_trans)
        output_trans = self.c + self.V * hidden_trans
        y_hat_trans = self.softmax(output_trans)
        # Do not return a transposed value
        return torch.transpose(y_hat_trans, 0, 1), torch.transpose(hidden_trans, 0, 1)
class Model(nn.Module):
    """Wraps the hand-written MyRNN cell with a fully connected readout layer.

    NOTE(review): forward() returns (hidden, out) — hidden first — which is
    the opposite of the usual (output, hidden) convention; the training loop
    below relies on this order.
    """

    def __init__(self,
                 input_size=5,
                 hidden_size=5,
                 num_layers=1,
                 batch_size=1,
                 sequence_length=1,
                 num_classes=5):
        super().__init__()
        # Recurrent core: the MyRNN cell defined above in this notebook.
        self.rnn = MyRNN(input_size=input_size,
                         hidden_size=hidden_size,
                         sequence_length=sequence_length)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.num_classes = num_classes
        # Fully-Connected layer
        self.fc = nn.Linear(num_classes, num_classes)

    def forward(self, x, hidden):
        # Reshape input in (batch_size, sequence_length, input_size)
        x = x.view(self.batch_size, self.sequence_length, self.input_size)
        out, hidden = self.rnn(x, hidden)
        out = self.fc(out) # Add here
        # Flatten to (batch*seq, num_classes) — the shape CrossEntropyLoss expects.
        out = out.view(-1, self.num_classes)
        return hidden, out

    def init_hidden(self):
        # Zero initial hidden state of shape (num_layers, batch_size, hidden_size).
        return torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
# # 2. Criterion & Loss
# (A duplicate model/criterion/optimizer construction previously lived here;
# it was dead code, immediately overwritten by the cell below, so it has been
# removed.)
# # 3. Training
model = Model(input_size=5, hidden_size=5, num_layers=1,
              batch_size=1, sequence_length=6, num_classes=5)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.1)
hidden = model.init_hidden()
loss = 0
idx = 0
for epoch in range(0, 10 + 1):
    # Cut the graph from the previous epoch and make `hidden` a fresh leaf
    # that requires grad. (Replaces three redundant detach calls —
    # `detach_()`, `detach()`, then `clone().detach()` — with one equivalent.)
    hidden = hidden.clone().detach().requires_grad_(True) # New syntax from `1.0`
    hidden, outputs = model(inputs, hidden)
    optimizer.zero_grad()
    loss = criterion(outputs, labels) # It wraps for-loop in here
    loss.backward()
    optimizer.step()
    # Greedy decoding: most likely class per time step.
    _, idx = outputs.max(1)
    idx = idx.data.numpy()
    # A bit acrobatic since I lookup `value` to see the `key`
    result_str = [list(word_to_ix.keys())[list(word_to_ix.values()).index(i)] for i in idx]
    print(f"epoch: {epoch}, loss: {loss.data}")
    print(f"Predicted string: {''.join(result_str)}")
# Reference: the same kind of shapes with the built-in nn.RNN (2 stacked layers).
rnn = nn.RNN(10, 20, 2)
input = torch.randn(5, 3, 10)
h0 = torch.randn(2, 3, 20)
output, hn = rnn(input, h0)
| exercise12/exercise12-5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rahulmishra11/Titanic_Prediction/blob/main/TItacnicPredict.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ODMcbLpZGwtH"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
# + id="mrfLr8uKGwtP" outputId="16802fd1-2473-4a5c-c325-7cee4f907650"
# Load the Kaggle Titanic training set (absolute local path — adjust per machine).
df=pd.read_csv('/home/rahul/Titanic_Prediction_Kaggle/train.csv')
df.head()
# + id="HprR3wd2GwtR"
df.columns
# Drop the target plus the free-text / high-cardinality columns before modeling.
data=df.drop(['Survived','Name','Cabin','Ticket'],axis=1)
# + id="7udTU3K_GwtU" outputId="c6f475a8-a214-437b-9941-dc22fc4f248a"
# Raw feature matrix (object dtype: still mixes numbers and strings).
x_train=data.values
print(x_train)
# + id="f81IY_kMGwtU" outputId="023f74e5-9b05-4c19-a241-2ec393ddabf6"
# Target vector taken from the 'Survived' column.
y_train=(df.get('Survived')).values
print(y_train)
# + id="uS4HOJ9WGwtV"
# plt.plot(y_train,x_train)
# + id="r1R3O7rQGwtW"
plt.show()
# + id="mwKR0ErcGwtW" outputId="e6f0f6ea-d9a1-49b6-e4cc-4e49c65a46d2"
print(df.shape)
# + id="E411E9j4GwtX" outputId="61a3d9cd-434b-4e83-ae56-f77e9ff1ca52"
# Class balance of the target.
df['Survived'].value_counts()
# + id="41xaYgotGwtY" outputId="21911a9d-d87c-41a2-9764-b3ba9d9ae4a7"
# Quick look at fare vs. survival.
df.plot(kind='scatter',x='Fare',y='Survived');
plt.show()
# + id="2BFHyj1jGwtY" outputId="728df6b5-9857-447c-8dd3-928ba63f4d93"
sns.set_style('whitegrid');
sns.FacetGrid(df,hue='Survived',height=4).map(plt.scatter,"Fare","Survived").add_legend();
plt.show()
# + id="aHfl6c7kGwtZ"
# + id="i9NiNuxCGwtZ" outputId="64b9846c-d8c8-40f9-f998-921dc26c984d"
print(x_train)
# + id="51im3bNGGwta"
# + id="WrYr2KpyGwta" outputId="6c25cd38-6a78-438b-e4fd-bfca7390bd1f"
print(x_train[0])
# + id="aQH_tHnhGwtb" outputId="20c65ea0-6722-4473-c939-ddfed8a9e328"
# Mean-impute column 3 (Age, assuming the standard Kaggle column order after
# the drops above — TODO confirm).
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(x_train[:,[3]])
# + id="15yP4FBPGwtb"
z=imp.transform(x_train[:,[3]])
# + id="kP0VRBVKGwtb" outputId="227330e0-20fe-4578-ed82-dc045ab9fb30"
print(z)
# + id="JalLHeT-Gwtc"
x_train[:,[3]]=z
# + id="rYjsS7rSGwtc" outputId="2caf62eb-a0e9-4a56-d1e1-21ce363e425f"
print(x_train[:,[2]])
# + id="w5SjA1j3Gwtc" outputId="61fe7965-bc07-4d9e-bce9-4f26d2f970d0"
# One-hot encode column 2 (presumably Sex — verify against test_data.columns below).
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(x_train[:,[2]])
# + id="cXZhmzdzGwtd" outputId="4b76341e-8d6f-4184-e903-f488fe07dd70"
enc.categories_
# + id="srIosTanGwte"
z=enc.transform(x_train[:,[2]]).toarray()
# + id="Nz9IrVuGGwte" outputId="e1f8364e-0398-4640-dc4d-7ef581a89685"
print(z)
# + id="Sn58UcMtGwtf"
# Drop the original categorical column and append its one-hot columns at the
# end; note this shifts the indices of all later columns.
x_train=np.delete(x_train,2,axis=1)
# + id="NlT7ZwcYGwtf" outputId="94bf4d83-0953-47e9-ca5d-f0dc3a0ac7c4"
print(x_train[:,[2]])
# + id="spu65hOUGwtf"
x_train=np.append(x_train,z,axis=1)
# + id="cbC686nHGwtf" outputId="62780d23-1694-4a15-9db5-0d0faec870ce"
print(x_train[:,[6]])
# + id="feV-SeuXGwtg"
# from sklearn.neighbors import KNeighborsClassifier
# neigh = KNeighborsClassifier(n_neighbors=3)
# neigh.fit(x_train, y_train)
# + id="uDM47saYGwtg"
# + id="lQ-ryxwuGwtg" outputId="527142ef-dbfe-4674-8414-020f1fb36cc6"
# Mode-impute column 6 (after the delete/append above — presumably Embarked;
# TODO confirm), then one-hot encode it the same way.
imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp.fit(x_train[:,[6]])
# + id="JajFE2dmGwth"
z=imp.transform(x_train[:,[6]])
# + id="AAU2NkCbGwth"
x_train[:,[6]]=z
# + id="V2eTeJNVGwth" outputId="db443e72-c35d-41e4-920c-30904832e4c7"
print(x_train[:,[6]])
# + id="rJ1Xi5RnGwti" outputId="c159ea0d-3a6f-4089-9eb8-623bf7e0a0c8"
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(x_train[:,[6]])
# + id="bIZ68sUIGwti" outputId="cf20052e-32b4-46dd-a114-0ab104eb992d"
enc.categories_
# + id="80A43wQ5Gwti" outputId="3f68b257-1693-43eb-f844-aad5a6dc0ca8"
z=enc.transform(x_train[:,[6]]).toarray()
print(z)
# + id="wMj6r6GuGwtj" outputId="207f4df1-6247-4b2e-8477-0de42358082b"
x_train=np.delete(x_train,6,axis=1)
print(x_train)
# + id="bv74mhSNGwtj"
x_train=np.append(x_train,z,axis=1)
# + id="vJ_O5-mxGwtk" outputId="6f60296c-b8b3-4df8-f2fc-cb8527f167de"
print(x_train)
# + id="GLqr3v_ZGwtk" outputId="d2ede4a2-32d4-4eff-d508-94b4ab08f88c"
# Fit a 3-nearest-neighbors classifier on the fully numeric feature matrix.
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(x_train, y_train)
# + id="D0qyePEGGwtl"
# Apply the same preprocessing pipeline to the Kaggle test set (relative path
# here, unlike the absolute path used for train.csv above).
test_data=pd.read_csv('test.csv')
# + id="VZA_S9m2Gwtl" outputId="1d3aa284-f8a0-4007-818d-5fd70af116b5"
test_data.columns
# + id="RHkDF3gtGwtm"
test_data=test_data.drop(['Name','Cabin','Ticket'],axis=1)
# + id="FjE7Sg-CGwtm" outputId="bdbaa096-370f-4869-d0e1-55a509051c9f"
x_test=test_data.values
test_data.columns
# + id="Ef9F-537Gwtm" outputId="f301ea71-d73b-42cc-9a86-dab50f37df8e"
print(x_test)
# + id="dEuwHDfDGwtn" outputId="e771b199-39d3-496d-ab80-baea4ad2bb38"
# Mean-impute column 3 (Age) — note: imputers are re-fit on the test set here
# rather than reusing the training-set statistics.
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(x_test[:,[3]])
# + id="mVwWarIwGwtn"
z=imp.transform(x_test[:,[3]])
# + id="TSH5K88DGwto" outputId="8fdfb01c-5885-4e00-838e-fc0d3d0ed5f8"
print(z)
# + id="0lzrFZaEGwto"
x_test[:,[3]]=z
# Mean-impute column 6 as well (presumably Fare in the test split — TODO confirm).
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(x_test[:,[6]])
z=imp.transform(x_test[:,[6]])
x_test[:,[6]]=z
# + id="WO4hQpaaGwto" outputId="f4da4271-4205-48e3-dc99-003ca51c2ca9"
print(z)
# + id="P1pv1ZKJGwtp" outputId="198cc928-6854-45cc-db83-dc324f143744"
# One-hot encode column 2 (Sex), as done for the training matrix.
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(x_test[:,[2]])
enc.categories_
# + id="xAILBnUPGwtp"
z=enc.transform(x_test[:,[2]]).toarray()
# + id="ryAyunu3Gwtq" outputId="c3092d31-7a07-4113-b732-17d48badb7b1"
print(z)
# + id="RcSAGDdIGwtq" outputId="9ed468b7-0b4a-4392-c4d1-3b59b6a4ab5b"
# Mode-impute then one-hot encode column 7 (presumably Embarked — TODO confirm).
imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp.fit(x_test[:,[7]])
# + id="05pqWqExGwtr"
z2=imp.transform(x_test[:,[7]])
# + id="HBq2z1eBGwtr" outputId="7246ce61-35b4-4184-ddc8-c2ac1c09d6f0"
print(z2)
# + id="w5WGqqtrGwts" outputId="6317acc2-0073-48ee-8838-ed7e59ddd37c"
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(z2)
enc.categories_
# + id="I0htckE4Gwts"
z2=enc.transform(z2).toarray()
# + id="ioGF3mGpGwts" outputId="ff84d77f-3466-4a6c-c1c5-3a5c47ff02ef"
print(z2)
# + id="J7_IufkFGwtt"
# Replace both categorical columns with their one-hot expansions (appended at
# the end, mirroring the training-matrix layout).
x_test=np.delete(x_test,[2,7],axis=1)
# + id="yAa3hOm4Gwtt"
x_test=np.append(x_test,z,axis=1)
# + id="sjQptNrqGwtt"
x_test=np.append(x_test,z2,axis=1)
# + id="2Zn-pDGWGwtt" outputId="4036229e-7a11-40aa-f00f-6083fffe6f24"
print(x_test)
# + id="VVgtgF40Gwtu" outputId="ab085c40-f563-4686-9599-6178ecab8e69"
x_test.shape
# + id="gc1RIsBzGwtu" outputId="7c0b8b0a-3636-459f-a4dc-419ba50ba03f"
x_train.shape
# + id="VztG7eGcGwtu"
# Predict survival for the test set with the fitted KNN model.
m=(neigh.predict(x_test))
# print(~np.isnan(x_test).any(axis=1))
# + id="_8AIk_nxGwtv" outputId="74aa4b43-d0bb-44ba-e331-5d9ef707bfd7"
print(m)
# + id="VQOKYB78Gwtv"
# PassengerId column (index 0 survived all the delete/append steps above).
z=x_test[:,[0]]
# + id="U2ltIoi_Gwtv" outputId="7ce5b64a-1af2-414f-dabd-1ac6d172392f"
# Flatten the (n, 1) id column into a plain list. Iterating the rows replaces
# the previous hard-coded `range(0, 418)` loop, so this keeps working for any
# test-set size.
p = [row[0] for row in z]
print(p)
# + id="IQqenZ__Gwtw"
z=p
# + id="EDIus8swGwtw" outputId="73687573-f876-4923-ca25-b62cb61d91e8"
print(z)
# + id="w2biKxAgGwtw"
# Kaggle submission file: PassengerId + predicted Survived.
# NOTE(review): DataFrame.to_csv returns None, so `df2` is None — harmless,
# but the name is misleading.
d = {'PassengerId': z, 'Survived': m}
df2=pd.DataFrame(d).to_csv('predict.csv',index=False)
# + id="NWVQKaDNGwtw" outputId="8e69184e-1e3b-46b8-ee95-dae4f7985f67"
# Sanity check: remaining missing values in the training feature frame.
data.isna().sum()
# + id="r_JOAGGzGwtx"
| TItacnicPredict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dependencies
from bs4 import BeautifulSoup
from splinter import Browser
import requests
import pandas as pd
# # NASA Mars News
# chromedriver.exe must sit next to this notebook (or be on PATH).
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
# URL of page to be scraped
url = 'https://mars.nasa.gov/news/'
#will take you to the url
browser.visit(url)
# Create BeautifulSoup object; parse with 'html.parser'
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Extract title text
#title = soup.find('div', class_="content_title").text
# First news item ('slide' list entry): headline and teaser paragraph.
title = soup.find('li', class_='slide').find('div', class_='content_title').text
print(title)
# Print all paragraph texts
paragraph = soup.find('li', class_='slide').find('div', class_="article_teaser_body").text
print(paragraph)
# # JPL Mars Space Images - Featured Image
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
#will take you to the url
browser.visit(url)
#access the html that is currently on the page in the browser
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#find the featured image
# The image path is embedded in the article's inline `style` attribute.
picture = soup.find('div', class_='carousel_container').find("article")["style"]
picture
# Presumably style is `background-image: url('/spaceimages/...');` — the first
# single-quoted substring is taken as the relative image path (TODO confirm).
feature_img = picture.split("'")[1]
feature_img_url = "https://www.jpl.nasa.gov" + feature_img
feature_img_url
# # Mars Weather twitter account
# URL of page to be scraped
url = 'https://twitter.com/marswxreport?lang=en'
# Retrieve page with the requests module
response = requests.get(url)
# Create BeautifulSoup object; parse with 'html.parser'
soup = BeautifulSoup(response.text, 'html.parser')
# Examine the results, then determine element that contains sought info
print(soup.prettify())
# Extract title text
twitter_title = soup.title.text
print(twitter_title)
# to find weather, search Sol in soup.prettify
#<div class="js-tweet-text-container">
#<p class="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text" data-aria-label-part="0" lang="en">
# NOTE(review): these selectors target Twitter's legacy server-rendered markup;
# if Twitter serves the JS-only page, soup.find() returns None and the chained
# .find() below raises AttributeError — verify before relying on this.
weather = soup.find('div', class_='js-tweet-text-container').find('p', class_="TweetTextSize").text
print(weather)
# # Mars Facts
# URL of page to be scraped
url = 'https://space-facts.com/mars/'
# use the read_html function in Pandas to automatically scrape any tabular data from a page.
table = pd.read_html(url)
table
#return is a list of dataframes for any tabular data that Pandas found
type(table)
# First table on the page holds the Mars fact sheet.
df = table[0]
# Fix: was `df.head` (missing call parentheses), which displayed the bound
# method object instead of the first rows.
df.head()
# Assign the columns heading
df.columns = ['Description','Value']
# Set the index to the `Description` column without row indexing
df.set_index('Description', inplace=True)
df
df_html_table = df.to_html()
df_html_table
#remove \n
df_html_table = df_html_table.replace('\n', '')
df_html_table
# # Mars Hemispheres
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
# +
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Each hemisphere appears as a 'description' block with an <h3> title.
items = soup.find_all('div', class_='description')
#create list for hemisphere url
url_list = []
for picture in items:
    title = picture.find('h3').text
    # Follow the link to the hemisphere's detail page, grab the first link in
    # the 'downloads' block, then navigate back to the results list.
    # NOTE(review): `click_link_by_partial_text` is the legacy splinter API
    # (removed in newer releases in favor of browser.links.find_by_partial_text)
    # — confirm the pinned splinter version.
    browser.click_link_by_partial_text (title)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # NOTE(review): this href may be site-relative — verify it is a full URL.
    mars_url = soup.find('div', class_='downloads').find('a')['href']
    hemi = {
        'title':title,
        'image_url':mars_url
    }
    url_list.append (hemi)
    browser.back()
print (url_list)
# -
| jupyter/project2_mars.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="../imgs/logo.png" width="20%" align="right" style="margin:0px 20px">
#
#
# # Evolutionary Computation
#
# ## 5.2 CMA-ES
#
# <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" align="left" src="https://i.creativecommons.org/l/by-sa/4.0/80x15.png" /></a> | <NAME> | <a href="https://d9w.github.io/evolution/">https://d9w.github.io/evolution/</a>
# + [markdown] slideshow={"slide_type": "slide"}
# # CMA-ES
#
# In this section, we'll discuss the Covariance Matrix Adaptation Evolutionary Strategy, or CMA-ES [1, 2]. This is one of the most well-known evolutionary algorithms in general and is a state-of-the-art algorithm for continuous optimization. The strength of this method is that it adapts the distribution it uses to generate the next population based on the current distribution of individuals. In the previous section, we were limited to a Normal distribution with a fixed $\sigma$. The adaptive distribution of CMA-ES means it will cross search spaces faster and narrow in more exactly on optimal points.
#
# [1] Hansen, Nikolaus, and <NAME>. "Adapting arbitrary normal mutation distributions in evolution strategies: The covariance matrix adaptation." Proceedings of IEEE international conference on evolutionary computation. IEEE, 1996.
#
# [2] Hansen, Nikolaus, and <NAME>. "Completely derandomized self-adaptation in evolution strategies." Evolutionary computation 9.2 (2001): 159-195.
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="../imgs/cmaes.png" width="60%" height="auto">
# + [markdown] slideshow={"slide_type": "slide"}
# Let's go through a simplified version of the algorithm from [3]. There are improvements to CMA-ES beyond this basic framework, but that's beyond the scope of today. [4] provides a good review of different CMA-ES modifications.
#
# [3] <NAME> (2007) Evolution strategies. Scholarpedia, 2(8):1965.
#
# [4] Hansen, Nikolaus. "The CMA evolution strategy: a comparing review." Towards a new evolutionary computation. Springer, Berlin, Heidelberg, 2006. 75-102.
# + [markdown] slideshow={"slide_type": "slide"}
# We'll start by creating a random individual $\mathbf{y}$ which will be our first expert. We also create a diagonal covariance matrix $\mathbf{C}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L1):} \quad
# \forall l=1, \ldots, \lambda : \;\;
# \begin{cases}
# & \mathbf{w}_l
# \leftarrow \sigma \sqrt{ \mathbf{C} } \,
# \mathbf{N}_l(\mathbf{0}, \mathbf{1}),\\[2mm]
# & \mathbf{y}_l \leftarrow \mathbf{y} + \mathbf{w}_l, \\[2mm]
# & F_l \leftarrow F(\mathbf{y}_l),
# \end{cases}$
# + [markdown] slideshow={"slide_type": "slide"}
# In the first step, (L1), $\lambda$ offspring $\mathbf{y}_l$ are created by transforming standard normally distributed random vectors using a transformation matrix $\sqrt{\mathbf{C}}$ which is given by Cholesky decomposition of the covariance matrix $\mathbf{C}$ and the global step size factor $\sigma$. We also evaluate every individual, creating $F$.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L2):} \quad
# \mathbf{y} \leftarrow \mathbf{y} + \langle \mathbf{w} \rangle$
# + [markdown] slideshow={"slide_type": "fragment"}
# In (L2) the best $\mu$ mutations are recombined forming the recombinant $\mathbf{y}$ (center of mass individual) for the next generation.
# + [markdown] slideshow={"slide_type": "slide"}
# $\mbox{(L3):} \quad
# \mathbf{s} \leftarrow \left(1-\frac{1}{\tau}\right)\mathbf{s}
# + \sqrt{\frac{\mu}{\tau} \left(2-\frac{1}{\tau}\right)} \,
# \frac{\langle \mathbf{w} \rangle}{\sigma}$
# + [markdown] slideshow={"slide_type": "fragment"}
# Vector $\langle \mathbf{w} \rangle$ combines individuals from two consecutive generations so $\langle \mathbf{w} \rangle/\sigma$ represents the tendency of evolution in the search space. In (L3), this information is cumulated in the $\mathbf{s}$ vector, which exponentially decays with the time constant $\tau$. A good default for this is $\tau=\sqrt{n}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L4):} \quad
# \mathbf{C} \leftarrow
# \left(1-\frac{1}{\tau_{\mathrm{c}}}\right)\mathbf{C}
# + \frac{1}{\tau_{\mathrm{c}}} \mathbf{s} \mathbf{s}^T$
# + [markdown] slideshow={"slide_type": "fragment"}
# In (L4), the direction vector $\mathbf{s}$ is used to update the covariance matrix $\mathbf{C}$ with time constant $\tau_{\mathrm{c}} \propto n^2$
# + [markdown] slideshow={"slide_type": "slide"}
# $\mbox{(L5):} \quad
# \mathbf{s}_\sigma
# \leftarrow \left(1-\frac{1}{\tau_\sigma}\right) \mathbf{s}_\sigma
# + \sqrt{\frac{\mu}{\tau_\sigma}
# \left(2-\frac{1}{\tau_\sigma}\right)} \,
# \langle \mathbf{N}(\mathbf{0}, \mathbf{1}) \rangle$
# + [markdown] slideshow={"slide_type": "slide"}
# $\mbox{(L6):} \quad
# \sigma \leftarrow \sigma\exp\left[
# \frac{\| \mathbf{s}_{\sigma} \|^2 - n}
# {2 n \sqrt{n} }
# \right]$
# + [markdown] slideshow={"slide_type": "fragment"}
# The distribution standard deviation $\sigma$ is then calculated in (L5) and (L6) using the cumulated step size adaptation (CSA) technique with time constant $\tau_\sigma = \sqrt{n}$ (initially $\mathbf{s}_\sigma = \mathbf{0}$). $\langle \mathbf{N}(\mathbf{0}, \mathbf{1}) \rangle$ is the distribution we calculated in (L1).
# + [markdown] slideshow={"slide_type": "slide"}
# So instead of simply using a Normal distribution to create the next generation, CMA-ES transforms a normal distribution by the covariance matrix $\mathbf{C}$. It also moves at self-adjusting $\sigma$. This makes its movement around the search space much more effective, as it is informed by the shape of the search space given through the fitness values $F$.
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="../imgs/cmaes_steps.png" width="80%" height="auto">
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's see an example of that.
# + slideshow={"slide_type": "slide"}
using Random
using LinearAlgebra
using Statistics
using Plots
# + slideshow={"slide_type": "fragment"}
# Benchmark objectives over R^N (minimization). `solution` is the optimum of
# `sphere` and `rastrigin`; the other two have their own fixed optima.
solution = [3.5, -0.2]
sphere(x::Array{Float64}) = sum((x .- solution).^2)
himmelblau(x::Array{Float64}) = (x[1]^2 + x[2] - 11)^2 + (x[1] + x[2]^2 - 7)^2
styblinski_tang(x::Array{Float64}) = sum(x.^4 .- 16 .* x.^2 .+ 5 .* x) / 2.0
rastrigin(x::Array{Float64}) = 10.0 * length(x) .+ sum((x .- solution).^2 .- 10 .* cos.(2*pi.*(x .- solution)))
objective = sphere
# + [markdown] slideshow={"slide_type": "slide"}
# As with last time, we'll optimize over 2 continuous variables. We'll set default values of the time constants based on $N$. These are recommended values for CMA-ES [5]
#
# [5] Hansen, Nikolaus, <NAME>, and <NAME>. "Reducing the time complexity of the derandomized evolution strategy with covariance matrix adaptation (CMA-ES)." Evolutionary computation 11.1 (2003): 1-18.
# + slideshow={"slide_type": "fragment"}
N = 2 # dimension
μ = 5 # λ / 2 population which inform the next generations
λ = 10 # 4 + 3 log (N) population size
τ = sqrt(N)     # time constant for the evolution-path update
τ_c = N^2       # time constant for the covariance update
τ_σ = sqrt(N);  # time constant for step-size adaptation
# + [markdown] slideshow={"slide_type": "slide"}
# Now let's make our random expert and create placeholder fitness and offspring vectors.
# + slideshow={"slide_type": "fragment"}
y = randn(N) # first expert
offspring = Array{Array{Float64}}(undef, λ)
F = Inf .* ones(λ); # fitness vector
# + [markdown] slideshow={"slide_type": "fragment"}
# Finally, we need to initialize $\mathbf{C}, \mathbf{N}, \mathbf{w}, \mathbf{s}, \mathbf{s_\sigma},$ and $\mathbf{\sigma}$.
# + slideshow={"slide_type": "fragment"}
C = Diagonal{Float64}(I, N)  # covariance starts as the identity
W = zeros(N, λ)              # scaled mutation vectors, one column per offspring
s = zeros(N)                 # cumulated evolution path
s_σ = zeros(N)               # cumulated path for step-size adaptation
σ = 1.0                      # global step size
E = zeros(N, λ);             # raw standard-normal draws
# + [markdown] slideshow={"slide_type": "slide"}
# Step 1: We calculate the offspring distribution $\mathbf{w}$ then center it at the center-of-mass individual $\mathbf{y}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L1):} \quad
# \forall l=1, \ldots, \lambda : \;\;
# \begin{cases}
# & \mathbf{w}_l
# \leftarrow \sigma \sqrt{ \mathbf{C} } \,
# \mathbf{N}_l(\mathbf{0}, \mathbf{1}),\\[2mm]
# & \mathbf{y}_l \leftarrow \mathbf{y} + \mathbf{w}_l, \\[2mm]
# & F_l \leftarrow F(\mathbf{y}_l),
# \end{cases}$
# + slideshow={"slide_type": "fragment"}
# Cholesky factor of the (symmetrized) covariance; averaging C with C' guards
# against small numerical asymmetries before the decomposition.
sqrt_c = cholesky((C + C') / 2.0).U
# + [markdown] slideshow={"slide_type": "fragment"}
# Note that our $\mathbf{C}$ covariance matrix is currently 1, so this first distribution will just be a normal distribution without any transformation. We will then evaluate every individual.
# + slideshow={"slide_type": "slide"}
for i in 1:λ
    E[:,i] = randn(N) # individual
    W[:,i] = σ * (sqrt_c * E[:,i]) # deviation from expert
    offspring[i] = y + W[:,i] # new individual
    F[i] = objective(offspring[i]) # its fitness
end
# + [markdown] slideshow={"slide_type": "fragment"}
# Now that we have evaluated the individuals, we will select a subset of them to inform the next generation. We'll simply use a truncation selection, taking the top $\mu$ individuals.
# + slideshow={"slide_type": "fragment"}
# Truncation selection: indices of the μ best (lowest-fitness) offspring.
idx = sortperm(F)[1:μ]
# + [markdown] slideshow={"slide_type": "slide"}
# Now step 2. We update $\mathbf{y}$ using the top $\mu$ individuals in $\mathbf{w}$
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L2):} \quad
# \mathbf{y} \leftarrow \mathbf{y} + \langle \mathbf{w} \rangle$
# + slideshow={"slide_type": "fragment"}
# Recombination (L2): move the expert to the mean of the selected mutations.
w = vec(mean(W[:,idx], dims=2))
y += w
# + [markdown] slideshow={"slide_type": "slide"}
# In step 3, we update the direction vector $\mathbf{s}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L3):} \quad
# \mathbf{s} \leftarrow \left(1-\frac{1}{\tau}\right)\mathbf{s}
# + \sqrt{\frac{\mu}{\tau} \left(2-\frac{1}{\tau}\right)} \,
# \frac{\langle \mathbf{w} \rangle}{\sigma}$
# + slideshow={"slide_type": "fragment"}
# L3: exponentially-decayed cumulation of the evolution path (time constant τ).
s = (1.0 - 1.0/τ)*s + (sqrt(μ/τ * (2.0 - 1.0/τ))/σ)*w
# + [markdown] slideshow={"slide_type": "fragment"}
# Next we update our covariance matrix $\mathbf{C}$. Note that it will no longer be a diagonal matrix: our next update will use a transformed distribution to generate the population.
# + [markdown] slideshow={"slide_type": "slide"}
# $\mbox{(L4):} \quad
# \mathbf{C} \leftarrow
# \left(1-\frac{1}{\tau_{\mathrm{c}}}\right)\mathbf{C}
# + \frac{1}{\tau_{\mathrm{c}}} \mathbf{s} \mathbf{s}^T$
# + slideshow={"slide_type": "fragment"}
# L4: rank-one covariance update from the cumulated path (time constant τ_c).
C = (1.0 - 1.0/τ_c).*C + (s./τ_c)*s'
# + [markdown] slideshow={"slide_type": "slide"}
# Finally, we update the $\sigma$, which is the standard deviation of the distribution we generate in the first step. Note that its initial value was `1.0`.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L5):} \quad
# \mathbf{s}_\sigma
# \leftarrow \left(1-\frac{1}{\tau_\sigma}\right) \mathbf{s}_\sigma
# + \sqrt{\frac{\mu}{\tau_\sigma}
# \left(2-\frac{1}{\tau_\sigma}\right)} \,
# \langle \mathbf{N}(\mathbf{0}, \mathbf{1}) \rangle$
# + [markdown] slideshow={"slide_type": "fragment"}
# $\mbox{(L6):} \quad
# \sigma \leftarrow \sigma\exp\left[
# \frac{\| \mathbf{s}_{\sigma} \|^2 - n}
# {2 n \sqrt{n} }
# \right]$
# + slideshow={"slide_type": "fragment"}
# L5/L6: cumulated step-size adaptation (CSA) — σ grows when consecutive
# steps align and shrinks when they cancel.
ɛ = vec(mean(E[:,idx], dims=2))
s_σ = (1.0 - 1.0/τ_σ)*s_σ + sqrt(μ/τ_σ*(2.0 - 1.0/τ_σ))*ɛ
σ = σ*exp(((s_σ'*s_σ)[1] - N)/(2*N*sqrt(N)))
# + [markdown] slideshow={"slide_type": "fragment"}
# That's a lot! Let's put it all together in an object and see how it runs over multiple iterations.
# + slideshow={"slide_type": "slide"}
# State container for the simplified CMA-ES used in this notebook.
mutable struct CMAES
    N::Int                             # problem dimension
    μ::Int                             # number of selected parents
    λ::Int                             # offspring population size
    τ::Float64 # time constant for updating s
    τ_c::Float64                       # time constant for the covariance update
    τ_σ::Float64                       # time constant for step-size adaptation
    population::Array{Array{Float64}}  # current μ selected individuals
    offspring::Array{Array{Float64}}   # λ candidates of the current generation
    F_μ::Array{Float64}                # fitness of the selected parents
    F_λ::Array{Float64}                # fitness of all offspring
    C::Array{Float64}                  # covariance matrix
    s::Array{Float64}                  # cumulated evolution path
    s_σ::Array{Float64}                # cumulated path for σ adaptation
    σ::Float64                         # global step size
    E::Array{Float64}                  # raw N(0, 1) draws, one column per offspring
    W::Array{Float64}                  # scaled mutation vectors
    x::Array{Float64}                  # center-of-mass individual (the "expert")
end
# + slideshow={"slide_type": "slide"}
# Keyword constructor with standard CMA-ES initial values: random mean,
# identity covariance, zero evolution paths and unit step size.  The time
# constants default to the usual dimension-dependent heuristics.
function CMAES(;N=2, λ=10, μ=1, τ=sqrt(N), τ_c=N^2, τ_σ=sqrt(N))
    x = randn(N)
    # NOTE(review): fill(x, µ) makes all parents alias the *same* vector;
    # harmless here because step! overwrites the entries before reading them.
    population = fill(x, µ)
    offspring = Array{Array{Float64}}(undef, λ)
    F_µ = Inf .* ones(µ)   # fitnesses start at +Inf: nothing evaluated yet
    F_λ = Inf .* ones(λ)
    C = Array(Diagonal{Float64}(I, N))   # identity covariance
    s = zeros(N)
    s_σ = zeros(N)
    σ = 1.0
    E = zeros(N, λ)
    W = zeros(N, λ);
    CMAES(N, μ, λ, τ, τ_c, τ_σ, population, offspring, F_µ, F_λ, C, s, s_σ, σ, E, W, x)
end
# + slideshow={"slide_type": "fragment"}
c = CMAES()
# + slideshow={"slide_type": "slide"}
# Perform one CMA-ES generation on state `c`, mutating it in place.
#
# Steps (numbered as in the lecture equations):
#   L1: sample λ offspring from N(x, σ²C) and evaluate them with `obj`
#   select the μ best offspring as the new parent population
#   L2: move the mean x by the average selected mutation vector
#   L3: update the evolution path s
#   L4: rank-one update of the covariance matrix C
#   L5: update the step-size path s_σ
#   L6: update the global step size σ
#
# When `visualize` is true a contour frame of the current population is
# appended to `anim` (relies on the notebook globals xs, ys, fz).
# Returns the updated state `c`.
function step!(c::CMAES; obj=objective, visualize=false, anim=Nothing)
    # L1: symmetrize C before the Cholesky factorization to absorb numerical
    # asymmetry accumulated by the rank-one updates.
    sqrt_c = cholesky((c.C + c.C') / 2.0).U
    for i in 1:c.λ
        c.E[:,i] = randn(c.N)
        c.W[:,i] = c.σ * (sqrt_c * c.E[:,i])
        c.offspring[i] = c.x + c.W[:,i]
        c.F_λ[i] = obj(c.offspring[i])
    end
    # Select new parent population: indices of the μ best offspring
    idx = sortperm(c.F_λ)[1:c.μ]
    for i in 1:c.μ
        c.population[i] = c.offspring[idx[i]]
        c.F_μ[i] = c.F_λ[idx[i]]
    end
    # L2
    w = vec(mean(c.W[:,idx], dims=2))
    c.x += w
    # L3
    c.s = (1.0 - 1.0/c.τ)*c.s + (sqrt(c.μ/c.τ * (2.0 - 1.0/c.τ))/c.σ)*w
    # L4
    c.C = (1.0 - 1.0/c.τ_c).*c.C + (c.s./c.τ_c)*c.s'
    # L5
    ɛ = vec(mean(c.E[:,idx], dims=2))
    c.s_σ = (1.0 - 1.0/c.τ_σ)*c.s_σ + sqrt(c.μ/c.τ_σ*(2.0 - 1.0/c.τ_σ))*ɛ
    # L6
    c.σ = c.σ*exp(((c.s_σ'*c.s_σ)[1] - c.N)/(2*c.N*sqrt(c.N)))
    if visualize
        plot(xs, ys, fz, st=:contour)
        # BUG FIX: the original iterated `1:λ`, silently relying on a global λ
        # (it failed or misbehaved when the instance's λ differed, e.g. the
        # λ=30 run in plot_obj). Use the instance's own offspring count.
        scatter!([c.offspring[i][1] for i in 1:c.λ], [c.offspring[i][2] for i in 1:c.λ],
                 xlims=(-5, 5), ylims=(-5, 5), legend=:none)
        scatter!([c.x[1]], [c.x[2]], color=:black, marker=:rect,
                 xlims=(-5, 5), ylims=(-5, 5), legend=:none)
        frame(anim)
    end
    c
end
# + slideshow={"slide_type": "slide"}
# Run CMA-ES for 100 generations on the notebook-global `objective`,
# recording every generation as an animation frame; returns the animation.
function plot_obj()
    cma = CMAES(;λ=30, μ=6)
    println("x initial: ", cma.x)
    movie = Animation()
    for generation in 1:100
        record = mod(generation, 1) == 0   # record every generation
        step!(cma, visualize=record, anim=movie)
    end
    println("x final: ", cma.x)
    movie
end
# + slideshow={"slide_type": "slide"}
xs = -5.0:0.1:5.0
ys = -5.0:0.1:5.0
objective = rastrigin # sphere, himmelblau, styblinski_tang, rastrigin
fz(x, y) = objective([x, y])
println(solution) # optimal for sphere and rastrigin
anim = plot_obj()
gif(anim)
# -
using FFMPEG
fn = "tmp.gif"
animdir = anim.dir
palette="palettegen=stats_mode=single[pal],[0:v][pal]paletteuse=new=1"
#ffmpeg_exe(`ffmpeg -v 16 -framerate $fps -i $(anim.dir)/%06d.png -lavfi "$palette" -loop 0 -y $fn`)
run(`ffmpeg -v 16 -framerate 20 -i $(anim.dir)/%06d.png -lavfi "$palette" -loop 0 -y $fn`)
Plots.AnimatedGif(fn)
# + [markdown] slideshow={"slide_type": "slide"}
# <div class="alert alert-success">
# <b>Exercise</b>
# <br/>
# Modify the parent population size and child population size of the problem. Can you get CMA-ES to reliably converge on the Rastrigin function? What about for higher dimensions of $n$? Report your $\mu$ and $\lambda$ values in the class chat.
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# # Breaking down CMA-ES
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="../imgs/cmaes_steps.png" width="80%" height="auto">
# -
# As noted, CMA-ES is a combination of different improvements to evolutionary strategies. [Recent study](https://arxiv.org/pdf/1802.08842.pdf) has shown that evolutionary strategies can scale well to problems of many dimensions by using some of these improvements (rank-based fitness) while leaving out others which are $O(n^2)$ (the covariance matrix).
# <div class="alert alert-success">
# <b>Exercise</b>
# <br/>
# Adapt the $(1,\lambda)$ Evolutionary Strategy to include rank-based fitness. You may use the code from CMA-ES above. Compare the results on the 4 provided fitness functions.
# <div>
wr = zeros(npop)
for i in 1:npop
wr[i] = (log(1+0.5)-log(i)) / (log(1+0.5)-log(1))
end
# +
using Statistics
using LinearAlgebra
using Plots
gr(reuse=true)
s = [3.5, -0.2]
sphere(x::Array{Float64}) = -sum((x .- s).^2)
objective = sphere
xs = -5:0.1:5
ys = -5:0.1:5
fz(x, y) = objective([x, y])
plot(plot(xs, ys, fz, st=:surface), plot(xs, ys, fz, st=:contour), size = (800, 300))
npop = 50 # population size
sigma = 0.1 # noise standard deviation
alpha = 0.001 # step size
x = randn(2) # initial expert
N = randn(npop, 2)
P = repeat(x, 1, npop)' .+ sigma .* N;
R = zeros(npop)
for i in eachindex(R)
R[i] = objective(P[i, :])
end
A = (R .- mean(R)) ./ std(R);
xs = floor(minimum(P[:,1]), digits=1):0.1:ceil(maximum(P[:,1]), digits=1)
ys = floor(minimum(P[:,2]), digits=1):0.1:ceil(maximum(P[:,2]), digits=1)
fz(x, y) = objective([x, y])
plot(xs, ys, fz, st=:contour)
scatter!(P[:, 1], P[:, 2], zcolor=R, legend=:none)
scatter!([x[1]], [x[2]], color=:black, marker=:rect)
(dot(N[:, 1], A) / npop, dot(N[:, 2], A) / npop)
println("Solution :", s)
println("x : ", x)
x = x .+ alpha/(npop * sigma) .* [dot(N[:, i], A) for i in 1:size(N, 2)]
println("x`: ", x)
xs = -5.0:0.1:5.0
ys = -5.0:0.1:5.0
fz(x, y) = objective([x, y]);
# One generation of an evolutionary strategy with rank-based weights
# (exercise version replacing fitness normalization). `alpha` is accepted
# for interface compatibility but unused by the weighted update below.
# Relies on the notebook globals `objective`, `xs`, `ys`, `fz` when
# visualizing. Returns the updated search point as a new array.
function step(x::Array{Float64}; npop=50, sigma=0.1, alpha=0.01, visualize=false, anim=Nothing)
    N = randn(npop, 2)
    P = repeat(x, 1, npop)' .+ sigma .* N
    R = zeros(npop)
    # Initialize rank-based weights (log-decay; rank 1 gets weight 1)
    wr = [(log(1+0.5)-log(i)) / (log(1+0.5)-log(1)) for i in 1:npop]
    for i in eachindex(R)
        R[i] = objective(P[i, :])
    end
    # A = (R .- mean(R)) ./ std(R)
    # Rank-based change: sort noise vectors by fitness and weight by rank.
    # NOTE(review): sortperm is ascending, so rank 1 (largest weight) is the
    # *worst* individual under these maximized objectives — confirm intended.
    idx = sortperm(R)
    fitness_sorted_N = zeros(npop, 2)
    for i in 1:npop
        fitness_sorted_N[i,:] = N[idx[i],:]
    end
    if visualize
        plot(xs, ys, fz, st=:contour)
        scatter!(P[:, 1], P[:, 2], xlims=(-5, 5), ylims=(-5, 5), zcolor=R)
        scatter!([x[1]], [x[2]], legend=:none, color=:black, marker=:rect)
        frame(anim)
    end
    # x .+ alpha/(npop * sigma) .* [dot(N[:, i], A) for i in 1:size(N, 2)]
    x .+ sigma * [dot(fitness_sorted_N[:, i], wr) for i in 1:size(N, 2)]
end
# Run 500 generations of the rank-weighted ES from a random start,
# recording every 10th generation as an animation frame; returns the animation.
function plot_obj()
    x = randn(2)
    println("x initial: ", x)
    anim = Animation()
    for i in 1:500
        v = mod(i, 10) == 0   # visualize every 10th step only
        x = step(x, npop=30, sigma=0.5, alpha=0.06, visualize=v, anim=anim)
    end
    anim
end
# Classic optimization test functions, negated so the ES *maximizes* them
# (global optimum value 0 after negation where applicable).
himmelblau(x::Array{Float64}) = -((x[1]^2 + x[2] - 11)^2 + (x[1] + x[2]^2 - 7)^2)
styblinski_tang(x::Array{Float64}) = -(sum(x.^4 .- 16 .* x.^2 .+ 5 .* x) / 2.0)
# rastrigin is shifted so its optimum sits at the notebook-global `s`
rastrigin(x::Array{Float64}) = -(10.0 * length(x) .+ sum((x .- s).^2 .- 10 .* cos.(2*pi.*(x .- s))))
objective = rastrigin # sphere, himmelblau, styblinski_tang, rastrigin
anim = plot_obj()
gif(anim)
# -
| 5_strategies/2_CMA-ES.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# CRIANDO GRÁFICOS TREEMAP COM PYTHON + SQUARIFY + PLOTLY
# Em visualização de dados e computação, treemap é uma técnica de visualização para representar dados hierárquicos usando retângulos aninhados. Treemaps exibem dados hierárquicos como um conjunto de retângulos aninhados."Wikipédia"
# + id="Wzirvk-XxQZ3"
# Importação de pacotes de análise e visualização
import pandas as pd
import squarify
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="AWjSInROyK7J"
# Carrega o dataset apenas com a coluna "Posição" respeitando a LGPD
conexoes = pd.read_csv ("Connections.csv",usecols=[4])
# + colab={"base_uri": "https://localhost:8080/", "height": 428} id="oxzSamhNySlC" outputId="84dbbf52-7d7f-418a-d86d-bb01d955c2b0"
conexoes.head() # Primeiras linhas
# + id="f4VxysK1zq7W"
# Atribuição das top 16 posições mais populadas na base
df1 = conexoes["Position"].value_counts()\
.to_frame ()\
.reset_index ()\
.head(16)
# + colab={"base_uri": "https://localhost:8080/", "height": 596} id="NB3m5hy3C0wr" outputId="715752a7-7bf5-41c5-8ffa-b0b8dc5e20ed"
df1
# -
# ●TREEMAP COM SQUARIFY
# + colab={"base_uri": "https://localhost:8080/", "height": 318} id="CgaaIjBN1EwD" outputId="0e676d3c-b43e-4c6e-a808-04ec35b7aa79"
plt.figure(figsize = (15,15))
squarify.plot(sizes = df1['Position'], label=df1['index'], alpha=.8 )
plt.axis('off')
plt.savefig("treemap.png",dpi=300,format="png",orientation="portrait",transparent=False,facecolor='w', edgecolor='w',bbox_inches='tight')
plt.show()
# -
# ■TREEMAP COM PLOTLY EXPRESS
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="KNe7Ai9sGMNQ" outputId="e259f6d8-9390-40ce-cd31-8cdffdb1604e"
# Importação dos pacotes plotly
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.io as pio
pio.renderers
pio.renderers.default="notebook"
import plotly.express as px
# +
# cria a figura
fig = px.treemap(df1, #base dataframe
path=['index'], #coluna índice do dataframe
values='Position') #valores das posições
fig.show() #exibe
# -
| DATA_VISUALIZATION/Treemap_LinkedIn_conexoes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1 Overview
# ---
# **Overview**
#
# - [1.0 Chaos, Fractals, and Dynamics](#1.0-Chaos,-Fractals,-and-Dynamics)
# - [1.1 Capsule History of Dynamics](#1.1-Capsule-History-of-Dynamics)
# - [1.2 The Importance of Being Nonlinear](#1.2-The-Importance-of-Being-Nonlinear)
# - [Nonautonomous Systems](#Nonautonomous-Systems)
# - [Why Are Nonlinear Problems So Hard?](#Why-Are-Nonlinear-Problems-So-Hard?)
# - [1.3 A Dynamical View of the World](#1.3-A-Dynamical-View-of-the-World)
# ## 01.00. Chaos, Fractals, and Dynamics
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
    """Rectangle that snaps itself to an Axes' view limits when called.

    Instances are registered as ``xlim_changed``/``ylim_changed`` callbacks:
    matplotlib passes the Axes whose limits changed, and the rectangle
    resizes to the new view bounds and requests a redraw.
    """
    def __call__(self, ax):
        self.set_bounds(*ax.viewLim.bounds)
        ax.figure.canvas.draw_idle()
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandelbrotDisplay(object):
    """Regenerate a Mandelbrot escape-time image for the current zoom level.

    An instance is connected to the Axes' limit-change callbacks; every zoom
    recomputes the fractal at the on-screen pixel resolution, so detail keeps
    increasing as you zoom in.
    """

    def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
        self.height = h       # image height in pixels
        self.width = w        # image width in pixels
        self.niter = niter    # maximum number of escape-time iterations
        self.radius = radius  # escape radius for |z|
        self.power = power    # exponent in the iteration z -> z**power + c

    def __call__(self, xstart, xend, ystart, yend):
        """Return the escape-time image for the given complex-plane window."""
        self.x = np.linspace(xstart, xend, self.width)
        self.y = np.linspace(ystart, yend, self.height).reshape(-1, 1)
        c = self.x + 1.0j * self.y
        threshold_time = np.zeros((self.height, self.width))
        z = np.zeros(threshold_time.shape, dtype=complex)
        mask = np.ones(threshold_time.shape, dtype=bool)
        for i in range(self.niter):
            # Only iterate points that have not escaped yet.
            z[mask] = z[mask]**self.power + c[mask]
            mask = (np.abs(z) < self.radius)
            threshold_time += mask
        return threshold_time

    def ax_update(self, ax):
        """Limit-change callback: re-render at the Axes' pixel resolution."""
        ax.set_autoscale_on(False)  # Otherwise, infinite loop
        # Get the number of points from the number of pixels in the window
        dims = ax.patch.get_window_extent().bounds
        self.width = int(dims[2] + 0.5)
        # BUG FIX: the original used dims[2] (the width) for the height too,
        # distorting the image on non-square Axes; dims[3] is the height.
        self.height = int(dims[3] + 0.5)
        # Get the range for the new area
        xstart, ystart, xdelta, ydelta = ax.viewLim.bounds
        xend = xstart + xdelta
        yend = ystart + ydelta
        # Update the image object with our new data and extent
        im = ax.images[-1]
        im.set_data(self.__call__(xstart, xend, ystart, yend))
        im.set_extent((xstart, xend, ystart, yend))
        ax.figure.canvas.draw_idle()
md = MandelbrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig1, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black', linewidth=1.0)
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
ax2.set_title("Zoom here")
plt.show()
# -
def Lorenz(x_y_z, t0=0.0, sigma=10.0, beta=8.0/3.0, rho=28.0):
    """Time-derivative of the Lorenz system at state ``x_y_z``.

    BUG FIX: the original cell was an unfinished stub (``for :`` / bare ``x``)
    that did not parse. Implemented to match ``lorenz_deriv`` inside
    ``solve_lorenz`` below, with the classic chaotic parameter defaults.

    Parameters
    ----------
    x_y_z : sequence of three floats, the state (x, y, z)
    t0 : time (unused; kept for ODE-solver call signatures)
    sigma, beta, rho : Lorenz system parameters

    Returns
    -------
    list of the three derivatives [dx/dt, dy/dt, dz/dt]
    """
    x, y, z = x_y_z
    return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# +
# %matplotlib inline
from ipywidgets import interact, interactive
from IPython.display import clear_output, display, HTML
import numpy as np
from scipy import integrate
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
from matplotlib import animation
# -
def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0):
    """Integrate and plot N random Lorenz trajectories in 3D.

    Parameters
    ----------
    N : number of trajectories (random initial conditions in [-15, 15]^3)
    angle : azimuthal view angle of the 3D plot, in degrees
    max_time : integration time span
    sigma, beta, rho : Lorenz parameters (defaults are the classic chaotic set)

    Returns
    -------
    (t, x_t) : time grid and array of shape (N, len(t), 3) with the
    integrated trajectories.
    """
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], projection='3d')
    ax.axis('off')
    # prepare the axes limits (fixed so the attractor fills the view)
    ax.set_xlim((-25, 25))
    ax.set_ylim((-35, 35))
    ax.set_zlim((5, 55))
    def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho):
        """Compute the time-derivative of a Lorenz system."""
        x, y, z = x_y_z
        return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
    # Choose random starting points, uniformly distributed from -15 to 15;
    # the fixed seed makes the drawn trajectories reproducible across runs.
    np.random.seed(1)
    x0 = -15 + 30 * np.random.random((N, 3))
    # Solve for the trajectories (250 samples per unit of time)
    t = np.linspace(0, max_time, int(250*max_time))
    x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t)
                      for x0i in x0])
    # choose a different color for each trajectory
    colors = plt.cm.viridis(np.linspace(0, 1, N))
    for i in range(N):
        x, y, z = x_t[i,:,:].T
        lines = ax.plot(x, y, z, '-', c=colors[i])
        plt.setp(lines, linewidth=2)
    ax.view_init(30, angle)
    plt.show()
    return t, x_t
t, x_t = solve_lorenz(angle=0, N=10)
w = interactive(solve_lorenz, angle=(0.,360.), max_time=(0.1, 4.0),
N=(0,50), sigma=(0.0,50.0), rho=(0.0,50.0))
display(w)
# ## 01.01. Capsule History of Dynamics
# ---
# ## 01.02. The Importance of Being Nonlinear
# ---
# $$ m \frac{d^2 x}{dt^2} + b \frac{dx}{dt} + kx = 0 \tag{1} $$
#
# $$ \frac{\partial u}{\partial t} = \frac{\partial ^2 u}{\partial x^2} $$
#
# $$
# \begin{align*}
# \dot{x}_1 &= f_1(x_1, \cdots , x_n) \\
# & \vdots \\
# \dot{x}_n &= f_n(x_1, \cdots , x_n)
# \end{align*}
# \tag{2}
# $$
#
# $$\dot{x}_1 \equiv \frac{dx_i}{dt}$$
#
# $$
# \begin{align*}
# \dot{x}_2
# &= \ddot{x} = - \frac{b}{m} \dot{x} - \frac{k}{m} x \\
# &= - \frac{b}{m} x_2 - \frac{k}{m} x_1
# \end{align*}
# $$
#
#
# $$
# \begin{cases}
# \dot{x}_1 &= x_2 \\
# \dot{x}_2 &= - \frac{b}{m} x_2 - \frac{k}{m} x_1
# \end{cases}
# $$
# **nonlinear**
#
# e.g., pendulum
#
# $$
# \begin{cases}
# \dot{x}_1 = x_2 \\
# \dot{x}_2 = - \frac{g}{L} \sin(x_1)
# \end{cases}
# $$
#
# - $x$
# - $g$: gravity acceleration
# - $L$: length
# ### 01.02.01. Nonautonomous Systems
# **forced harmonic oscillator**
#
# $$ m\ddot{x} + b\dot{x} + kx = F \cos t $$
#
# let:
# - $x_1 = x$
# - $x_2 = \dot{x}$
# - $x_3 = t$, then $\dot{x}_3 = 1$
#
# $$
# \begin{cases}
# \dot{x}_1 = x_2 \\
# \dot{x}_2 = \frac{1}{m} (-kx_1 - bx_2 + F\cos x_3) \\
# \dot{x}_3 = 1
# \end{cases}
# \tag{3}
# $$
# ### 01.02.02. Why Are Nonlinear Problems So Hard?
# ## 01.03. A Dynamical View of the World
# | | n=1 | n=2 | n≥3 | n >>1 | Continuum |
# |-----------|-----|-----|-----|-------|-----------|
# | Linear | **Growth, decay, equilibrilium** | **Oscillation** | | **Collective phenomenon** | **Waves & pattern** |
# | Nonlinear | | | **Chaos** | | **Spatio-temporal complexity** |
# ---
# **Overview**
#
# - [1.0 Chaos, Fractals, and Dynamics](#1.0-Chaos,-Fractals,-and-Dynamics)
# - [1.1 Capsule History of Dynamics](#1.1-Capsule-History-of-Dynamics)
# - [1.2 The Importance of Being Nonlinear](#1.2-The-Importance-of-Being-Nonlinear)
# - [Nonautonomous Systems](#Nonautonomous-Systems)
# - [Why Are Nonlinear Problems So Hard?](#Why-Are-Nonlinear-Problems-So-Hard?)
# - [1.3 A Dynamical View of the World](#1.3-A-Dynamical-View-of-the-World)
| 01/note.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial 6: Lens Modeling
# =========================
#
# When modeling complex source's with parametric profiles, we quickly entered a regime where our `NonLinearSearch` was
# faced with a parameter space of dimensionality N=30+ parameters. This made the model-fitting inefficient, and very
# likely to infer a local maxima.
#
# Because `Inversion`'s are linear, they don't suffer this problem, making them a very powerful tool for modeling
# strong lenses. Furthermore, they have *more* freedom than parametric profiles, not relying on specific analytic
# light distributions and symmetric profile shapes, allowing us to fit more complex mass models and ask ever more
# interesting scientific questions!
#
# However, `Inversion`s have some shortcomings that we need to be aware of before we begin using them for lens modeling.
# That's what we are going to cover in this tutorial.
# +
# %matplotlib inline
from pyprojroot import here
workspace_path = str(here())
# %cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
# -
# we'll use the same strong lensing data as the previous tutorial, where:
#
# - The lens `Galaxy`'s light is omitted.
# - The lens `Galaxy`'s total mass distribution is an `EllipticalIsothermal`.
# - The source `Galaxy`'s `LightProfile` is an `EllipticalSersic`.
# +
dataset_name = "mass_sie__source_sersic__2"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.05,
)
mask = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.5
)
aplt.Imaging.subplot_imaging(imaging=imaging, mask=mask)
# -
# This function fits the `Imaging` data with a `Tracer`, returning a `FitImaging` object.
def perform_fit_with_lens__source_galaxy(imaging, lens_galaxy, source_galaxy):
    """Fit `imaging` with a tracer built from the lens and source galaxies.

    The data is masked with a fixed circular annulus (0.5" to 2.2") at
    sub_size=1, and the resulting `FitImaging` object is returned.
    """
    annular_mask = al.Mask2D.circular_annular(
        shape_2d=imaging.shape_2d,
        pixel_scales=imaging.pixel_scales,
        sub_size=1,
        inner_radius=0.5,
        outer_radius=2.2,
    )
    masked_data = al.MaskedImaging(
        imaging=imaging,
        mask=annular_mask,
        settings=al.SettingsMaskedImaging(sub_size=1),
    )
    ray_tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
    return al.FitImaging(masked_imaging=masked_data, tracer=ray_tracer)
# To see the shortcomings of an `Inversion`, we begin by performing a fit where the lens galaxy has an incorrect
# mass-model (I've reduced its Einstein Radius from 1.6 to 0.8). This is the sort of mass model the non-linear search
# might sample at the beginning of a model-fit.
# +
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=0.8
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(shape=(20, 20)),
regularization=al.reg.Constant(coefficient=1.0),
)
fit = perform_fit_with_lens__source_galaxy(
imaging=imaging, lens_galaxy=lens_galaxy, source_galaxy=source_galaxy
)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
aplt.FitImaging.subplot_of_plane(
fit=fit, plane_index=1, include=aplt.Include(mask=True)
)
# -
# What happened!? This incorrect mass-model provides a really good fit to the image! The residuals and chi-squared-map
# are as good as the ones we saw in the last tutorial.
#
# How can an incorrect lens model provide such a fit? Well, as I'm sure you noticed, the source has been reconstructed
# as a demagnified version of the image. Clearly, this isn't a physical solution or a solution that we want our
# non-linear search to find, but for `Inversion`'s these solutions are real; they exist.
#
# This isn't necessarily problematic for lens modeling. Afterall, the source reconstruction above is extremely complex,
# in that it requires a lot of pixels to fit the image accurately. Indeed, its Bayesian log evidence is much lower than
# the correct solution.
# +
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(shape=(20, 20)),
regularization=al.reg.Constant(coefficient=1.0),
)
correct_fit = perform_fit_with_lens__source_galaxy(
imaging=imaging, lens_galaxy=lens_galaxy, source_galaxy=source_galaxy
)
aplt.FitImaging.subplot_fit_imaging(fit=correct_fit, include=aplt.Include(mask=True))
aplt.FitImaging.subplot_of_plane(
fit=fit, plane_index=1, include=aplt.Include(mask=True)
)
print("Bayesian Evidence of Incorrect Fit:")
print(fit.log_evidence)
print("Bayesian Evidence of Correct Fit:")
print(correct_fit.log_evidence)
# -
# The log evidence *is* lower. However, the difference in log evidence isn't *that large*. This is going to be a problem
# for the non-linear search, as it's going to see *a lot* of solutions with really high log evidence values. Furthermore,
# these solutions occupy a *large volume* of parameter space (e.g. everywhere the lens model is wrong). This makes
# it easy for the `NonLinearSearch` to get lost searching through these unphysical solutions and, unfortunately, infer an
# incorrect lens model (e.g. a local maxima).
#
# There is no simple fix for this. The reality is that for an `Inversion` these solutions exist. This is how phase
# linking and pipelines were initially conceived, they offer a simple solution to this problem. We write a pipeline that
# begins by modeling the source galaxy as a `LightProfile`, `initializing` our lens mass model. Then, when we switch to
# an `Inversion` in the next phase, our mass model starts in the correct regions of parameter space and doesn`t get lost
# sampling these incorrect solutions.
#
# It's not ideal, but it's also not a big problem. Furthermore, `LightProfile`'s run faster computationally than
# `Inversion`'s, so breaking down the lens modeling procedure in this way is actually a lot faster than starting with an
# `Inversion` anyway!
# Okay, so we've covered incorrect solutions, lets end by noting that we can model profiles and inversions at the same
# time. We do this when we want to simultaneously fit and subtract the light of a lens galaxy and reconstruct its lensed
# source using an `Inversion`. To do this, all we have to do is give the lens galaxy a `LightProfile`.
# +
dataset_name = "light_sersic__mass_sie__source_sersic"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.05,
)
mask = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.5
)
aplt.Imaging.subplot_imaging(imaging=imaging, mask=mask)
aplt.FitImaging.subplot_of_plane(
fit=fit, plane_index=1, include=aplt.Include(mask=True)
)
# -
# When fitting such an image we now want to include the lens`s light in the analysis. Lets update our mask to be
# circular so that it includes the central regions of the image and lens galaxy.
mask = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=2, radius=2.5
)
# As I said above, performing this fit is the same as usual, we just give the lens galaxy a `LightProfile`.
lens_galaxy = al.Galaxy(
redshift=0.5,
bulge=al.lp.SphericalSersic(
centre=(0.0, 0.0), intensity=0.2, effective_radius=0.8, sersic_index=4.0
),
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
# These are all the usual things we do when setting up a fit.
# +
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.Rectangular(shape=(20, 20)),
regularization=al.reg.Constant(coefficient=1.0),
)
masked_imaging = al.MaskedImaging(
imaging=imaging, mask=mask, settings=al.SettingsMaskedImaging(sub_size=2)
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
# -
# This fit now subtracts the lens `Galaxy`'s light from the image and fits the resulting source-only image with the
# `Inversion`. When we plot the image, a new panel on the sub-plot appears showing the model image of the lens galaxy.
# +
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
aplt.FitImaging.subplot_of_plane(
fit=fit, plane_index=1, include=aplt.Include(mask=True)
)
# -
# Of course if the lens subtraction is rubbish so is our fit, so we can be sure that our lens model wants to fit the
# lens `Galaxy`'s light accurately (below, I've increased the lens galaxy intensity from 0.2 to 0.3).
# +
lens_galaxy = al.Galaxy(
redshift=0.5,
bulge=al.lp.SphericalSersic(
centre=(0.0, 0.0), intensity=0.3, effective_radius=0.8, sersic_index=4.0
),
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
aplt.FitImaging.subplot_of_plane(
fit=fit, plane_index=1, include=aplt.Include(mask=True)
)
# -
# And with that, we're done. Finally, I'll point out a few things about what we've covered to get you thinking about
# the next tutorial on adaption.
#
# - The unphysical solutions above are clearly problematic. Whilst they have lower Bayesian evidences, their existence
# will still impact our inferred lens model. However, the `Pixelization`'s that we used in this chapter do not
# adapt to the images they are fitting, meaning the correct solutions achieve much lower Bayesian log evidence
# values than is actually possible. Thus, once we've covered adaption, these issues will be resolved!
#
# - When the lens `Galaxy`'s light is subtracted perfectly it leaves no residuals. However, if it isn't subtracted
# perfectly it does leave residuals, which will be fitted by the `Inversion`. If the residual are significant this is
# going to mess with our source reconstruction and can lead to some pretty nasty systematics. In the next chapter,
# we'll learn how our adaptive analysis can prevent this residual fitting.
| howtolens/chapter_4_inversions/tutorial_6_lens_modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import ROOT
import root_numpy
import matplotlib.pyplot as plt
PATH = "/root/minibias2.root"
import warnings
warnings.filterwarnings('ignore')
import json
# -
import os.path as osp
with open("./branches_photon.config") as f:
features = json.load(f)
features['per_event']
features['per_lumisection']
# +
def get_index(leaves, indxes):
    """Produce branch names with explicit element indexes.

    For every leaf name and every index (leaf-major order), returns strings
    of the form ``"<leaf>[<index>]"``.
    """
    names = []
    for leaf in leaves:
        for index in indxes:
            names.append("%s[%d]" % (leaf, index))
    return names
def split_by_events(data_root, leaves, batch_size, test_leaf = 0):
    """
    Turns data from root numpy into matrix <N events> by <number of features>.

    Each entry of `data_root` holds len(leaves) * batch_size flat values,
    reshaped here to one (n particles, n features) matrix per event.
    Particles whose `test_leaf` column (usually pt - momentum) is exact float
    zero do not exist in the original root file and are truncated; this is
    sanity-checked by asserting all their other features are exact zero too.

    Returns (need_another_batch, events): the flag is True when some event
    filled the whole batch, i.e. it may have been read incompletely
    (number of particles > batch_size).
    """
    events = list()
    # COMPAT FIX: `range` instead of `xrange` -- identical behavior under
    # Python 2 and keeps the helper importable under Python 3.
    for event_i in range(data_root.shape[0]):
        event = data_root[event_i]
        d = np.array([ event[i] for i in range(len(leaves) * batch_size) ]).reshape(len(leaves), -1).T
        event_idx = d[:, test_leaf] > 0.0
        ### other features must be exact zero also
        assert np.all(d[np.logical_not(event_idx), :] == 0.0)
        events.append(d[event_idx, :])
    need_another_batch = np.any([
        event.shape[0] == batch_size for event in events
    ])
    return need_another_batch, events
def read_batch(path, treename, leaves, batch_size, each = 1, test_leaf = 0):
    """
    Read particle leaves from a root file, `batch_size` indexes at a time
    (subsampled every `each`), until no event fills a whole batch.

    Returns a list with one (n particles, n leaves) matrix per event,
    stacking the per-batch pieces of each event vertically.
    """
    event_batches = None
    need_another_batch = True
    batch_offset = 0
    while need_another_batch:
        branches = get_index(leaves, np.arange(batch_size)[::each] + batch_offset)
        # BUG FIX: the original hard-coded treename='Events' here, silently
        # ignoring the `treename` argument.
        data_root = root_numpy.root2array(path, treename=treename, branches=branches, )
        # BUG FIX: the original passed test_leaf = 0, silently ignoring the
        # `test_leaf` argument.
        need_another_batch, events = split_by_events(data_root, leaves, batch_size / each, test_leaf = test_leaf)
        batch_offset += batch_size
        if event_batches is None:
            event_batches = [ [event] for event in events ]
        else:
            # each batch must contribute to exactly the same set of events
            assert len(event_batches) == len(events)
            event_batches = [
                batches + [batch] for batches, batch in zip(event_batches, events)
            ]
    return [ np.vstack(event) for event in event_batches ]
def read_lumidata(path, lumifeatures):
    """Read per-lumisection scalar branches into a DataFrame (one row/event).

    Column names are the last dot-component of each branch name; the run and
    luminosityBlock identifiers are cast to int64 for later joins.
    """
    names = [ f.split('.')[-1] for f in lumifeatures ]
    lumidata = root_numpy.root2array(path, treename='Events', branches=lumifeatures)
    lumi = np.zeros(shape=(lumidata.shape[0], len(lumifeatures)))
    # copy the structured-array records into a plain float matrix
    for i in xrange(lumidata.shape[0]):
        lumi[i, :] = np.array([ lumidata[i][j] for j in range(len(lumifeatures)) ])
    lumi = pd.DataFrame(lumi, columns=names)
    lumi['luminosityBlock_'] = lumi['luminosityBlock_'].astype('int64')
    lumi['run_'] = lumi['run_'].astype('int64')
    return lumi
def read_lumisection(path, features):
    """Read one file's lumisection metadata and per-event particle batches.

    `features` follows branches_photon.config: 'per_lumisection' lists scalar
    branches; 'per_event' maps each particle category to its branches, batch
    size and subsampling step.  Returns (lumi DataFrame, {category: events}).
    """
    lumi = read_lumidata(path, features['per_lumisection'])
    events = dict()
    for category in features['per_event']:
        fs = features['per_event'][category]['branches']
        read_each = features['per_event'][category]['read_each']
        batch_size = features['per_event'][category]['batch']
        # the batch must be a strict, exact multiple of the subsampling step
        assert batch_size > read_each
        assert batch_size % read_each == 0
        events[category] = read_batch(path, treename='Events', leaves=fs,
                                      batch_size=batch_size, each=read_each, test_leaf=0)
    return lumi, events
# +
def get_percentile_paticles(event, n = 3, test_feature = 0):
    """Pick `n` representative particles from an event, sorted by
    `test_feature` (usually pt): n-1 evenly spaced quantile rows plus the
    maximal one.

    Events with fewer than `n` particles are padded with all-zero rows at
    the front so the result always has shape (n, n features).
    """
    sort_idx = np.argsort(event[:, test_feature])
    event = event[sort_idx, :]
    if event.shape[0] >= n:
        ### preserving the last event with maximal momentum (test_feature)
        # COMPAT FIX: explicit floor division -- under Python 3 the original
        # `/` produced float indexes and raised a TypeError; under Python 2
        # `//` is identical for ints.  Likewise range instead of xrange.
        fetch_idx = [i * (event.shape[0] // n) for i in range(n-1)] + [event.shape[0] - 1]
        return event[fetch_idx, :]
    else:
        missing = n - event.shape[0]
        return np.vstack([
            np.zeros(shape=(missing, event.shape[1])),
            event
        ])
def integrate(event):
    """Sum an approximate momentum vector over the particles of an event.

    Columns are assumed to be (pt, eta, phi).  NOTE(review): the component
    formulas treat pt as if it were the *total* momentum (conventionally
    px = pt*cos(phi), py = pt*sin(phi), pz = pt*sinh(eta)); kept as-is to
    preserve the downstream features, but confirm intent before relying on
    the absolute values.

    Returns zeros(3) when the event cannot be interpreted (best-effort).
    """
    try:
        pt = event[:, 0]
        eta = event[:, 1]
        phi = event[:, 2]
        theta = 2.0 * np.arctan(np.exp(-eta))
        px = np.sum(pt * np.cos(theta))
        py = np.sum(pt * np.sin(theta) * np.cos(phi))
        pz = np.sum(pt * np.sin(theta) * np.sin(phi))
        return np.array([px, py, pz])
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; malformed events still fall back to zero
        return np.zeros(shape=3)
def process_channel(channel, branches, prefix = "", n = 3, test_feature=0):
    """Flatten one particle category into a per-event feature DataFrame.

    For every event: the n representative particles (see
    get_percentile_paticles) flattened into columns named
    '<prefix>_<branch>_q<k>', plus summed momentum components
    '<prefix>_P{x,y,z}'.
    """
    selected = np.array([
        get_percentile_paticles(event, n = n, test_feature = test_feature).flatten()
        for event in channel
    ]).astype('float32')
    # NOTE(review): momentum is summed over the event's first 3 rows as
    # stored (not the percentile-selected particles) -- confirm intended.
    total_momentum = np.array([
        integrate(event[:3, :])
        for event in channel
    ]).astype('float32')
    branch_names = [ prefix + '_' + branch.split(".")[-1] for branch in branches ]
    feature_names = [
        "%s_q%d" % (branch, q + 1) for q in range(n) for branch in branch_names
    ] + [
        prefix + '_' + "P%s" % component for component in list("xyz")
    ]
    df = pd.DataFrame(np.hstack([selected, total_momentum]), columns = feature_names)
    return df
# -
def process(data, features, lumidata, n = 3, test_feature = 0):
    """Combine lumisection metadata and all particle categories column-wise.

    Each category in `data` is flattened with process_channel and appended to
    `lumidata`; `names` rebuilds the column labels that
    concat(ignore_index=True) discards.
    """
    channels = list()
    names = list(lumidata.columns)
    for category in data:
        d = process_channel(data[category], features[category]['branches'],
                            prefix = category, n = n, test_feature = test_feature)
        names += list(d.columns)
        channels.append(d)
    df = pd.concat([lumidata] + channels, axis=1, names = names, ignore_index=True)
    # restore the readable column labels dropped by ignore_index=True
    df.columns = names
    return df
for cat in events:
plt.hist([ event.shape[0] for event in events[cat] ])
plt.title(cat)
plt.show()
c = process(events, features['per_event'], lumidata)
c.to_pickle("c.pickled")
c
# +
# %%time
lumidata, events = read_lumisection("root://eospublic.cern.ch//eos/opendata/cms/Run2010B/Photon/AOD/Apr21ReReco-v1/0000/041D347A-D271-E011-908B-0017A477003C.root", features)
# +
# %%time
#data = read_lumisection("../../minibias2.root", features)
# +
import cPickle
with open('minibias2.pickled', 'w') as f:
cPickle.dump(a, f)
# -
for category in a['events']:
plt.hist( [ x.shape[0] for x in a['events'][category] ], bins=20)
plt.title(category)
plt.show()
| data-extraction/CMS root to csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 1
# ! pip install pylint
# +
# %%writefile primeNumber.py
'''declaring primenumber function'''
def prime_number(num):
    '''Return num if it is prime, otherwise None.

    Uses trial division over 2..num-1: composites hit a divisor and yield
    None; primes fall through the loop and are returned unchanged.
    '''
    if num < 2:
        # 0, 1 and negatives are not prime; the original returned 1 for num=1
        # because the empty range(2, 1) loop fell straight into the else.
        return None
    for i in range(2, num):
        if num % i == 0:
            # found a divisor -> composite
            return None
    return num
# -
# !pylint primeNumber.py
# +
# Import the module written by the %%writefile cell above and smoke-test it.
import primeNumber
primeNumber.prime_number(17)  # 17 is prime, so the call should echo it back
# +
# %%writefile testPrimeNumber.py
import unittest
import primeNumber

class testPrime(unittest.TestCase):
    '''Unit tests for primeNumber.prime_number.'''

    def testOne(self):
        # 17 is prime: the function returns the number itself.
        temp = 17
        result = primeNumber.prime_number(temp)
        self.assertEqual(result, 17)

    def testTwo(self):
        # 21 = 3 * 7 is composite: the function returns None.
        # (The original asserted 21 here, which contradicts prime_number's
        # contract and made this test always fail.)
        temp = 21
        result = primeNumber.prime_number(temp)
        self.assertIsNone(result)

if __name__ == "__main__":
    unittest.main()
# -
# !python testPrimeNumber.py
# # Assignment 2
# +
#generator function
def armStrongGen(lst):
    '''Yield each number in lst that equals the sum of the cubes of its
    digits (Armstrong / narcissistic numbers for exponent 3).'''
    for item in lst:
        cube_sum = 0          # renamed from `sum` to stop shadowing the builtin
        remaining = item
        while remaining > 0:
            cube_sum += (remaining % 10) ** 3
            remaining //= 10
        if cube_sum == item:
            yield item

# main
lst = list(range(1, 1000))
print(list(armStrongGen(lst)))
| Day 9 Assignment/Day 9 Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Mock Spectra
#
# Attempting to make synthetic spectra look real.
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import urllib
# We will try to use `urllib` to pull synthetic spectra from the Phoenix model atmosphere server. This way, we can avoid storing large raw spectrum files and keep only the processed spectra. To make life easier, we should define a function to spit out a Phoenix spectrum file URL given a set of input parameters.
def phoenixFileURL(Teff, logg, FeH=0.0, aFe=0.0, brand='BT-Settl', solar_abund='CIFIST2011_2015'):
    """ Create file name for a Phoenix synthetic spectrum """
    # The Phoenix grid is sampled every 100 K, so reject off-grid temperatures.
    if Teff % 100.0 != 0.0:
        raise ValueError('Invalid temperature request for Phoenix server.')
    # log(g) is gridded in 0.5 dex steps from -0.5 to 5.5 (half-steps are
    # exactly representable floats, so membership testing is safe here).
    allowed_logg = np.arange(-0.5, 5.6, 0.5)
    if logg not in allowed_logg:
        raise ValueError('Invalid log(g) request for Phoenix server.')
    base_url = 'https://phoenix.ens-lyon.fr/Grids/{:s}/{:s}/SPECTRA'.format(brand, solar_abund)
    spec_file = 'lte{:05.1f}{:+4.1f}-{:3.1f}a{:+3.1f}.{:s}.spec.7.xz'.format(
        Teff/100.0, -1.0*logg, FeH, aFe, brand)
    return base_url, spec_file
# Testing the Phoenix URL and file name resolver to ensure proper URL request in `urllib`.
phoenixFileURL(3000.0, 5.0)  # quick check of the resolver's output
# Now try requesting the file from the Phoenix server (note: need internet access)
addr, filename = phoenixFileURL(3000.0, 5.0)
# NOTE(review): urllib.urlretrieve is the Python 2 API (this notebook runs a
# Python 2 kernel); on Python 3 this is urllib.request.urlretrieve.
urllib.urlretrieve('{0}/{1}'.format(addr, filename), filename)
# Load the (already unxz'd, D->E-fixed) two-column spectrum: wavelength, flux.
spectrum = np.genfromtxt('spectra/{0}'.format(filename[:-3]), usecols=(0, 1))
# Great. So, now we have properly pulled the data to the local directory structure. However, there are several complications that need to get figured out.
# 1. Data must be saved to a temporary file.
# 2. Data must be unziped (unxz).
# 3. All instances of Fortran doubles must be converted from D exponentials to E.
#
# Now, we must trim the file as there is a significant amount of data that we don't need: everything below 3000 Å and above 4.0 microns.
spectrum = np.array([line for line in spectrum if 3000.0 <= line[0] <= 40000.0])
# Fluxes are stored as log10; shift by -8 (unit conversion — TODO confirm) and
# exponentiate back to linear flux.
spectrum[:, 1] = 10.0**(spectrum[:, 1] - 8.0)
# Let's take a look at part of the raw spectrum, say the optical.
# +
# Plot the raw, full-resolution synthetic spectrum over the optical window.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.set_xlabel('Wavelength ($\\AA$)', fontsize=20.0)
ax.set_ylabel('Flux', fontsize=20.0)
ax.tick_params(which='major', axis='both', length=10., labelsize=16.)
ax.set_xlim(5000., 7000.)
ax.plot(spectrum[:,0], spectrum[:,1], '-', color='#800000')
# -
# Of course, this is too high of resolution to be passable as a real spectrum. Two things need to happen: we need to degrade the resolution and add noise. To degrade the resolution, we'll convolve the spectrum with a Gaussian kernel whose FWHM is equal to the desired spectral resolution.
# +
fwhm = 2.5 # R ~ 1000 at 5000 Å
# NOTE(review): `fwhm` is used directly as the Gaussian sigma below, but a
# true FWHM relates to sigma by FWHM = 2*sqrt(2 ln 2)*sigma ≈ 2.355*sigma —
# confirm whether this broader kernel is intentional.
domain = np.arange(-5.0*fwhm, 5.0*fwhm, 0.02) # note: must have same spacing as spectrum
window = np.exp(-0.5*(domain/fwhm)**2)/np.sqrt(2.0*np.pi*fwhm**2)
# visualize the window function (Kernel)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.set_xlabel('$\\Delta\\lambda$ ($\\AA$)', fontsize=20.0)
ax.set_ylabel('Window', fontsize=20.0)
ax.tick_params(which='major', axis='both', length=10., labelsize=16.)
ax.plot(domain, window, '-', lw=2, color='#1e90ff')
# -
# Finally, we convolve the Gaussian kernel with the original spectrum, being careful to preserve the shape of the original spectrum.
# mode='same' keeps the output length equal to the input spectrum's length.
degraded = np.convolve(spectrum[:, 1], window, mode='same')
# +
# Degraded-resolution spectrum over the same optical window (flux rescaled
# by an ad-hoc factor purely for display).
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.set_xlabel('Wavelength ($\\AA$)', fontsize=20.0)
ax.set_ylabel('Flux', fontsize=20.0)
ax.tick_params(which='major', axis='both', length=10., labelsize=16.)
ax.set_xlim(5000., 7000.)
ax.set_ylim(0.0, 0.5)
ax.plot(spectrum[:,0], degraded/1.e7, '-', lw=2, color='#800000')
# -
# For comparison we can load an SDSS template of and M3 star, which is presumably warmer than the spectrum created here.
sdss_template = np.genfromtxt('../../../Projects/BlindSpot/spectra/tmp/SDSS_DR2_M3.template', usecols=(0, 1))
# +
# Overlay: SDSS M3 template (grey, offset for visibility) vs the degraded
# synthetic spectrum (dark red).
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.set_xlabel('Wavelength ($\\AA$)', fontsize=20.0)
ax.set_ylabel('Flux', fontsize=20.0)
ax.tick_params(which='major', axis='both', length=10., labelsize=16.)
ax.set_xlim(5000., 7000.)
ax.set_ylim(0., 10.)
ax.plot(sdss_template[:, 0], sdss_template[:, 1]/10. + 3.0, '-', lw=2, color='#444444')
ax.plot(spectrum[:,0], degraded/1.e6, '-', lw=2, color='#800000')
# -
#
| Projects/BlindSpot/synthetic_spectra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Capstone Project: Create a Customer Segmentation Report for Arvato Financial Services
#
# In this project, you will analyze demographics data for customers of a mail-order sales company in Germany, comparing it against demographics information for the general population. You'll use unsupervised learning techniques to perform customer segmentation, identifying the parts of the population that best describe the core customer base of the company. Then, you'll apply what you've learned on a third dataset with demographics information for targets of a marketing campaign for the company, and use a model to predict which individuals are most likely to convert into becoming customers for the company. The data that you will use has been provided by our partners at Bertelsmann Arvato Analytics, and represents a real-life data science task.
#
# If you completed the first term of this program, you will be familiar with the first part of this project, from the unsupervised learning project. The versions of those two datasets used in this project will include many more features and has not been pre-cleaned. You are also free to choose whatever approach you'd like to analyzing the data rather than follow pre-determined steps. In your work on this project, make sure that you carefully document your steps and decisions, since your main deliverable for this project will be a blog post reporting your findings.
# ### Importing Libraries
# +
# import libraries here; add more as necessary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#imports to help me plot my venn diagrams
import matplotlib_venn as venn2
from matplotlib_venn import venn2
from pylab import rcParams
# import the util.py file where I define my functions
from utils import *
# sklearn
from sklearn.preprocessing import StandardScaler, Imputer, RobustScaler, MinMaxScaler, OneHotEncoder
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import confusion_matrix,precision_recall_fscore_support
from sklearn.utils.multiclass import unique_labels
from sklearn.linear_model import LinearRegression
# magic word for producing visualizations in notebook
# %matplotlib inline
# -
# ## Part 0: Get to Know the Data
#
# There are four data files associated with this project:
#
# - `Udacity_AZDIAS_052018.csv`: Demographics data for the general population of Germany; 891 211 persons (rows) x 366 features (columns).
# - `Udacity_CUSTOMERS_052018.csv`: Demographics data for customers of a mail-order company; 191 652 persons (rows) x 369 features (columns).
# - `Udacity_MAILOUT_052018_TRAIN.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 982 persons (rows) x 367 (columns).
# - `Udacity_MAILOUT_052018_TEST.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 833 persons (rows) x 366 (columns).
#
# Each row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. Use the information from the first two files to figure out how customers ("CUSTOMERS") are similar to or differ from the general population at large ("AZDIAS"), then use your analysis to make predictions on the other two files ("MAILOUT"), predicting which recipients are most likely to become a customer for the mail-order company.
#
# The "CUSTOMERS" file contains three extra columns ('CUSTOMER_GROUP', 'ONLINE_PURCHASE', and 'PRODUCT_GROUP'), which provide broad information about the customers depicted in the file. The original "MAILOUT" file included one additional column, "RESPONSE", which indicated whether or not each recipient became a customer of the company. For the "TRAIN" subset, this column has been retained, but in the "TEST" subset it has been removed; it is against that withheld column that your final predictions will be assessed in the Kaggle competition.
#
# Otherwise, all of the remaining columns are the same between the three data files. For more information about the columns depicted in the files, you can refer to two Excel spreadsheets provided in the workspace. [One of them](./DIAS Information Levels - Attributes 2017.xlsx) is a top-level list of attributes and descriptions, organized by informational category. [The other](./DIAS Attributes - Values 2017.xlsx) is a detailed mapping of data values for each feature in alphabetical order.
#
# In the below cell, we've provided some initial code to load in the first two datasets. Note for all of the `.csv` data files in this project that they're semicolon (`;`) delimited, so an additional argument in the [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call has been included to read in the data properly. Also, considering the size of the datasets, it may take some time for them to load completely.
#
# You'll notice when the data is loaded in that a warning message will immediately pop up. Before you really start digging into the modeling and analysis, you're going to need to perform some cleaning. Take some time to browse the structure of the data and look over the informational spreadsheets to understand the data values. Make some decisions on which features to keep, which features to drop, and if any revisions need to be made on data formats. It'll be a good idea to create a function with pre-processing steps, since you'll need to clean all of the datasets before you work with them.
# load in the data
'''
There are 2 warnings when we read in the datasets:
DtypeWarning: Columns (19,20) have mixed types. Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
This warning happens when pandas attempts to guess datatypes on particular columns, I will address this on
the pre-processing steps
'''
# NOTE(review): absolute Windows paths tie this notebook to one machine —
# consider relative paths or a configuration variable.
azdias = pd.read_csv(r"C:\Users\sousa\Desktop\github\Arvato\data\azdias.csv")
customers = pd.read_csv(r"C:\Users\sousa\Desktop\github\Arvato\data\customers.csv")
attributes = pd.read_csv(r"C:\Users\sousa\Desktop\github\Arvato\data\features.csv")
# I will now check what is the problem with the columns 19 and 20
# getting the name of these columns
print(azdias.iloc[:,19:21].columns)
print(customers.iloc[:,19:21].columns)
# checking the unique values in these columns for possible issues
print(azdias.CAMEO_DEUG_2015.unique())
print(azdias.CAMEO_INTL_2015.unique())
print(customers.CAMEO_DEUG_2015.unique())
print(customers.CAMEO_INTL_2015.unique())
# It seems like the mixed type issue comes from that X that appears in these columns.
# There are ints, floats and strings all in the mix
cols = ['CAMEO_DEUG_2015', 'CAMEO_INTL_2015']
# mixed_type_fixer comes from utils.py (imported via *); presumably it maps the
# 'X'/'XX' placeholders to NaN and unifies the dtype — TODO confirm in utils.
azdias = mixed_type_fixer(azdias, cols)
customers = mixed_type_fixer(customers, cols)
# #### Checking if values were fixed
# #### Change this cell to code if you want to perform the checks
#
# azdias.CAMEO_DEUG_2015.unique()
# customers.CAMEO_INTL_2015.unique()
# Considering the appearance of these mixed type data entries I created a function to check the dtype of the different attributes
#
# This might be useful in case some attributes have too many category values, which might fragment the data clustering too much.
#doing a quick check of categorical features and see if some are too granular to be maintained
cat_check = categorical_checker(azdias, attributes)
customers.AKT_DAT_KL.unique()
# Based on the categorical info it might be a good idea do drop CAMEO_DEU_2015 column, it is far too fragmented with 45 different category values, this is an idea to revisit after testing the models
# There is an extra column called Unnamed that seems like an index duplication, I will now drop it
#dropping unnamed column
# NOTE(review): this assumes the stray 'Unnamed: 0' index column is always the
# first column — confirm, or drop it by name instead of by position.
azdias = azdias.drop(azdias.columns[0], axis = 1)
customers = customers.drop(customers.columns[0], axis = 1)
# We also have 3 columns that are different between azdias and customers:
#
# 'CUSTOMER_GROUP', 'ONLINE_PURCHASE', and 'PRODUCT_GROUP'
#
# I will drop those to harmonize the 2 datasets
customers = customers.drop(['CUSTOMER_GROUP', 'ONLINE_PURCHASE', 'PRODUCT_GROUP'], inplace=False, axis=1)
# #### I will now check overal shapes of the datasets
# #### Azdias Shape
# +
# checking how the azdias dataframe looks like
print('Printing dataframe shape')
print(azdias.shape)
print('________________________________________________________')
azdias.head()  # last expression of the cell: renders the first rows
# -
# #### Customers Shape
# +
# checking how the customer dataframe looks like
print('Printing dataframe shape')
print(customers.shape)
print('________________________________________________________')
customers.head()
# -
# #### Attributes shape
# Check the summary csv file
print(attributes.shape)
attributes.head()
# ### On the dataframe shapes:
#
# #### For now it is noted that the 2 initial working dataframes are harmonized in terms of number of columns:
# #### azdias: (891221, 366)
# #### customers: (191652, 366)
# #### attributes: (332, 5)
#saving the unique attribute names to lists
attributes_list = attributes.attribute.unique().tolist()
azdias_list = list(azdias.columns)
customers_list = list(customers.columns)
# +
#establishing uniqueness of the attributes across the datasets in work
common_to_all = (set(attributes_list) & set(azdias_list) & set(customers_list))
unique_to_azdias = (set(azdias_list) - set(attributes_list) - set(customers_list))
unique_to_customers = (set(customers_list) - set(attributes_list) - set(azdias_list))
unique_to_attributes = (set(attributes_list) - set(customers_list) - set(azdias_list))
unique_to_attributes_vs_azdias = (set(attributes_list) - set(azdias_list))
# Bug fix: this was a copy-paste duplicate of the line above (attributes -
# azdias); "azdias vs attributes" must subtract in the opposite direction.
unique_to_azdias_vs_attributes = (set(azdias_list) - set(attributes_list))
common_azdias_attributes = (set(azdias_list) & set(attributes_list))
print("No of items common to all 3 daframes: " + str(len(common_to_all)))
print("No of items exclusive to azdias: " + str(len(unique_to_azdias)))
print("No of items exclusive to customers: " + str(len(unique_to_customers)))
print("No of items exclusive to attributes: " + str(len(unique_to_attributes)))
print("No of items overlapping between azdias and attributes: " + str(len(common_azdias_attributes)))
print("No of items exclusive to attributes vs azdias: " + str(len(unique_to_attributes_vs_azdias)))
print("No of items exclusive to azdias vs attributes: " + str(len(unique_to_azdias_vs_attributes)))
# +
rcParams['figure.figsize'] = 8, 8
ax = plt.axes()
ax.set_facecolor('lightgrey')
# NOTE(review): venn2's 3-tuple is (only-A, only-B, A-and-B); the raw set
# sizes are passed here, so the exclusive regions are overstated — confirm
# whether that is acceptable for this illustration.
v = venn2([len(azdias_list), len(attributes_list), len(common_azdias_attributes)],
          set_labels=('Azdias', 'Attributes'),
          set_colors = ['cyan', 'grey']);
plt.title("Attribute presence on Azdias vs DIAS Attributes ")
plt.show()
# -
# -
# ### From this little exploration we got quite a little bit of information:
# #### - There are 3 extra features in the customers dataset, it corresponds to the columns 'CUSTOMER_GROUP', 'ONLINE_PURCHASE', and 'PRODUCT_GROUP'
#
# #### - All the datasets share 327 features between them
#
# #### - The attributes file has 5 columns corresponding to feature information that does not exist in the other datasets
# ## Preprocessing
# ### Now that I have a birds-eye view of the data I will proceed with cleaning and handling missing values, re-encode features (since the first portion of this project will involve unsupervised learning), perform some feature engineering and scaling.
# ### Assessing missing data and replacing it with nan
# ### Before dealing with the missing and unknown data I will save a copy of the dataframes for the purpose of visualizing how much improvement was achieved
#making dataframes copies pre-cleanup
azdias_pre_cleanup = azdias.copy()
customers_pre_cleanup = customers.copy()
# Percentage of nulls in one column of the untouched copy, for reference.
azdias_pre_cleanup['AKT_DAT_KL'].isnull().sum()*100/len(azdias_pre_cleanup['AKT_DAT_KL'])
# I am using feat_fixer to use the information in the attributes dataframe to fill the information
# regarding missing and unknown values
azdias = feat_fixer(azdias, attributes)
customers = feat_fixer(customers, attributes)
# ### Since the next step involves dropping columns missing data over a threshold it is important to check if there is a column match between azdias and customers before and after the cleanup process
#
# ### There is a chance that some columns are missing too much data in one dataframe and being dropped while they are abundant in the other, causing a discrepancy in the shape between the 2 dataframes
# #### It is always hard to define a threshold on how much missing data is too much, my first approach will consider over 30% too much
# #### Based on model performance this is an idea to revisit and adjust
balance_checker(azdias, customers)
# #### Prior to cleanup customers and azdias match
# +
# *_df = per-column missing percentage of the cleaned frames;
# *_pc_df = the same for the pre-cleanup copies.
percent_missing_azdias_df = percentage_of_missing(azdias)
percent_missing_azdias_pc_df = percentage_of_missing(azdias_pre_cleanup)
percent_missing_customers_df = percentage_of_missing(customers)
percent_missing_customers_pc_df = percentage_of_missing(customers_pre_cleanup)
# +
print('Identified missing data in Azdias: ')
print('Pre-cleanup: ' + str(azdias_pre_cleanup.isnull().sum().sum()) + ' Post_cleanup: ' + str(azdias.isnull().sum().sum()))
print('Identified missing data in Customers: ')
print('Pre-cleanup: ' + str(customers_pre_cleanup.isnull().sum().sum()) + ' Post_cleanup: ' + str(customers.isnull().sum().sum()))
# +
# Bug fix: the Pre-/Post-cleanup labels were swapped — the *_pc_df frames were
# computed from the pre-cleanup copies, the *_df frames from the cleaned data.
print('Azdias columns not missing values(percentage):')
print('Pre-cleanup: ', (percent_missing_azdias_pc_df['percent_missing'] == 0.0).sum())
print('Post-cleanup: ', (percent_missing_azdias_df['percent_missing'] == 0.0).sum())
print('Customers columns not missing values(percentage):')
print('Pre-cleanup: ', (percent_missing_customers_pc_df['percent_missing'] == 0.0).sum())
print('Post-cleanup: ', (percent_missing_customers_df['percent_missing'] == 0.0).sum())
# -
# #### Deciding on what data to maintain based on the percentage missing
# +
# missing more or less than 30% of the data
azdias_missing_over_30 = split_on_percentage(percent_missing_azdias_df, 30, '>')
azdias_missing_less_30 = split_on_percentage(percent_missing_azdias_df, 30, '<=')
customers_missing_over_30 = split_on_percentage(percent_missing_customers_df, 30, '>')
customers_missing_less_30 = split_on_percentage(percent_missing_customers_df, 30, '<=')
# +
#plotting select features and their missing data percentages
figure, axes = plt.subplots(4, 1, figsize = (15,15), squeeze = False)
azdias_missing_over_30.sort_values(by = 'percent_missing', ascending = False).plot(kind = 'bar', x = 'column_name', y = 'percent_missing',
    ax = axes[0][0], color = 'red', title = 'Azdias percentage of missing values over 30%' )
#due to the sheer amount of data points to be plotted this does not make an appealing vis so I will restrict
#the number of plotted points to 40
azdias_missing_less_30.sort_values(by = 'percent_missing', ascending = False)[:40].plot(kind = 'bar', x = 'column_name', y = 'percent_missing',
    ax = axes[1][0], title = 'Azdias percentage of missing values less 30%' )
customers_missing_over_30.sort_values(by = 'percent_missing', ascending = False).plot(kind = 'bar', x = 'column_name', y = 'percent_missing',
    ax = axes[2][0], color = 'red', title = 'Customers percentage of missing values over 30%' )
#due to the sheer amount of data points to be plotted this does not make an appealing vis so I will restrict
#the number of plotted points to 40
customers_missing_less_30.sort_values(by = 'percent_missing', ascending = False)[:40].plot(kind = 'bar', x = 'column_name', y = 'percent_missing',
    ax = axes[3][0], title = 'Customers percentage of missing values less 30%' )
plt.tight_layout()
plt.show()
# -
# -
# Same AKT_DAT_KL null percentage, now on the cleaned frame, for comparison.
azdias['AKT_DAT_KL'].isnull().sum()*100/len(azdias['AKT_DAT_KL'])
# ### The vast majority of the columns with missing values have a percent of missing under 30%
# ### Based on this information I will remove columns with more than 30% missing values
# +
#extracting column names with more than 30% values missing so we can drop them from azdias df
azdias_col_delete = columns_to_delete(azdias_missing_over_30)
#extracting column names with more than 30% values missing so we can drop them from customers df
customers_col_delete = columns_to_delete(customers_missing_over_30)
# +
#dropping the columns identified in the previous lists
azdias = azdias.drop(azdias_col_delete, axis = 1)
customers = customers.drop(customers_col_delete, axis = 1)
# -
# ### Now that we dropped columns missing more than 30% of their data let's check if we should also drop rows based on a particular threshold
#plotting distribution of null values
row_hist(azdias, customers, 30)
# #### Based on this visualization we deduct 2 things
# ##### - most of the rows are missing the information over less than 50 columns
# ##### - both customer and azdias have probably overlapping rows in which they are missing info corresponding to over 200 columns
#deleting rows based on the information acquired in the previous histogram
# (row_dropper presumably removes rows with nulls in more than 50 columns —
# TODO confirm against utils.)
azdias = row_dropper(azdias, 50)
customers = row_dropper(customers, 50)
#plotting null values distribution after cleanup
row_hist(azdias, customers, 30)
balance_checker(azdias, customers)
azdias.shape
customers.shape
# Based on this information the azdias df has a few columns extra when compared to customers:
# - 'KBA13_SEG_WOHNMOBILE', 'ORTSGR_KLS9', 'KBA13_SEG_SPORTWAGEN', 'KBA13_SEG_OBERKLASSE'
# - These colummns refer to information on the type of car individuals own
#
# The customers dataframe has a column not present in azdias:
# - 'AKT_DAT_KL'
#
# So to finalize this step I will drop these columns
azdias = azdias.drop(['KBA13_SEG_WOHNMOBILE', 'ORTSGR_KLS9', 'KBA13_SEG_SPORTWAGEN', 'KBA13_SEG_OBERKLASSE'], inplace=False, axis=1)
customers = customers.drop(['AKT_DAT_KL'], inplace=False, axis=1)
balance_checker(azdias, customers)
# ## Feature Encoding
#
# ### Like I previously checked using the categorical_checker there are many features in need of re-encoding for the unsupervised learning portion
#
# - numerical features will be kept as is
# - ordinal features will be kept as is
# - categorical features and mixed type features will have to be re-encoded
#checking for mixed type features
attributes[attributes.type == 'mixed']
#retrieve a list of categorical features for future encoding
cats = attributes[attributes.type == 'categorical']
list(cats['attribute'])
# #### At this point I already dealt with the CAMEO_INTL_2015 column by converting XX to nan
#
# #### PRAEGENDE_JUGENDJAHRE has 3 dimensions: generation decade, if people are mainstream or avant-garde and if they are from east or west, I will create new features out of this particular column
#
# #### LP_LEBENSPHASE_GROB seems to encode the same information as the CAMEO column and it is divided between gross(grob) and fine (fein)
azdias = special_feature_handler(azdias)
customers = special_feature_handler(customers)
azdias.TITEL_KZ.unique()
azdias.select_dtypes('object').head()
# ## Feature engineering
# #### Based on the previous exploration there are a few features that are good candidates for novel feature creation
azdias_eng = azdias.copy()
customers_eng = customers.copy()
# NOTE(review): the return values are discarded, so feat_eng presumably
# mutates its argument in place — confirm against utils.py.
feat_eng(azdias_eng)
feat_eng(customers_eng)
azdias_eng.shape
customers_eng.shape
azdias.TITEL_KZ.unique()
# #### Now that I am done with creating new features and dealing with the most obvious columns I need to encode the remaining categorical features
# #### Considering this post: https://stats.stackexchange.com/questions/224051/one-hot-vs-dummy-encoding-in-scikit-learn there are advantages and drawbacks with chosing one-hot-encoding vs dummy encoding.
# #### There are also concerns regarding using dummies all together https://towardsdatascience.com/one-hot-encoding-is-making-your-tree-based-ensembles-worse-heres-why-d64b282b5769 so I will keep this in mind while moving forward
# #### For now I will go with dummy creation
# +
#finally I will encode all the features that are left
cat_features = ['AGER_TYP','ANREDE_KZ','CAMEO_DEU_2015','CAMEO_DEUG_2015','CJT_GESAMTTYP','D19_BANKEN_DATUM','D19_BANKEN_OFFLINE_DATUM',
                'D19_BANKEN_ONLINE_DATUM','D19_GESAMT_DATUM','D19_GESAMT_OFFLINE_DATUM','D19_GESAMT_ONLINE_DATUM','D19_KONSUMTYP',
                'D19_TELKO_DATUM','D19_TELKO_OFFLINE_DATUM','D19_TELKO_ONLINE_DATUM','D19_VERSAND_DATUM','D19_VERSAND_OFFLINE_DATUM','D19_VERSAND_ONLINE_DATUM',
                'D19_VERSI_DATUM','D19_VERSI_OFFLINE_DATUM','D19_VERSI_ONLINE_DATUM','FINANZTYP','GEBAEUDETYP',
                'GFK_URLAUBERTYP','GREEN_AVANTGARDE','KBA05_BAUMAX','LP_FAMILIE_FEIN',
                'LP_FAMILIE_GROB','LP_STATUS_FEIN','LP_STATUS_GROB','NATIONALITAET_KZ','OST_WEST_KZ','PLZ8_BAUMAX',
                'SHOPPER_TYP','SOHO_KZ','TITEL_KZ','VERS_TYP','ZABEOTYP']
# NOTE(review): get_dummies is applied to each frame independently, so a
# category present in only one frame yields mismatched dummy columns —
# balance_checker below presumably guards against this; confirm.
azdias_ohe = pd.get_dummies(azdias_eng, columns = cat_features)
customers_ohe = pd.get_dummies(customers_eng, columns = cat_features)
# -
azdias_ohe.shape
customers_ohe.shape
balance_checker(azdias_ohe, customers_ohe)
# ## Feature scaling
# ### Before moving on to dimensionality reduction I need to apply feature scaling, this way principal component vectors won't be affected by the variation that naturally occurs in the data
#dataframes using StandardScaler
azdias_SS = feature_scaling(azdias_ohe, 'StandardScaler')
customers_SS = feature_scaling(customers_ohe, 'StandardScaler')
#dataframes using RobustScaler
azdias_RS = feature_scaling(azdias_ohe, 'RobustScaler')
customers_RS = feature_scaling(customers_ohe, 'RobustScaler')
#dataframes using MinMaxScaler
azdias_MMS = feature_scaling(azdias_ohe, 'MinMaxScaler')
customers_MMS = feature_scaling(customers_ohe, 'MinMaxScaler')
# ## Dimensionality Reduction
# ### Finally I will use PCA (linear technique) to select only the features that seem to be more impactfull
# +
# One full-rank PCA per scaler variant so the scree plots show the complete
# explained-variance curve for each scaling choice.
components_list_azdias = azdias_SS.columns.values
n_components_azdias = len(components_list_azdias)
components_list_customers = customers_SS.columns.values
n_components_customers = len(components_list_customers)
azdias_SS_pca = pca_model(azdias_SS, n_components_azdias)
customers_SS_pca = pca_model(customers_SS, n_components_customers)
azdias_RS_pca = pca_model(azdias_RS, n_components_azdias)
customers_RS_pca = pca_model(customers_RS, n_components_customers)
azdias_MMS_pca = pca_model(azdias_MMS, n_components_azdias)
customers_MMS_pca = pca_model(customers_MMS, n_components_customers)
# -
scree_plots(azdias_SS_pca, azdias_RS_pca, azdias_MMS_pca, ' azdias')
scree_plots(customers_SS_pca, customers_RS_pca, customers_MMS_pca, ' customers')
# Each principal component is a directional vector pointing to the highest variance. The greater the distance from 0 the more the vector points to a feature.
first_dimension = interpret_pca(azdias_SS, n_components_azdias, 1)
first_dimension
display_interesting_features(azdias_SS, azdias_SS_pca, 0)
display_interesting_features(azdias_RS, azdias_RS_pca, 0)
display_interesting_features(azdias_MMS, azdias_MMS_pca, 0)
# #### On this first dimension most of the information seems to be related to household size, purchase power and types of purchases
# #### Based on these plots:
# - using standard scaler with 300 principal components 90% of the original variance can be represented
# - using robust scaler with about 150 components we represent 90% of the original variance
# - using minmax scaler with 250 components we represent 90% of the original variance
#
# #### Moving on I will pick the robust scaler PCA and I will re-fit with with a number of components that explains over 80% of the explained variance
azdias_pca_refit = pca_model(azdias_RS, 110)
explained_variance = azdias_pca_refit.explained_variance_ratio_.sum()
explained_variance
# ## Part 1: Customer Segmentation Report
#
# The main bulk of your analysis will come in this part of the project. Here, you should use unsupervised learning techniques to describe the relationship between the demographics of the company's existing customers and the general population of Germany. By the end of this part, you should be able to describe parts of the general population that are more likely to be part of the mail-order company's main customer base, and which parts of the general population are less so.
# ### After a lot of data pre-processing we are finally getting to the analysis, I will start by attempting KMeans to find relevant clusters
# #### Now that I have reduced the number of components to use, it is important to select the number of clusters to aim at for kmeans
# Re-fit PCA with the chosen 110 components and project the robust-scaled
# azdias data onto them.
pca = PCA(110)
azdias_pca_110 = pca.fit_transform(azdias_RS)
def fit_kmeans(data, centers):
    '''
    returns the kmeans score regarding SSE for points to centers

    INPUT:
        data - the dataset you want to fit kmeans to
        centers - the number of centers you want (the k value)
    OUTPUT:
        score - the SSE score for the kmeans model fit to the data
    '''
    # Bug fix: the body previously read the global `center` (leaked by the
    # calling loop) instead of the `centers` parameter, so it only worked
    # by accident when called from that specific loop.
    kmeans = KMeans(n_clusters = centers)
    model = kmeans.fit(data)
    # KMeans.score returns the negative SSE, so take the absolute value.
    score = np.abs(model.score(data))
    return score
# The elbow method (https://bl.ocks.org/rpgove/0060ff3b656618e9136b) is a way to validate the optimal number of clusters to use for a particular dataset.
# It can take some time training the dataset, optimising for the optimal n of clusters means that less resources are used.
# Elbow search: fit k-means for k = 1..19 and record the SSE of each fit.
scores = []
centers = list(range(1,20))
for center in centers:
    print('score appended')
    scores.append(fit_kmeans(azdias_pca_110, center))
# +
# Investigate the change in within-cluster distance across number of clusters.
# Plot the original data with clusters
plt.plot(centers, scores, linestyle='--', marker='o', color='b')
plt.ylabel('SSE score')
plt.xlabel('K')
plt.title('SSE vs K')
#Using a regression to determine where it is a good cluster number to divide the population (when the gradient decreases)
# NOTE(review): the regression is fit on scores[8:14] (k = 9..14) and then
# extrapolated backwards — confirm the index arithmetic matches the intent.
l_reg = LinearRegression()
l_reg.fit(X=np.asarray([[9,10,11,12,13,14]]).reshape(6,1), y=scores[8:14])
predicted =l_reg.predict(np.asarray(range(2,9)).reshape(-1,1))
plt.plot(list(range(2,20)),np.asarray(list(predicted.reshape(-1,1)) + list(scores[8:20])),'r')
# -
# Based on the plot 9 clusters should be enough to proceed with the kmeans training
# refitting using just 9 clusters
kmeans = KMeans(9)
kmodel = kmeans.fit(azdias_pca_110)
# +
#and now we can compare the customer data to the general demographics
# -
customers_RS.shape
azdias_RS.shape
# Assign every person in each dataset to one of the 9 clusters, using the
# PCA projection fitted on azdias for both frames.
azdias_kmeans = kmodel.predict(pca.transform(azdias_RS))
customers_kmeans = kmodel.predict(pca.transform(customers_RS))
# ## Part 2: Supervised Learning Model
#
# Now that you've found which parts of the population are more likely to be customers of the mail-order company, it's time to build a prediction model. Each of the rows in the "MAILOUT" data files represents an individual that was targeted for a mailout campaign. Ideally, we should be able to use the demographic information from each individual to decide whether or not it will be worth it to include that person in the campaign.
#
# The "MAILOUT" data has been split into two approximately equal parts, each with almost 43 000 data rows. In this part, you can verify your model with the "TRAIN" partition, which includes a column, "RESPONSE", that states whether or not a person became a customer of the company following the campaign. In the next part, you'll need to create predictions on the "TEST" partition, where the "RESPONSE" column has been withheld.
# Load the TRAIN mailout partition (includes the RESPONSE target column).
mailout_train = pd.read_csv('../../data/Term2/capstone/arvato_data/Udacity_MAILOUT_052018_TRAIN.csv', sep=';')
# ## Part 3: Kaggle Competition
#
# Now that you've created a model to predict which individuals are most likely to respond to a mailout campaign, it's time to test that model in competition through Kaggle. If you click on the link [here](http://www.kaggle.com/t/21e6d45d4c574c7fa2d868f0e8c83140), you'll be taken to the competition page where, if you have a Kaggle account, you can enter. If you're one of the top performers, you may have the chance to be contacted by a hiring manager from Arvato or Bertelsmann for an interview!
#
# Your entry to the competition should be a CSV file with two columns. The first column should be a copy of "LNR", which acts as an ID number for each individual in the "TEST" partition. The second column, "RESPONSE", should be some measure of how likely each individual became a customer – this might not be a straightforward probability. As you should have found in Part 2, there is a large output class imbalance, where most individuals did not respond to the mailout. Thus, predicting individual classes and using accuracy does not seem to be an appropriate performance evaluation method. Instead, the competition will be using AUC to evaluate performance. The exact values of the "RESPONSE" column do not matter as much: only that the higher values try to capture as many of the actual customers as possible, early in the ROC curve sweep.
# Load the TEST mailout partition (RESPONSE column withheld for Kaggle).
mailout_test = pd.read_csv('../../data/Term2/capstone/arvato_data/Udacity_MAILOUT_052018_TEST.csv', sep=';')
| .ipynb_checkpoints/Arvato Project Workbook-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python_defaultSpec_1600820451014
# ---
# # Introduction to Programming
#
# Topics for today will include:
# - Hackerrank
# - Reviewing Our Tools
# - Importance of Logic
# - Learning About Our Strengths
# - Interpreted vs Compiled
# - Keeping Our Code Dry
# - Learning About Dictionaries
# - Back To The Terminal
#
#
#
#
#
# ## [Codecademy](https://www.codecademy.com/catalog/language/bash)
# ---
# Codecademy is an amazing resource that allows new and seasoned developers take courses. Formerly a primarily free platform there is some paid content out there. Some of which is still free!
# ## [Markdown](https://www.markdownguide.org)
# ---
# Markdown is an industry standard way to beautifully document our work. This is going to be helpful for you lab later.
# ## [HackerRank](https://www.hackerrank.com)
# ---
# We're going to continue our discussion on useful resources with HackerRank. HackerRank is a site that has practice problems revolving around a bunch of languages. This similar to last week isn't something that's required to play around with but may prove useful in the future. This can be used to improve your understanding of logic or possibly learn new things and get badges on the site. Companies often use sites like this for interviews so getting used to answering questions in a medium such as this one is a worthwhile endeavor for most. Companies such as IBM use practice problems like these to interview people. Get insight into the ways that questions might be asked or companies may want you to think.
# ## Learning About Our Strengths
# ---
# So, we have talked a little about why we chose Python as our language but we want to build on that. How can we be sure that we're using the language properly? What was it intended to do? How do others perceive Python?
#
# These are questions that often need to be asked to determine what our approach towards things should be. As someone new to programming it should be used as a basis for when you venture into other languages.
#
# ### Interpreted Language (Kinda)
# ---
# Python is a language that is mostly interpreted we'll touch on exactly what that means below. For the purposes of discussing the language it makes it more accessible. Not having to interface with a compiler and just going through an interpreter is often viewed as more developer friendly. Especially for newer developers.
#
# > Python is an interpreted, high-level and general-purpose programming language. -Wikipedia
#
# Although Wikipedia says this python has strayed from that definition. While it's still mostly interpreted that's become a little controversial as a topic. There are compiled python files that allow for quicker access to commonly used python files.
#
# Meaning that it's kinda both!
#
# 
#
# ### Simple to Understand
# ---
# Python wants to be easy to understand as one of the basic tenets of the language. We want to be pythonic, meaning that we want to be able to read Python as if it was English. It's important that we adhere to this because most people nowadays are learning Python as a first language or to explain and code up complex things. If the tools that we're using to solve a complex thing are themselves complex then our solution is complex^2 and that will scare users.
#
# #### KISS (KEEP IT SIMPLE STUPID) and DOTS (DON'T OVER THINK SH!T)
#
# ### Flexible
# ---
# Python is used in several places and in many scenarios. From web development to data science Python is the primary chosen language due to its flexibility. This is something that's easier to witness than to imagine. So we have an image to demonstrate.
#
# 
#
# ### Dynamic vs Static Typing
# ---
# Python is a Dynamically typed language. This often comes with being interpreted. All that this means for us is that the interpreter is responsible for determining the typing of all of the things that we make. From variables to functions and what they might return.
#
# In a static typed language like Java, which is also a complied language. We have to explicitly say what type of variables that we're declaring.
#
# Dynamic typing means the interpreter will figure out types.
#
# Static typing means that we declare the types upon initialization.
#
#
# ### Shell, PowerShell, Bash, and zsh (Bourne Again Shell)
# ---
# Shells are something that you can technically circumnavigate for a long time in Computer Science. Though many lessons and different methods of doing things come through working in the shell. I wouldn't recommend straying away.
#
# Shells are a way to interact with your computer on an more interactive and heightened level. We can also run things like our python files without a text editor. In fact your text editor is just using your built in terminal.
#
# ### What's A Terminal Then?
# ---
# A terminal is the vessel that holds a shell. Some are more customizable, have better highlighting, better functionality, etc. than others. Here are the ones that I use for each platform
#
# #### Windows
# - [Windows Terminal](https://www.microsoft.com/en-us/p/windows-terminal/9n0dx20hk701?activetab=pivot:overviewtab)
# #### Mac
# - [ITerm2](https://iterm2.com)
#
# ### Command Line Driven Applications
# ---
# Python is used to write command line driven applications often. This is solely a product that is run from the command line using a shell. Command line applications are prevalent in places where we don't need a GUI.
#
# GUI (Graphical User Interface)
# - [Cement](https://builtoncement.com)
#
#
# ### Back End Processing
# ---
# So now after speaking about Command Line Driven applications we see that we can make apps that handle processing in the back end.
#
# Whether this is a website, a process to bring up and automate systems, a process to do calculus homework.
#
# We often don't need a GUI and it takes time to build out GUIs. For us we're more concerned about the logic in the back end.
#
# - [Django](https://www.djangoproject.com)
# - [Flask](https://flask.palletsprojects.com/en/1.1.x/)
# ## Compiled vs Interpreted
# ---
# Programming languages typically get converted into machine code using one of two methods. The code can either be interpreted and/or compiled. The and/or is important!
#
# ### Being Compiled
# ---
# Compiled languages are languages that are compiled and that process changes the code that you've written into machine code directly and ahead of time. These languages tend to be the lower level languages. These are usually faster and more efficient. These have a compile step that's required to be manually run whenever something changes.
#
# ### Being Interpreted
# ---
# Interpreted languages are languages that decide what's happening at run time. These languages are read in and executed by some program on the system. This means that things are changed into machine code in real time often. So if something were to change in real time you don't need to worry about the compilation step because the program is going to deal with it as it comes up.
#
# ### What's a Compiler
# ---
# A compiler is just something that can directly translate the chosen language into machine code.
#
# ### Which One is Better?
# ---
# [You can decide!](https://www.freecodecamp.org/news/compiled-versus-interpreted-languages/)
#
# ### What is Python?
# ---
# Well it can be both...
# ## Keeping Our Code Dry
# ---
#
# ### What does this even mean???
# ---
# If we want code to be dry then it can be wet?
#
# #### WET (Write Every Time)
#
# Ok so what is dry?
#
# #### DRY (Don't Repeat Yourself)
# ### Is Our Code WET?
# ---
#
# So we look to see if we're repeating ourselves in our code and look to minimize and generalize where we can. Modularity is key here. Not only does it mean we have fewer lines of code. This is then easier to use. Easier to follow. Then finally easier to test
#
# So we should be looking for areas where blocks of code are similar and looking to make them into functions.
# ### How Do We DRY Off?
# ---
# - We look to reduce repeated code
# - We get rid of magic numbers
# - A magic number is a number just floating out there.
# - `age * 7` What is 7? Why are we multiplying age by it?
# - `DOG_YEARS_MULTIPLIER = 7` gives the number meaning.
# - When a variable is written in all caps that typically means that it's a constant.
# - Constants aren't supposed to be changed.
# - `age * DOG_YEARS_MULTIPLIER`
# - We use functions and classes whereever applicable
# ### The Importance of DRY Code
# ---
# Dry code is important for a ton of reasons.
#
# [Read this article about DRY code for more!](https://www.softwareyoga.com/is-your-code-dry-or-wet/)
# ## Learning About Dictionaries
# ---
# + tags=[]
# A toy dictionary: four string keys mapped to string values in reverse order.
dummy_dict = dict(
    key1="value4",
    key2="value3",
    key3="value2",
    key4="value1",
)
# Look a single value up by its key.
print(dummy_dict["key1"])
# -
# ## Back To The Terminal
# ---
#
| JupyterNotebooks/Lessons/Lesson 5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# necessary libraries for prediciton
import utils
import os
import pandas as pd
import numpy as np
import pprint
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.combine import SMOTEENN
from sklearn.tree import export_graphviz
from subprocess import call
from IPython.display import Image
# -
# Show up to 200 columns when displaying dataframes.
pd.set_option('display.max_columns', 200)
# Directory for the exported tree visualizations (created if missing).
os.makedirs('images', exist_ok=True)
# Fixed seed so sampling, CV splits and models are reproducible.
SEED = 42
# # Useful Functions
# +
def pretty_matrix(matrix, row_label, col_label):
    """Print *matrix* as an aligned, tab-separated table with row/column labels."""
    def shorten(label):
        # Labels longer than 10 characters are truncated and marked with '..'
        return label[:10] + '..' if len(label) > 10 else label

    rows = [shorten(el) for el in row_label]
    cols = [shorten(el) for el in col_label]
    # Build a grid of strings: a header row (blank corner + column labels)
    # followed by one row label plus stringified values per matrix row.
    grid = [[" "] + cols]
    for idx, row in enumerate(matrix):
        grid.append([rows[idx]] + [str(cell) for cell in row])
    # The widest entry in each column determines that column's field width.
    widths = [max(len(cell) for cell in column) for column in zip(*grid)]
    template = '\t'.join('{{:{}}}'.format(w) for w in widths)
    # Render every row with the shared template and emit the whole table.
    print('\n'.join(template.format(*line) for line in grid))
def display_confusion_matrix(values):
    '''Display the given flat array as a 2x2 confusion matrix.

    values -- length-4 sequence as produced by
              sklearn confusion_matrix(...).ravel(): [tn, fp, fn, tp].
    '''
    pretty_matrix([values[0:2], values[2:4]],
                  ['Actual NO', 'Actual YES'],
                  ['Predic NO', 'Predic YES'])
# -
def plot_roc_auc(fpr, tpr, roc_auc):
    '''Plot the ROC curve and shade the area under it.

    fpr, tpr -- false/true positive rates from metrics.roc_curve
    roc_auc  -- area under the curve, shown in the legend
    '''
    fig, ax = plt.subplots()
    plt.title('Receiver Operating Characteristic (ROC)')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    # White dashed diagonal: the no-skill reference line.
    plt.plot([0, 1], [0, 1],'w--')
    plt.xlim([-0.02, 1.02])
    plt.ylim([0, 1])
    # Shade the area under the curve for emphasis.
    ax.fill_between(fpr, 0, tpr)
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
def visualize_tree(classifier, name):
    '''Visualize the given tree classifier by creating <name>.png.

    classifier -- a fitted decision tree (or one estimator of a forest)
    name       -- output path prefix; the image is written to name + '.png'
    Returns the IPython Image for the generated file, so it renders when
    the call is the last expression in a notebook cell.
    '''
    dot_name = name + '.dot'
    png_name = name + '.png'
    # Feature/class names come from the module-level dataset X.
    export_graphviz(classifier,
                    out_file=dot_name,
                    feature_names = list(X.columns),
                    class_names = 'status',
                    rounded = True, proportion = False,
                    precision = 2, filled = True)
    # Convert to png using system command (requires Graphviz installed)
    call(['dot', '-Tpng', dot_name, '-o', png_name, '-Gdpi=600'])
    call(['rm', dot_name])
    # Bug fix: previously this built Image('tree.png') - a hard-coded file
    # that is never created here - and discarded the result (a bare
    # expression inside a function displays nothing). Return the actual
    # generated image instead.
    return Image(png_name)
# # Necessary functions for the prediction process
# +
def normalize_columns(df, columns):
    '''Min-max normalize the given columns, in place, to the range [0, 1].

    df      -- dataframe to modify in place
    columns -- iterable of column names to rescale
    Robustness fix: constant columns (max == min) are mapped to 0.0
    instead of dividing by zero, which previously produced NaN values.
    '''
    for col in columns:
        col_min = df[col].min()
        col_max = df[col].max()
        col_range = col_max - col_min
        if col_range == 0:
            # Degenerate column: every value identical - define it as 0.0.
            df[col] = 0.0
        else:
            df[col] = (df[col] - col_min) / col_range
def normalize_df(df):
    '''Min-max normalize every column of the given df, in place.'''
    normalize_columns(df, df.columns)
# -
def apply_PCA(df, variance_val=0.95, debug=True):
    '''Apply PCA to the given dataframe after standardizing it.

    df           -- input features, indexed by loan_id
    variance_val -- fraction of total variance to keep (default 0.95;
                    the old comment said 90%, but the default is 0.95)
    debug        -- if True, print per-component variance ratios
    Returns (principal_df, fitted_pca): the projected data re-indexed by
    loan_id, and the fitted PCA object for transforming new data later.
    '''
    # PCA is scale-sensitive, so standardize all features first.
    scaler=StandardScaler()
    X_scaled=scaler.fit_transform(df)
    # Passing a float in (0, 1) makes PCA keep enough components to
    # explain that fraction of the total variance.
    pca = PCA(variance_val)
    pca.fit(X_scaled)
    X_pca = pca.transform(X_scaled)
    if debug:
        ex_variance=np.var(X_pca,axis=0)
        ex_variance_ratio = ex_variance/np.sum(ex_variance)
        print(' > Impact in total variance of each generated feature by PCA:')
        print(ex_variance_ratio)
    # NOTE(review): the fitted scaler is not returned, so later callers
    # must fit their own scaler on new data - confirm that is intended.
    principal_df = pd.DataFrame(data = X_pca, index = df.reset_index()['loan_id'])
    return (principal_df, pca)
def apply_sampling(algorithm, oversample, undersample):
    '''Wrap *algorithm* in a Pipeline, optionally preceded by a resampling
    step selected from the two control flags:

      oversample & undersample -> SMOTEENN (combined over/under sampling)
      oversample only          -> SMOTE
      undersample only         -> NeighbourhoodCleaningRule
      neither                  -> classifier only
    '''
    # Decide which sampler (if any) goes in front of the classifier.
    if oversample and undersample:
        sampler = SMOTEENN(random_state = SEED)
    elif oversample:
        sampler = SMOTE(random_state = SEED)
    elif undersample:
        sampler = NeighbourhoodCleaningRule(random_state = SEED)
    else:
        sampler = None
    steps = [('classification', algorithm)]
    if sampler is not None:
        steps.insert(0, ('sampling', sampler))
    return Pipeline(steps)
def auc_scorer(y_true, y_pred):
    '''Return the Area Under the ROC Curve for the given predictions.'''
    fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
    return metrics.auc(fpr, tpr)
# # Prediction Algorithms
#
# * Logistic Regression
# * Decision Tree
# * Random Forest
# * Gradient Boosting
def create_LR():
    '''Create a Logistic Regression model with the shared random seed.'''
    return LogisticRegression(random_state=SEED)
def create_DT():
    '''Create a new Decision Tree classifier with the shared random seed.'''
    return DecisionTreeClassifier(random_state=SEED)
def create_RF():
    '''Create a new Random Forest model with the shared random seed.'''
    return RandomForestClassifier(random_state=SEED)
def create_GB():
    '''Create a new Gradient Boosting model with the shared random seed.'''
    return GradientBoostingClassifier(random_state=SEED)
# # Predictions
# +
# Useful Macros
K_FOLD_NUM_SPLITS = 5   # number of StratifiedKFold splits
USE_PCA = False         # project features with PCA before training
UNDERSAMPLE = False     # apply NeighbourhoodCleaningRule undersampling
OVERSAMPLE = True       # apply SMOTE oversampling
# Pretty printer
pp = pprint.PrettyPrinter(indent=4)
# +
# Load the preprocessed training data and key it by loan_id.
dataset = utils.read_csv_to_df('dataset/preprocessed_data.csv')
dataset = dataset.set_index('loan_id')
display(dataset)
# Normalizing dataset (min-max, in place) before training.
print(' > Dataset after normalization')
normalize_df(dataset)
display(dataset)
# -
# Position of the target column; every column before it is a feature.
STATUS_COL = dataset.columns.get_loc("status")
# +
# Setting X and Y
X = dataset.iloc[:, 0:STATUS_COL]
y = dataset.iloc[:, [STATUS_COL]]
display(X.head())
if USE_PCA:
    print(' > Applying PCA to X_train:')
    # NOTE(review): PCA is fitted on the full X before cross-validation,
    # which can leak information across CV folds - confirm acceptable.
    X, pca = apply_PCA(X, debug=True)
    display(X.head())
# -
# # Hyper Parameter Tunning
# ## Using GridSearch to obtain best parameters.
#
# > Considered using RandomSearch, but since the search is not that big, opted for GridSearch
def getLogisticRegressionBest(X, y, debug=True):
    '''Grid-search the Logistic Regression hyper parameters.

    Returns a (best_score, method_name, best_params) tuple, where the
    score is the cross-validated AUC of the best parameter combination.
    '''
    # Fix: removed dead code that built a max_depth list here but never
    # used it - logistic regression has no depth parameter; the lines
    # were copy-pasted from the tree-based searches below.
    # Parameter grid; keys are prefixed with the pipeline step name.
    grid = {'classification__penalty': ['l2', 'none'],
            'classification__C': [0.01, 0.05, 0.1, 0.2, 0.5, 1.0],
            'classification__solver': ['newton-cg', 'lbfgs', 'sag', 'saga'],
            'classification__class_weight': ["balanced", None]}
    if debug:
        pp.pprint(grid)
    # Applying sampling techniques
    lr = apply_sampling(create_LR(), OVERSAMPLE, UNDERSAMPLE)
    # Exhaustive grid search, scored by AUC with stratified k-fold CV.
    lr_grid = GridSearchCV(estimator = lr,
                           param_grid = grid,
                           scoring=metrics.make_scorer(auc_scorer,
                                                       greater_is_better=True),
                           cv=StratifiedKFold(K_FOLD_NUM_SPLITS,
                                              random_state=SEED,
                                              shuffle=True),
                           verbose=2,
                           n_jobs = -1)
    # Fit the grid search model
    lr_grid = lr_grid.fit(X, y)
    if debug:
        print('Best Score: ', lr_grid.best_score_)
        print('Best Params: ', lr_grid.best_params_)
    # Return score, method & params tuple
    return (lr_grid.best_score_, 'Logistic Regression', lr_grid.best_params_)
def getDecisionTreeBest(X, y, debug=True):
    '''Grid-search the Decision Tree hyper parameters.

    Returns a (best_score, method_name, best_params) tuple, where the
    score is the cross-validated AUC of the best parameter combination.
    '''
    # Maximum number of levels in tree (None = grow until pure leaves)
    max_depth = [int(x) for x in range(2, 16, 2)]
    max_depth.append(None)
    # Parameter grid; keys are prefixed with the pipeline step name.
    # NOTE(review): min_impurity_split was deprecated and later removed in
    # newer scikit-learn releases - confirm the installed version takes it.
    grid = {'classification__criterion': ['gini', 'entropy'],
            'classification__splitter': ['best'],
            'classification__max_features': ['auto', 'sqrt'],
            'classification__max_depth': max_depth,
            'classification__min_samples_split': [2, 4, 6, 8],
            'classification__min_samples_leaf': [1, 2, 4, 6],
            'classification__min_impurity_split': [0.05, 0.1, 0.23, 0.3],
            'classification__class_weight': ["balanced", None]}
    if debug:
        pp.pprint(grid)
    # Applying sampling techniques
    dt = apply_sampling(create_DT(), OVERSAMPLE, UNDERSAMPLE)
    # Exhaustive grid search, scored by AUC with stratified k-fold CV.
    dt_grid = GridSearchCV(estimator = dt,
                           param_grid = grid,
                           scoring=metrics.make_scorer(auc_scorer,
                                                       greater_is_better=True),
                           cv=StratifiedKFold(K_FOLD_NUM_SPLITS,
                                              random_state=SEED,
                                              shuffle=True),
                           verbose=2,
                           n_jobs = -1)
    # Fit the grid search model
    dt_grid = dt_grid.fit(X, y)
    if debug:
        print('Best Score: ', dt_grid.best_score_)
        print('Best Params: ', dt_grid.best_params_)
    # Return score, method & params tuple
    return (dt_grid.best_score_, 'Decision Tree', dt_grid.best_params_)
def getRandomForestBest(X, y, debug=True):
    '''Grid-search the Random Forest hyper parameters.

    Returns a (best_score, method_name, best_params) tuple, where the
    score is the cross-validated AUC of the best parameter combination.
    '''
    # Maximum number of levels in tree (None = grow until pure leaves)
    max_depth = [int(x) for x in range(2, 16, 4)]
    max_depth.append(None)
    # Parameter grid; keys are prefixed with the pipeline step name.
    grid = {'classification__n_estimators': [int(x) for x in range(2, 14, 2)],
            'classification__max_features': ['auto', 'sqrt'],
            'classification__max_depth': max_depth,
            'classification__criterion': ['gini', 'entropy'],
            'classification__min_samples_split': [2, 4, 6, 8],
            'classification__min_samples_leaf': [1, 2, 4, 6],
            'classification__class_weight': ["balanced", "balanced_subsample", None]}
    if debug:
        pp.pprint(grid)
    # Applying sampling techniques
    rf = apply_sampling(create_RF(), OVERSAMPLE, UNDERSAMPLE)
    # Exhaustive grid search, scored by AUC with stratified k-fold CV.
    rf_grid = GridSearchCV(estimator = rf,
                           param_grid = grid,
                           scoring=metrics.make_scorer(auc_scorer,
                                                       greater_is_better=True),
                           cv=StratifiedKFold(K_FOLD_NUM_SPLITS,
                                              random_state=SEED,
                                              shuffle=True),
                           verbose=2,
                           n_jobs = -1)
    # Fit the grid search model
    rf_grid = rf_grid.fit(X, y)
    if debug:
        print('Best Score: ', rf_grid.best_score_)
        print('Best Params: ', rf_grid.best_params_)
    # Return score, method & params tuple
    return (rf_grid.best_score_, 'Random Forest', rf_grid.best_params_)
def getGradientBoostBest(X, y, debug=True):
    '''Grid-search the Gradient Boosting hyper parameters.

    Returns a (best_score, method_name, best_params) tuple, where the
    score is the cross-validated AUC of the best parameter combination.
    '''
    # Parameter grid; keys are prefixed with the pipeline step name.
    # NOTE(review): the 'deviance' loss and 'mse'/'mae' criteria were
    # renamed in newer scikit-learn - confirm the installed version.
    grid = {'classification__n_estimators': [int(x) for x in range(2, 14, 2)],
            'classification__learning_rate': [0.1, 0.3, 0.5, 0.7],
            'classification__loss': ['deviance', 'exponential'],
            'classification__criterion': ['friedman_mse', 'mse', 'mae'],
            'classification__min_samples_split': [4, 6, 8],
            'classification__min_samples_leaf': [2, 4, 6]}
    if debug:
        pp.pprint(grid)
    # Applying sampling techniques
    gb = apply_sampling(create_GB(), OVERSAMPLE, UNDERSAMPLE)
    # Exhaustive grid search, scored by AUC with stratified k-fold CV.
    gb_grid = GridSearchCV(estimator = gb,
                           param_grid = grid,
                           scoring=metrics.make_scorer(auc_scorer,
                                                       greater_is_better=True),
                           cv=StratifiedKFold(K_FOLD_NUM_SPLITS,
                                              random_state=SEED,
                                              shuffle=True),
                           verbose=2,
                           n_jobs = -1)
    # Fit the grid search model
    gb_grid = gb_grid.fit(X, y)
    if debug:
        print('Best Score: ', gb_grid.best_score_)
        print('Best Params: ', gb_grid.best_params_)
    # Return score, method & params tuple
    return (gb_grid.best_score_, 'Gradient Boosting', gb_grid.best_params_)
# +
# Run all four hyper-parameter searches and rank them by best AUC score.
algorithms = [getLogisticRegressionBest(X, y),
              getDecisionTreeBest(X, y),
              getRandomForestBest(X, y),
              getGradientBoostBest(X, y)]
# Each entry is a (score, name, params) tuple; sort descending by score.
algorithms.sort(reverse=True, key=lambda el: el[0])
for index, entry in enumerate(algorithms):
    print('%i. %s - %f\n---------' % (index + 1, entry[1], entry[0]))
print('Best algorithm: %s' % algorithms[0][1])
# -
# ## Using method with higher score with our data
# +
# Cross validation settings
auc_scores = []
confusion_matrixes = []
cv = StratifiedKFold(n_splits=K_FOLD_NUM_SPLITS,
                     random_state=SEED,
                     shuffle=True)
# CHANGE THIS LINE TO CHANGE THE USED CLASSIFICATION METHOD
# Hyper parameters below were taken from the grid-search results above.
classifier = RandomForestClassifier(bootstrap=False,
                                    class_weight='balanced',
                                    criterion='gini',
                                    max_depth=6,
                                    max_features='auto',
                                    min_samples_leaf=6,
                                    min_samples_split=2,
                                    n_estimators=10,
                                    random_state=SEED)
# Applying sampling techniques
classifier = apply_sampling(classifier, OVERSAMPLE, UNDERSAMPLE)
# Applying Cross validation: fit/evaluate on each stratified fold.
for train_index, test_index in cv.split(X, y):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    # Training with this fold
    classifier.fit(X_train, y_train)
    # Testing & Measuring accuracy
    y_pred = classifier.predict(X_test)
    fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
    auc = metrics.auc(fpr, tpr)
    auc_scores.append(auc)
    # Flattened confusion matrix for this fold: [tn, fp, fn, tp]
    confusion_matrixes.append(metrics.confusion_matrix(y_test, y_pred).ravel())
    plot_roc_auc(fpr, tpr, auc)
# Visualization of classifier result - if DT or RF
algorithm = classifier.get_params().get('classification')
if isinstance(algorithm, DecisionTreeClassifier):
    visualize_tree(algorithm, 'images/DT')
elif isinstance(algorithm, RandomForestClassifier):
    # One image per tree in the forest.
    for index, tree in enumerate(algorithm.estimators_):
        visualize_tree(tree, 'images/RF_' + str(index))
# -
# Printing the obtained results
print('Classification Method used:', classifier, '\n')
print('AUC scores:', auc_scores)
print('> Average: ', sum(auc_scores)/len(auc_scores))
# One confusion matrix per CV fold, in fold order.
for cf in confusion_matrixes:
    display_confusion_matrix(cf)
# ### After having our model trained we shall use the model on the data to be submitted to the Kaggle competition
# Load the Kaggle test partition, key it by loan_id and normalize it.
test_dataset = utils.read_csv_to_df('dataset/test_dataset.csv')
test_dataset = test_dataset.set_index('loan_id')
# NOTE(review): normalization here uses the test set's own min/max, not
# the training set's - confirm this matches how the model was trained.
normalize_df(test_dataset)
display(test_dataset.head())
# +
# We now remove the Y column with NaNs (the unknown 'status' target)
test_dataset = test_dataset.iloc[:, 0:STATUS_COL]
display(test_dataset.head())
# +
# Build the submission dataframe (Id, Predicted) from the trained model.
final_df = pd.DataFrame()
if USE_PCA:
    # Using train PCA and classifying
    # NOTE(review): a fresh StandardScaler is fitted on the test data here
    # instead of reusing the scaler fitted inside apply_PCA on the training
    # data - confirm this is intended, as it can shift the features.
    scaler=StandardScaler()
    X_test_scaled=scaler.fit_transform(test_dataset)
    predictions_df = pd.DataFrame(data = pca.transform(X_test_scaled),
                                  index=test_dataset.reset_index()['loan_id'])
    display(predictions_df)
    predictions_df['Predicted'] = classifier.predict(predictions_df)
    final_df = predictions_df.reset_index()\
                   [['loan_id', 'Predicted']]\
                   .rename(columns={
                       'loan_id': 'Id'
                   })
else:
    final_df = test_dataset.copy()
    final_df['Predicted'] = classifier.predict(final_df)
    final_df = final_df.reset_index()\
                   [['loan_id', 'Predicted']]\
                   .rename(columns={
                       'loan_id': 'Id'
                   })\
                   .drop_duplicates()
# The Kaggle submission encodes the negative class as -1 rather than 0.
final_df.loc[final_df["Predicted"]== 0.0, "Predicted"] = -1
display(final_df)
# -
# Outputting predictions to .csv
# CHANGE FILE NAME TO PRESERVE DIFFERENT INSTANCES
# Writes the submission to predictions/prediction.csv via the project helper.
utils.write_df_to_csv(final_df, 'predictions', 'prediction.csv')
| project-competition/prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SebastianMM-96/AnalyzeInternationalDebtStatistics/blob/main/fakeNewsModels/NLP/NLP_FakeNews.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Epq7b9nC3uB7"
# # Detección de noticias falsas utilizando aprendizaje supervisado y NLP
# #### Developed by: <NAME>
# ***
# + [markdown] id="5NDd5Jft4G7X"
# ### 1. Importando las librerías necesarias
# + id="gNUVNI2q3c8o"
# librerías básicas
import numpy as np
import pandas as pd
import itertools
import re
import string
import random
from textblob import TextBlob
# graficación
import matplotlib.pyplot as plt
# %matplotlib inline
from wordcloud import WordCloud
# importando la librería de nltk
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk import WordNetLemmatizer
# importando scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import classification_report
# + [markdown] id="fN5diVgo4VsS"
# ### 2. Importando los conjuntos de datos
# + colab={"base_uri": "https://localhost:8080/"} id="4PLtvOYw4ULG" outputId="6d0795bc-8981-45dc-80a1-5ee5145729c3"
# Load the fake/real news dataset from the Colab-mounted data folder.
print('Importando el conjunto de datos...')
data = pd.read_csv('/content/data/fake_or_real_news.csv')
print('Listo')
# + [markdown] id="ZRAlzxnq4s9W"
# Inspeccionaremos el dataframe que tiene almacenado nuestro conjunto de datos
# + colab={"base_uri": "https://localhost:8080/"} id="DFOHdAPI4rfc" outputId="120bdf8c-8ad9-4e4e-fe93-32f82c05a8d7"
print('Forma del conjunto de datos: {}'.format(data.shape))
# + [markdown] id="a0F9NcZE43iK"
# Inspeccionaremos los elementos que contiene nuestro conjunto de datos
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="GTUZJsYX42Bh" outputId="9cd9839d-9415-4c45-e0b0-05ea35e4c6a7"
data.head(10)
# + [markdown] id="GePPv5aJ4_mn"
# Corregiremos nuestro conjunto de datos asignando el índice a la primera columna
# + colab={"base_uri": "https://localhost:8080/", "height": 450} id="OOGisqi54996" outputId="66ddc228-eb90-487d-f9d8-d5fe77bb4553"
# Bug fix: DataFrame.set_index returns a *new* frame; the original call
# discarded the result, so - despite the surrounding markdown - the index
# was never actually applied. Assign it back.
data = data.set_index('Unnamed: 0')
# + [markdown] id="kdIaM1H55LuH"
# Ahora nuestro conjunto de datos se ve de esta forma:
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="JVV-vf7z5J5S" outputId="b4ff6a59-3cc9-49c5-ef27-cd90cbc4095e"
data.head(10)
# + [markdown] id="U6p6kIzl5U05"
# ### 3. Extración de los datos
# + [markdown] id="kn1Pozgm5ZiJ"
# Guardaremos dentro de una variable la etiqueta de cada una de las noticias
# + id="Lk0n_8T25R4p"
# Keep the target labels (the 'label' column) in their own series.
y = data.label
# + [markdown] id="GnCaFvi55ggO"
# Ahora que ya tenemos almacenada la columna de "*etiqueta*", podemos eliminar esta columna del dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="ssjbs4K15f6f" outputId="48636deb-4253-434b-ff52-ae34a3bac744"
# Bug fix: DataFrame.drop returns a *new* frame; the original call
# discarded the result, so the label column was never actually removed
# even though the surrounding markdown says it is. Assign it back, and
# keep a display of the result as the original cell output showed it.
data = data.drop('label', axis=1)
data.head()
# + [markdown] id="GBbTCbOv59S6"
# Ahora comenzaremos a crear nuestros conjuntos de entrenamiento y de pruebas utilizando la biblioteca de scikit-learn
# + id="aCnLi24855P1"
# Hold out 33% of the articles for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(data['text'], y, test_size=0.33, random_state=53)
# + [markdown] id="jIfACaKw6KqS"
# ### 4. Clasificación por vectores
# + [markdown] id="LhxVi8ZZ6S2R"
# #### 4.1 CountVectorizer
# + id="q8RaprC06bUe"
# Initialize CountVectorizer, removing English stop words
count_vec = CountVectorizer(stop_words='english')
# Fit on the training texts and transform them into count vectors
count_train = count_vec.fit_transform(X_train)
# Transform the test set with the already-fitted vocabulary
count_test = count_vec.transform(X_test)
# + [markdown] id="npmgOEmt6W7X"
# #### 4.2 Tf-idf Vectorizer
# + id="fD-tc9Yw6Izq"
# Initialize the tf-idf vectorizer, dropping English stop words and terms
# that appear in more than 70% of documents (max_df=0.7)
tfidf_vec = TfidfVectorizer(stop_words='english', max_df=0.7)
# Fit on the training texts and transform them into tf-idf vectors
tfidf_train = tfidf_vec.fit_transform(X_train)
# Transform the test set with the already-fitted vocabulary
tfidf_test = tfidf_vec.transform(X_test)
# + [markdown] id="diKreopf6yxF"
# #### 4.3 Información obtenida
# + colab={"base_uri": "https://localhost:8080/"} id="lrHb1rLB6wzS" outputId="bbc1e065-f695-4cf8-c4d0-a0d06fe8411e"
# Show the last 10 vocabulary terms learned by each vectorizer.
# NOTE(review): get_feature_names() was removed in newer scikit-learn in
# favor of get_feature_names_out() - confirm the installed version.
print("Tf-idf: {}".format(tfidf_vec.get_feature_names()[-10:]))
# + colab={"base_uri": "https://localhost:8080/"} id="YuK0m_gK6-5u" outputId="46c79369-79a6-4159-cdcd-f25c67044e39"
print("CountVectorizer: {}".format(count_vec.get_feature_names()[-10:]))
# + [markdown] id="IACwydt37NYn"
# ### 5. Comparación de modelos: Count_Vectorizer vs Tf-idf Vectorizer usando MultiNomial Naïve Bayes
# + [markdown] id="wdk2sFfz7P08"
# A continuación definiremos una función para gráficar una Matriz de confusión. Esta función se encuentra disponible en el siguiente [link](https://scikit-learn.org/0.18/auto_examples/model_selection/plot_confusion_matrix.html)
# + id="vTnth2AG7EjO"
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print and plot a confusion matrix.

    Adapted from the classic scikit-learn example.

    Parameters:
      cm        -- confusion matrix array, as returned by confusion_matrix()
      classes   -- class label names, in the order used to build cm
      normalize -- if True, show row-normalized proportions instead of counts
      title     -- plot title
      cmap      -- matplotlib colormap for the heatmap
    """
    # BUG FIX: normalize *before* drawing the image. The original (sklearn
    # 0.18 example) called plt.imshow() first, so the heatmap colors showed
    # raw counts even when normalize=True while the cell text showed
    # normalized proportions.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; use white text on dark cells, black on light ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# + [markdown] id="KiFmHXqn71Yi"
# #### 5.1 Tf-idf Vectorizer
# + id="ztTKj5Dz79fl"
# Define the tf-idf based Multinomial Naive Bayes classifier
clf_tfidf = MultinomialNB()
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="ZCiDd9Zi79yQ" outputId="3017ad9f-8427-4c71-8fc1-e4f183b38a4f"
# Train on the tf-idf features and predict on the held-out test set
clf_tfidf.fit(tfidf_train, y_train)
pred_tfidf = clf_tfidf.predict(tfidf_test)
score_tfidf = accuracy_score(y_test, pred_tfidf)
# Report the accuracy score (as a percentage, two decimals)
print('Accuracy using Tf-idf: {}%'.format(round(score_tfidf*100,2)))
# Compute the confusion matrix (rows = true FAKE/REAL, columns = predicted)
cm_tfidf = confusion_matrix(y_test, pred_tfidf, labels=['FAKE', 'REAL'])
# Plot it with the helper defined above
plot_confusion_matrix(cm_tfidf, classes=['FAKE', 'REAL'])
# + colab={"base_uri": "https://localhost:8080/"} id="rsfBqiGy8mJC" outputId="cc087156-fd74-4bf5-f095-12eeae0da970"
# Per-class precision / recall / F1 metrics
print(classification_report(y_test, pred_tfidf))
# + [markdown] id="skmJ223N71hL"
# #### 5.2 Count Vectorizer
# + id="FYQbGVU670dk"
# Define the CountVectorizer-based Multinomial Naive Bayes classifier
clf_countVec = MultinomialNB()
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="is2xHw198aXP" outputId="efc44783-42ef-4987-9ae2-94526f60db00"
# Train on the bag-of-words count features and predict on the test set
clf_countVec.fit(count_train, y_train)
pred_countVec = clf_countVec.predict(count_test)
score_countVec = accuracy_score(y_test, pred_countVec)
# Print the accuracy score.
# BUG FIX: the original message said "Tf-idf" here (copy-pasted from the
# previous section); this cell evaluates the CountVectorizer model.
print('Accuracy using CountVectorizer: {}%'.format(round(score_countVec*100,2)))
# Calculate the confusion matrix
cm_countVec = confusion_matrix(y_test, pred_countVec, labels=['FAKE', 'REAL'])
# Plot the confusion matrix
plot_confusion_matrix(cm_countVec, classes=['FAKE', 'REAL'])
# + colab={"base_uri": "https://localhost:8080/"} id="qA9lClG48fvL" outputId="abb1e402-0be9-4f47-a327-7679f3021cc8"
# Per-class precision / recall / F1 metrics
print(classification_report(y_test, pred_countVec))
# + [markdown] id="LgxJ_vbR9Gq3"
# ### 6. Passive Agressive Classifier
# + id="cGzew3UK8-O8"
| fakeNewsModels/NLP/NLP_FakeNews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="53c90f720c67f85516c2e9350017d9da94185435"
# # Code along 4
#
# ## Scale, Standardize, or Normalize with scikit-learn
# ### När ska man använda MinMaxScaler, RobustScaler, StandardScaler, och Normalizer
# ### Attribution: <NAME>
# + [markdown] _uuid="8174f53e7350d7f282636b7d5a94d8299c554552"
# ### Varför är det ofta nödvändigt att genomföra så kallad variable transformation/feature scaling det vill säga, standardisera, normalisera eller på andra sätt ändra skalan på data vid dataanalys?
#
# Som jag gått igenom på föreläsningen om data wrangling kan data behöva formateras (variable transformation) för att förbättra prestandan hos många algoritmer för dataanalys. En typ av formatering av data, som går att göra på många olika sätt, är så kallad skalning av attribut (feature scaling). Det kan finnas flera anledningar till att data kan behöva skalas, några exempel är:
#
# * Exempelvis neurala nätverk, regressionsalgoritmer och K-nearest neighbors fungerar inte lika bra om inte de attribut (features) som algoritmen använder befinner sig i relativt lika skalor.
#
# * Vissa av metoderna för att skala, standardisera och normalisera kan också minska den negativa påverkan outliers kan ha i vissa algoritmer.
#
# * Ibland är det också av vikt att ha data som är normalfördelat (standardiserat)
#
# *Med skala menas inte den skala som hänsyftas på exempelvis kartor där det brukar anges att skalan är 1:50 000 vilket tolkas som att varje avstånd på kartan är 50 000 ggr kortare än i verkligheten.*
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
#Importerar de bibliotek vi behöver
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
#Denna kod sätter upp hur matplotlib ska visa grafer och plotar
# %matplotlib inline
matplotlib.style.use('ggplot')
#Generera lite input
#(den som är extremt intresserad kan läsa följande, intressanta och roliga förklaring kring varför random.seed egentligen är pseudorandom)
#https://www.sharpsightlabs.com/blog/numpy-random-seed/
np.random.seed(34)
# + [markdown] _uuid="2ba6eda07c92b9f8a5dfaaf035359243bc977ded"
# # Original Distributions
#
# Data som det kan se ut i original, alltså när det samlats in, innan någon pre-processing har genomförts.
#
# För att ha data att använda i övningarna skapar nedanstående kod ett antal randomiserade spridningar av data
# + _uuid="50cda9065e05259061321479ab04ed469ce26de4"
# Create columns with different random distributions (1000 samples each)
df = pd.DataFrame({
    'beta': np.random.beta(5, 1, 1000) * 60, # beta, scaled to roughly 0-60
    'exponential': np.random.exponential(10, 1000), # exponential, scale 10
    'normal_p': np.random.normal(10, 2, 1000), # normal platykurtic (sd=2)
    'normal_l': np.random.normal(10, 10, 1000), # normal leptokurtic (sd=10)
})
# make bimodal distribution: two well-separated normal modes at +20 and -20
first_half = np.random.normal(20, 3, 500)
second_half = np.random.normal(-20, 3, 500)
bimodal = np.concatenate([first_half, second_half])
df['bimodal'] = bimodal
# create list of column names to use later (when rebuilding scaled frames)
col_names = list(df.columns)
# + [markdown] _uuid="1acea8ada274cab28babd79959f92431acd90fb9"
# ## Uppgift 1:
#
# a. Plotta de kurvor som skapats i ovanstående cell i en och samma koordinatsystem med hjälp av [seaborn biblioteket](https://seaborn.pydata.org/api.html#distribution-api).
#
# >Se till att det är tydligt vilken kurva som representerar vilken distribution.
# >
# >Koden för själva koordinatsystemet är given, fortsätt koda i samma cell
# >
# >HINT! alla fem är distribution plots
# + _uuid="06c4730029c407202b169c31e92fb247d824de56"
# plot original distribution plot
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('Original Distributions')
#De fem kurvorna
sns.kdeplot(df['beta'], ax=ax1)
sns.kdeplot(df['exponential'], ax=ax1)
sns.kdeplot(df['normal_p'], ax=ax1)
sns.kdeplot(df['normal_l'], ax=ax1)
sns.kdeplot(df['bimodal'], ax=ax1);
# -
# b. Visa de fem första raderna i den dataframe som innehåller alla distributioner.
# + _uuid="ed5da4af29ae63b90b0dc0fcfc62ad857a0be11b"
df.head()
# + [markdown] _uuid="8002769d4872942a779e8ad219fa8dafaa111d9d"
# c. För samtliga fem attribut, beräkna:
#
# * medel
# * median
#
# Vad för bra metod kan användas för att få ett antal statistiska mått på en dataframe? Hämta denna information med denna metod.
# -
df.describe()
# d. I pandas kan du plotta din dataframe på några olika sätt. Gör en plot för att ta reda på hur skalan på de olika attibuten ser ut, befinner sig alla fem i ungefär samma skala?
#
df.plot()
# + [markdown] _uuid="50a2f43eeabf0557ddca3c1691d3bdacc21cf98a"
# * Samtliga värden ligger inom liknande intervall
# + [markdown] _uuid="2b8cc2f3a9578552eff85d01f09bbf95701bba38"
# e. Vad händer om följande kolumn med randomiserade värden läggs till?
# + _uuid="bb6355d0905daf893f2854f5ae86773f74b0c25c"
new_column = np.random.normal(1000000, 10000, (1000,1))
df['new_column'] = new_column
col_names.append('new_column')
df['new_column'].plot(kind='kde')
# + _uuid="43f8cce6dbb0fcb30c7d54b7a01852fa3d6af178"
# plot våra originalvärden tillsammans med det nya värdet
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('Original Distributions')
sns.kdeplot(df['beta'], ax=ax1)
sns.kdeplot(df['exponential'], ax=ax1)
sns.kdeplot(df['normal_p'], ax=ax1)
sns.kdeplot(df['normal_l'], ax=ax1)
sns.kdeplot(df['bimodal'], ax=ax1);
sns.kdeplot(df['new_column'], ax=ax1);
# -
# Hur gick det?
# + [markdown] _uuid="2e795d7547fd02aec36fc682cc651b507dd677d1"
# Testar några olika sätt att skala dataframes..
# + [markdown] _uuid="98aa148dba37013510ec222678140461deab99b5"
# ### MinMaxScaler
#
# MinMaxScaler subtraherar varje värde i en kolumn med kolumnens minsta värde och dividerar sedan med skillnaden mellan kolumnens största och minsta värde, så att alla värden skalas till intervallet [0, 1].
# + _uuid="8d53fbc635fdbe9c31c5a02368fdacfea44074ee"
mm_scaler = preprocessing.MinMaxScaler()
df_mm = mm_scaler.fit_transform(df)
df_mm = pd.DataFrame(df_mm, columns=col_names)
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After MinMaxScaler')
sns.kdeplot(df_mm['beta'], ax=ax1)
sns.kdeplot(df_mm['exponential'], ax=ax1)
sns.kdeplot(df_mm['normal_p'], ax=ax1)
sns.kdeplot(df_mm['normal_l'], ax=ax1)
sns.kdeplot(df_mm['bimodal'], ax=ax1)
sns.kdeplot(df_mm['new_column'], ax=ax1);
# + [markdown] _uuid="5ab1940ed342ab438bd8e437a09a482e56e17f39"
# Vad har hänt med värdena?
# + _uuid="df57c21b0769fbde43c7597aa9ca5cbf81bcaf71"
df_mm['beta'].min()
# + _uuid="0814a537a7be26fe065aa5f2ae9efa38d9741bf2"
df_mm['beta'].max()
# + [markdown] _uuid="11ef371c86a091c06701184504d0da556baae434"
# Vi jämför med min och maxvärde för varje kolumn innan vi normaliserade vår dataframe
# + _uuid="c8a2e55bad1eb2911e2a2fae29bc2ba9e4f0066a"
mins = [df[col].min() for col in df.columns]
mins
# + _uuid="3080138c4cbf61f372184d52406d5388defde718"
maxs = [df[col].max() for col in df.columns]
maxs
# + [markdown] _uuid="67d547c34122a3f2659a7320bd98881e0b6b9edd"
# Let's check the minimums and maximums for each column after MinMaxScaler.
# + _uuid="a67a1650ea6c4d83d35f389570b566a8da9630b4"
mins = [df_mm[col].min() for col in df_mm.columns]
mins
# + _uuid="8a560f67852f8843f4b5215c2cee5f6be7ed9f0f"
maxs = [df_mm[col].max() for col in df_mm.columns]
maxs
# + [markdown] _uuid="31bb92f2bbccd3a4cec450bbb26d96cc83e08489"
# Vad har hänt?
# + [markdown] _uuid="e89e7885204f606c89e5d4557876bba7991c5fc7"
# ### RobustScaler
#
# RobustScaler subtraherar med medianen för kolumnen och dividerar med kvartilavståndet (IQR, det vill säga skillnaden mellan tredje kvartilen/75:e percentilen och första kvartilen/25:e percentilen)
# + _uuid="f2b82a1ca80ec44d0e25d55ef2e7e5ff440c87ec"
r_scaler = preprocessing.RobustScaler()
df_r = r_scaler.fit_transform(df)
df_r = pd.DataFrame(df_r, columns=col_names)
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After RobustScaler')
sns.kdeplot(df_r['beta'], ax=ax1)
sns.kdeplot(df_r['exponential'], ax=ax1)
sns.kdeplot(df_r['normal_p'], ax=ax1)
sns.kdeplot(df_r['normal_l'], ax=ax1)
sns.kdeplot(df_r['bimodal'], ax=ax1)
sns.kdeplot(df_r['new_column'], ax=ax1);
# + [markdown] _uuid="dcc077aec1581f20498dc05d1a22e5be78c92535"
# Vi kollar igen min och max efteråt (OBS; jämför med originalet högst upp innan vi startar olika skalningsmetoder).
# + _uuid="8d4030ddd4b584432da924289723c26e697eb5cd"
mins = [df_r[col].min() for col in df_r.columns]
mins
# + _uuid="523cc16efeacd12e3c637f6def1664796e7daf99"
maxs = [df_r[col].max() for col in df_r.columns]
maxs
# + [markdown] _uuid="4dbf7394dfea6178f104f7dcc2f4ccccc4f0ee0a"
# Vad har hänt?
# + [markdown] _uuid="3b5146b80a9ae1c1d098543c9c11cfad608b0ba9"
# ### StandardScaler
#
# StandardScaler skalar varje kolumn till att ha 0 som medelvärde och standardavvikelsen 1
# + _uuid="08609ebc1ed00191c609393550c3ff669f61de67"
s_scaler = preprocessing.StandardScaler()
df_s = s_scaler.fit_transform(df)
df_s = pd.DataFrame(df_s, columns=col_names)
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After StandardScaler')
sns.kdeplot(df_s['beta'], ax=ax1)
sns.kdeplot(df_s['exponential'], ax=ax1)
sns.kdeplot(df_s['normal_p'], ax=ax1)
sns.kdeplot(df_s['normal_l'], ax=ax1)
sns.kdeplot(df_s['bimodal'], ax=ax1)
sns.kdeplot(df_s['new_column'], ax=ax1);
# + [markdown] _uuid="044277ab27467f23402f566828cc893d262a061c"
# Vi kontrollerar min och max efter skalningen återigen
# + _uuid="297211c50cf6edd0bf4f455e31c632855ea7755e"
mins = [df_s[col].min() for col in df_s.columns]
mins
# + _uuid="3b16a6d312335e82671f2f763d97e1c83473717c"
maxs = [df_s[col].max() for col in df_s.columns]
maxs
# + [markdown] _uuid="d3c4868647d4505f7bf735f36dae13d40be7d904"
# Vad har hänt? I jämförelse med de två innan?
# + [markdown] _uuid="41d47dbcd963627eab1323d986478ddc5a0c93a4"
# # Normalizer
#
# Normalizer transformerar rader istället för kolumner genom att (default) dividera varje rad med dess Euklidiska norm, det vill säga roten ur summan av kvadraterna av radens värden. Kallas för l2.
# + _uuid="7acbee8b8b745a8c347c055540f1f4f4bef588f7"
n_scaler = preprocessing.Normalizer()
df_n = n_scaler.fit_transform(df)
df_n = pd.DataFrame(df_n, columns=col_names)
fig, (ax1) = plt.subplots(ncols=1, figsize=(10, 8))
ax1.set_title('After Normalizer')
sns.kdeplot(df_n['beta'], ax=ax1)
sns.kdeplot(df_n['exponential'], ax=ax1)
sns.kdeplot(df_n['normal_p'], ax=ax1)
sns.kdeplot(df_n['normal_l'], ax=ax1)
sns.kdeplot(df_n['bimodal'], ax=ax1)
sns.kdeplot(df_n['new_column'], ax=ax1);
# + [markdown] _uuid="7ba477a3a86798f7ce9f0df1cbb218a7fe227173"
# Min och max efter skalning
# + _uuid="ffde297a55fd6f5f12d275f77dc6bd1a9f6693a3"
mins = [df_n[col].min() for col in df_n.columns]
mins
# + _uuid="ca22359fca795384a8c169832ac151e7ab6cf46a"
maxs = [df_n[col].max() for col in df_n.columns]
maxs
# + [markdown] _uuid="900bd9d48b818fe447d220897fd785439701b09d"
# Vad har hänt?
# + [markdown] _uuid="501a3d0671b293f8bcd0937aa60df767e0f2c74a"
# Nu tar vi en titt på alla olika sätt att skala tillsammans, dock skippar vi normalizern då det är väldigt ovanligt att man vill skala om rader.
# + [markdown] _uuid="798ae56438449253b271a4c9301976fdc65da5e6"
# ### Kombinerad plot
# + _uuid="f02ff366d3cb22eff31266ce0a09ce7801c8384f"
# The figure: four panels side by side, one per scaling variant
fig, (ax0, ax1, ax2, ax3) = plt.subplots(ncols=4, figsize=(20, 8))
# Each panel shows the same six KDE curves; only the data frame differs.
panels = [
    (ax0, df, 'Original Distributions'),
    (ax1, df_mm, 'After MinMaxScaler'),
    (ax2, df_r, 'After RobustScaler'),
    (ax3, df_s, 'After StandardScaler'),
]
for axis, frame, panel_title in panels:
    axis.set_title(panel_title)
    for column in ('beta', 'exponential', 'normal_p',
                   'normal_l', 'bimodal', 'new_column'):
        sns.kdeplot(frame[column], ax=axis)
# + [markdown] _uuid="8bddab8169d46325087fb8ebb7735147e53ab3b2"
# Efter samtliga transformationer är värdena på en mer lika skala. MinMax hade varit att föredra här eftersom den ger minst förskjutning av värdena i förhållande till varandra. Det är samma avstånd som i originalet, de andra två skalningsmetoderna ändrar avstånden mellan värdena vilket kommer påverka modellens korrekthet.
# -
| Scale, Standardize, or Normalize with scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spark on Kubernetes
# Preparing the notebook https://towardsdatascience.com/make-kubeflow-into-your-own-data-science-workspace-cc8162969e29
# ## Setup service account permissions
# https://github.com/kubeflow/kubeflow/issues/4306 issue with launching spark-operator from jupyter notebook
# Run command in your shell (not in notebook)
#
# ```shell
# export NAMESPACE=<your_namespace>
# kubectl create serviceaccount spark -n ${NAMESPACE}
# kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=${NAMESPACE}:spark --namespace=${NAMESPACE}
# ```
# ## Python version
#
# > Note: Make sure your driver python and executor python version matches.
# > Otherwise, you will see error msg like below
#
# Exception: Python in worker has different version 3.7 than that in driver 3.6, PySpark cannot run with different minor versions.Please check environment variables `PYSPARK_PYTHON` and `PYSPARK_DRIVER_PYTHON` are correctly set.
import sys
print(sys.version)
# ## Client Mode
# +
import findspark, pyspark,socket
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
findspark.init()
# Driver runs inside this notebook pod; executors must reach it by this IP
localIpAddress = socket.gethostbyname(socket.gethostname())
conf = SparkConf().setAppName('sparktest1')
# Talk to the in-cluster Kubernetes API server
conf.setMaster('k8s://https://kubernetes.default.svc:443')
conf.set("spark.submit.deployMode", "client")
conf.set("spark.executor.instances", "2")
# Executors connect back to the driver at this host:port
conf.set("spark.driver.host", localIpAddress)
conf.set("spark.driver.port", "7778")
conf.set("spark.kubernetes.namespace", "yahavb")
conf.set("spark.kubernetes.container.image", "seedjeffwan/spark-py:v2.4.6")
# Executor Python major version must match the driver's (see note above)
conf.set("spark.kubernetes.pyspark.pythonVersion", "3")
conf.set("spark.kubernetes.authenticate.driver.serviceAccountName", "spark")
# Disable Istio sidecar injection on executor pods
conf.set("spark.kubernetes.executor.annotation.sidecar.istio.io/inject", "false")
# +
sc = pyspark.context.SparkContext.getOrCreate(conf=conf)
# following works as well
# spark = SparkSession.builder.config(conf=conf).getOrCreate()
# +
import random  # BUG FIX: used by inside() below but never imported in this notebook

num_samples = 100000

def inside(p):
    """Monte-Carlo pi helper: return True when a uniformly random point in the
    unit square lies inside the quarter unit circle.

    The argument p (the element supplied by Spark's parallelize) is ignored;
    each call draws a fresh random point.
    """
    x, y = random.random(), random.random()
    return x*x + y*y < 1
count = sc.parallelize(range(0, num_samples)).filter(inside).count()
# -
sc.stop()
# ## Cluster Mode
# ## Java
# + language="bash"
#
# /opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \
# --deploy-mode cluster \
# --name spark-java-pi \
# --class org.apache.spark.examples.SparkPi \
# --conf spark.executor.instances=30 \
# --conf spark.kubernetes.namespace=yahavb \
# --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \
# --conf spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \
# --conf spark.kubernetes.container.image=seedjeffwan/spark:v2.4.6 \
# --conf spark.kubernetes.driver.pod.name=spark-java-pi-driver \
# --conf spark.kubernetes.executor.request.cores=4 \
# --conf spark.kubernetes.node.selector.computetype=gpu \
# --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
# local:///opt/spark/examples/jars/spark-examples_2.11-2.4.6.jar 262144
# + language="bash"
# kubectl -n yahavb delete po ` kubectl -n yahavb get po | grep spark-java-pi-driver | awk '{print $1}'`
# -
# ## Python
# + language="bash"
#
# /opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \
# --deploy-mode cluster \
# --name spark-python-pi \
# --conf spark.executor.instances=50 \
# --conf spark.kubernetes.container.image=seedjeffwan/spark-py:v2.4.6 \
# --conf spark.kubernetes.driver.pod.name=spark-python-pi-driver \
# --conf spark.kubernetes.namespace=yahavb \
# --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \
# --conf spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \
# --conf spark.kubernetes.pyspark.pythonVersion=3 \
# --conf spark.kubernetes.executor.request.cores=4 \
# --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark /opt/spark/examples/src/main/python/pi.py 64000
# + language="bash"
# kubectl -n yahavb delete po `kubectl -n yahavb get po | grep spark-python-pi-driver | awk '{print $1}'`
# -
| notebooks/spark-on-eks-cluster-mode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 3. Coordinate Reference Systems (CRS) & Map Projections
#
# Building off of what we learned in the previous notebook, we'll get to understand an integral aspect of geospatial data: Coordinate Reference Systems.
#
# - 3.1 California County Shapefile
# - 3.2 USA State Shapefile
# - 3.3 Plot the Two Together
# - 3.4 Coordinate Reference System (CRS)
# - 3.5 Getting the CRS
# - 3.6 Setting the CRS
# - 3.7 Transforming or Reprojecting the CRS
# - 3.8 Plotting States and Counties Together
# - 3.9 Recap
# - **Exercise**: CRS Management
#
# <br>
# <font color='grey'>
# <b>Instructor Notes</b>
#
# - Datasets used
# - ‘notebook_data/california_counties/CaliforniaCounties.shp’
# - ‘notebook_data/us_states/us_states.shp’
# - ‘notebook_data/census/Places/cb_2018_06_place_500k.zip’
#
# - Expected time to complete
# - Lecture + Questions: 45 minutes
# - Exercises: 10 minutes
# </font>
# ### Import Libraries
# +
import pandas as pd
import geopandas as gpd
import matplotlib # base python plotting library
import matplotlib.pyplot as plt # submodule of matplotlib
# To display plots, maps, charts etc in the notebook
# %matplotlib inline
# -
# ## 3.1 California County shapefile
# Let's go ahead and bring back in our California County shapefile. As before, we can read the file in using `gpd.read_file` and plot it straight away.
counties = gpd.read_file('notebook_data/california_counties/CaliforniaCounties.shp')
counties.plot(color='darkgreen')
# Even if we have an awesome map like this, sometimes we want to have more geographical context, or we just want additional information. We're going to try **overlaying** our counties GeoDataFrame on our USA states shapefile.
# ## 3.2 USA State shapefile
#
# We're going to bring in our states geodataframe, and let's do the usual operations to start exploring our data.
# Read in states shapefile
states = gpd.read_file('notebook_data/us_states/us_states.shp')
# Look at the first few rows
states.head()
# Count how many rows and columns we have
states.shape
# Plot our states data
states.plot()
# You might have noticed that our plot extends beyond the 50 states (which we also saw when we executed the `shape` method). Let's double check what states we have included in our data.
states['STATE'].values
# Beyond the 50 states we seem to have American Samoa, Puerto Rico, Guam, Commonwealth of the Northern Mariana Islands, and United States Virgin Islands included in this geodataframe. To make our map cleaner, let's limit the states to the contiguous states (so we'll also exclude Alaska and Hawaii).
# Define list of non-contiguous states
non_contiguous_us = [ 'American Samoa','Puerto Rico','Guam',
'Commonwealth of the Northern Mariana Islands',
'United States Virgin Islands', 'Alaska','Hawaii']
# Limit data according to above list
states_limited = states.loc[~states['STATE'].isin(non_contiguous_us)]
# Plot it
states_limited.plot()
# To prepare for our mapping overlay, let's make our states a nice, light grey color.
states_limited.plot(color='lightgrey', figsize=(10,10))
# ## 3.3 Plot the two together
#
# Now that we have both geodataframes in our environment, we can plot both in the same figure.
#
# **NOTE**: To do this, note that we're getting a Matplotlib Axes object (`ax`), then explicitly adding each our layers to it
# by providing the `ax=ax` argument to the `plot` method.
fig, ax = plt.subplots(figsize=(10,10))
counties.plot(color='darkgreen',ax=ax)
states_limited.plot(color='lightgrey', ax=ax)
# Oh no, what happened here?
#
# <img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="20" align=left > **Question** Without looking ahead, what do you think happened?
#
#
# + active=""
# Your response here:
#
#
#
#
#
#
# -
# <br>
# <br>
# If you look at the numbers we have on the x and y axes in our two plots, you'll see that the county data has much larger numbers than our states data. It's represented in some different type of unit other than decimal degrees!
#
# In fact, that means if we zoom in really close into our plot we'll probably see the states data plotted.
# %matplotlib inline
fig, ax = plt.subplots(figsize=(10,10))
counties.plot(color='darkgreen',ax=ax)
states_limited.plot(color='lightgrey', ax=ax)
ax.set_xlim(-140,-50)
ax.set_ylim(20,50)
# This is a key issue that you'll have to resolve time and time again when working with geospatial data!
#
# It all revolves around **coordinate reference systems** and **projections**.
# ----------------------------
#
# ## 3.4 Coordinate Reference Systems (CRS)
# <img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="20" align=left > **Question** Do you have experience with Coordinate Reference Systems?
# + active=""
# Your response here:
#
#
#
#
#
#
# -
# <br><br>As a refresher, a CRS describes how the coordinates in a geospatial dataset relate to locations on the surface of the earth.
#
# A `geographic CRS` consists of:
# - a 3D model of the shape of the earth (a **datum**), approximated as a sphere or spheroid (aka ellipsoid)
# - the **units** of the coordinate system (e.g, decimal degrees, meters, feet) and
# - the **origin** (i.e. the 0,0 location), specified as the meeting of the **equator** and the **prime meridian**(
#
# A `projected CRS` consists of
# - a geographic CRS
# - a **map projection** and related parameters used to transform the geographic coordinates to `2D` space.
# - a map projection is a mathematical model used to transform coordinate data
#
# ### A Geographic vs Projected CRS
# <img src ="https://www.e-education.psu.edu/natureofgeoinfo/sites/www.e-education.psu.edu.natureofgeoinfo/files/image/projection.gif" height="100" width="500">
# #### There are many, many CRSs
#
# Theoretically the number of CRSs is unlimited!
#
# Why? Primariy, because there are many different definitions of the shape of the earth, multiplied by many different ways to cast its surface into 2 dimensions. Our understanding of the earth's shape and our ability to measure it has changed greatly over time.
#
# #### Why are CRSs Important?
#
# - You need to know the data about your data (or `metadata`) to use it appropriately.
#
#
# - All projected CRSs introduce distortion in shape, area, and/or distance. So understanding what CRS best maintains the characteristics you need for your area of interest and your analysis is important.
#
#
# - Some analysis methods expect geospatial data to be in a projected CRS
# - For example, `geopandas` expects a geodataframe to be in a projected CRS for area or distance based analyses.
#
#
# - Some Python libraries, but not all, implement dynamic reprojection from the input CRS to the required CRS and assume a specific CRS (WGS84) when a CRS is not explicitly defined.
#
#
# - Most Python spatial libraries, including Geopandas, require geospatial data to be in the same CRS if they are being analysed together.
#
# #### What you need to know when working with CRSs
#
# - What CRSs used in your study area and their main characteristics
# - How to identify, or `get`, the CRS of a geodataframe
# - How to `set` the CRS of geodataframe (i.e. define the projection)
# - How to `transform` the CRS of a geodataframe (i.e. reproject the data)
# ### Codes for CRSs commonly used with CA data
#
# CRSs are typically referenced by an [EPSG code](http://wiki.gis.com/wiki/index.php/European_Petroleum_Survey_Group).
#
# It's important to know the commonly used CRSs and their EPSG codes for your geographic area of interest.
#
# For example, below is a list of commonly used CRSs for California geospatial data along with their EPSG codes.
#
# ##### Geographic CRSs
# -`4326: WGS84` (units decimal degrees) - the most commonly used geographic CRS
#
# -`4269: NAD83` (units decimal degrees) - the geographic CRS customized to best fit the USA. This is used by all Census geographic data.
#
# > `NAD83 (epsg:4269)` are approximately the same as `WGS84(epsg:4326)` although locations can differ by up to 1 meter in the continental USA and elsewhere up to 3m. That is not a big issue with census tract data as these data are only accurate within +/-7meters.
# ##### Projected CRSs
#
# -`5070: CONUS NAD83` (units meters) projected CRS for mapping the entire contiguous USA (CONUS)
#
# -`3857: Web Mercator` (units meters) conformal (shape preserving) CRS used as the default in web mapping
#
# -`3310: CA Albers Equal Area, NAD83` (units meters) projected CRS for CA statewide mapping and spatial analysis
#
# -`26910: UTM Zone 10N, NAD83` (units meters) projected CRS for northern CA mapping & analysis
#
# -`26911: UTM Zone 11N, NAD83` (units meters) projected CRS for Southern CA mapping & analysis
#
# -`102641 to 102646: CA State Plane zones 1-6, NAD83` (units feet) projected CRS used for local analysis.
#
# You can find the full CRS details on the website https://www.spatialreference.org
# ## 3.5 Getting the CRS
#
# ### Getting the CRS of a gdf
#
# GeoPandas GeoDataFrames have a `crs` attribute that returns the CRS of the data.
counties.crs
states_limited.crs
# As we can clearly see from those two printouts (even if we don't understand all the content!),
# the CRSs of our two datasets are different! **This explains why we couldn't overlay them correctly!**
# -----------------------------------------
# The above CRS definition specifies
# - the name of the CRS (`WGS84`),
# - the axis units (`degree`)
# - the shape (`datum`),
# - and the origin (`Prime Meridian`, and the equator)
# - and the area for which it is best suited (`World`)
#
# > Notes:
# > - `geocentric` latitude and longitude assume a spherical (round) model of the shape of the earth
# > - `geodetic` latitude and longitude assume a spheriodal (ellipsoidal) model, which is closer to the true shape.
# > - `geodesy` is the study of the shape of the earth.
# **NOTE**: If you print a `crs` call, Python will just display the EPSG code used to initiate the CRS object. Depending on your versions of Geopandas and its dependencies, this may or may not look different from what we just saw above.
print(states_limited.crs)
# ## 3.6 Setting the CRS
#
# You can also set the CRS of a gdf using the `crs` attribute. You would set the CRS if is not defined or if you think it is incorrectly defined.
#
# > In desktop GIS terminology setting the CRS is called **defining the CRS**
#
# As an example, let's set the CRS of our data to `None`
# first set the CRS to None
states_limited.crs = None
# Check it again
states_limited.crs
# ...hummm...
#
# If a variable has a null value (None) then displaying it without printing it won't display anything!
# Check it again
print(states_limited.crs)
# Now we'll set it back to its correct CRS.
# Set it to 4326
states_limited.crs = "epsg:4326"
# Show it
states_limited.crs
# **NOTE**: You can set the CRS to anything you like, but **that doesn't make it correct**! This is because setting the CRS does not change the coordinate data; it just tells the software how to interpret it.
# ## 3.7 Transforming or Reprojecting the CRS
# You can transform the CRS of a geodataframe with the `to_crs` method.
#
#
# > In desktop GIS terminology transforming the CRS is called **projecting the data** (or **reprojecting the data**)
#
# When you do this you want to save the output to a new GeoDataFrame.
states_limited_utm10 = states_limited.to_crs( "epsg:26910")
# Now take a look at the CRS.
states_limited_utm10.crs
# You can see the result immediately by plotting the data.
# +
# plot geographic gdf
states_limited.plot();
plt.axis('square');
# plot utm gdf
states_limited_utm10.plot();
plt.axis('square')
# +
# Your thoughts here
# -
# <div style="display:inline-block;vertical-align:top;">
# <img src="http://www.pngall.com/wp-content/uploads/2016/03/Light-Bulb-Free-PNG-Image.png" width="30" align=left >
# </div>
# <div style="display:inline-block;">
#
# #### Questions
# </div>
#
# 1. What two key differences do you see between the two plots above?
# 1. Do either of these plotted USA maps look good?
# 1. Try looking at the common CRS EPSG codes above and see if any of them look better for the whole country than what we have now. Then try transforming the states data to the CRS that you think would be best and plotting it. (Use the code cell two cells below.)
# + active=""
# Your responses here:
#
#
#
#
#
#
# +
# YOUR CODE HERE
# -
# **Double-click to see solution!**
#
# <!--
# #SOLUTION
# states_limited_conus = states_limited.to_crs("epsg:5070")
# states_limited_conus.plot();
# plt.axis('square')
# -->
# ## 3.8 Plotting states and counties together
#
# Now that we know what a CRS is and how we can set them, let's convert our counties GeoDataFrame to match up with out states' crs.
# Convert counties data to NAD83
counties_utm10 = counties.to_crs("epsg:26910")
counties_utm10.plot()
# Plot it together!
fig, ax = plt.subplots(figsize=(10,10))
states_limited_utm10.plot(color='lightgrey', ax=ax)
counties_utm10.plot(color='darkgreen',ax=ax)
# Since we know that the best CRS to plot the contiguous US from the above question is 5070, let's also transform and plot everything in that CRS.
# Transform the counties to CONUS Albers (EPSG:5070), the projected CRS best
# suited to mapping the whole contiguous USA
counties_conus = counties.to_crs("epsg:5070")
# BUG FIX: states_limited_conus was only created inside the commented-out
# exercise solution above, so this cell raised NameError when run on its own.
# Define it here explicitly.
states_limited_conus = states_limited.to_crs("epsg:5070")
fig, ax = plt.subplots(figsize=(10,10))
states_limited_conus.plot(color='lightgrey', ax=ax)
counties_conus.plot(color='darkgreen',ax=ax)
# ## 3.9 Recap
#
# In this lesson we learned about...
# - Coordinate Reference Systems
# - Getting the CRS of a geodataframe
# - `crs`
# - Transforming/repojecting CRS
# - `to_crs`
# - Overlaying maps
# ## Exercise: CRS Management
#
# Now it's time to take a crack and managing the CRS of a new dataset. In the code cell below, write code to:
#
# 1. Bring in the CA places data (`notebook_data/census/Places/cb_2018_06_place_500k.zip`)
# 2. Check if the CRS is EPSG code 26910. If not, transform the CRS
# 3. Plot the California counties and places together.
#
# To see the solution, double-click the Markdown cell below.
# +
# YOUR CODE HERE
# -
# ## Double-click to see solution!
#
# <!--
#
# # SOLUTION
#
# # 1. Bring in the CA places data
# california_places = gpd.read_file('zip://notebook_data/census/Places/cb_2018_06_place_500k.zip')
# california_places.head()
#
# # 2. Check and transorm the CRS if needed
# california_places.crs
# california_places_utm10 = california_places.to_crs( "epsg:26910")
#
# # 3. Plot the California counties and places together
# fig, ax = plt.subplots(figsize=(10,10))
# counties_utm10.plot(color='lightgrey', ax=ax)
# california_places_utm10 .plot(color='purple',ax=ax)
#
# -->
# ---
# <div style="display:inline-block;vertical-align:middle;">
# <a href="https://dlab.berkeley.edu/" target="_blank"><img src ="assets/images/dlab_logo.png" width="75" align="left">
# </a>
# </div>
#
# <div style="display:inline-block;vertical-align:middle;">
# <div style="font-size:larger"> D-Lab @ University of California - Berkeley</div>
# <div> Team Geo<div>
# </div>
#
#
| 03_CRS_Map_Projections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 9.2
# language: sage
# name: sagemath
# ---
def root(f, xs:(float, float, float), iter: int):
    """Approximate a root of polynomial ``f`` using Muller's method (Sage).

    xs   -- three initial guesses; the list is mutated in place: each step
            the newest estimate is pushed to the front and the oldest dropped.
    iter -- number of Muller iterations to run (a Sage Integer).

    NOTE(review): ``^`` is exponentiation only under the Sage preparser (in
    plain Python it would be XOR), and ``iter`` shadows the Python builtin.
    """
    assert len(xs)==3, "xs must have exactly three elements"
    assert type(iter) == Integer, " iter must be an integer"
    # NOTE(review): `f.parent` is the bound method (missing parentheses), and
    # P is never used afterwards — this line is effectively dead.
    P = f.parent
    for _ in range(iter):
        # x0 / x_1 / x_2 are assigned but never used; the formulas below
        # index `xs` directly instead.
        x0 = xs[0]
        x_1 = xs[-2]
        x_2 = xs[-1]
        # Muller's q: ratio of the two most recent step sizes.
        q = (xs[0] - xs[-2])/ (xs[-2] - xs[-1])
        # Coefficients of the interpolating quadratic in standard Muller form.
        A = q*f(xs[0]) - q*(1-q)*f(xs[-2]) + q^2*f(xs[-1])
        B = (2*q+1)*f(xs[0]) - (1+q)^2*f(xs[-2]) + q^2*f(xs[-1])
        C = (1+q)*f(xs[0])
        delta = sqrt(B^2 - 4*A*C)
        # Divide by whichever of B±delta has larger magnitude to avoid
        # catastrophic cancellation in the quadratic formula.
        xi = xs[0] - (xs[0]-xs[-2])*(2*C)/max([B+delta, B-delta], key= abs)
        xs.pop()
        xs.insert(0, xi)
    return (xs[0])
#Test
# Polynomial ring over the reals (Sage-specific syntax, not plain Python).
P.<x> = RR[]
f = P((x+3)*(x-7))
# Roots are -3 and 7; 10 iterations from a far-off bracket should converge.
root(f, [10, 20, 30], 10)
# This
P.<x> = RR[]
f = P(x^2-612); show(f)
# sqrt(612) ~ 24.7386; 5 iterations already gets close.
root(f, [10, 20, 30], 5)
# +
# from typing import *
# from cmath import sqrt # Use the complex sqrt as we may generate complex numbers
# Num = Union[float, complex]
# Func = Callable[[Num], Num]
# def div_diff(f: Func, xs: List[Num]):
# """Calculate the divided difference f[x0, x1, ...]."""
# if len(xs) == 2:
# a, b = xs
# return (f(a) - f(b)) / (a - b)
# else:
# return (div_diff(f, xs[1:]) - div_diff(f, xs[0:-1])) / (xs[-1] - xs[0])
# def mullers_method(f: Func, xs: (Num, Num, Num), iterations: int) -> float:
# """Return the root calculated using Muller's method."""
# x0, x1, x2 = xs
# for _ in range(iterations):
# w = div_diff(f, (x2, x1)) + div_diff(f, (x2, x0)) - div_diff(f, (x2, x1))
# s_delta = sqrt(w ** 2 - 4 * f(x2) * div_diff(f, (x2, x1, x0)))
# denoms = [w + s_delta, w - s_delta]
# # Take the higher-magnitude denominator
# x3 = x2 - 2 * f(x2) / max(denoms, key=abs)
# # Advance
# x0, x1, x2 = x1, x2, x3
# return x3
# def f_example(x: Num) -> Num:
# """The example function. With a more expensive function, memoization of the last 4 points called may be useful."""
# return x ** 2 - 612
# root = mullers_method(f, (10, 20, 30), 10)
# print("Root: {}".format(root)) # Root: (24.738633317099097+0j)
# -
# Secant Method
def secant_method(f, x0, x1, iterations):
    """Approximate a root of ``f`` with the secant method.

    Starting from the guesses ``x0`` and ``x1``, each step replaces the
    older point by the intercept of the secant line through the two most
    recent points.  Returns the last intercept (``iterations`` >= 1).
    """
    for _ in range(iterations):
        denom = float(f(x1) - f(x0))
        x_next = x1 - f(x1) * (x1 - x0) / denom
        x0, x1 = x1, x_next
    return x_next
def f_example(x):
    """Example objective f(x) = x**2 - 612, with roots at ±sqrt(612)."""
    return (x * x) - 612
# +
# NOTE(review): this calls the *Sage* polynomial ``f`` defined in an earlier
# cell (x^2 - 612), not the plain-Python ``f_example`` above — presumably
# intentional since both encode the same function, but worth confirming.
root = secant_method(f, 10, 30, 5)
print("Root: {}".format(root)) # Root: 24.738633748750722
# -
| Muller's_Method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
# Load the raw IMDB reviews and their sentiment labels (one entry per line).
with open('../sentiment-network/reviews.txt', 'r') as f:
    reviews = f.read()
with open('../sentiment-network/labels.txt', 'r') as f:
    labels_org = f.read()
reviews[:2000]
# ## Data preprocessing
# +
from string import punctuation
# Strip punctuation from the whole blob, then split back into one review per
# line and build the flat token list used for the vocabulary.
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
# -
# ### Encoding the words
#
# +
# # Create your dictionary that maps vocab words to integers here
# vocab = set(words)
# vocab_to_int = {w: i for i, w in enumerate(vocab, 1)}
# print(len(vocab_to_int))
# # Convert the reviews to integers, same shape as reviews list, but with integers
# reviews_ints = []
# for r in reviews:
# ri = [vocab_to_int.get(w) for w in r if vocab_to_int.get(w) is not None]
# reviews_ints.append(ri)
# reviews_ints[:10]
from collections import Counter
# Frequency table over the full token stream.
counts = Counter(words)
# Vocabulary ordered from most to least frequent (ties keep first-seen order).
vocab = [token for token, _freq in counts.most_common()]
# Integer ids start at 1; 0 is reserved for padding later on.
vocab_to_int = {token: rank for rank, token in enumerate(vocab, 1)}
# Encode each review as the sequence of integer ids of its tokens.
reviews_ints = [[vocab_to_int[token] for token in review.split()] for review in reviews]
# -
print(len(reviews_ints))
# ### Encoding the labels
# Convert labels to 1s and 0s for 'positive' and 'negative'
# print(labels_org)
labels = np.array([1 if l == "positive" else 0 for l in labels_org.split()])
# print(labels)
print(len(labels))
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
# Filter out that review with 0 length
# NOTE(review): besides dropping zero-length reviews this also truncates each
# review to its first 200 ids, and `labels` is NOT filtered in step with the
# dropped reviews, so features and labels can fall out of alignment — verify.
reviews_ints = [r[0:200] for r in reviews_ints if len(r) > 0]
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
# +
seq_len = 200
# Rows are LEFT-padded with 0 up to seq_len (data sits at the right edge).
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
# print(features[:10,:100])
for i, row in enumerate(reviews_ints):
    features[i, -len(row):] = np.array(row)[:seq_len]
features[:10,:100]
# Earlier right-padding attempts, kept for reference:
# features = []
# for r in reviews_ints:
#     features.append(np.pad(r, (0, 200 - len(r)), 'constant').tolist())
# interesting = []
# for i, r in enumerate(reviews_ints):
#     if len(r) < 200:
#         interesting.append(i)
# features = np.array([np.pad(r, (len(r), 200-len(r)), 'constant').tolist() for r in reviews_ints])
# +
# print(interesting)
print(len(features))
print(type(features))
print(features[41])
print(len(features[41]))
print(reviews_ints[41])
print(len(reviews_ints[41]))
# features[:100]
# -
# ## Training, Validation, Test
#
#
# With our data in nice shape, we'll split it into training, validation, and test sets.
# +
# 80% train; the remaining 20% is split evenly into validation and test.
split_frac = 0.8
split_index = int(split_frac * len(features))
train_x, val_x = features[:split_index], features[split_index:]
train_y, val_y = labels[:split_index], labels[split_index:]
split_frac = 0.5
split_index = int(split_frac * len(val_x))
val_x, test_x = val_x[:split_index], val_x[split_index:]
val_y, test_y = val_y[:split_index], val_y[split_index:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
      "\nValidation set: \t{}".format(val_x.shape),
      "\nTest set: \t\t{}".format(test_x.shape))
print("label set: \t\t{}".format(train_y.shape),
      "\nValidation label set: \t{}".format(val_y.shape),
      "\nTest label set: \t\t{}".format(test_y.shape))
# -
# Model hyperparameters: LSTM hidden units, stacked layers, batch size, LR.
lstm_size = 256
lstm_layers = 2
batch_size = 1000
learning_rate = 0.01
# For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be `batch_size` vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
# +
n_words = len(vocab_to_int) + 1 # Add 1 for 0 added to vocab
# Create the graph object
tf.reset_default_graph()
# TF1 graph-mode placeholders; batch and sequence length are left dynamic.
with tf.name_scope('inputs'):
    inputs_ = tf.placeholder(tf.int32, [None, None], name="inputs")
    labels_ = tf.placeholder(tf.int32, [None, None], name="labels")
    # Dropout keep probability (fed as 1.0 at validation/test time).
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# +
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with tf.name_scope("Embeddings"):
    # Trainable word embeddings, initialised uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
    embed = tf.nn.embedding_lookup(embedding, inputs_)
# -
# ### LSTM cell
# +
def lstm_cell():
    """Build one dropout-wrapped BasicLSTMCell (TF1 contrib API).

    Relies on globals from the surrounding cells: `lstm_size` and the
    `keep_prob` placeholder.
    """
    # Your basic LSTM cell
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size, reuse=tf.get_variable_scope().reuse)
    # Add dropout to the cell
    return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
with tf.name_scope("RNN_layers"):
    # Stack up multiple LSTM layers, for deep learning
    cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(lstm_layers)])
    # Getting an initial state of all zeros
    initial_state = cell.zero_state(batch_size, tf.float32)
# -
# ### RNN forward pass
with tf.name_scope("RNN_forward"):
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
# ### Output
# +
# Sigmoid on the LAST time step only -> probability the review is positive.
with tf.name_scope('predictions'):
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    tf.summary.histogram('predictions', predictions)
# Mean-squared error between the 0/1 label and the sigmoid output.
with tf.name_scope('cost'):
    cost = tf.losses.mean_squared_error(labels_, predictions)
    tf.summary.scalar('cost', cost)
with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
merged = tf.summary.merge_all()
# -
# ### Validation accuracy
#
# Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
with tf.name_scope('validation'):
    # Round the sigmoid output to 0/1 and compare against the integer label.
    correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# ### Batching
#
# This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the `x` and `y` arrays and returns slices out of those arrays with size `[batch_size]`.
def get_batches(x, y, batch_size=100):
    """Yield aligned (x, y) slices of exactly ``batch_size`` items each.

    Trailing items that do not fill a complete batch are dropped.
    """
    usable = (len(x) // batch_size) * batch_size
    x, y = x[:usable], y[:usable]
    for start in range(0, usable, batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]
# ## Training
#
# Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the `checkpoints` directory exists.
# +
epochs = 10
# with graph.as_default():
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('./logs/tb/train', sess.graph)
    test_writer = tf.summary.FileWriter('./logs/tb/test', sess.graph)
    iteration = 1
    for e in range(epochs):
        # Carry the LSTM state across batches within one epoch.
        state = sess.run(initial_state)
        for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
            feed = {inputs_: x,
                    labels_: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            summary, loss, state, _ = sess.run([merged, cost, final_state, optimizer], feed_dict=feed)
            # loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
            train_writer.add_summary(summary, iteration)
            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))
            if iteration%25==0:
                val_acc = []
                # Fresh zero state for the validation pass.
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(val_x, val_y, batch_size):
                    feed = {inputs_: x,
                            labels_: y[:, None],
                            keep_prob: 1,
                            initial_state: val_state}
                    # batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    summary, batch_acc, val_state = sess.run([merged, accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))
            iteration +=1
            test_writer.add_summary(summary, iteration)
    saver.save(sess, "checkpoints/sentiment_manish.ckpt")
# NOTE(review): this second save runs after the `with` block, i.e. after the
# session has been closed — it looks redundant; confirm and remove.
saver.save(sess, "checkpoints/sentiment_manish.ckpt")
# -
# ## Testing
# Restore the trained weights and measure accuracy on the held-out test set.
test_acc = []
with tf.Session() as sess:
    saver.restore(sess, "checkpoints/sentiment_manish.ckpt")
    # Zero initial state; keep_prob is 1 (no dropout) at test time.
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
        feed = {inputs_: x,
                labels_: y[:, None],
                keep_prob: 1,
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
| tensorflow/LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: speech
# language: python
# name: speech
# ---
# + [markdown] id="_otKXY13Z1US"
# # Get Started with pulling the code
# + id="xyneRi2qZ1Ud"
import os
from getpass import getpass
import urllib
# Prompt for GitHub credentials to clone a private repository.
user = input('User name: ')
password = getpass('Password: ')
password = urllib.parse.quote(password) # your password is converted into url format
repo_name = "UNAST.git"
# NOTE(review): embedding the password in the clone URL exposes it to shell
# history / process listings; prefer a credential helper or SSH keys.
cmd_string = 'git clone https://{0}:{1}@github.com/{0}/{2}'.format(user, password, repo_name)
# !{cmd_string}
# + id="m5ZvPma2auUn"
# %cd UNAST
# !git checkout model-implementation/lucas
# -
# # Add parent directory to path if needed
import os
import sys
# Make the repository root importable from this notebook's directory.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
# + [markdown] id="YmtIHkAJZ1Ue"
# ## Lets Make some dummy data
# + id="4lCLanS9bz09"
import torch
from module import RNNEncoder, RNNDecoder
from network import Discriminator
# + id="-nJPK6R6Z1Uf"
# [batch_size x seq_len x hidden_dim] expected into the network
hidden = 512
latent = 64
out = 100
network_in_shape = (128, 40, 512)
dummy = torch.randn(network_in_shape)
# + [markdown] id="oUj5SLPKZ1Ug"
# ## Let's Make a dummy network
# + id="<KEY>"
encoder = RNNEncoder(hidden, hidden, latent, num_layers=5, bidirectional=False)
decoder = RNNDecoder(latent, hidden, hidden, out, num_layers=5, attention=True)
discriminator = Discriminator(hidden)
# + [markdown] id="UrK13mLbZ1Ug"
# ## Now, run the network and lets see how we do!
# + id="itYpyerZcxYr"
# Smoke-test the encoder on random input and inspect the shapes it returns.
output, (latent_hidden, latent_cell) = encoder(dummy)
print(output.shape)
print(latent_hidden.shape)
print(latent_cell.shape)
print(latent_hidden.shape)
# NOTE(review): `input` shadows the Python builtin; the decoder call below is
# commented out, so only the encoder and discriminator are exercised here.
input = latent_hidden.permute(1, 0, 2)
#mask = torch.zeros(dummy.shape[0:2])
#print("MASK shape", mask.shape)
#output_probs, hidden = decoder(input[:, -1:, :], (latent_hidden, latent_cell), output, mask)
#print("\nDecoder output shapes: ")
#print(output_probs.shape)
# Feed the last-layer hidden state into the discriminator.
discriminator_out = discriminator(latent_hidden[-1])
print("\nDiscriminator output shape:")
print(discriminator_out.shape)
# -
# ## Testing Smoothed CE loss
# Build a fake prediction and a one-hot-style soft target of matching shape.
fake_output = torch.zeros_like(discriminator_out[0])
fake_output[:,] = torch.tensor([1,])
fake_output.shape
empty_target = torch.zeros_like(fake_output)
empty_target[:,] = torch.tensor([1,0])
empty_target.shape
empty_target
import torch.nn.functional as F
import torch.nn as nn
def cross_entropy(input, target, size_average=True):
    """Cross entropy that accepts soft (probability-distribution) targets.

    Args:
        input: raw logits, shape (batch, classes).
        target: per-class target weights (may be soft), same shape as input.
        size_average: if True return the batch mean, otherwise the batch sum.

    Examples::

        input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
        target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
        loss = cross_entropy(input, target)
    """
    log_probs = nn.LogSoftmax(1)(input)
    # Per-sample soft cross entropy: -sum_c target_c * log p_c.
    per_sample = torch.sum(-target * log_probs, dim=1)
    return torch.mean(per_sample) if size_average else torch.sum(per_sample)
# Sanity-check the soft-target loss on the discriminator output.
cross_entropy(discriminator_out[0], empty_target)
# Hand-built logits against hard one-hot targets.
cross_entropy(torch.Tensor([[1,0], [2,0]]), torch.Tensor([[1,0], [1,0]]))
# Reference value: built-in cross_entropy with integer class targets.
F.cross_entropy(torch.FloatTensor([[1,0], [2,0]]), torch.LongTensor([0,0]))
| src/notebooks/testing_network.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Various things can go wrong when you import your data into Pandas. Some of these are immediately obvious; others only appear later, in confusing forms.
#
# This page covers one common problem when loading data into Pandas --- text encoding.
# ## Pandas and encoding
import numpy as np
import pandas as pd
# Raise (instead of warn) on chained assignment so such bugs surface early.
pd.set_option('mode.chained_assignment','raise')
# Consider the following annoying situation. You can download the data file from [imdblet_latin.csv]({{ site.baseurl }}/data/imdblet_latin.csv).
# + tags=["raises-exception"]
# Expected to fail: the file is Latin-1 encoded but read_csv assumes UTF-8.
films = pd.read_csv('imdblet_latin.csv')
# -
# The next sections are about why this happens, and therefore, how to fix it.
# ## Text encoding
#
# When the computer stores text in memory, or on disk, it must represent the
# characters in the text with numbers, because numbers are the computer's basic
# units of storage.
#
# The traditional unit of memory size, or disk size, is the
# [byte](https://en.wikipedia.org/wiki/Byte). Nowadays, the term byte means a
# single number that can take any value between 0 through 255. Specifically, a
# byte is a binary number with 8 binary digits, so it can store $2^8 = 256$
# different values --- 0 through 255.
#
# We can think of everything that the computer stores, in memory or on disk, as
# bytes --- units of information in memory, represented as numbers.
#
# This is also true for text. For example, here is a short piece of text:
# A short piece of text
name = 'Pandas'
# Somewhere in the computer's memory, Python has recorded "Pandas" as a series of
# bytes, in a format that it understands.
#
# When the computer writes this information into a file, it has to decide how to
# convert its own version of the text "Pandas" into bytes that other programs
# will understand. That is, it needs to convert its own format into a standard
# sequence of numbers (bytes) that other programs will recognize as the text
# "Pandas".
#
# This process of converting from Python's own format to a standard sequence of
# bytes, is called *encoding*. Whenever Python --- or any other program ---
# writes text to a file, it has to decide how to *encode* that text as a sequence
# of bytes.
#
# There are various standard ways of encoding text as numbers. One very common
# encoding is called [8-bit Unicode Transformation
# Format](https://en.wikipedia.org/wiki/UTF-8) or "UTF-8" for short. Almost all
# web page files use this format. Your web browser knows how to translate the
# numbers in this format into text to show on screen.
#
# We can see that process in memory, in Python, like this.
# Convert the text in "name" into bytes.
name_as_utf8_bytes = name.encode('utf-8')
# Show the bytes as numbers
list(name_as_utf8_bytes)
# In the UTF-8 coding scheme, the number 80 stands for the character 'P', 97
# stands for 'a', and so on. Notice that for these standard English alphabet
# characters, UTF-8 stores each character with a single byte (80 for 'P' , 97 for
# 'a' etc).
#
# We can go the opposite direction, and *decode* the sequence of numbers (bytes)
# into a piece of text, like this:
# Convert the sequence of numbers (bytes) into text again.
name_again = name_as_utf8_bytes.decode('utf-8')
name_again
# UTF-8 is a particularly useful encoding, because it defines standard sequences
# of bytes that represent an enormous range of characters, including, for
# example, Mandarin and Cantonese Chinese characters.
# Hello in Mandarin.
mandarin_hello = "你好"
hello_as_bytes = mandarin_hello.encode('utf-8')
list(hello_as_bytes)
# Notice that, this time, UTF-8 used three bytes to represent each of the two
# Mandarin characters.
#
# Another common, but less useful encoding is called [Latin
# 1](https://en.wikipedia.org/wiki/ISO/IEC_8859-1) or ISO-8859-1. This encoding
# only defines ways to represent text characters in the standard [Latin
# alphabet](https://en.wikipedia.org/wiki/Latin_script). This is the standard
# English alphabet plus a range of other characters from other European
# languages, including characters with accents.
#
# For English words using the standard English alphabet, Latin 1 uses the same
# set of character-to-byte mappings as UTF-8 does -- 80 for 'P' and so on:
name_as_latin1_bytes = name.encode('latin1')
list(name_as_latin1_bytes)
# The differences show up when the encodings generate bytes for characters
# outside the standard English alphabet. Here's the surname of [Fernando
# Pérez](https://en.wikipedia.org/wiki/Fernando_P%C3%A9rez_(software_developer))
# one of the founders of the Jupyter project you are using here:
jupyter_person = 'Pérez'
# Here are the bytes that UTF-8 needs to store that name:
fp_as_utf8 = jupyter_person.encode('utf-8')
list(fp_as_utf8)
# Notice that UTF-8 still uses 80 for 'P'. The next two bytes --- 195 and 169
# --- represent the é in Fernando's name.
#
# In contrast, Latin 1 uses a single byte --- 233 -- to store the é:
fp_as_latin1 = jupyter_person.encode('latin1')
list(fp_as_latin1)
# Latin 1 has no idea what to do about Mandarin:
# + tags=["raises-exception"]
mandarin_hello.encode('latin1')
# -
# Now consider what will happen if the computer writes (encodes) some text in
# Latin 1 format, and then tries to read it (decode) assuming it is in UTF-8
# format:
# + tags=["raises-exception"]
fp_as_latin1.decode('utf-8')
# -
# It's a mess - because UTF-8 doesn't know how to interpret the bytes that Latin
# 1 wrote --- this sequence of bytes doesn't make sense in the UTF-8 encoding.
#
# Something similar happens when you write bytes (encode) text with UTF-8 and
# then read (decode) assuming the bytes are for Latin 1:
fp_as_utf8.decode('latin1')
# This time there is no error, because the bytes from UTF-8 do mean something to
# Latin 1 --- but the text is wrong, because those bytes mean something
# *different* in Latin 1 than they do for UTF-8.
# ## Fixing encoding errors in Pandas
#
# With this background, you may have guessed that the problem that we had at the
# top of this page was because someone has written a file where the text is in a
# different *encoding* than the one that Pandas assumed.
#
# In fact, Pandas assumes that text is in UTF-8 format, because it is so common.
#
# In this case, as the filename suggests, the bytes for the text are in Latin
# 1 encoding. We can tell Pandas about this with the `encoding=` option:
# Telling read_csv the file's true byte encoding fixes the earlier failure.
films = pd.read_csv('imdblet_latin.csv', encoding='latin1')
films.head()
| notebooks/07/text_encoding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 约瑟夫斯问题
#
# * Slug: josephus-problem
#
# * Date: 2018-07-11
#
# * Category: 面试
#
# * Tags: 算法
#
# * Author: timking
#
# * Summary: 面试中遇到的题目
# **题目如下:**
#
# n个人(编号1~n)围成一圈从编号为1的开始报数,从1报数到m,报到m的人出来,下一个人继续重新从1开始报数,编程求最后一个留下的人的编号
#
# 如n=3,m=4
#
# 第一次出队:1
#
# 第二次出队:3
#
# 最后留下:2
# ## 解答
#
# 该题的解答思路在于,完成第一步 $m\%n$ 之后,下一个需要重新开始报号。
#
# 这里有几种方法:
# ### 解答一
#
# 第一种利用数组完成,在第一步的人出队之后(设其下标为 `index`),将其后面的人 `[index+1:]` 切片移到前面的人 `[:index]` 之前,重新构造为第二步的数组。
#
# 代码如下:
def x(n, m):
    """Josephus survivor (1-based) for n people counting to m, via list rotation."""
    circle = list(range(1, n + 1))
    while len(circle) > 1:
        out = (m - 1) % len(circle)
        # Remove the counted-out person and rotate so counting restarts
        # from the person just after them.
        circle = circle[out + 1:] + circle[:out]
    return circle[0]
# ### 解答二
#
#
# 第二种同样利用数组,可以通过移动下标实现。
#
# 设第一次 `index` 为 0,表示从第一个开始算起,第一步出队的人的位置为 $(m-1)\%n$, 记为 `A1`, `A1` 出队之后 `A1` 后面的人都往前移一位。
#
#
# 第二步出队的人位置为 $(A1+m-1)\%(n-1)$ , 表示从上一步出队的人的位置开始算起。
# 由此可以得出,每一步出队的都是上一步的index计算得出, 得 $(index+m-1)\%len(list)$
#
# 代码如下:
def x(n, m):
    """Josephus survivor (1-based): track the elimination index in one list."""
    people = list(range(1, n + 1))
    position = 0
    while len(people) > 1:
        # Count m places onward from where the previous removal happened.
        position = (position + m - 1) % len(people)
        people.pop(position)
    return people[0]
# ### 解答三
#
# 第三种,则在第二种的基础上,更进一步,通过数学代换直接得到每一步应该出队的人。
#
# 假设从0排列开始,第一次排列为:
#
# `0, 1, 2, 3, 4 ... n-3, n-2, n-1` (共n项)
#
# 此时出队的人的编号为 $(m-1)\%n$ 。
#
# 设第二次应该从 k 开始算起,则有式子 $k=m\%n$, 同时第二次的排列为:
#
# `k, k+1, k+2, K+3, ... k-3, k-2` (共n-1项)
#
# 此时设 k = 0, 则该排列可以简单代换为 `n-1` 项的求出队问题:
#
# `0, 1, 2, 3, ..., n-3, n-2` (共n-1项)
#
# 同样的,往下代换。 即,对于`n`个人报数的问题,可以分解为先求解`(n–1)`个人报数的子问题;而对于`(n–1)`个人报数的子问题,又可分解为先求`((N–1)–1)`人个报数的子问题。
#
# 设当人数只有1个人的时候,不管怎么数编号都为 0:
#
# 则有 $f(1) = 0$。
#
# 当人数有2个人的时候,则有上一步的人的编号加 m,即:
#
# $f(2) = (f(1) + m)\%n$
#
# 以此类推,则有公式:
#
# $f(1) = 0 \\
# f(x) = (f(x-1) + m)\%n \qquad (x>1)$
#
# 最后我们实际编号是从1开始,则将结果加一。
#
# 代码如下:
def x(n, m):
    """Josephus survivor via the recurrence f(1)=0, f(k)=(f(k-1)+m)%k.

    Runs in O(n) time and O(1) space; the final +1 converts the 0-based
    recurrence result to the 1-based seat number.
    """
    survivor = 0
    for ring_size in range(2, n + 1):
        survivor = (survivor + m) % ring_size
    return survivor + 1
| content/interview/josephus-problem/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
#
# ## P7: Résolvez des problèmes en utilisant des algorithmes en Python #0
#
#
# Algorithme glouton
#
# ### 1. Préparation des données
#
#
# -
# modules importés
# lecture de fichiers csv et dict colonnes
import csv as csv
# nettoyage des caractères via expression regex
import re as re
# mesure du temps passé -> time spend over complexity
import time
# mesure de l'occupation mémoire space complexity
from sys import argv
# constants
FILE = "data/p7-20-shares.csv"          # default input file
FIELDNAMES = ['name', 'cost', 'profit']  # expected CSV columns
STEP = 100                               # work in integer cents to avoid float rounding
BUDGET = 500 * STEP
# check if file name was passed as parm to the script
if __name__ == '__main__':
    if len(argv) == 2:
        # BUG FIX: only `argv` is imported (`from sys import argv`); the
        # previous `sys.argv[1]` raised NameError whenever a file name was
        # actually passed on the command line.
        FILE = argv[1]
# +
def fn_timer(function):
    """Decorator: print the wall-clock run time of each call to *function*.

    The wrapped function's return value is passed through unchanged.
    """
    from functools import wraps  # local import keeps this cell self-contained

    # FIX: @wraps was commented out, so decorated functions lost their
    # __name__/__doc__ metadata (confusing in tracebacks and help()).
    @wraps(function)
    def function_timer(*args, **kwargs):
        t0 = time.perf_counter_ns()
        result = function(*args, **kwargs)
        t1 = time.perf_counter_ns()
        elapsed = (t1-t0)/1000000000  # ns -> s
        print(f"Total time running {function.__name__}: {str(elapsed)}s seconds")
        return result
    return function_timer
# -
# strips a string from its weird caracters
def clean_char(texte: str) -> str:
    """Keep only readable characters: letters, digits, decimal punctuation
    and signs.  Decimal commas are converted to dots first, and a leading
    '-' survives so negative profit values are preserved.
    """
    normalised = texte.replace(',','.')
    return re.sub(r"[^a-zA-Z0-9\-\.\,\+]", "", normalised)
""" lecture, nettoyage et chargement en dict.
les non valeurs NaN sont rejetées.
"""
action_dict = {}
file_name = FILE
try:
with open(file_name, "r", newline='', encoding='utf-8') as file:
csv_reader = csv.DictReader(file, fieldnames=FIELDNAMES,
delimiter=',', doublequote=False)
# skip the header
next(csv_reader)
compteur_ligne = 0
for idx, line in enumerate(csv_reader):
clean_data = True
if line[FIELDNAMES[0]] != "":
cle = clean_char(line[FIELDNAMES[0]])
else:
print(f" line {idx} had missing share name; dropped.")
clean_data = False
if line[FIELDNAMES[1]] != "":
cout = int(STEP * float(clean_char(line[FIELDNAMES[1]])))
if cout < 0 :
print(f" line {idx} had neg cost data; dropped.")
cout = 0
clean_data = False
if cout == 0 :
print(f" line {idx} had null cost data ; could have been a gift but management decision: dropped.")
cout = 0
clean_data = False
else:
print(f" line {idx} had missing cost data; dropped.")
clean_data = False
if line[FIELDNAMES[2]] != "":
gain_percent = int(STEP * float(clean_char(line[FIELDNAMES[2]])))
else:
print(f" line {idx} had missing profit percentage; dropped.")
clean_data = False
if gain_percent <= 0:
# TODO: check if to keep or not in any case comment ; as negativ can't be optimum
print(f"** line {idx} had negative profit percentage ; accepted but pls check. **")
print(' ',idx,line)
if clean_data:
action_dict[cle] = (cout, cout*gain_percent/STEP)
compteur_ligne += 1
else:
print(' ',idx,line)
print("nombre d'actions retenues: ", compteur_ligne)
except FileNotFoundError:
print(f" fichier non trouvé, Merci de vérifier son nom dans le répertoire data {file_name} : {FileNotFoundError}")
except IOError:
print(f" une erreur est survenue à l'écriture du fichier {file_name} : {IOError}")
#action_dict
# ## 2. Algorithme Glouton
#
# [Algorithme glouton — Wikipédia](https://fr.wikipedia.org/wiki/Algorithme_glouton)
#
# Un **algorithme glouton** (_greedy algorithm_ en anglais, parfois appelé aussi algorithme gourmand, ou goulu) est un [algorithme](https://fr.wikipedia.org/wiki/Algorithmique "Algorithmique") qui suit le principe de faire, étape par étape, un choix optimum local, dans l'espoir d'obtenir un résultat optimum global. Par exemple, dans le problème du rendu de monnaie (donner une somme avec le moins possible de pièces), l'algorithme consistant à répéter le choix de la pièce de plus grande valeur qui ne dépasse pas la somme restante est un algorithme glouton.
# Dans le système de pièces (1, 3, 4), l'algorithme glouton n'est pas optimal, comme le montre l'exemple simple suivant. Il donne pour 6 : 4+1+1, alors que 3+3 est optimal.
# Mais optimal par rapport à quelle contrainte? à priori le nombre pièce utilisé. Si c'était avoir moins de pièce dans son portemonnaie, la 1ère solution était "optimale".
# + tags=[]
# -
@fn_timer
def algo_glouton(dictio: dict[str,tuple], budget: int) -> tuple[int, list]:
    """Greedy portfolio selection: sort shares by profit density (profit/cost)
    and buy in that order while the budget allows.

    Args:
        dictio: {share_name: (cost, absolute_profit)}, cost > 0.
        budget: amount available, in the same unit as cost.

    Returns:
        (remaining_budget, list of selected share names).
    """
    # Highest profit-per-cost first; ties keep dict insertion order.
    action_trie = dict(sorted(dictio.items(), key=lambda x:x[1][1]/x[1][0], reverse=True))
    budget_left = budget
    action_selected: list[str] = []
    for cle,valeur in action_trie.items():
        # BUG FIX: was `> 0`, which wrongly rejected a share whose cost
        # exactly exhausts the remaining budget.
        if budget_left - valeur[0] >= 0:
            action_selected.append(cle)
            budget_left -= valeur[0]
    return (budget_left,action_selected)
# V2: run the greedy function on the data loaded above and report the total
# cost and profit back in currency units (undoing the STEP scaling).
reste_budget, action_pf = algo_glouton(action_dict,BUDGET)
print('At cost of ',(BUDGET-reste_budget)/STEP, ' ', action_pf, ' actions brought a profit of ',round(sum(list(map(lambda x: action_dict[x][1],action_pf)))/(STEP*STEP),2))
| P7_00_greedy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Automatic variational ABC](https://arxiv.org/abs/1606.08549) for exoplanets?
# %matplotlib inline
# %config IPython.matplotlib.backend = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 300
rcParams["figure.dpi"] = 300
# +
from autograd import grad
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from autograd.optimizers import adam
import matplotlib.pyplot as plt
# +
def completeness(x, y):
    """Detection probability: a logistic function of the expected S/N."""
    signal_to_noise = 1e3 * (y * y) * np.sqrt(2000.0 / x)
    logit = -0.3 * (signal_to_noise - 10.0)
    return 1.0 / (1 + np.exp(logit))
def power_law(u, n, mn, mx):
    """Inverse-CDF sample of a power law p(x) ~ x**n on [mn, mx].

    Maps a uniform deviate ``u`` in [0, 1) to the corresponding power-law
    variate; the n == -1 case degenerates to log-uniform sampling.
    """
    exponent = n + 1.0
    if np.allclose(exponent, 0.0):
        # Log-uniform limit of the power law.
        log_span = np.log(mx) - np.log(mn)
        return mn * np.exp(u * log_span)
    lo = mn ** exponent
    hi = mx ** exponent
    return (lo + u * (hi - lo)) ** (1.0 / exponent)
# -
# Synthetic "truth": a Poisson-distributed number of objects with log-uniform
# x in [1, 100] and power-law (index -1.5) y in [0.01, 0.1].
N_tot = 1000
K = np.random.poisson(0.5 * N_tot)
XY_true = np.vstack((
    10**np.random.uniform(0, 2, K),
    power_law(np.random.rand(K), -1.5, 0.01, 0.1),
)).T
Q = completeness(XY_true[:, 0], XY_true[:, 1])
# Observed catalogue: each true object is kept with probability Q.
XY_obs = np.array(XY_true[Q > np.random.rand(K)])
class Simulator(object):
    """Variational-ABC model of a toy detection survey.

    Holds the observed summary statistics and a factorised Gaussian prior
    over theta = (log occurrence fraction, x power-law index, y power-law
    index).  ``phi`` packs the variational parameters as interleaved
    (mean, log-sigma) pairs for those three components.
    """
    def __init__(self, XY, N_tot, x_range, y_range,
                 mu_lg, sig_lg, mu_nx, sig_nx, mu_ny, sig_ny):
        self.N_tot = N_tot
        self.x_range = x_range
        self.y_range = y_range
        # Summary statistics of the observed catalogue (the ABC target).
        self.stats_obs = self.stats(XY[:, 0], XY[:, 1])
        # Prior
        self.priors = [(mu_lg, sig_lg), (mu_nx, sig_nx), (mu_ny, sig_ny)]
    def kld(self, phi):
        """Total KL divergence between the variational Gaussians and the priors."""
        kld = 0.0
        for (mu1, lsig1), (mu2, sig2) in zip((phi[:2], phi[2:4], phi[4:6]), self.priors):
            sig1 = np.exp(lsig1)
            # Closed-form KL( N(mu1, sig1) || N(mu2, sig2) ).
            kld += np.log(sig2/sig1)+0.5*((sig1**2+(mu1-mu2)**2)/sig2**2-1.0)
        return kld
    def stats(self, x, y):
        """Summary statistics: scaled log-count and log-moments of x and y."""
        x, y = np.log(x), np.log(y)
        return np.array([10*np.log(len(x)), np.mean(x), np.log(np.var(x)), np.mean(y), np.log(np.var(y))])
    def _f(self, theta, u_exist, u_det, u_x, u_y):
        """Deterministic simulator: map uniform draws u_* to a detected catalogue."""
        # Each object exists with probability exp(theta[0]).
        q = u_exist < np.exp(theta[0])
        x = power_law(u_x[q], theta[1], *(self.x_range))
        y = power_law(u_y[q], theta[2], *(self.y_range))
        # Keep only objects that pass the completeness draw.
        q_det = completeness(x, y) > u_det[q]
        return x[q_det], y[q_det]
    def _g(self, phi, nu):
        """Reparameterisation: theta_i = mu_i + exp(log_sigma_i) * nu_i."""
        return np.array([
            phi[0] + np.exp(phi[1]) * nu[0],
            phi[2] + np.exp(phi[3]) * nu[1],
            phi[4] + np.exp(phi[5]) * nu[2],
        ])
    def simulate(self, theta):
        """Draw one synthetic detected catalogue for a fixed theta."""
        return self._f(theta, *(np.random.rand(4, self.N_tot)))
    def sample(self, phi):
        """Draw theta from the variational distribution, then simulate."""
        theta = self._g(phi, np.random.randn(3))
        return self.simulate(theta)
    def _log_p_eps(self, phi, nu, u, eps=0.1):
        """Gaussian ABC-kernel log-density of simulated vs observed statistics."""
        theta = self._g(phi, nu)
        sim = self._f(theta, *u)
        # Fewer than 2 detections: the variance statistics are undefined.
        if len(sim[0]) < 2:
            return -np.inf
        stats = self.stats(*sim)
        return -0.5 * np.sum((stats - self.stats_obs)**2 / eps**2 + np.log(2*np.pi*eps**2))
    def elbo_abc(self, phi, iteration, S=10, L=8):
        """Negative Monte-Carlo ELBO (S theta samples, L simulator draws each)."""
        print(iteration)
        nu = np.random.randn(S, 3)
        u = np.random.rand(L, 4, self.N_tot)
        lp = 0.0
        for s in range(S):
            lp += logsumexp(np.array([self._log_p_eps(phi, nu[s], u[l]) for l in range(L)]))
        lp /= S
        elbo = lp - np.log(L) - self.kld(phi)
        print(elbo)
        # adam minimises, so return the NEGATIVE ELBO.
        return -elbo
sim = Simulator(XY_obs, N_tot, (1.0, 100.0), (0.01, 0.1),
                -1.0, 5.0, 0.0, 3.0, 0.0, 3.0)
# autograd differentiates the (negative) ELBO with respect to phi.
objective_grad = grad(sim.elbo_abc)
init_params = np.array([np.log(0.5), np.log(0.001), -1.0, np.log(1.0), -1.5, np.log(1.0)])
# NOTE(review): batch_size and num_epochs are defined but never passed to
# adam below — confirm whether they were meant to be used.
batch_size = 256
num_epochs = 5
step_size = 0.01
optimized_params = adam(objective_grad, init_params, step_size=step_size,
                        num_iters=500) #, callback=print_perf)
# Means and sigmas of the fitted variational Gaussians.
optimized_params[::2], np.exp(optimized_params[1::2])
init_params
# Posterior-predictive check: simulate from the fitted phi and compare
# statistics and distributions against the observed catalogue.
x, y = sim.sample(optimized_params)
sim.stats(x, y)
sim.stats_obs
plt.plot(XY_obs[:, 0], XY_obs[:, 1], ".")
plt.plot(x, y, ".")
plt.xscale("log")
plt.yscale("log")
_, bins, _ = plt.hist(XY_obs[:, 0], histtype="step")
plt.hist(x, bins, histtype="step");
_, bins, _ = plt.hist(XY_obs[:, 1], histtype="step")
plt.hist(y, bins, histtype="step");
| variational.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
# End to End Project to understand Datascience with simple dataset
# -----------------------------------------------------------------
# +
# import all necessary libraries
import pandas as pd
# for numeric
import numpy as np
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import style
# -
# Get the data
full_titanic_df = pd.read_csv('full.csv')
full_titanic_df
# +
# To do the machine learning /predictions we will split the data into two data frames
# training dataset
# testing dataset
# 80/20 train/test split; the fixed random_state makes the split reproducible,
# and drop(training_data.index) guarantees the two sets are disjoint.
training_data = full_titanic_df.sample(frac=0.8, random_state=25)
testing_data = full_titanic_df.drop(training_data.index)
# -
training_data
testing_data
# Data Exploration/Analysis
# --------------------------
training_data.info()
# to understand the columns and identify columns with missing/empty values
training_data.describe()
training_data.head(15)
training_data.columns.values
# * What are the factors that could contribute to a high survival rate?
# * Passenger Id, Ticket, and Name definitely have no bearing on survival
# * Correlation might help us to find the relation/impact that a variable might have on survival.
# * This is where we can summarize with plots
# Subset the training data by sex for side-by-side survival histograms.
women = training_data[training_data['Sex'] == 'female']
women
men = training_data[training_data['Sex'] == 'male']
men
# * If we come up with a histogram with survived vs not survived for male and female this give us more information about data set
# +
survived = 'survived'
not_survived = 'not_survived'
# Age histograms of survivors vs. non-survivors, one panel per sex.
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 (histplot is the
# replacement) -- confirm the installed seaborn version before upgrading.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,4))
ax = sns.distplot(women[women['Survived']==1].Age.dropna(), bins=18, label= survived, ax=axes[0], kde=False)
ax = sns.distplot(women[women['Survived']==0].Age.dropna(), bins=18, label= not_survived, ax=axes[0], kde=False)
ax.legend()
ax.set_title('Female')
ax = sns.distplot(men[men['Survived']==1].Age.dropna(), bins=18, label= survived, ax=axes[1], kde=False)
ax = sns.distplot(men[men['Survived']==0].Age.dropna(), bins=18, label= not_survived, ax=axes[1], kde=False)
ax.legend()
ax.set_title('Male')
# -
# Survival rate by class and sex, one row of panels per embarkation port.
FacetGrid = sns.FacetGrid(training_data, row='Embarked', size=4.5, aspect=1.6)
FacetGrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette=None, order=None, hue_order=None)
FacetGrid.add_legend()
sns.barplot(x='Pclass', y='Survived', data=training_data)
# Age distributions split by survival (columns) and passenger class (rows).
grid = sns.FacetGrid(training_data, col='Survived', row='Pclass', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
# Data Preprocessing
# -------------------
# PassengerId is a row identifier with no predictive value.
training_data = training_data.drop(['PassengerId'], axis=1)
# Dealing with missing data
# --------------------------
training_data.head(15)
# +
import re
# Map deck letters to integers; 'U' (8) marks an unknown deck.
deck = { "A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "U": 8}
data = [training_data, testing_data]
for dataset in data:
    # Missing cabins become "U0" so the regex below always matches.
    dataset['Cabin'] = dataset['Cabin'].fillna("U0")
    # Keep only the leading letter(s) of the cabin code, e.g. "C85" -> "C".
    dataset['Deck'] = dataset['Cabin'].map(lambda x: re.compile("([a-zA-Z]+)").search(x).group())
    dataset['Deck'] = dataset['Deck'].map(deck)
    dataset['Deck'] = dataset['Deck'].fillna(0)
    dataset['Deck'] = dataset['Deck'].astype(int)
# The raw Cabin string is no longer needed once Deck is extracted.
training_data = training_data.drop(['Cabin'], axis=1)
testing_data = testing_data.drop(['Cabin'], axis=1)
# -
training_data.head(15)
# Fill missing ages with random draws from [mean - std, mean + std], then cast
# the column to int.  Statistics come from the TRAINING set only (computed once,
# outside the loop) so no information leaks from the test set.
data = [training_data, testing_data]
mean = training_data["Age"].mean()
std = training_data["Age"].std()
for dataset in data:
    is_null = dataset['Age'].isnull().sum()
    rand_age = np.random.randint(mean-std, mean+std, size=is_null)
    age_slice = dataset['Age'].copy()
    age_slice[np.isnan(age_slice)] = rand_age
    dataset["Age"] = age_slice
    # BUG FIX: cast the dataset's OWN column.  The original assigned
    # training_data["Age"] here, which silently overwrote the test set's ages
    # with training values (NaN where the indexes don't align).
    dataset["Age"] = dataset["Age"].astype(int)
training_data["Age"].isnull().sum()
training_data["Embarked"].describe()
# Replace with most frequent value
# 'S' (Southampton) is the mode of Embarked, so use it for missing ports.
common_value = 'S'
data = [training_data, testing_data]
for dataset in data:
    dataset['Embarked'] = dataset['Embarked'].fillna(common_value)
training_data.info()
# Encode sex as 0/1 so the models below get numeric features.
genders = {"male": 0, "female": 1}
data = [training_data, testing_data]
for dataset in data:
    dataset['Sex'] = dataset['Sex'].map(genders)
### drop => Ticket
training_data = training_data.drop(['Ticket'], axis=1)
testing_data = testing_data.drop(['Ticket'], axis=1)
# +
# Encode the embarkation port as an integer category.
ports = { "S": 0, "C": 1, "Q": 2}
data = [training_data, testing_data]
for dataset in data:
    dataset['Embarked'] = dataset['Embarked'].map(ports)
training_data.head(10)
# -
# # Feature Engineering
# +
# Bucket continuous ages into 7 ordinal bins (0..6).
data = [training_data, testing_data]
for dataset in data:
    #dataset['Age'] = dataset['Age'].astype(int)
    dataset.loc[dataset['Age'] <=11, 'Age'] = 0
    dataset.loc[(dataset['Age'] >11) & (dataset['Age'] <=18) , 'Age'] = 1
    dataset.loc[(dataset['Age'] >18) & (dataset['Age'] <=22) , 'Age'] = 2
    dataset.loc[(dataset['Age'] >22) & (dataset['Age'] <=27) , 'Age'] = 3
    dataset.loc[(dataset['Age'] >27) & (dataset['Age'] <=33) , 'Age'] = 4
    dataset.loc[(dataset['Age'] >33) & (dataset['Age'] <=40) , 'Age'] = 5
    # BUG FIX: the original `dataset.loc[(dataset['Age'] > 40)] = 6` omitted the
    # column label, overwriting EVERY column of those rows with 6 (destroying
    # Survived, Sex, Pclass, ... for all passengers over 40).
    dataset.loc[dataset['Age'] > 40, 'Age'] = 6
training_data['Age'].value_counts()
# +
training_data['Age'].value_counts()
# -
# Assemble the design matrix and target.
# BUG FIX: the original dropped 'Name' and then immediately re-assigned X_train
# from training_data with only 'Survived' dropped, so the 'Name' drop was lost
# and the non-numeric Name column leaked into the feature matrix.
X_train = training_data.drop(['Name', 'Survived'], axis=1)
Y_train = training_data['Survived']
# Algorithms
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# Build a model using existing libraries
# NOTE(review): max_iter=5 is very low for SGD; likely kept small for a demo.
sdg = linear_model.SGDClassifier(max_iter=5, tol=None)
# sdg.fit(X_train, Y_train)
# sdg.score(X_train, Y_train)
| Mar22/EndToEnd/Dummy/endtoend.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#transpose a vector or matrix
import numpy as np
vector = np.array([1,2,3,4,5,6])
# +
#create a matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# -
#transpose matrix (rows become columns)
matrix.T
#Create A Sparse Matrix
from scipy import sparse
# Create a matrix
matrix = np.array([[0, 0],
                   [0, 1],
                   [3, 0]])
# Create compressed sparse row (CSR) matrix -- stores only the non-zero entries
matrix_sparse = sparse.csr_matrix(matrix)
matrix_sparse
#Selecting Elements In An Array
# Create row vector
vector = np.array([1, 2, 3, 4, 5, 6])
# Select second element (NumPy indexing is zero-based)
vector[1]
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# Select second row, second column
matrix[1,1]
# +
#Create Tensor
# Create a rank-4 tensor of shape (2, 2, 2, 2)
tensor = np.array([
    [[[1, 1], [1, 1]], [[2, 2], [2, 2]]],
    [[[3, 3], [3, 3]], [[4, 4], [4, 4]]]
])
# -
# Select second element of each of the three dimensions
tensor[1,1,1]
#create matrix
matrix = np.array([[1 , 4],
                   [2 , 5]])
# +
#Reshape an array
#create a 4*3 matrix
matrix = np.array([[4 , 2 , 6],
                   [5 , 2 , 7],
                   [5 , 3 , 8],
                   [3 , 6 , 2]])
# -
# Reshape matrix into 2x6 matrix (the total number of elements must not change)
matrix.reshape(2, 6)
# +
#Converting A Dictionary Into A Matrix
# Load library
from sklearn.feature_extraction import DictVectorizer
# -
# Our dictionary of data
data_dict = [{'Red': 2, 'Blue': 4},
             {'Red': 4, 'Blue': 3},
             {'Red': 1, 'Yellow': 2},
             {'Red': 2, 'Yellow': 2}]
# +
# Create DictVectorizer object (sparse=False returns a dense ndarray)
dictvectorizer = DictVectorizer(sparse=False)
# Convert dictionary into feature matrix; missing keys become 0
features = dictvectorizer.fit_transform(data_dict)
# View feature matrix
features
# -
# View feature matrix column names
# FIX: get_feature_names() was deprecated in scikit-learn 1.0 and removed in
# 1.2; get_feature_names_out() is the supported replacement.
dictvectorizer.get_feature_names_out()
# +
#Invert A Matrix
# Create matrix
matrix = np.array([[1, 4],
                   [2, 5]])
# -
# Calculate inverse of matrix
np.linalg.inv(matrix)
# +
#Calculate The Trace Of A Matrix
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# -
# Calculate the trace of the matrix (sum of the diagonal elements)
matrix.diagonal().sum()
# +
#Getting The Diagonal Of A Matrix
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# -
# Return diagonal elements
matrix.diagonal()
# Calculate the trace of the matrix
matrix.diagonal().sum()
# +
#Calculate The Determinant Of A Matrix
# Return determinant of matrix
np.linalg.det(matrix)
# +
#Flatten A Matrix
# Flatten matrix into a 1-D copy
matrix.flatten()
# +
#Calculate The Average, Variance, And Standard Deviation
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# -
# Return mean over all elements
np.mean(matrix)
# Return variance
np.var(matrix)
# Return standard deviation
np.std(matrix)
# +
#Find Rank Of Matrix
# Return matrix rank
np.linalg.matrix_rank(matrix)
# +
#Calculate Dot Product Of Two Vectors
# -
# Create two vectors
vector_a = np.array([1,2,3])
vector_b = np.array([4,5,6])
# Calculate dot product
np.dot(vector_a, vector_b)
# Calculate dot product (alternate method: the @ matrix-multiply operator)
vector_a @ vector_b
# +
#Find The Maximum And Minimum
# -
# Create matrix
matrix = np.array([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
# Return maximum element
np.max(matrix)
# Return minimum element
np.min(matrix)
# Find the maximum element in each column
np.max(matrix, axis=0)
# Find the maximum element in each row
np.max(matrix, axis=1)
# +
#Describe An Array
# -
# Create matrix
matrix = np.array([[1, 2, 3, 4],
                   [5, 6, 7, 8],
                   [9, 10, 11, 12]])
# View number of rows and columns
matrix.shape
# View number of elements (rows * columns)
matrix.size
# View number of dimensions
matrix.ndim
# +
#Apply Operations To Elements
# +
#Create Vectorized Function
# +
# Create a function that adds 100 to something
add_100 = lambda i: i + 100
# Create a vectorized function (np.vectorize is a convenience loop, not a speedup)
vectorized_add_100 = np.vectorize(add_100)
# -
# Apply function to all elements in matrix
vectorized_add_100(matrix)
# +
#Create A Vector
# Create a vector as a row
vector_row = np.array([1, 2, 3])
# -
# Create a vector as a column (shape (3, 1))
vector_column = np.array([[1],
                          [2],
                          [3]])
# +
#Adding And Subtracting Matrices
# +
# Create matrix
matrix_a = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [1, 1, 2]])
# Create matrix
matrix_b = np.array([[1, 3, 1],
                     [1, 3, 1],
                     [1, 3, 8]])
# -
# Add two matrices (element-wise; equivalent to matrix_a + matrix_b)
np.add(matrix_a, matrix_b)
# Subtract two matrices
np.subtract(matrix_a, matrix_b)
| Vectors, Matrices, And Arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:21cmfast]
# language: python
# name: conda-env-21cmfast-py
# ---
# # Running and Plotting LightCones
# This tutorial follows on from the [coeval cube tutorial](coeval_cubes), and provides an introduction to creating lightcones with ``21cmFAST``. If you are new to ``21cmFAST`` you should go through the coeval cube tutorial first.
# There are two ways of creating lightcones in ``21cmFAST``: manual and automatic. The manual way involves evolving a coeval simulation through redshift and saving slices of it into a lightcone array. The advantage of this method is that one can precisely choose the redshift nodes to simulate and decide on interpolation methods. However, in this tutorial, we will focus on the single function that is included to do this for you: ``run_lightcone``.
#
# The function takes a few different arguments, most of which will be familiar to you if you've gone through the coeval tutorial. All simulation parameters can be passed (i.e. ``user_params``, ``cosmo_params``, ``flag_options`` and ``astro_params``). As an alternative to the first two, an ``InitialConditions`` and/or ``PerturbField`` box can be passed.
#
# Furthermore, the evolution can be managed with the ``zprime_step_factor`` and ``z_heat_max`` arguments.
#
# Finally, the final *minimum* redshift of the lightcone is set by the ``redshift`` argument, and the maximum redshift of the lightcone is defined by the ``max_redshift`` argument (note that this is not the maximum redshift evaluated, which is controlled by ``z_heat_max``, merely the maximum saved into the returned lightcone).
#
# You can specify which 3D quantities are interpolated as lightcones, and which should be saved as global parameters.
#
# Let's see what it does. We won't use the spin temperature, just to get a simple toy model:
# +
import py21cmfast as p21c
from py21cmfast import plotting
import os
print(f"21cmFAST version is {p21c.__version__}")
# -
# Build a lightcone from z=12 down to z=7 on a 150^3 grid (BOX_LEN=600),
# interpolating brightness temperature and density along the line of sight and
# recording the global (mean) evolution of those fields plus the neutral
# fraction.  direc='_cache' stores/reads intermediate boxes on disk.
lightcone = p21c.run_lightcone(
    redshift = 7.0,
    max_redshift = 12.0,
    user_params = {"HII_DIM":150, "BOX_LEN": 600},
    lightcone_quantities=("brightness_temp", 'density'),
    global_quantities=("brightness_temp", 'density', 'xH_box'),
    direc='_cache'
)
plotting.lightcone_sliceplot(lightcone);
plotting.lightcone_sliceplot(lightcone, "density")
# Simple!
# You can also save lightcones:
filename = lightcone.save(direc='_cache')
print(os.path.basename(filename))
| docs/tutorials/lightcones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Last homework: designing a microscope using machine learning
#
# For the final homework assignment, we will design a microscope using machine learning! In order to do this, we will combine elements of HW3 (CNNs) and HW4 (microscope simulator) into a single end-to-end architecture. In particular, we will convert the MNIST digits into phase/amplitude objects and process them through a microscope simulator as we did in HW4, add noise, and then feed the output into a CNN to classify the digit. In addition to optimizing the CNN parameters, we will also be simultaneously optimizing the input illumination incident on the sample as well as the aperture plane.
#
# Below, we will walk you through the steps of implementing this joint architecture, leaving some portions blank for you to implement. We will first instruct you to use specific values that we have tested and are known to give reasonable results. Later on, you will revisit the code and explore different hyperparameter settings.
# ## 1. import stuff and load MNIST
# As always, we split the dataset into training and testing. This code was copied from the jupyter notebook from TA session 5 and slightly modified.
# +
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# load MNIST dataset:
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# verify that the shapes are correct:
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# cast as a float32:
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
# -
# ## 2. convert the MNIST images into microscope samples
# Convert the MNIST images into phase-only objects. To do this, normalize the MNIST digits to be between 0 and 1, and make the object 1 wavelength thick (we may come back later to adjust the sample thickness).
# +
wavelength = .5
def convert_MNIST(X):
    """Turn raw MNIST intensities into phase-only complex transmission fields.

    Pixel values are normalized to [0, 1] and used as the optical phase of a
    sample one wavelength thick, so each pixel becomes exp(i * phase).
    """
    normalized = X / X.max()
    # A thickness of one wavelength cancels against the 1/wavelength factor,
    # leaving a phase equal to the normalized pixel value.
    thickness = 1 * wavelength
    return np.exp(1j * normalized * thickness / wavelength)
X_train = convert_MNIST(X_train)
X_test = convert_MNIST(X_test)
# -
# ## 3. create input pipeline for generating training/testing batches
# For your convenience, this was also copied from TA session 5 (and slightly modified). You don't need to do anything here but run this block:
# +
# this can be either X_train/y_train or X_test/y_test, so we make a placeholder that we can feed into:
X_train_or_test = tf.placeholder(tf.complex64, [None, 28, 28], name='input_image')
y_train_or_test = tf.placeholder(tf.int32, [None], name='image_label')
batch_size = 32
# create a tf dataset, from which we can generate batches
dataset = tf.data.Dataset.from_tensor_slices((X_train_or_test, y_train_or_test))
# repeat(None) cycles the dataset indefinitely so the train loop never exhausts it
dataset = dataset.batch(batch_size).repeat(None)
batch_generator = dataset.make_initializable_iterator()
X_batch, y_batch = batch_generator.get_next() # batches symbolically generated
# -
# ## 4. create complex-valued trainable illumination
# The input field will be a 28x28 complex-valued field that interacts with the sample. In practice, this input field might be obtained by using a spatial light modulator (SLM), an optical element that can be programmed to display an arbitrary phase and/or amplitude pattern pixel by pixel. We will use a phase-only SLM, so that the variable to optimize is a 28x28 array of phases (from 0 to 2pi).
# +
# use this flag to allow/disallow training of the input illumination; tf.Variable has an argument called "trainable":
train_illumination = False
# create the variable corresponding to the input illumination phase; initialize to a constant phase:
# (remember this is a weight variable that you will optimize!
# NOTE(review): despite the comment above, the init is np.random.rand, i.e.
# RANDOM in [0, 1) -- confirm whether a constant init was intended.
input_illumination_phase = tf.Variable(np.random.rand(28, 28), trainable=train_illumination, dtype='float32')
# using that input phase, create the input field:
# NOTE(review): the phase is scaled by 2*pi/wavelength, so with wavelength=0.5
# the random init spans [0, 4*pi) rather than [0, 2*pi) -- verify.
input_illumination = tf.exp(1j*2*np.pi/wavelength*tf.to_complex64(input_illumination_phase))
# -
# ## 5. generate the emerging field from the sample
# Given a stack of input images, X_batch (generated above), create a tensorflow array representing the emerging field from the sample. This is the same as what you did in HW4, except in HW4 you had a tilted plane wave.
# be sure to match the shapes/dimensions to enable broadcasting:
emerging_field = tf.convert_to_tensor(input_illumination*X_batch)
# ## 6. propagate the emerging field to the aperture plane
# As in HW4, propagate the field emerging from the sample to the aperture plane (also known as the Fourier plane). Remember to use tensorflow operations!
aperture_plane = tf.signal.fft2d(emerging_field)
# ## 7. create complex-valued trainable aperture function
# In HW4, we modeled a circular aperture in the aperture plane of our microscope. Please do the same here. For now, in whatever coordinate system you have established, please try to ensure that the radius extends across 8 pixels of the 28 pixels that will define the k-space matrix for this MNIST dataset along one dimension. We may come back later to adjust this radius.
#
# In addition, for extra flexibility, let's add an SLM in the aperture plane. Assume the SLM is a phase-only SLM (only values from 0 to 2pi are allowed).
# +
# use this flag to allow/disallow training of the aperture plane; pass this into the tf.Variable definition:
train_aperture = False
# the aperture function consists of two parts: 1) the circular aperture as in HW4, and 2) a trainable 28x28 phase array
# create a circular aperture as you did in HW4:
# NOTES: shift your aperature because it is not in tensorflow and shift back when after training
radius = 16
fx = np.linspace(-14, 14, 28)#?
fy = np.linspace(-14, 14, 28)#?
[fxx, fyy] = np.meshgrid(fx, fy)
circ_aper = np.fft.fftshift((fxx**2 + fyy**2) <= radius**2)
# create the variable corresponding to the aperture phase; initialize to a constant phase:
# (remember this is a weight variable that you will optimize!)
aperture_phase = tf.Variable(np.random.rand(28, 28), trainable=train_aperture, dtype='float32')
# write the full aperture function, combining the above two components:
aperture = circ_aper*tf.exp(1j*2*np.pi/wavelength*tf.to_complex64(aperture_phase))
# filter the field that you propagated:
aperture_plane_filtered = aperture*aperture_plane
# -
# ## 8. propagate to the image plane
# Next, we propagate the field to the image plane and take the magnitude squared (since we can only measure intensity). Add some Gaussian noise, since real measurements are noisy. Use tf.random_normal and for now use a stddev of .05.
# +
# propagate the field from the aperture plane to the image plane and convert it to intensity:
image = tf.math.abs(tf.signal.ifft2d(aperture_plane_filtered))
# add noise:
image += tf.random_normal((28, 28), mean=0.0, stddev=0.05, dtype=tf.dtypes.float32,)
# -
# ## 9. process the simulated image through a CNN
# Use your favorite CNN architecture that classifies MNIST or come up with a new one. You may copy a network architecture from a previous TA or class session.
# +
# Classifier head: two conv blocks (32 then 64 filters, each with 2x2 max-pool)
# followed by a 512-unit dense layer and a 10-way logit layer.
net = image[..., None] # add a channels dimension
# add some convolutional layers:
net = tf.layers.conv2d(net, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
net = tf.layers.conv2d(net, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
net = tf.layers.max_pooling2d(net, pool_size=2, strides=2)
# add some more if you want:
net = tf.layers.conv2d(net, filters=64, kernel_size=3, padding='SAME', activation=tf.nn.relu)
net = tf.layers.conv2d(net, filters=64, kernel_size=3, padding='SAME', activation=tf.nn.relu)
net = tf.layers.max_pooling2d(net, pool_size=2, strides=2)
# fully connected layers:
net = tf.layers.flatten(net)
net = tf.layers.dense(net, units=512, activation=tf.nn.relu)
# NOTE(review): this second flatten is a no-op on an already-flat tensor.
net = tf.layers.flatten(net)
net = tf.layers.dense(net, units=10)
logits = net
# Cross-entropy against one-hot labels from the input pipeline; minimizing this
# trains the CNN weights AND any trainable physical-layer variables.
loss = tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(y_batch, depth=10), logits=logits)
# boilerplate code:
train_op = tf.train.GradientDescentOptimizer(learning_rate=.01).minimize(loss)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# -
# ## 10. train!
# this code tells our batch_generator to generate training batches:
sess.run(batch_generator.initializer, feed_dict={X_train_or_test: X_train, y_train_or_test: y_train})
# Let's first look at a few simulated noisy microscope images.
def plot_examples(batch):
    """Display the first 25 images of *batch* in a 5x5 grid with colorbars."""
    plt.figure(figsize=(10, 10))
    for i in range(25):
        plt.subplot(5, 5, i+1)
        plt.imshow(batch[i])
        plt.colorbar()
        plt.axis('off')
    plt.show()
plot_examples(image.eval())
# Write your train loop here. Feel free to monitor loss and/or aperture/illumination phases during training. When we tested this, the optimizer had a slow start, and we had to run for several 1000 batches. Pick a value for the number of iterations and keep it fixed.
# train loop:
# Each sess.run draws the next batch from the pipeline, applies one SGD step,
# and returns the batch loss; print progress every 500 iterations.
for i in range(5000):
    _, loss_i = sess.run([train_op, loss])
    if i%500 == 0:
        print(loss_i)
# Let's first look at a few simulated noisy microscope images AFTER training. Do the images look more recognizable?
# plot examples
plot_examples(image.eval())
# Pass through the test set.
# +
# this code tells our batch_generator to generate test batches:
sess.run(batch_generator.initializer, feed_dict={X_train_or_test: X_test, y_train_or_test: y_test})
# pass through test set:
# 100 batches of 32 = 3200 test digits; accuracy = argmax(logits) vs labels.
correct = 0
total = 0
for i in range(100):
    prediction, truth = sess.run([logits, y_batch])
    correct += np.sum(prediction.argmax(1)==truth)
    total += len(truth)
acc = correct/total
print(acc)
# -
# Ok, now you've run a machine learning model with a physical layer! You did it! Please respond to the following questions. Note that this is a pretty open-ended analysis, and everyone may achieve different results depending on the selected parameters -- as long as you get question 1 to work, it's okay if the other questions give uninteresting results!
#
# 1. First, using the default hyperparameters based on the instructions and code we provided, please run the training under the following conditions:
# 1. Allowing optimization of aperture phase and illumination phase.
# 2. Allowing optimization of neither aperture phase nor illumination phase (this is a control experiment, because only the CNN is trained).
# 3. Allowing optimization of only the aperture phase.
# 4. Allowing optimization of only the illumination phase.
#
# For these 4 conditions, report the final test classification accuracies as well as the optimized aperture phase and/or illumination phase. Provide a brief analysis of the results (hint: if you don't see a difference between at least two of the above conditions, something probably went wrong!).
#
# 2. If you got question 1 to work, good work! Next, let's try changing some of the hyperparameters above. For each of the following questions, rerun the analysis from question 1 (under conditions of 1A and 1B). Ideally, we want to find situations where the CNN with the physical layer (the microscope simulating layers) outperforms the CNN-only network. Comment on the gap in performance between the physically-optimized and non-physically-optimized cases. Also comment on the optimized aperture and/or illumination phase.
#
# * For question 1, you initialized with a constant phase. Next, try to initialize the optimization with random-valued aperture phase and illumination phase.
# * Try changing the diameter of the aperture to two other values. For example, half and double the original diameter used above.
# part 1A Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.9771875
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# part 1B Allowing optimization of neither aperture phase nor illumination phase.
# Accuracy = 0.1125
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# part 1C Allowing optimization of only aperture phase
# Accuracy = 0.97625
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# part 1D Allowing optimization of only illumination phase
# Accuracy = 0.8059375
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# ## Conclusion:
# 1. Aperature phase is more important than illumination phase
# 2. Accuracy of changing aperature only is better than training both or training only illumination
# 2. If you got question 1 to work, good work! Next, let's try changing some of the hyperparameters above. For each of the following questions, rerun the analysis from question 1 (under conditions of 1A and 1B). Ideally, we want to find situations where the CNN with the physical layer (the microscope simulating layers) outperforms the CNN-only network. Comment on the gap in performance between the physically-optimized and non-physically-optimized cases. Also comment on the optimized aperture and/or illumination phase.
#
# * For question 1, you initialized with a constant phase. Next, try to initialize the optimization with random-valued aperture phase and illumination phase.
# * Try changing the diameter of the aperture to two other values. For example, half and double the original diameter used above.
# +
## Question 2:
# -
# Random initialization
# 1A Allowing optimization of aperture phase and illumination phase with original aperature
# Accuracy = 0.8734375
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# Random initialization
# 1B Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.80375
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# Random initialization + half aperature
# 1A Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.8153125
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# Random initialization + half aperature
# 1B Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.759375
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# Random initialization + double aperature
# 1A Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.8546875
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# Random initialization + double aperature
# 1B Allowing optimization of aperture phase and illumination phase.
# Accuracy = 0.870625
plt.imshow(sess.run(aperture_phase))
plt.title('aperature phase')
plt.show()
plt.imshow(sess.run(input_illumination_phase))
plt.title('illumination phase')
plt.show()
# ## Conclusion:
#
# 1. Randomizing the initial condition for phases has worse performance than constant phases
# 2. Doubling the aperatures works better than half the aperatures
# 3. Training both parameters works better than training CNN only
# 4. However, when the aperature is large enough, training CNN is better than training physical layers
| homework/homework_5/HW5rw174.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework #3 (option #1)
#
# ## <NAME>
# **Homework3 -- construct a visualization of the Illinois Building Inventory that communicates the following information -- choose 3 out of the 4 listed below to visualize:**
#
# - Relationship between the year acquired and the year constructed
# - Total square footage as a function of congressional district ("Congress Dist")
# - Average square footage per floor as a function of congressional district
# - Square footage for the five most common departments (aka "Agency Name") as a function of year.
#
# (Each component will be worth 10 points (5 for code and 5 for a writeup of your narrative discussing your process and what things did/did not work) and must be a completely communicative visualization -- including labels and a one paragraph writeup of successes and shortcomings in your approach.)
# ### 1. Relationship between the year acquired and the year constructed
# +
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# import the pandas package
import pandas as pd
# -
buildings = pd.read_csv("building_inventory.csv")
buildings
# make a plot (x = Year Acquired, y = Year Constructed)
buildings.plot(x = "Year Acquired", y = "Year Constructed")
plt.show()
# The above plot cannot show the relationship between Year Acquired and Year Constructed. I will change it to *Scatter* plot:
buildings.plot(x = "Year Acquired", y = "Year Constructed", kind = 'scatter')
# There are several data having '0' value. I will eliminate these 'bad data':
# Treat 0 in either year column as missing so it doesn't distort the axes.
b = pd.read_csv("building_inventory.csv",
                na_values = {'Year Acquired': 0,
                             'Year Constructed': 0})
b.plot(x = "Year Acquired", y = "Year Constructed", kind = 'scatter')
plt.show()
# The same scatter plot, drawn with the ggplot style sheet applied.
with plt.style.context("ggplot") :
    b.plot(x = "Year Acquired", y = "Year Constructed", kind = 'scatter')
    plt.show()
# #### To conclude
#
# There were several *bad data* containing '0' values. When drawing a plot including bad data, it was not possible to see any trend among data. However, when I eliminate bad data, I could clearly see the relationship between "Year Acquired" and "Year Constructed". Also, in this data set, the scatter plot was appropriate.
# ### 2. Total square footage as a function of congressional district ("Congress Dist")
# In order to see the total square footage by Congress Dist, I will use 'groupby' function to make sum of square footage.
# Sum the square footage of all buildings within each congressional district.
agg_data = buildings.groupby("Congress Dist")["Square Footage"].sum()
agg_data
agg_data.index # Congress Dist
agg_data.values # the total square footage
agg_data.values.max()
agg_data.values[13]
# Plot total square footage against district (x axis is positional index).
fig, ax = plt.subplots(figsize = (20,3))
ax.plot(agg_data.values, '.-r', linewidth = 2)
ax.set_xlabel("Congress Dist")
ax.set_ylabel("Total Square Footage")
plt.show()
# Same plot with the ggplot style sheet applied.
with plt.style.context("ggplot") :
    fig, ax = plt.subplots(figsize = (20,3))
    ax.plot(agg_data.values, '.-r', linewidth = 2)
    ax.set_xlabel("Congress Dist")
    ax.set_ylabel("Total Square Footage")
    plt.show()
# #### To conclude
#
# In order to see the Total Square Footage, I could make the aggregated data set using the groupby function. After I calculated the Total Square Footage by Congress Dist, it was easy to make a plot. It clearly showed that congressional district 13 had the biggest Total Square Footage.
# ### 3. Average square footage per floor as a function of congressional district
# In order to calulate the average square footage per floor of Congress Dist,\
# (1) Average Square Footage = (Sum of Suqare Footage) / (Total Floors) $\rightarrow$
# (2) Make a plot x = Congress Dist, y = Average Square Footage per floor
# Total square footage per congressional district.
agg_bb_total = buildings.groupby("Congress Dist")["Square Footage"].sum()
agg_bb_total
# Total floor count per congressional district.
total_floors = buildings.groupby("Congress Dist")["Total Floors"].sum()
total_floors
# Per-district average square footage per floor (indexes align on district).
agg_bb_average_square_footage = agg_bb_total / total_floors
agg_bb_average_square_footage
fig, ax = plt.subplots(figsize = (20,3))
ax.plot(agg_bb_average_square_footage.values, '.-g', linewidth = 2)
ax.set_xlabel("Congress Dist")
ax.set_ylabel("Average Square Footage by floor")
plt.show()
# Same plot with the ggplot style sheet applied.
with plt.style.context("ggplot") :
    fig, ax = plt.subplots(figsize = (20,3))
    ax.plot(agg_bb_average_square_footage.values, '.-g', linewidth = 2)
    ax.set_xlabel("Congress Dist")
    ax.set_ylabel("Average Square Footage by floor")
    plt.show()
# #### To conclude
#
# In order to know the Average Square Footage by floor, several steps were needed to calculate it. First, I summed the Square Footage to make the Total Square Footage by Congress Dist. Then, I summed the Total Floors to make the Total Floors by Congress Dist. Lastly, I calculated the Average Square Footage by floor as a function of Congress Dist.
| homework/homework_3/ma-haesook-homework3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:Dask-jobqueue_v2020.02.10]
# language: python
# name: conda-env-Dask-jobqueue_v2020.02.10-py
# ---
# # Dask jobqueue example for JUWELS at JSC
# covers the following aspects, i.e. how to
# * add the JUWELS specific Dask jobqueue configuration
# * get overview on available JUWELS compute node resources
# * specify batch queue and project budget name
# * open, scale and close a default jobqueue cluster
# * do an example calculation on larger than memory data
import dask, dask_jobqueue, os
import dask.distributed as dask_distributed
# ## Load jobqueue configuration defaults
# Merge any Dask config files found in the working directory on top of the
# global configuration (priority='new' lets local settings win).
additional_config = dask.config.collect(paths=['.']) # look up further Dask configurations in local directory
dask.config.update(dask.config.config, additional_config, priority='new');
dask.config.get('jobqueue.juwels-jobqueue-config')
# ## Set up jobqueue cluster ...
# !sinfo -t idle --format="%9P %.5a %.5D %.5t" # get overview on available resources per queue
jobqueue_cluster = dask_jobqueue.SLURMCluster(
    config_name='juwels-jobqueue-config',
    project='esmtst', # specify budget name associated with project
    queue='esm', # choose queue by available resources
    host=os.environ['HOSTNAME']) # globally visible local scheduler network location
print(jobqueue_cluster.job_script())
# ## ... and the client process
client = dask_distributed.Client(jobqueue_cluster)
# ## Start jobqueue workers
# One SLURM job; worker count per job comes from the jobqueue config.
jobqueue_cluster.scale(jobs=1)
# !squeue -u hoeflich1
client
# ## Do calculation on larger than memory data
import dask.array as da
# ~2.9 TB of doubles in total; chunks keep each task's slice in memory.
fake_data = da.random.uniform(0, 1, size=(365, 1e4, 1e4), chunks=(365,500,500)) # problem specific chunking
fake_data
import time
start_time = time.time()
# Reduction over the time axis; compute() triggers execution on the workers.
fake_data.mean(axis=0).compute()
elapsed = time.time() - start_time
print('elapse time ',elapsed,' in seconds')
# ## Close jobqueue cluster and client process
# !squeue -u hoeflich1
jobqueue_cluster.close()
client.close()
# !squeue -u hoeflich1
# ## Conda environment
# !conda list --explicit
| juwels/01_jobqueue_cluster_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from lstm import Lstm, LstmMiniBatch
import theano.tensor as T
# Hyper-parameters for the LSTM tagger.
s = {'emb_dimension' : 50,   # word embedding size
     'n_hidden' : 100,       # LSTM hidden units
     'n_out' : 27,           # output classes
     'window' : 2,           # context window size
     'lr' : 0.005}           # learning rate
possize = 100   # POS-tag vocabulary size
vocsize = 9000  # word vocabulary size
nclasses = 27   # number of output classes (matches s['n_out'])
# Build the single-output LSTM with a softmax head trained under
# negative log-likelihood.
lstm = Lstm(ne = vocsize,
            de = s['emb_dimension'],
            n_lstm = s['n_hidden'],
            n_out = nclasses,
            cs = s['window'],
            npos = possize,
            lr=s['lr'],
            single_output=True,
            output_activation=T.nnet.softmax,
            cost_function='nll')
| deep_disfluency/rnn/dev/LSTM_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.2
# language: julia
# name: julia-0.6
# ---
# Reverify feasibility example: load a small network, describe the input and
# output sets as H-polytopes, and ask a MIP solver whether the network can
# map the input set into the output set.
using JuMP
using MathProgBase.SolverInterface
using GLPKMathProgInterface
include("../solver/solver.jl")
include("../src/utils/activation.jl")
include("../src/utils/problem.jl")
include("../src/utils/util.jl")
include("../src/feasibility/reverify.jl")
small_nnet = read_nnet("networks/small_nnet.txt")
# input = Constraints(zeros(1,1), zeros(1), [0.0], [0.0])
# output = Constraints(zeros(1,1), zeros(1), [100.0], [0.0]) # check this - if one output var should be zeros(1,1)
# need to get A[I, -I]x = [upper, -lower] somehow:
# A = [1; -1] encodes scalar bounds lower <= x <= upper as A*x .<= b.
# NOTE: Matrix{Float64}(2,1) is Julia 0.6 syntax for an uninitialized matrix.
A = Matrix{Float64}(2,1)
A[[1, 2]] = [1, -1]
# Input fixed to x = 0; output constrained to 0 <= y <= 100.
input = HPolytope(A, [0.0, 0.0])
output = HPolytope(A, [100.0, 0.0])
problem = FeasibilityProblem(small_nnet, input, output)
optimizer = GLPKSolverMIP()
# 1000.0 is passed as the solver's bound/limit parameter — see ReverifySolver.
solver = ReverifySolver(1000.0, optimizer)
solve(solver, problem)
| examples/Reverify Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Innisfil Groceries Survey
import pandas as pd
# Load the raw April-17 survey export.
df = pd.read_csv('apr17_survey.csv', encoding = 'UTF-8')
df.head()
# +
# Replace the long question texts with short column names.
col_names = ['date', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6', 'Q7', 'Q8', 'Q9', 'Q10', 'Q11', 'email']
df.columns = col_names
# -
df.head()
df.duplicated().sum()
df.isna().sum()
# Drop respondents who did not answer Q1 (home town).
df = df.dropna(subset = ['Q1'])
df.isna().sum()
# ### Question 1
df['Q1'].unique()
# +
# Normalize free-text town names (case-insensitive substring matches).
mask_Essa = df['Q1'].str.match('.*[Ee]ssa.*')
mask_Alliston = df['Q1'].str.match('.*[Aa]lliston.*')
mask_Belle = df['Q1'].str.match('.*[Bb]elle [Ee]wart.*')
df.loc[mask_Essa, 'Q1'] = 'Essa'
df.loc[mask_Alliston, 'Q1'] = 'Alliston'
# NOTE(review): '<NAME>' looks like an anonymization placeholder left by a
# scrubbing pass — presumably this should read 'Belle Ewart'; confirm.
df.loc[mask_Belle, 'Q1'] = '<NAME>'
df['Q1'].value_counts()
# +
# Collapse everything except Cookstown into 'Other'. NOTE(review): this also
# overwrites the Essa/Alliston/Belle Ewart labels assigned above — confirm
# that only the Cookstown-vs-other split is needed downstream.
mask_other = df['Q1'] == 'Cookstown'
df.loc[~mask_other, 'Q1'] = 'Other'
df['Q1'].value_counts()
# -
# ### Question 2
df['Q2'].unique()
# +
# Grocery-store choice among Cookstown respondents.
mask_Cookstown = df['Q1'] == 'Cookstown'
df.loc[mask_Cookstown, 'Q2'].value_counts()
# -
# Impute the most common answer for missing responses.
df['Q2'] = df['Q2'].fillna('Foodland')
# +
# Flag each store mentioned in the free-text answer.
# Bug fixed: the original patterns used character classes with repetition
# (e.g. '.*[Zzehrs\']{4,5}.*'), which match ANY 4-5 characters drawn from the
# set — so unrelated words such as "others" were counted as store mentions.
# Case-insensitive substring searches for the store names (tolerating the
# misspellings seen in the raw answers, e.g. "Walmrt") match the intent.
mask_Zehrs = df['Q2'].str.contains(r"zehr", case=False)
mask_Frills = df['Q2'].str.contains(r"no\s*frill", case=False)
mask_Costco = df['Q2'].str.contains(r"cost\s*co", case=False)
mask_Foodland = df['Q2'].str.contains(r"food\s*land", case=False)
mask_Walmart = df['Q2'].str.contains(r"wal\s*ma?rt", case=False)
mask_Sobeys = df['Q2'].str.contains(r"sobe", case=False)
mask_Basic = df['Q2'].str.contains(r"foo?d\s*basic", case=False)
# One 0/1 indicator column per store (a respondent can name several stores).
df['Zehrs'] = 0
df.loc[mask_Zehrs, 'Zehrs'] = 1
df['Frills'] = 0
df.loc[mask_Frills, 'Frills'] = 1
df['Costco'] = 0
df.loc[mask_Costco, 'Costco'] = 1
df['Foodland'] = 0
df.loc[mask_Foodland, 'Foodland'] = 1
df['Walmart'] = 0
df.loc[mask_Walmart, 'Walmart'] = 1
df['Sobeys'] = 0
df.loc[mask_Sobeys, 'Sobeys'] = 1
df['Basic'] = 0
df.loc[mask_Basic, 'Basic'] = 1
df.head()
# -
# Mention counts per store across all respondents.
df.loc[:, 'Zehrs':].sum()
# ### Question 3
df['Q3'].unique()
# ### Question 4
df['Q4'].unique()
# ### Question 5
df['Q5'].unique()
df['Q5'].value_counts()
# Impute the most common answer for missing responses.
df['Q5'] = df['Q5'].fillna('Pick Up')
# ### Question 6
df['Q6'].unique()
# +
# Bucket the free-text "delivery fee" answers into ordinal codes
# 0 = free, 1 = $1-5, 2 = $6-10, 3 = more.
# NOTE(review): these patterns use str.match, which only anchors at the
# START of the string, and '.' inside groups like '(.00)?' is unescaped, so
# it matches any character — e.g. '100' matches '\D*10(.00)?\D*' via its
# '10' prefix. Verify the resulting buckets against the raw answers.
df['Q6'] = df['Q6'].fillna('5')
df.loc[df['Q6'].str.match('.*\$?5\$?.*'), 'Q6']
mask_0 = df['Q6'].str.match('^\s*\$?0\$?\s*$')
mask_0_2 = df['Q6'].str.match('No.*')
mask_0_3 = df['Q6'].str.match('Free')
mask_0_4 = df['Q6'].str.match('.*[Pp]ick up.*')
df.loc[mask_0 | mask_0_2 | mask_0_3 | mask_0_4, 'Q6'] = '0'
mask_5 = df['Q6'].str.match('^\s*\$?[1-5](.00)?\$?\s*$')
mask_5_2 = df['Q6'].str.match('.*\D*5(.00)?\$?\s*')
mask_5_3 = df['Q6'].str.match('Depend.*')
df.loc[mask_5 | mask_5_2 | mask_5_3, 'Q6'] = '1'
mask_10 = df['Q6'].str.match('^\s*\$?10(.00)?\$?\s*$')
mask_10_2 = df['Q6'].str.match('.*[6-9].*')
mask_10_3 = df['Q6'].str.match('\D*10(.00)?\D*')
df.loc[mask_10 | mask_10_2 | mask_10_3, 'Q6'] = '2'
mask_high = df['Q6'].str.match('^\$(20)?(30)?$')
df.loc[mask_high, 'Q6'] = '3'
# Anything still containing letters defaults to bucket 1.
mask_final = df['Q6'].str.match('.*[A-z].*')
df.loc[mask_final, 'Q6'] = '1'
df['Q6'] = df['Q6'].astype('int')
df['Q6'].value_counts()
# -
# ### Question 7
df['Q7'].unique()
df['Q7'].value_counts()
# Impute the most common answer for missing responses.
df['Q7'] = df['Q7'].fillna('Almost never')
# ### Question 8
df['Q8'].unique()
df['Q8'] = df['Q8'].fillna('Yes')
# +
# Normalize free-text answers to a Yes/No flag.
# Bug fixed: the original yes-pattern '.*[Yyes].*' matched any answer that
# contained ANY ONE of the letters Y/y/e/s (so e.g. "Not really" was coded
# as Yes). We now require the word "yes" (case-insensitive) or a mention
# of Bradford, which the original treated as an implicit yes.
mask_Yes = df['Q8'].str.contains(r"yes", case=False)
mask_Yes2 = df['Q8'].str.contains(r"Bradford")
df.loc[(mask_Yes | mask_Yes2), 'Q8'] = 'Yes'
# Everything not coded Yes above ends up No: mask_No3 catches whatever the
# two explicit no-patterns miss, so the final coding is binary.
mask_No = df['Q8'].str.match(r"(?i)no\b")
mask_No2 = df['Q8'].str.contains(r"\Wno\W")
mask_No3 = ~(mask_Yes | mask_Yes2 | mask_No | mask_No2)
df.loc[(mask_No | mask_No2 | mask_No3), 'Q8'] = 'No'
# -
df['Q8'].value_counts()
# ### Question 9
df['Q9'].unique()
# +
df['Q9'] = df['Q9'].astype('int')
df['Q9'].value_counts()
# -
# ### Question 10
# 'NR' marks "no response" in the free-text questions.
df['Q10'] = df['Q10'].fillna('NR')
# ### Question 11
df['Q11'] = df['Q11'].fillna('NR')
# Persist the cleaned survey.
df.to_csv('survey_clean.csv', index = False, encoding = 'UTF-8')
| survey_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bryanpioloEspanol/bryan-espanol/blob/main/Matrix_and_its_Operation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="pUBD9-kJIUn3" outputId="04c74043-e460-458f-cac7-bd17c758d207"
import numpy as np
# Create two 2x2 arrays (the original comment said "2x4", which does not
# match the literals below).
a = np.array([[-5,0],[4,1]])
b = np.array([[6,-3],[2,3]])
print(a)
print(b)
# + colab={"base_uri": "https://localhost:8080/"} id="SMu8SnRKIbJT" outputId="85ea3bc4-aeee-4ebe-ffc8-c6495042f33f"
# Element-wise sum a + b
a = np.array([[-5,0],[4,1]])
b = np.array([[6,-3],[2,3]])
print(a+b)
# + colab={"base_uri": "https://localhost:8080/"} id="BrZmKIXMIgcP" outputId="ede9fb4c-c09f-4014-f9bf-143c814d32ea"
# Element-wise difference a - b
a = np.array([[-5,0],[4,1]])
b = np.array([[6,-3],[2,3]])
print(a-b)
# + colab={"base_uri": "https://localhost:8080/"} id="zjYtCo_4Ini4" outputId="1f602c6f-a5c1-449b-9fbf-966e921ac41e"
# Element-wise difference a - b (NOTE: this cell duplicates the one above)
a = np.array([[-5,0],[4,1]])
b = np.array([[6,-3],[2,3]])
print(a-b)
# + id="9BJf3p6iI6M9"
| Matrix_and_its_Operation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Read the radius from stdin and print the circle's area (pi * r^2).
# NOTE(review): a negative or non-numeric input is not validated here.
r=float(input("Input the radius of the circle : "))
print ("The area of the circle with radius " + str(r) + " is: " + str(np.pi * r**2))
| Area of circle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is Python?
#
# ### Python is a general purpose programming language which can be used for developing web pages , for machine learning or developing any sort of simple applications.
#
# ## Advantages of Python:
#
# 1. Easy to read
# 2. Open Source
# 3. Large Standard Libraries
# 4. OOPS
#
# #### It was created by <NAME> in 1989.
# #### He was inspired by the creator of his favorite show "Flying Circus" , Monty Python.
# #### High Level interpreted language with easy syntax and dynamic semantics
#
#
# High Level- It means that Python derives it components from the natural language that we humans use to communicate with each other.
#
# Interpreted- Python code is compiled line by line, which makes debugging errors much easier, but this comes at a cost: Python is slower than many other programming languages.
#
# Easy Syntax- Makes use of indentations instead of braces to distinguish what block of code comes under which class or function.
#
# Dynamic Sematics - No need to initialize anything(automatically done by python).
# # Features of Python:
#
# 1. Simplicity: Very Simple to use and easy to understand.
# 2. Open Source: Free for anyone to use.
# 3. Portability: Write the code in python and share with anybody you want . It will behave the same way for both.
# 4. Embeddable and Extensible: Python allows the code of other languages like C/C++to be embedded into it so that certain functions can be performed making Python even more powerful.
# 5. Interpreted: Compiled line by line. The tasks of CPU and memory management are handled by Python itself.
# 6. Huge Library: Has a large collection of Library like Numpy ,Pandas , Matplotlib,Scikit Learn which helps in solving a lot of problems
# 7. Object Orientation: It supports OOPS.Object orientation basically breaks down complex problems of the world into code and help provide security to it to obtain better solutions.
#
# # Who uses Python?
# #### Google , Youtube- to provide better searches for users
# #### Netflix - uses machine learning to understand users choice and then recommend the movies
# # Python Basics
#
# 1. Installing Python
# 2. First Code in Python .
# 3. Extensions used in Python.
# 4. Addition
#
#
#
# # DATA TYPES IN PYTHON
# ## There are basically 6 data types in Python:
#
# 1. Numeric
# 2. Lists
# 3. Tuples
# 4. Dictionary
# 5. Sets
# 6. Strings
#
# ### 1. Numeric
#
# It is used to store numerical values in the variable
#
# eg a=3 , a is the name of the variable and 3 is the value that is assigned to it
#
# They are immutable
#
# eg a=20
#
# You cant change 0 to any other number but you can change the value of the variable.
#
# ##### Types:
#
# Integers - 2, 3, 4 ,354
#
# Float- 3.45
#
# Complex Numbers -10 +3j
# Examples of the numeric types: int, float and complex.
a=10
b=-10
c=3.142
d=0.142
e=10+3j
f=6j
# Arithmetic works across numeric types; mixing complex yields complex.
print(a+b,c-d,e-f)
print(a-c,b-f,a*b)
# +
## Type Conversion
#### It is used to convert datatypes from one form to another.
# Parse a numeric string into an int.
s="10010"
c= int(s)
print(c)
type(c)
# Convert an int into its string representation.
a=1
b=str(a)
print(b)
type(b)
# A string is iterable, so it can seed a list, tuple or set of characters.
s="brillica"
print(list(s))
print(tuple(s))
print(set(s))
# -
# ## 2.Lists
#
# ##### Lists are a collections of various data types .A list is a data structure in Python that is a mutable, or changeable, ordered sequence of elements. Each element or value that is inside of a list is called an item. Just as strings are defined as characters between quotes, lists are defined by having values between square brackets [ ]
#
#
# #### Index : It is basically the address in which the data is stored .
#
#
my_list=[1,2,3,"Brillica", "Python"]
print(my_list)
# ### Accessing elements of a list - USING SLICING
# +
#[Starting Index:Stopping Index:Skipping Index]
# -
print(my_list[:])
print(my_list[3])
print(my_list[0:4])
print(my_list[:-1])
print(my_list[-1])
print(my_list[0:5:2])
print(my_list[3][:3])
# ### Adding Elements in a list
# +
# APPEND
my_list.append([10,20])
print(my_list)
# +
# EXTEND
my_list.extend([10,20])
print(my_list)
# +
#INSERT
my_list.insert(2,"Rishabh")
print(my_list)
# -
#Concatenate
print(my_list+["ADD ONE MORE"])
my_list
#Multiply
print(my_list*2)
my_list
#Count
my_list.count("Brillica")
#Index
my_list.index("Brillica")
#Sort
a=[1,2,10,20,19,54,4,6]
sorted(a)
a
a.sort(reverse=True)
a
a.sort(reverse=False)
a
# ### Deleting elements from a list
# +
#Remove
a.remove(2)
# -
a
#Delete
del my_list[2]
print(my_list)
# +
#pop
my_list.pop(3)
# -
print(my_list)
# +
#clear
a.clear()
# -
print(a)
my_list2=my_list.copy()
print(my_list2)
# ## 3.Tuples
# A tuple is a sequence of immutable Python objects. Tuples are sequences, just like lists. The differences between tuples and lists are, the tuples cannot be changed unlike lists and tuples use parentheses, whereas lists use square brackets. Creating a tuple is as simple as putting different comma-separated values.
# Tuples are immutable sequences; they may contain mutable elements.
tup1=(1,2,3,["Brillica","Python"])
tup2=(4,5,6)
tup3=(7,8,9)
# Concatenation builds a new tuple.
tup2+tup3
# +
# Slicing / indexing works the same as for lists.
tup1[2]
# +
# The tuple itself cannot change, but the LIST inside it is still mutable.
tup1[3][1]="Rishabh"
tup1
# +
# A single-element tuple needs a trailing comma; parentheses are optional.
c="Brillica",
print(type(c))
# -
# -
# ## Dictionary
# Dictionary in Python is an unordered collection of data values, used to store data values like a map, which unlike other Data Types that hold only single value as an element, Dictionary holds key:value pair. ... Each key-value pair in a Dictionary is separated by a colon :, whereas each key is separated by a 'comma'
# +
# Example use case: a phone's contact book (name -> number).
# -
dict1={"a":1,"b":2,"c":3}
print(dict1)
type(dict1)
# ### Accessing the elements in a dictionary
# +
# get returns the value for a key (None if the key is missing).
dict1.get("a")
# +
## Adding and Changing elements in a dictionary
# Assigning to an existing key replaces its value.
dict1["a"]="Rishabh"
# -
dict1
# +
# Assigning to a new key inserts a new pair.
dict1["d"]=5
# -
print(dict1)
# +
# Views over the dictionary's pairs, keys and values.
# -
dict1.items()
dict1.keys()
dict1.values()
# ## SETS
# A Set is an unordered collection data type that is iterable, mutable and has no duplicate elements. Python's set class represents the mathematical notion of a set. ...
# This is based on a data structure known as a hash table.
#
# +
# Duplicates are discarded on construction (7 appears once).
set1={1,2,4,5,6,7,7}
set2={1,3,4,5,6}
# -
print(set1)
# add inserts one element (no effect if already present).
set1.add(8)
set1
# +
# Union: elements in either set.
set1.union(set2)
# +
# Intersection: elements in both sets.
set1.intersection(set2)
# +
# Difference: elements of set1 not in set2.
set1.difference(set2)
# +
# Symmetric difference: elements in exactly one of the two sets.
set1.symmetric_difference(set2)
# -
# Comparison operators test equality and subset/superset relations.
seta={1,2,3,4,5}
setb={6,7,8,9,0}
superset={1,2,3,4,5,6,7,8,9,0}
print(seta==setb)
print(seta != setb)
print(seta <= superset)  # subset (possibly equal)
print(seta< superset)    # proper subset
print(seta<=setb)
print(seta<setb)
# ## Strings
#
# A string in Python is a sequence of characters. ... Strings are immutable. This means that once defined, they cannot be changed. Many Python methods, such as replace() , join() , or split() modify strings. However, they do not modify the original string.
str1="Welcome to the tutorial"
str2="This is Python "
# +
# Strings support the same slicing syntax as lists.
str1[0]
# -
str1[0:9:2]
str1[:2]
str1[::-1]  # step -1 reverses the string
# Common string methods; each returns a NEW string (strings are immutable).
print(str1.count("W"))
print(str1.split(" "))
print(str1.partition("to"))
print(str1.upper())
print(str1.lower())
print(str1.replace("Welcome","Hello"))
# ## Operators in Python
#
#
# 1. Arithmetic operators
# 2. Assignment Operators
# 3. Comparison Operators
# 4. Logical Operators
# 5. Bitwise Operators
# 6. Identity Operators
# 7. Membership Operators
#
# ### Arithmetic Operators:
#
# 1. "+" Used to add two numbers
# 2. "-" - Used to substract two numbers
# 3. "/" - used to divide two numbers
# 4. "*" - used to Multiply two numbers
# 5. "%" - Gives the remainder
# 6. "**" - to raise the power
#
# ### Assignment Operators
#
# They are used to assign values to variables
#
# 1. "=" - Assign the value
# 2. "+=" - Add and assign the value.
# 3. "-=" - Substract and Assign
# 4. "*=" - Multiply and assign
# 5. "/=" - Divide and assign
# 6. "%=" - Find remainder and assign
# 7. "**=" - Find exponential and assign
# +
# Augmented-assignment examples.
a=5
add,sub,mul,div,expo=0,0,0,1,1 #Multiple variable assignment
# -
add+=a
print(add)
sub-=a
print(sub)
mul*=a
print(mul)
div/=a   # /= always yields a float
print(div)
expo**=a  # 1 ** 5 is still 1
print(
expo)
# ### Comparison Operators
# 1. "==" - Compare if True
# 2. "!=" - Compare if not True
# 3. "<" - Check if less than
# 4. ">" - Check if greater than
# 5. "<=" - Check if lesser or equal to
# 6. ">=" - Check if greater or equal to
# +
# Each comparison prints a boolean.
a=5
b=10
print(a==b)
print(a!=b)
print(a>b)
print(a<b)
print(a<=b)
print(a>=b)
# -
# -
# ### Logical Operators:
#
# They are used to obtain logic from the operands.
#
# 1. and - True if both are true
# 2. or - True if either one of them is true
# 3. not - Gives the opposite
#
| Python Basics tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pickle
import numpy as np
from collections import Counter
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# +
def rougeScores(genSummary, refSummary):
    """Compute ROUGE-1 recall, precision and F1 between two bags of tokens.

    Parameters
    ----------
    genSummary, refSummary : mapping of token -> count
        e.g. ``collections.Counter`` objects; plain dicts now work too.

    Returns
    -------
    (recall, precision, f1) : tuple of floats
        Each component is 0.0 when its denominator is zero.

    Bug fixed: the original indexed the Counters directly, and
    ``Counter.__getitem__`` on a missing key silently INSERTS a zero-count
    entry — so calling this function mutated both inputs.  Using ``.get``
    avoids the side effect and generalizes to plain dicts.
    """
    genTotal, refTotal, intersection = 0., 0., 0.
    # One pass over the union of tokens.
    for token in set(refSummary) | set(genSummary):
        refCount = refSummary.get(token, 0)
        genCount = genSummary.get(token, 0)
        intersection += min(refCount, genCount)
        refTotal += refCount
        genTotal += genCount
    recall = intersection / refTotal if refTotal > 0. else 0.
    prec = intersection / genTotal if genTotal > 0. else 0.
    f1 = (2. * recall * prec) / (recall + prec) if (recall + prec) > 0. else 0.
    return recall, prec, f1
class LinearRegressor(nn.Module):
    """Feed-forward Q-value regressor.

    Architecture: input Linear -> ``nlayers`` applications of ONE shared
    hidden Linear (weight sharing) -> BatchNorm1d -> output Linear.

    NOTE(review): there is no non-linearity between layers, so the whole
    network is an affine map of its input — confirm that is intentional.
    """

    def __init__(self, input_size, outputsize, nunits, nlayers):
        super(LinearRegressor, self).__init__()
        # Submodule creation order is kept stable so seeded parameter
        # initialization is reproducible.
        self.nlayers = nlayers
        self.inputlayer = nn.Linear(input_size, nunits)
        self.hiddenlayer = nn.Linear(nunits, nunits)
        self.dense1_bn = nn.BatchNorm1d(nunits)
        self.outputlayer = nn.Linear(nunits, outputsize)

    def forward(self, input_vec):
        hidden = self.inputlayer(input_vec)
        for _ in range(self.nlayers):
            # The same Linear layer (same weights) is applied each pass.
            hidden = self.hiddenlayer(hidden)
        return self.outputlayer(self.dense1_bn(hidden))
class lstmRegressor(nn.Module):
    """LSTM regressor: LSTM -> ``nlayers`` applications of a shared Linear
    layer -> output Linear.

    Bug fixed: ``nn.LSTM`` returns a ``(output, (h_n, c_n))`` tuple; the
    original forward assigned that whole tuple to one variable and passed it
    into the Linear layer, raising a TypeError on the first call.  The tuple
    is now unpacked and only the per-step outputs flow onward.
    """

    def __init__(self, input_size, outputsize, nunits, nlayers):
        super(lstmRegressor, self).__init__()
        self.nlayers = nlayers
        self.inputlayer = nn.LSTM(input_size, nunits, num_layers=1)
        self.hiddenlayer = nn.Linear(nunits, nunits)
        # Created for interface/state_dict parity with LinearRegressor;
        # not applied in forward (matching the original's commented return).
        self.dense1_bn = nn.BatchNorm1d(nunits)
        self.outputlayer = nn.Linear(nunits, outputsize)

    def forward(self, input_vec):
        # input_vec: (seq_len, batch, input_size)
        lstm_out, _ = self.inputlayer(input_vec)
        hiddenlayer = lstm_out
        for i in range(self.nlayers):
            # Same shared Linear layer applied nlayers times.
            hiddenlayer = self.hiddenlayer(hiddenlayer)
        return self.outputlayer(hiddenlayer)
def buildPredSummary(df, summary, sentence_emb, curr_pred_emb, action, select_index, sent_index):
    """Update the running summary text and embedding after one SELECT/SKIP step.

    Parameters
    ----------
    df : frame-like with a 'sentence' column indexable by position
    summary : str, summary text accumulated so far
    sentence_emb : embedding of the current sentence
    curr_pred_emb : embedding of the summary built so far
    action : 1 x 2 int tensor, one-hot over {SKIP, SELECT}
    select_index : column of ``action`` that encodes SELECT
    sent_index : position of the current sentence

    Returns
    -------
    (new_summary_embedding, new_summary_text)

    Bug fixed: the original body referenced an undefined name
    ``curr_summary`` (the caller's variable leaked into the function),
    raising NameError; it now uses the ``summary`` parameter.
    """
    selected = action.select(1, select_index).tolist()[0] == 1
    if sent_index == 0 and selected:
        # First selected sentence: no separating space.
        return sentence_emb, summary + df['sentence'][sent_index]
    if selected:
        # NOTE(review): the original comment said "average of the embeddings"
        # but the code multiplies element-wise; preserved as-is — confirm.
        return curr_pred_emb * sentence_emb, summary + ' ' + df['sentence'][sent_index]
    # SKIP: text unchanged.  NOTE(review): the original returns sentence_emb
    # here too (not curr_pred_emb); preserved — verify that is intended.
    return sentence_emb, summary
# +
# Load one document's sentence-level SIF embeddings (one row per sentence).
sdf = pd.read_csv('/home/francisco/GitHub/DQN-Event-Summarization/data/sif/train_000_0.csv')
# Initializing stuff
SKIP = 0
SELECT = 1
# Reference summary and its bag-of-words for ROUGE scoring.
true_summary = sdf['summary'][0]
ts_tokenized = Counter(true_summary.split(" "))
nepochs = 500
sif_emb_d = 300      # SIF embedding dimensionality
input_dim = 600      # sentence embedding + running-summary embedding
outputdim = 2        # Q-values for SKIP / SELECT
neurons = 256
nhiddenlayers = 4
rand_rate = 0.50     # exploration rate (decayed each epoch)
decay_rate = 0.025
lrate = 0.001
momentum_rate = 0.8
criterion = nn.MSELoss()
# NOTE(review): index.max() is one less than the row count for a default
# RangeIndex, so range(nsentences) skips the last sentence — confirm.
nsentences = sdf.index.max()
# -
# Toy inputs for experimenting with LSTM input shapes.
x0 = [autograd.Variable(torch.randn((1, 3))) for _ in range(5)]
x1 = [autograd.Variable(torch.randn((4, 3))) for _ in range(1)]
# +
# The first axis is the sequence itself
# the second indexes instances in the mini-batch
# the third indexes elements of the input.
lstm0 = nn.LSTM(input_size=3, hidden_size=2, num_layers=2)
linear0 = nn.Linear(2, 2)
# +
# Reshape to (seq_len, batch=1, input_size) before feeding the LSTM.
out0, hidden0 = lstm0(x0[0].view(1, 1, -1))
out1, hidden1 = lstm0(x1[0].view(4, 1, -1))
# -
out1
# Notice this pulls the last state of the sequence
out1[-1]
linear0(out1[-1])
# +
def prepare_sequence(seq, to_ix):
    """Map each token of ``seq`` through the ``to_ix`` vocabulary and wrap
    the resulting index tensor as an autograd Variable."""
    indices = [to_ix[token] for token in seq]
    return autograd.Variable(torch.LongTensor(indices))
# Tiny POS-tagging corpus: (tokenized sentence, tag sequence) pairs.
training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
# Build the word vocabulary: each new word gets the next integer id.
word_to_ix = {}
for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}
# These will usually be more like 32 or 64 dimensional.
# We will keep them small, so we can see how the weights change as we train.
EMBEDDING_DIM = 6
HIDDEN_DIM = 6
class LSTMTagger(nn.Module):
    """Sequence tagger: word embedding -> single LSTM -> linear -> log-softmax.

    Fix: ``F.log_softmax`` is now called with an explicit ``dim=1``
    (normalize tag scores per token).  The implicit-dim form is deprecated
    and already picked dim=1 for 2-D input, so behavior is unchanged.
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        # The linear layer that maps from hidden state space to tag space.
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Fresh zero (h_0, c_0); axes are (num_layers, minibatch, hidden_dim).
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))

    def forward(self, sentence):
        # NOTE(review): self.hidden is carried over between forward calls;
        # callers are expected to reset it via init_hidden() per sequence.
        embeds = self.word_embeddings(sentence)
        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
# +
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
inputs = prepare_sequence(training_data[0][0], word_to_ix)
sentence = ['Everybody', 'read', 'that', 'book']
tags = ['NN', 'V', 'DET', 'NN']
sentence_in = prepare_sequence(sentence, word_to_ix)
targets = prepare_sequence(tags, tag_to_ix)
tag_scores = model(sentence_in)
# -
sentence, sentence_in, model(sentence_in)
emb = nn.Embedding(len(word_to_ix), EMBEDDING_DIM)
lstmemb = nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM)
hidden2tag = nn.Linear(HIDDEN_DIM, len(tag_to_ix))
# +
# out.view(len(sentence_in), -1) # 4 x 36
out, hidden = lstmemb(emb(sentence_in).view(len(sentence_in), 1, -1))
tag_space = hidden2tag(out.view(len(sentence_in), -1))
tag_scores = F.log_softmax(tag_space)
# -
tag_scores
class jointmodel(nn.Module):
    """Apply ONE shared Linear(300, 2) projection to two inputs and
    concatenate the results along dim 1 (output shape: batch x 4)."""

    def __init__(self):
        super(jointmodel, self).__init__()
        self.features = nn.Linear(300, 2)

    def forward(self, x, y):
        projected = [self.features(t) for t in (x, y)]
        return torch.cat(projected, 1)
m = jointmodel()
# NOTE(review): sent_emb / predsummary_emb are defined in LATER cells —
# this notebook relies on out-of-order execution; re-run order matters.
m(autograd.Variable(sent_emb), autograd.Variable(predsummary_emb))
# +
# Build the 600-dim model input: current sentence embedding (columns 4+)
# concatenated with the running-summary embedding.
sent_index = 0
sent_emb = torch.FloatTensor(sdf[sdf.columns[4:]].values[sent_index, :].reshape(1, sif_emb_d))
train_xs = torch.cat([sent_emb, predsummary_emb], dim=1)
class lstmRegressor(nn.Module):
    """Second (shadowing) lstmRegressor variant: Linear -> LSTM (with an
    explicitly carried zero hidden state) -> Linear -> output Linear.

    NOTE(review): forward feeds the 2-D Linear output straight into the
    LSTM and then reshapes with out.view(1, 1, -1); this only works for a
    single (1 x input_size) input — confirm the expected input shape.
    ``nlayers`` is stored but never used in forward.
    """
    def __init__(self, input_size, outputsize, nunits, nlayers):
        super(lstmRegressor, self).__init__()
        self.nunits = nunits
        self.nlayers = nlayers
        self.inputlayer = nn.Linear(input_size, nunits)
        self.lstmlayer = nn.LSTM(nunits, nunits, num_layers=1)
        self.linearlayer= nn.Linear(nunits, nunits)
        self.hidden = self.init_hidden()
        #self.dense1_bn = nn.BatchNorm1d(nunits)
        self.outputlayer = nn.Linear(nunits, outputsize)
    def init_hidden(self):
        # Zero (h_0, c_0); axes are (num_layers, minibatch_size, hidden_dim).
        return (autograd.Variable(torch.zeros(1, 1, self.nunits)),
                autograd.Variable(torch.zeros(1, 1, self.nunits)))
    def forward(self, input_vec):
        hiddenlayer = self.inputlayer(input_vec)
        out, hiddenlayer = self.lstmlayer(hiddenlayer, self.hidden)
        hiddenlayer = self.linearlayer(out.view(1, 1, -1))
        return self.outputlayer(hiddenlayer)
# -
# Quick shape probe of the LSTM variant before training.
model = lstmRegressor(input_dim, outputdim, neurons, nhiddenlayers)
train_xs.shape, neurons, nhiddenlayers, outputdim
pred = model(autograd.Variable(train_xs))
pred.view(1, -1)
# +
#model = lstmRegressor(input_dim, outputdim, neurons, nhiddenlayers)
model = LinearRegressor(input_dim, outputdim, neurons, nhiddenlayers)
model.zero_grad()
# swap to ADAM
optimizer = optim.Adam(model.parameters(), lr=lrate)
# Per-step training diagnostics, later turned into a DataFrame.
lossf = {'loss': [], 'sent_idx': [], 'epoch': [], 'action': [], 'f1': [], 'optpred': [], 'rouge_delta':[]}
for epoch in range(nepochs):
    # reset embeddings and summary at the start of each epoch
    f1_t0 = 0.
    curr_summary = ''
    predsummary_emb = torch.from_numpy(np.zeros((1, sif_emb_d))).float()
    for sent_index in range(nsentences):
        model.zero_grad()
        # The embeddings start on the 5th column (index 4)
        sent_emb = torch.FloatTensor(sdf[sdf.columns[4:]].values[sent_index, :].reshape(1, sif_emb_d))
        train_xs = torch.cat([sent_emb, predsummary_emb], dim=1)
        train_ys = torch.from_numpy(np.asarray([0]).reshape(1, 1)).float()
        action = torch.from_numpy(np.asarray([0, 0]).reshape(1,2)).int()
        # Predicted Q-values for (SKIP, SELECT).
        rouge_preds = model(autograd.Variable(train_xs))
        #rouge_preds = model(autograd.Variable(train_xs).view(1, 1, -1))
        qMax, qIndx = rouge_preds.max(dim=1)
        # Epsilon-greedy exploration.
        # NOTE(review): `uniform() > rand_rate` fires MORE often as
        # rand_rate decays, so exploration INCREASES over epochs — the
        # comparison looks inverted; confirm.  Also, when the random branch
        # is taken qIndx becomes a numpy int, but `.data[0]` below assumes
        # a torch tensor — verify this path actually runs.
        if np.random.uniform() > rand_rate and rand_rate > 0:
            # Randomly choosing either 0 or 1 some percent of the time
            qIndx = np.random.randint(0, 2, 1)[0]
        # One-hot encode the chosen action.
        action[:, qIndx.data[0]] = 1
        action[:, abs(qIndx.data[0] - 1)] = 0
        # building the summary and capturing the embedding
        # without a history model doesn't make a lot of sense
        # not clear what's happening...
        # concatenate summary embedding to input or try separate joining layer like before
        # Worth looking at rouge of each sentence...calculate f1 for each sentence to find out
        # might help figure out what's going on...
        predsummary_emb, curr_summary = buildPredSummary(
            sdf,
            curr_summary,
            sent_emb,
            predsummary_emb,
            action,
            SELECT,
            sent_index
        )
        # NOTE(review): argument order is (genSummary, refSummary) but the
        # TRUE summary is passed first — recall/precision are swapped
        # relative to the signature (F1 is symmetric, so f1 is unaffected).
        recall, prec, f1 = rougeScores(ts_tokenized, Counter(curr_summary.split(" ")))
        # Backward part: regress the chosen action's Q-value onto the
        # observed change in ROUGE-F1.
        predQonActions = torch.masked_select(rouge_preds, autograd.Variable(action.byte()))
        rouge_delta = f1 - f1_t0
        # Change in rouge-f1
        train_ys[0] = rouge_delta
        loss = criterion(predQonActions, autograd.Variable(train_ys))
        loss.backward()
        optimizer.step()
        lossf['loss'].append(loss.data[0])
        lossf['sent_idx'].append(sent_index)
        lossf['epoch'].append(epoch)
        lossf['action'].append(qIndx.data[0])
        lossf['f1'].append(f1)
        lossf['optpred'].append(predQonActions.data[0])
        lossf['rouge_delta'].append(rouge_delta)
        # Storing last round
        # NOTE(review): this stores the DELTA, not the previous f1, so the
        # next step's rouge_delta is computed against f1 - f1_t0 — confirm.
        f1_t0 = f1 - f1_t0
    # Decay exploration each epoch; NOTE(review): can go negative.
    rand_rate -= decay_rate
# Making the performance data a dataframe
perf = pd.DataFrame(lossf)
# -
# Bug fixed in all four plots below: '&' binds tighter than '==' in Python,
# so `perf['sent_idx'] == perf['sent_idx'].max() & (perf['action']==0)`
# parsed as `sent_idx == (max() & (action==0))` and filtered the wrong rows.
# The two comparisons are now parenthesized and combined explicitly.
last_sentence = perf['sent_idx'] == perf['sent_idx'].max()
perf[last_sentence & (perf['action'] == 0)].plot(
    x='epoch', y='optpred',
    grid=True,
    figsize=(12, 6),
    title='Predictions of final predicted summary for action=SKIP'
)
plt.show()
perf[last_sentence & (perf['action'] == 1)].plot(
    x='epoch', y='optpred',
    grid=True,
    figsize=(12, 6),
    title='Predictions of final predicted summary for action=SELECT'
)
plt.show()
perf[last_sentence & (perf['action'] == 0)].plot(
    x='epoch', y='loss',
    grid=True,
    figsize=(12, 6),
    title='Loss value for action=SKIP'
)
plt.show()
perf[last_sentence & (perf['action'] == 1)].plot(
    x='epoch', y='loss', c='red',
    grid=True,
    figsize=(12, 6),
    title='Loss value for action=SELECT'
)
plt.show()
# +
# Loss at the final sentence of each epoch, across training.
perf[perf['sent_idx'] == perf['sent_idx'].max()].plot(
    x='epoch', y='loss', c='red',
    grid=True,
    figsize=(12, 6),
    title='Loss value across training'
)
plt.show()
# -
# End-of-document ROUGE-F1 per epoch.
perf[perf['sent_idx'] == perf['sent_idx'].max()].plot(
    x='epoch', y='f1',
    grid=True,
    figsize=(12, 6),
    title='f1-score across training'
)
plt.show()
# SELECT/SKIP decision per sentence in the last epoch.
perf[perf['epoch'] == perf['epoch'].max()].plot(
    x='sent_idx', y='action',
    ylim=[-0.2,1.2],
    grid=True,
    figsize=(12, 6),
    title='Action across sentences'
)
plt.show()
# +
# Compare the learned summary against a lead-3 baseline.
lead3 = ' '.join(sdf['sentence'][0:3])
# NOTE(review): rougeScores(genSummary, refSummary) is called with the TRUE
# summary first, so "recall" and "precision" here are swapped relative to
# the function's signature (F1 is symmetric and unaffected) — confirm.
finalsummary = rougeScores(ts_tokenized, Counter(curr_summary.split(" ")))
baseline = rougeScores(ts_tokenized, Counter(lead3.split(" ")))
print("lead-3 recall = %.3f; precision = %.3f; f1-score = %.3f " % (baseline[0], baseline[1], baseline[2]))
print("learned recall = %.3f; precision = %.3f; f1-score = %.3f " % (finalsummary[0], finalsummary[1], finalsummary[2]))
# -
lead3
true_summary
curr_summary
| SIF/summarization/DQN SIF Embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# #### 1. In the below elements which of them are values or an expression? eg:- values can be integer or string and expressions will be mathematical operators.
# *
# 'hello'
# -87.8
# -
# /
# +
# 6
#
# [Answer]: Values are 'hello', -87.8, and 6
# Expressions are *, - , /, +
# #### 2. What is the difference between string and variable?
# String is a data type in Python.
# Variables are containers that can hold a value of any data type in Python.
# #### 3.Describe three different data types.
# integer, float, string and boolean
# #### 4. What is an expression made up of? What do all expressions do?
# Expressions are entities that act on variables. expressions modify the values of the variables.
# #### 5. This assignment statements, like spam = 10. What is the difference between an expression and a statement?
# expressions act on variables and identifiers. On the other hands, statements perform an action and can have expressions in it.
# #### 6. After running the following code, what does the variable bacon contain?
# #### bacon = 22
# #### bacon + 1
# bacon still contains 22, because the result of bacon + 1 is never assigned back to bacon.
# #### 7.What should the values of the following two terms be?
# #### 'spam' + 'spamspam'
# #### 'spam' * 3
#
# Both the above expressions evaluate to 'spamspamspam'
# #### 8. Why is eggs a valid variable name while 100 is invalid?
# Any integer or float value cannot be used as a variable name in python.
# #### 9. What three functions can be used to get the integer, floating-point number, or string version of a value?
# int(), float() and str()
# #### 10. Why does this expression cause an error? How can you fix it?
# #### 'I have eaten ' + 99 + ' burritos.'
# An integer value cannot be concatenated with a string value. so the above statement throws error.
# To correct it, we have to typecast the integer value to string as given below:
# 'I have eaten ' + str(99) + ' burritos.'
| Python Basic Assignment/Assignment_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MAT281 - Laboratorio N°06
#
#
# ## Problema 01
# <img src="./images/logo_iris.jpg" width="360" height="360" align="center"/>
# El **Iris dataset** es un conjunto de datos que contiene muestras de tres especies de Iris (Iris setosa, Iris virginica e Iris versicolor). Se midieron cuatro rasgos de cada muestra: el largo y ancho del sépalo y pétalo, en centímetros.
#
# Lo primero es cargar el conjunto de datos y ver las primeras filas que lo componen:
# +
# librerias
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', 500) # Ver más columnas de los dataframes
# Ver gráficos de matplotlib en jupyter notebook/lab
# %matplotlib inline
# +
# cargar datos
# Load the "contaminated" Iris dataset used in this lab.
df = pd.read_csv(os.path.join("data","iris_contaminados.csv"))
# Normalise the column names to the camelCase names used in the rest of the notebook.
df.columns = ['sepalLength',
'sepalWidth',
'petalLength',
'petalWidth',
'species']
# Quick sanity check of the first rows.
df.head()
# -
# ### Bases del experimento
#
# Lo primero es identificar las variables que influyen en el estudio y la naturaleza de esta.
#
# * **species**:
# * Descripción: Nombre de la especie de Iris.
# * Tipo de dato: *string*
# * Limitantes: solo existen tres tipos (setosa, virginia y versicolor).
# * **sepalLength**:
# * Descripción: largo del sépalo.
# * Tipo de dato: *integer*.
# * Limitantes: los valores se encuentran entre 4.0 y 7.0 cm.
# * **sepalWidth**:
# * Descripción: ancho del sépalo.
# * Tipo de dato: *integer*.
# * Limitantes: los valores se encuentran entre 2.0 y 4.5 cm.
# * **petalLength**:
# * Descripción: largo del pétalo.
# * Tipo de dato: *integer*.
# * Limitantes: los valores se encuentran entre 1.0 y 7.0 cm.
# * **petalWidth**:
# * Descripción: ancho del pépalo.
# * Tipo de dato: *integer*.
# * Limitantes: los valores se encuentran entre 0.1 y 2.5 cm.
# Su objetivo es realizar un correcto **E.D.A.**, para esto debe seguir las siguientes intrucciones:
# 1. Realizar un conteo de elementos de la columna **species** y corregir según su criterio. Reemplace por "default" los valores nan..
# +
# Normalise the species strings (lowercase, no surrounding spaces) and
# replace missing values with the sentinel "default", then count categories.
df['species']=df['species'].str.lower().str.strip().fillna('default')
df.species.value_counts()
# -
# 2. Realizar un gráfico de box-plot sobre el largo y ancho de los petalos y sépalos. Reemplace por **0** los valores nan.
# Box-plot of the four numeric measurements; NaNs shown as 0 (display only,
# df itself is not modified here).
sns.boxplot(data=df.drop(['species'], axis=1).fillna(0))
# 3. Anteriormente se define un rango de valores válidos para los valores del largo y ancho de los petalos y sépalos. Agregue una columna denominada **label** que identifique cuál de estos valores esta fuera del rango de valores válidos.
# Flag rows whose measurements fall outside the valid ranges defined above.
# Vectorised with boolean masks instead of the original per-row iloc loop
# (single pass per column instead of O(rows) Python iterations).
# The masks are applied in the same order as the original checks, so when
# several measurements are out of range the LAST check wins (petalWidth has
# highest precedence), exactly matching the loop's overwrite behaviour.
# NaN values compare False on both sides and therefore stay unlabelled,
# just as in the original loop.
df = df.assign(label='')
df.loc[(df['sepalLength'] > 7.0) | (df['sepalLength'] < 4.0), 'label'] = 'sepalLength'
df.loc[(df['sepalWidth'] > 4.5) | (df['sepalWidth'] < 2.0), 'label'] = 'sepalWidth'
df.loc[(df['petalLength'] > 7.0) | (df['petalLength'] < 1.0), 'label'] = 'petalLength'
df.loc[(df['petalWidth'] > 2.5) | (df['petalWidth'] < 0.1), 'label'] = 'petalWidth'
df
# 4. Realice un gráfico de *sepalLength* vs *petalLength* y otro de *sepalWidth* vs *petalWidth* categorizados por la etiqueta **label**. Concluya sus resultados.
# +
# Scatter of sepal length vs. petal length, coloured by the out-of-range label.
sns.scatterplot(
    data = df,
    y = 'sepalLength',
    x = 'petalLength',
    hue = 'label'
)
plt.show()
# Scatter of sepal width vs. petal width, coloured by the out-of-range label.
sns.scatterplot(
    data = df,
    y = 'sepalWidth',
    x = 'petalWidth',
    hue = 'label'
)
plt.show()
# -
# se puede concluir que es facil seperar/diferenciar las especies por los criterios dados
# 5. Filtre los datos válidos y realice un gráfico de *sepalLength* vs *petalLength* categorizados por la etiqueta **species**.
# +
# Keep only the rows whose measurements passed validation (empty label),
# then plot the valid data coloured by species.
# FIX: the original bound the name `list`, shadowing the builtin, and used
# the `== False` anti-idiom; use a descriptive name and `~` negation instead.
out_of_range_labels = ['sepalLength', 'sepalWidth', 'petalLength', 'petalWidth']
df = df[~df.label.isin(out_of_range_labels)]
sns.scatterplot(
    data = df,
    y = 'sepalLength',
    x = 'petalLength',
    hue = 'species'
)
plt.show()
sns.scatterplot(
    data = df,
    y = 'sepalWidth',
    x = 'petalWidth',
    hue = 'species'
)
plt.show()
| labs/lab_06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.006359, "end_time": "2022-06-04T09:18:25.208549", "exception": false, "start_time": "2022-06-04T09:18:25.202190", "status": "completed"} tags=[]
# # Commodity momentum
#
# This notebook analyses commodity cross-sectional momentum strategy. The strategy takes long positions on contracts with best 1-year perfomance and short positions on ones with worst 1-year performance.
# + papermill={"duration": 2.877092, "end_time": "2022-06-04T09:18:28.091761", "exception": false, "start_time": "2022-06-04T09:18:25.214669", "status": "completed"} tags=[]
# %matplotlib inline
from datetime import datetime
import logging
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
plt.style.use('bmh')
from vivace.backtest import signal
from vivace.backtest import processing
from vivace.backtest.contract import all_futures_hollstein2020
from vivace.backtest.engine import BacktestEngine
from vivace.backtest.enums import Strategy
from vivace.backtest.stats import Performance
# + [markdown] papermill={"duration": 0.003665, "end_time": "2022-06-04T09:18:28.099374", "exception": false, "start_time": "2022-06-04T09:18:28.095709", "status": "completed"} tags=[]
# # Data
#
# 26 commodity futures are used as per Hollstein 2020.
# + papermill={"duration": 0.01971, "end_time": "2022-06-04T09:18:28.122841", "exception": false, "start_time": "2022-06-04T09:18:28.103131", "status": "completed"} tags=[]
all_futures_hollstein2020
# + papermill={"duration": 0.0127, "end_time": "2022-06-04T09:18:28.141147", "exception": false, "start_time": "2022-06-04T09:18:28.128447", "status": "completed"} tags=[]
all_futures_hollstein2020.shape
# + [markdown] papermill={"duration": 0.004916, "end_time": "2022-06-04T09:18:28.152187", "exception": false, "start_time": "2022-06-04T09:18:28.147271", "status": "completed"} tags=[]
# # Performance
# + [markdown] papermill={"duration": 0.004488, "end_time": "2022-06-04T09:18:28.161399", "exception": false, "start_time": "2022-06-04T09:18:28.156911", "status": "completed"} tags=[]
# ## Run backtest
#
# A simple portfolio is constructed by using trailing 1-year returns of each commodity futures. Unlike studies in equities, the recent 1-month is included in the formation period. Positions are rebalanced on a monthly basis.
#
# Similar to other popular academic strategies, this commodity momentum signal has been stalling since around 2015.
# + papermill={"duration": 306.054777, "end_time": "2022-06-04T09:23:34.220798", "exception": false, "start_time": "2022-06-04T09:18:28.166021", "status": "completed"} tags=[]
# Cross-sectional momentum backtest: delta-one positions on the Hollstein
# 2020 commodity universe, ranked by trailing 252-day (1-year) returns.
engine = BacktestEngine(
    strategy=Strategy.DELTA_ONE.value,
    instrument=all_futures_hollstein2020.index,
    signal=signal.XSMomentum(lookback=252),
    log_level=logging.WARN,
)
engine.run()
# + papermill={"duration": 109.665749, "end_time": "2022-06-04T09:25:23.891323", "exception": false, "start_time": "2022-06-04T09:23:34.225574", "status": "completed"} tags=[]
# Gross (pre-cost) equity curve of the strategy as a named Series.
portfolio_return = (engine.calculate_equity_curve(calculate_net=False)
                    .rename('Commodity momentum portfolio'))
# + papermill={"duration": 0.756756, "end_time": "2022-06-04T09:25:24.655242", "exception": false, "start_time": "2022-06-04T09:25:23.898486", "status": "completed"} tags=[]
# Full-history cumulative return, log scale.
fig, ax = plt.subplots(figsize=(8, 4.5))
portfolio_return.plot(ax=ax, logy=True);
ax.set_title('Commodity momentum portfolio')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": 0.092054, "end_time": "2022-06-04T09:25:24.754718", "exception": false, "start_time": "2022-06-04T09:25:24.662664", "status": "completed"} tags=[]
# Summary statistics (return/risk metrics) for the equity curve.
portfolio_return.pipe(Performance).summary()
# + [markdown] papermill={"duration": 0.006955, "end_time": "2022-06-04T09:25:24.768060", "exception": false, "start_time": "2022-06-04T09:25:24.761105", "status": "completed"} tags=[]
# ## Recent performance
# + papermill={"duration": 0.604864, "end_time": "2022-06-04T09:25:25.379359", "exception": false, "start_time": "2022-06-04T09:25:24.774495", "status": "completed"} tags=[]
# Last two years (252 trading days per year), log scale.
fig, ax = plt.subplots(figsize=(8, 4.5))
portfolio_return.tail(252 * 2).plot(ax=ax, logy=True);
ax.set_title('Commodity momentum portfolio')
ax.set_ylabel('Cumulative returns');
# + [markdown] papermill={"duration": 0.008253, "end_time": "2022-06-04T09:25:25.396357", "exception": false, "start_time": "2022-06-04T09:25:25.388104", "status": "completed"} tags=[]
# # Reference
# - <NAME>., <NAME>. and <NAME>., 2013. Value and momentum everywhere. The Journal of Finance, 68(3), pp.929-985.
# - <NAME>., <NAME>. and <NAME>., 2020. Anomalies in commodity futures markets: Risk or mispricing?. Available at SSRN.
# + papermill={"duration": 0.015683, "end_time": "2022-06-04T09:25:25.420244", "exception": false, "start_time": "2022-06-04T09:25:25.404561", "status": "completed"} tags=[]
# Stamp the run time (UTC) so readers know when the results were generated.
print(f'Updated: {datetime.utcnow().strftime("%d-%b-%Y %H:%M")}')
| commodity_momentum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="44gc9JAUbj8s"
# # Day 7
# Logistic Regression In Class Work
# + id="FpMNjjEgbj81"
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="X4frXtU1bj82"
# Helper function
# + id="bNQduNEXbj82"
def plotlogistic(x,py):
    """Draw x against prob(y) for a logistic curve.

    Takes the x grid and the matching P(y=1) values; draws the figure
    and returns nothing.
    """
    # Single-axes figure; equivalent to figure() + add_subplot(111).
    fig, ax = plt.subplots()
    ax.plot(x, py)
    # Integer x ticks from -5 to 5 and a full grid, matching the handout.
    ax.xaxis.set(ticks=range(-5, 6))
    ax.grid(axis='both')
    plt.xlabel('x')
    plt.ylabel('probability of y')
    plt.title('Logistic Curve')
    plt.show()
# + [markdown] id="2EhvFJ_Zbj83"
# The Logistic Function for a single feature is
#
# $$ p=P(y=1)=\frac{e^{\beta_0+\beta_{1}X_{1}}}{1+e^{\beta_0+\beta_1X_1} }$$
# + [markdown] id="O2OYZNv7bj83"
# ## Develop the logistic function - Student Coding part a:
#
# Define a python function to compute $P(y=1)$ from an input signature ```(x,beta0,beta1)``` where each beta is a scalar, X1 is a (n by 1) matrix and $P(y=1)$ is a (n by 1) matrix
# + id="fvZoKwo3bj84"
def logistic1D(x,beta0,beta1):
    """Return P(y=1) of the 1-D logistic model evaluated elementwise on x.

    Parameters
    ----------
    x : array-like
        Input values (any shape); evaluated elementwise.
    beta0, beta1 : float
        Intercept and slope of the linear predictor z = beta0 + beta1*x.

    Returns
    -------
    np.ndarray with the same shape as x, each entry in (0, 1).
    """
    z = beta0 + beta1 * np.asarray(x, dtype=float)
    # Numerically stable sigmoid: only exponentiate non-positive arguments,
    # so np.exp never overflows.  The naive exp(z)/(1+exp(z)) form returns
    # inf/inf = nan (with overflow warnings) once z exceeds ~710.
    prob_y = np.empty_like(z)
    pos = z >= 0
    prob_y[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    expz = np.exp(z[~pos])
    prob_y[~pos] = expz / (1.0 + expz)
    return prob_y
# + [markdown] id="T_kawpQ5bj84"
# Instantiate x values
# + id="EfZdcV0zbj85"
# Dense evaluation grid over [-6, 6) in steps of 0.01.
x = np.arange(-6, 6, 0.01)
# + [markdown] id="hprIMgN8bj85"
# Beta Selection for canonical S curve: Student Coding for part c
#
# Your goal is to select scalar values for ```beta0``` and ```beta1``` to replicate s-curve canonical logistic function shape shown in the image below
#
# <img src="https://github.com/afit-csce623-master/template-inclass-day07/blob/main/Logistic-S-curve-canonical.png?raw=1">
# + id="7x3t37enbj85"
#select betas to try to recreate graph in part C
#------INSERT STUDENT CODE HERE FOR BETAS------------
# beta0 = 0 centres the curve at x = 0; beta1 = 1 gives the canonical slope.
b0 = 0.0
b1 = 1.0
#----------------------------------------------------
# + [markdown] id="BpWuYZznbj86"
# build the resulting logistic graph
# + id="2c6UBr0bbj86" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b5d9d352-bad5-4934-ff7f-5d3fd213691f"
# Evaluate the logistic model on the grid and draw it.
py = logistic1D(x,b0,b1)
plotlogistic(x,py)
# + [markdown] id="FnmxCIh1bj87"
# ## Beta Selection for Stairstep down: Student Coding for part F
#
# Your goal is to select scalar values for ```beta0``` and ```beta1``` to replicate downward step-function shape using the logistic function shown in the image below
#
# <img src="https://github.com/afit-csce623-master/template-inclass-day07/blob/main/Logistic-stairstep-down.png?raw=1">
#
# + id="O6qev6tSbj88"
#select betas to try to recreate graph in instructions part F
#------INSERT STUDENT CODE HERE FOR BETAS------------
# Large-magnitude betas make the transition sharp; a negative beta1 flips
# the curve so it steps DOWN.  The step occurs at x = -b0/b1 = -1.
b0 = -100.0 #placeholder
b1 = -100.0 #placeholder
#----------------------------------------------------
# + [markdown] id="E1meUG7Gbj88"
# build the resulting logistic graph
# + id="BJ-jpSH0bj89" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="c3d6196e-c606-4f7e-f09e-6196edbaf3da"
# Evaluate and draw the step-down variant.
py = logistic1D(x,b0,b1)
plotlogistic(x,py)
| CSCE 623_SP2021 in-class Day 07 - logistic function_instructor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import sep
import matplotlib.pyplot as plt
from astropy.io import fits
from matplotlib import rcParams
# %matplotlib inline
# Larger default figure size for the image plots below.
rcParams['figure.figsize'] = [10.,8.]
# Hubble UDF F105W drizzled science image (FITS).
fname = "hlsp_hudf12_hst_wfc3ir_udfmain_f105w_v1.0_drz.fits"
hdu_list = fits.open(fname)
hdu_list.info()
# The primary HDU holds the image array.
image_data = hdu_list[0].data
print(type(image_data))
print(image_data.shape)
hdu_list.close()
# Equivalent shortcut: read the data without managing the HDU list by hand.
image_data = fits.getdata(fname)
print(type(image_data))
print(image_data.shape)
#show the image
# Stretch the display to mean +/- one standard deviation so faint structure
# is visible against the sky background.
m,s = np.mean(image_data), np.std(image_data)
plt.imshow(image_data, interpolation='nearest',cmap='bone', vmin=m-s, vmax=m+s, origin='lower')
plt.colorbar()
#measure spatially varying background on image
#change byte data to read image: sep requires native byte order, while FITS
#data is big-endian.
image_data_new = image_data.byteswap().newbyteorder()
# BUG FIX: the original built the background twice and passed `mask=mask`,
# but no `mask` variable was ever defined, raising NameError at runtime.
# Build it once, with an explicit empty mask and the same box/filter sizes.
bkg = sep.Background(image_data_new, mask=None, bw=64, bh=64, fw=3, fh=3)
#get global mean and noise of image's background
print(bkg.globalback)
print(bkg.globalrms)
#evaluate background as 2-D array but same size as original image
bkg_image = bkg.back()
#bkg_image = np.array(bkg)
#show background
plt.imshow(bkg_image,interpolation='nearest',cmap='bone',origin='lower')
plt.colorbar();
#fig.savefig('plot.pdf')
#evaluate background noise as 2-D array, same size as original image
bkg_rms = bkg.rms()
#show background noise
plt.imshow(bkg_rms,interpolation='nearest',cmap='gray',origin='lower')
plt.colorbar();
#subtract background
# BUG FIX: subtract from the byte-swapped, native-order copy.  `image_data`
# is still the raw big-endian FITS array, and sep.extract rejects arrays
# with non-native byte order; `image_data_new` was created above precisely
# for use with sep.
image_data_sub = image_data_new - bkg
#set detection threshold to be a constant value of 1.5*sigma
#sigma=global background rms
objects = sep.extract(image_data_sub, 1.5, err=bkg.globalrms)
#number of objects detected
len(objects)
# +
#over-plot the object coordinates with some parameters on the image
#this will check where the detected objects are
from matplotlib.patches import Ellipse
#plot background-subtracted image
fig, ax = plt.subplots()
m,s = np.mean(image_data_sub), np.std(image_data_sub)
im = ax.imshow(image_data_sub,interpolation='nearest',cmap='gray',
               vmin=m-s,vmax=m+s,origin='lower')
#plot an ellipse for each object
# a/b are the semi-major/minor axes from sep; the factor 6 scales them to
# roughly 3x the object size so the markers are clearly visible.
for i in range(len(objects)):
    e = Ellipse(xy=(objects['x'][i],objects['y'][i]),
                width=6*objects['a'][i],
                height=6*objects['b'][i],
                angle=objects['theta'][i]*180./np.pi)
    e.set_facecolor('none')
    e.set_edgecolor('red')
    ax.add_artist(e)
#fig.savefig('plot.png')
# -
#see available fields
objects.dtype.names
#perform circular aperture photometry
#with a 3 pixel radius at locations of the objects
flux, fluxerr, flag = sep.sum_circle(image_data_sub,objects['x'], objects['y'],
                                     3.0, err=bkg.globalrms, gain=1.0)
#show the first 10 objects results:
for i in range(10):
    print("object {:d}: flux = {:f} +/- {:f}".format(i, flux[i], fluxerr[i]))
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### III. Feature Selection, Algorithm Selection & Generalization evidence
#
# In this part, we create a model, we try different algorithms and see which one delivers the best results. Then we chose the best algorithm and fine tune it.
#
# This notebook presents the following parts:
#
# 1) Model creation
# * Split dataset on train and test sets, making sure that countries with duplicates are in either the train or the test set.
# 2) Feature selection (using filter mthods by tree derived importance technique, I reduce dataset from 39 to 8
#
# 2) Algorithm testing :
# * Linear Regression Simple
# * Lasso Regression
# * Ridge Regression
# * Boosted decision tree regressor
# * Random forest regressor
# * Bayesian linear regressor
# * XGB reg
#
# 3) Chosing best algorithm
#
#
# 5) Save model
#
# 6) Verification that the **model created will generalize well** (visualizations, etc.)
# +
# import libraries
import pandas as pd
from sklearn import preprocessing
import sklearn.model_selection as ms
from sklearn import linear_model
import sklearn.metrics as sklm
import numpy as np
import numpy.random as nr
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import math
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
# %matplotlib inline
# %matplotlib inline
# -
#import data set
# Prepared dataset from the previous notebook (one row per country-year).
df=pd.read_csv('dfprepared2.csv')
df.shape
df.head(2)
df.columns
df.shape
# ### 1. Model Creation
#
# Here it is very important to know what I am doing.
#
# Given the number of countries are limited 92, and they are not the same in the train as in the test data.
#
# When doing the local train/test split, it is imperative that I split the data by country so that all years of data for a country appear either in the train set or the test set, but are not split across both.
#
# The country codes in the test set are distinct from those in the train set. In other words, no country that appears in the train set appears in the test set. Thus, country-specific features (i.e. country dummy variables) will not be an option. However, the countries in the test set still share similar patterns as those in the train set and so other feature engineering will work the same as usual.
#
# Consequently, I proceed as follows:
#
# * Identify unique country codes
# * Split train and test data:
# - Train data will have some country codes
# - Test data will have the other country codes
# - Drop the country_code column from both train and test data
df.country_code.unique()
#To select rows whose column value is in an iterable array, which we'll define as array, you can use isin:
# Hand-picked country codes that form the training set; all years for a
# country land entirely in either train or test (no leakage across the split).
array = ['889f053', '9e614ab', '100c476', '4609682', 'be2a7f5', '7e222a7',
         '066b021', '66b86bf', '583201c', '0ea781c', '2ddc563', 'b79b5f9',
         'f787860', '4080343', 'c408d26', 'e256731', '99a7c06', 'a0b37e7',
         'd090b87', '6966b4a', '79c89fd', '12c8f8f', '3e049d7', 'e509cda',
         'abd1492', '04952a0', '5dbddf9', '893d538', 'd3a0eeb', 'c8e4701',
         'f405b98', '5c2e474', '10aeba6', '0b6e276', '2e5e810', '0593aa0',
         'ed9ad13', 'ba2039a', '6303e84', '71dc81f', 'd680446', '11c9833',
         '75c02b8', 'c3782c8', '81e884c', '5f1162c', 'e8739c8', '085807f',
         'ba8e2c5', 'cff4c58', 'c3668f5', 'e8bfe1e', '9621c07', '93d74a6',
         '0845041', 'dd64913', '9e79f12', 'ee5721a', '30e2302', '6b615ad',
         'e15a18a', '7fb4d17', '0c0177b', '8fb5447', ]
train = df.loc[df['country_code'].isin(array)]
train.shape
# for test values, we use the tilde (~) operator to select those country codes NOT IN array
test = df.loc[~df['country_code'].isin(array)]
test.shape
#Now I can delete the country_code column from train and test:
train= train.drop(['country_code'], axis = 1)
test= test.drop(['country_code'], axis = 1)
# Features = everything except the target; target = prevalence_of_undernourishment.
X_train = train.loc[:, train.columns != 'prevalence_of_undernourishment']
X_test = test.loc[:, test.columns != 'prevalence_of_undernourishment']
y_train = train['prevalence_of_undernourishment']
y_test = test['prevalence_of_undernourishment']
# ### 1.1 Feature Selection
# For this, I shall use basic filter methods and by Random Forest Importance
# +
# Remove Constant Features
# A zero-standard-deviation column carries no information for any model.
constant_features = [
    feat for feat in X_train.columns if X_train[feat].std() == 0
]
X_train.drop(labels=constant_features, axis=1, inplace=True)
X_test.drop(labels=constant_features, axis=1, inplace=True)
X_train.shape, X_test.shape
# -
# Two features have been removed
# +
# remove quasi-constant features
sel = VarianceThreshold(
    threshold=0.01) # drop features whose variance is below 0.01 (near-constant)
sel.fit(X_train) # fit finds the features with low variance
sum(sel.get_support()) # how many features are kept (not quasi-constant)?
# -
# None has been removed
# Column names that survive the variance filter, in original order.
features_to_keep = X_train.columns[sel.get_support()]
# +
# we can then remove the features like this
X_train = sel.transform(X_train)
X_test = sel.transform(X_test)
X_train.shape, X_test.shape
# +
# sklearn transformations lead to numpy arrays
# here I transform the arrays back to dataframes
# please be mindful of getting the columns assigned
# correctly
X_train= pd.DataFrame(X_train)
X_train.columns = features_to_keep
X_test= pd.DataFrame(X_test)
X_test.columns = features_to_keep
# +
# check for duplicated features in the training set
# Pairwise O(n^2) comparison of columns; for each later column identical to
# an earlier one, record the LATER name so only the first occurrence is kept.
duplicated_feat = []
for i in range(0, len(X_train.columns)):
    if i % 10 == 0: # progress indicator for the quadratic scan
        print(i)
    col_1 = X_train.columns[i]
    for col_2 in X_train.columns[i + 1:]:
        if X_train[col_1].equals(X_train[col_2]):
            duplicated_feat.append(col_2)
len(duplicated_feat)
# +
# remove duplicated features
X_train.drop(labels=duplicated_feat, axis=1, inplace=True)
X_test.drop(labels=duplicated_feat, axis=1, inplace=True)
X_train.shape, X_test.shape
# +
# I keep a copy of the dataset except constant and duplicated variables
# to measure the performance of machine learning models
# at the end of the notebook
X_train_basic_filter = X_train.copy()
X_test_basic_filter = X_test.copy()
# +
# find and remove correlated features
def correlation(dataset, threshold):
    """Return the set of column names whose absolute Pearson correlation
    with any EARLIER column exceeds `threshold`.

    Only the later column of each correlated pair is reported, so dropping
    the returned names keeps one representative per correlated group.
    """
    flagged = set()
    corr = dataset.corr()
    cols = corr.columns
    for idx in range(len(cols)):
        # Compare column idx against every column that precedes it.
        for prev in range(idx):
            if abs(corr.iloc[idx, prev]) > threshold:
                flagged.add(cols[idx])
                break  # already flagged; no need to scan further
    return flagged
# Columns with |r| > 0.8 against an earlier column; drop them from both sets.
corr_features = correlation(X_train, 0.8)
print('correlated features: ', len(set(corr_features)) )
# +
# removed correlated features
X_train.drop(labels=corr_features, axis=1, inplace=True)
X_test.drop(labels=corr_features, axis=1, inplace=True)
X_train.shape, X_test.shape
# +
# here I will do the model fitting and feature selection
# altogether in one line of code
# first I specify the Random Forest instance, indicating
# the number of trees (the default value in sklearn is 10
# Then I use the selectFromModel object from sklearn
# to automatically select the features
# SelectFrom model will select those features which importance
# is greater than the mean importance of all the features
# by default, but you can alter this threshold if you want to
sel_ = SelectFromModel(RandomForestRegressor(n_estimators=100))
sel_.fit(X_train.fillna(0), y_train)  # NaNs imputed with 0 for the forest fit
# -
# let's make a list and count the selected features
selected_feat = X_train.columns[(sel_.get_support())]
len(selected_feat)
# +
# and now, let's compare the amount of selected features
# with the amount of features which importance is above the
# mean importance, to make sure we understand the output of
# sklearn
print('total features: {}'.format((X_train.shape[1])))
print('selected features: {}'.format(len(selected_feat)))
print('features with coefficients greater than the mean coefficient: {}'.format(
    np.sum(sel_.estimator_.feature_importances_ > sel_.estimator_.feature_importances_.mean())))
# -
# ### 2. Algorithm Testing
# #### 2.1 Linear Regression
# ##### 2.1.1 Linear Regression simple
#Train the Model and predict
# Baseline: ordinary least squares on the selected features.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lm = LinearRegression()
lm.fit(X_train[selected_feat],y_train)
lm_predictions = lm.predict(X_test[selected_feat])
#print RMSE (root mean squared error on the held-out countries)
print ('Simple Regression RMSE is', np.sqrt(mean_squared_error(y_test, lm_predictions)))
# ##### 2.1.2 Linear Lasso
#Train the Model and predict
# L1-regularised linear regression on the selected features.
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_log_error
# BUG FIX: the original rebound the class name (`Lasso = Lasso()`), shadowing
# sklearn's Lasso and making any later instantiation fail.  Use a distinct
# instance name; `Lasso_predictions` keeps its name for the comparison cells.
lasso_reg = Lasso()
lasso_reg.fit(X_train[selected_feat],y_train)
Lasso_predictions = lasso_reg.predict(X_test[selected_feat])
#print RMSE (the metric computed is root mean squared error, not RMSLE)
print ('Lasso Regression RMSLE is', np.sqrt(mean_squared_error(y_test, Lasso_predictions)))
# ##### 2.1.1 Linear Ridge
#Train the Model and predict
# L2-regularised linear regression on the selected features.
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_log_error
# BUG FIX: the original rebound the class name (`Ridge = Ridge()`), shadowing
# sklearn's Ridge class.  Use a distinct instance name instead.
ridge_reg = Ridge()
ridge_reg.fit(X_train[selected_feat],y_train)
Ridge_predictions = ridge_reg.predict(X_test[selected_feat])
#print RMSE (the metric computed is root mean squared error, not RMSLE)
print ('Ridge Regression RMSLE is', np.sqrt(mean_squared_error(y_test, Ridge_predictions)))
# 2.2 Algorithm: **Boosted Decision Tree Regressor**
#Train the Model and predict
# NOTE(review): this is a plain (un-boosted) DecisionTreeRegressor despite
# the "Boosted" heading and print label — confirm which was intended.
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
Tree = DecisionTreeRegressor()
Tree.fit(X_train[selected_feat],y_train)
Tree_predictions = Tree.predict(X_test[selected_feat])
#print RMSE
print ('Boosted Decision Tree Regression RMSE is', np.sqrt(mean_squared_error(y_test, Tree_predictions)))
# 2.3 Algorithm: **Random Forest Regressor**
#Train the Model and predict
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
Forest = RandomForestRegressor()
Forest.fit(X_train[selected_feat],y_train)
Forest_predictions = Forest.predict(X_test[selected_feat])
#print RMSE
print ('Random Forest Regression RMSE is', np.sqrt(mean_squared_error(y_test, Forest_predictions)))
# 2.4 Algorithm: **Bayesian Linear Regressor**
#Train the Model and predict
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error
Bayesian = BayesianRidge()
Bayesian.fit(X_train[selected_feat],y_train)
Bayesian_predictions = Bayesian.predict(X_test[selected_feat])
#print RMSE
print ('Bayesian Ridge Regression RMSE is', np.sqrt(mean_squared_error(y_test, Bayesian_predictions)))
# 2.5 Algorithm **XGBoost Regressor**
# +
import xgboost as xgb
# set the seed for reproducibility
seed_val = 1000000000
np.random.seed(seed_val)
# build initial model using all the features
# Gradient-boosted trees: 500 rounds, depth 4, learning rate 0.05, 10 threads.
xgbregressor = xgb.XGBRegressor(
    nthread=10, max_depth=4, n_estimators=500, learning_rate=0.05)
xgbregressor.fit(X_train[selected_feat], y_train)
xgbregressor_predictions=xgbregressor.predict(X_test[selected_feat])
# -
#print RMSE
print ('XGB regressor RMSE is', np.sqrt(mean_squared_error(y_test, xgbregressor_predictions)))
# ### 3. Compare and chose best model
# Side-by-side RMSE comparison of every fitted model (lower is better).
print ('Simple Regression RMSE is', np.sqrt(mean_squared_error(y_test, lm_predictions)))
print ('Lasso Regression RMSLE is', np.sqrt(mean_squared_error(y_test, Lasso_predictions)))
print ('Ridge Regression RMSLE is', np.sqrt(mean_squared_error(y_test, Ridge_predictions)))
print ('Boosted Decision Tree Regression RMSE is', np.sqrt(mean_squared_error(y_test, Tree_predictions)))
print ('Random Forest Regression RMSE is', np.sqrt(mean_squared_error(y_test, Forest_predictions)))
print ('Bayesian Ridge Regression RMSE is', np.sqrt(mean_squared_error(y_test, Bayesian_predictions)))
print ('XGB regressor RMSE is', np.sqrt(mean_squared_error(y_test, xgbregressor_predictions)))
# Clearly, **LASSO REGRESSION** is the one producing the best results
# ### 5. Improve the model
#
# Afer testing several alpha values, I found the best to be 1.8
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
#Improve model by modifying alpha
# 10-fold CV over 8 alphas log-spaced between 1e-5 and 1e2, scored by
# negative MSE; refit=True retrains on the whole train set at the best alpha.
lasso=Lasso()
search=GridSearchCV(estimator=lasso,param_grid={'alpha':np.logspace(-5,2,8)},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10)
search.fit(X_train[selected_feat], y_train)
# -
search.best_params_
# +
# Refit the Lasso at the alpha chosen by the grid search above.
# FIX: the original hard-coded alpha=0.11, which matched neither the text
# ("best ... 1.8") nor search.best_params_; use the searched value directly.
Lasso_i = Lasso(alpha=search.best_params_['alpha'])
Lasso_i.fit(X_train[selected_feat],y_train)
Lasso__ipredictions = Lasso_i.predict(X_test[selected_feat])
print ('Lasso improved RMSE is', np.sqrt(mean_squared_error(y_test, Lasso__ipredictions)))
# -
#Check for improvement
print ('Lasso Regression RMSLE is', np.sqrt(mean_squared_error(y_test, Lasso_predictions)))
print ('Lasso improved RMSE is', np.sqrt(mean_squared_error(y_test, Lasso__ipredictions)))
print ('RMSE improvement is',(np.sqrt(mean_squared_error(y_test, Lasso_predictions)) - np.sqrt(mean_squared_error(y_test, Lasso__ipredictions))))
# ### 6. Save the model for future predictions
# Save the tuned Lasso model for future predictions.
# BUG FIX: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21
# and removed in 0.23, so that import raises ImportError on modern versions;
# importing dump/load directly from joblib (as the next line already did)
# is the supported replacement.
from joblib import dump, load
dump(Lasso_i,'Lassoi_i.joblib')
# ### 7. Verification that the model created will generalize well
# +
def print_metrics(y_train, y_test, n_parameters):
    """Print regression metrics (MSE, RMSE, MAE, median AE, R^2, adjusted R^2).

    NOTE(review): despite the names, the arguments are used as
    (y_true, y_pred) — the call below passes (y_test, y_score).
    n_parameters is the model size used for the adjusted-R^2 correction.
    Relies on the module-level alias sklm (sklearn.metrics).
    """
    ## First compute R^2 and the adjusted R^2
    r2 = sklm.r2_score(y_train, y_test)
    r2_adj = r2 - (n_parameters - 1)/(y_train.shape[0] - n_parameters) * (1 - r2)
    ## Print the usual metrics and the R^2 values
    print('Mean Square Error = ' + str(sklm.mean_squared_error(y_train, y_test)))
    print('Root Mean Square Error = ' + str(math.sqrt(sklm.mean_squared_error(y_train, y_test))))
    print('Mean Absolute Error = ' + str(sklm.mean_absolute_error(y_train, y_test)))
    print('Median Absolute Error = ' + str(sklm.median_absolute_error(y_train, y_test)))
    print('R^2 = ' + str(r2))
    print('Adjusted R^2 = ' + str(r2_adj))
# Predictions of the saved Lasso model on the test countries.
y_score = Lasso_i.predict(X_test[selected_feat])
print_metrics(y_test, y_score, 40)
# +
#Lets look at the residuals plot
## Calculate residuals (y - yhat) on the test set.
# BUG FIX: the original computed `y_train - y_test`; those Series have
# disjoint indices (train and test hold different countries), so pandas
# alignment produced an all-NaN column.  Residuals are the observed test
# values minus the model predictions (y_score from the cell above).
df['residuals'] = y_test - y_score
# +
def resid_qq(y_train, y_test):
    """Q-Q Normal plot of the test residuals.

    NOTE(review): `y_train` is unused and `y_score` is read from the
    enclosing notebook scope, not from the parameters; the title/axis
    labels describe a residuals-vs-predicted plot, but probplot draws a
    Q-Q plot — confirm the intended labels.
    """
    ## first compute vector of residuals.
    resids = np.subtract(y_test, y_score)
    ## now make the residual plots
    ss.probplot(resids, plot = plt)
    plt.title('Residuals vs. predicted values')
    plt.xlabel('Predicted values')
    plt.ylabel('Residual')
resid_qq(y_train, y_test)
# -
# As with the histogram, the Q-Q Normal plot indicates the residuals are close to Normally distributed, show some skew (deviation from the straight line). This is particularly for large residuals.
# +
def hist_resids(y_test, y_score):
    """Histogram (with KDE) of residuals y_test - y_score."""
    ## first compute vector of residuals.
    resids = np.subtract(y_test, y_score)
    ## now make the residual plots
    sns.distplot(resids)
    plt.title('Histogram of residuals')
    plt.xlabel('Residual value')
    plt.ylabel('count')
hist_resids(y_test, y_score)
# -
# This plot looks reasonable. The residual values are distributed near both sides of 0.
# +
def resid_plot(y_test, y_score):
    """Scatter of residuals against predicted values (no fitted line)."""
    ## first compute vector of residuals.
    resids = np.subtract(y_test, y_score)
    ## now make the residual plots
    sns.regplot(y_score, resids, fit_reg=False)
    plt.title('Residuals vs. predicted values')
    plt.xlabel('Predicted values')
    plt.ylabel('Residual')
resid_plot(y_test, y_score)
# -
# As expected, when plotting residuals we cannot see any clear pattern. Otherwise it would mean there is a hidden pattern we could not identify.
#
# We can see, however, some outliers mainly positive.
| 3. Algorithm Selection and Fine tunning.ipynb |
# + [markdown]
"""
MSLP and 1000-500 hPa Thickness with High and Low Symbols
=========================================================
Plot MSLP, calculate and plot 1000-500 hPa thickness, and plot H and L markers.
Beyond just plotting a few variables, in the example we use functionality
from the scipy module to find local maximum and minimimum values within the
MSLP field in order to plot symbols at those locations.
"""
# -
# Imports
# +
from datetime import datetime
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
from metpy.units import units
from netCDF4 import num2date
from scipy.ndimage import gaussian_filter
from siphon.catalog import TDSCatalog
# -
# Function for finding and plotting max/min points
def plot_maxmin_points(lon, lat, data, extrema, nsize, symbol, color='k',
                       plotValue=True, transform=None, ax=None):
    """
    Find and plot relative maxima or minima of a 2D field.

    Can be used to plot an H for maximum values (e.g., high pressure) and an L
    for minimum values (e.g., low pressure). It is best to use filtered data to
    obtain a synoptic-scale max/min value.

    lon = plotting longitude values (2D)
    lat = plotting latitude values (2D)
    data = 2D data that you wish to plot the max/min symbol placement
    extrema = either 'max' for maximum values or 'min' for minimum values
    nsize = size of the grid box used to filter the max/min values so that a
            reasonable number of symbols are plotted
    symbol = string to be placed at the location of each max/min value
    color = string matplotlib colorname for the symbol (and numeric value, if plotted)
    plotValue = Boolean (True/False) of whether to plot the numeric value of max/min point
    transform = coordinate system of the lon/lat values (e.g. a cartopy CRS)
    ax = axes object to plot onto, defaults to current axes

    The max/min symbol will be plotted only within the bounding frame
    (i.e., clip_on=True, clip_box=ax.bbox)
    """
    import matplotlib.pyplot as plt
    # scipy.ndimage.filters is a deprecated namespace; import from
    # scipy.ndimage directly.
    from scipy.ndimage import maximum_filter, minimum_filter
    if ax is None:
        ax = plt.gca()
    if (extrema == 'max'):
        data_ext = maximum_filter(data, nsize, mode='nearest')
    elif (extrema == 'min'):
        data_ext = minimum_filter(data, nsize, mode='nearest')
    else:
        raise ValueError('Value for hilo must be either max or min')
    # Grid points where the filtered field equals the raw field are the local
    # extrema within each nsize-sized neighborhood.
    mxy, mxx = np.where(data_ext == data)
    for i in range(len(mxy)):
        ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]], symbol, color=color, size=24,
                clip_on=True, clip_box=ax.bbox, horizontalalignment='center',
                verticalalignment='center', transform=transform)
        # Honor plotValue (previously documented but ignored); default True
        # preserves the original behavior of always printing the value.
        if plotValue:
            # np.int was removed in NumPy 1.24; use the builtin int instead.
            ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]],
                    '\n' + str(int(data[mxy[i], mxx[i]])),
                    color=color, size=12, clip_on=True, clip_box=ax.bbox, fontweight='bold',
                    horizontalalignment='center', verticalalignment='top', transform=transform)
# Get NARR data
# +
# Specify our date/time of product desired
dt = datetime(1999, 1, 3, 0)
# Assemble our URL to the NCEI THREDDS Data Server catalog,
# and access our desired dataset within via NCSS
# NOTE: requires network access to the NCEI THREDDS server.
base_url = 'https://www.ncei.noaa.gov/thredds/model-narr-a-files/'
cat = TDSCatalog(f'{base_url}{dt:%Y%m}/{dt:%Y%m%d}/catalog.xml')
ncss = cat.datasets[f'narr-a_221_{dt:%Y%m%d}_{dt:%H}00_000.grb'].subset()
# Create a NCSS query to add specifications to
query = ncss.query()
query.all_times()
query.add_lonlat()
query.accept('netcdf')
query.variables('Pressure_reduced_to_MSL_msl',
                'Geopotential_height_isobaric')
# Obtain the data we want to query for
data = ncss.get_data(query)
# -
# Extract data into variables
# +
# Grab pressure levels
# NOTE(review): variable names such as 'isobaric1' and 'time2' are specific
# to this dataset's netCDF layout; confirm against the response if the query
# or dataset changes.
plev = list(data.variables['isobaric1'][:])
# Grab lat/lons and make all lons 0-360
lats = data.variables['lat'][:]
lons = data.variables['lon'][:]
lons[lons < 0] = 360 + lons[lons < 0]
# Grab valid time and get into datetime format
time = data['time2']
vtime = num2date(time[:].squeeze(), units=time.units)
# Grab MSLP and smooth, use MetPy Units module for conversion
emsl_var = data.variables['Pressure_reduced_to_MSL_msl']
EMSL = units.Quantity(emsl_var[:], emsl_var.units).to('hPa')
mslp = gaussian_filter(EMSL[0], sigma=3.0)
# Grab pressure level data
hght_1000 = data.variables['Geopotential_height_isobaric'][0, plev.index(1000)]
hght_500 = data.variables['Geopotential_height_isobaric'][0, plev.index(500)]
# Calculate and smooth 1000-500 hPa thickness
thickness_1000_500 = gaussian_filter(hght_500 - hght_1000, sigma=3.0)
# -
# Set map and data projections for use in mapping
# +
# Set projection of map display
mapproj = ccrs.LambertConformal(central_latitude=45., central_longitude=-100.)
# Set projection of data
dataproj = ccrs.PlateCarree()
# Grab data for plotting state boundaries
states_provinces = cfeature.NaturalEarthFeature(
    category='cultural',
    name='admin_1_states_provinces_lakes',
    scale='50m',
    facecolor='none')
# -
# Create figure and plot data
# +
fig = plt.figure(1, figsize=(17., 11.))
ax = plt.subplot(111, projection=mapproj)
# Set extent and plot map lines
ax.set_extent([-145., -70, 20., 60.], ccrs.PlateCarree())
ax.coastlines('50m', edgecolor='black', linewidth=0.75)
ax.add_feature(states_provinces, edgecolor='black', linewidth=0.5)
# Plot thickness with multiple colors
# Levels are split into three bands so the 5400 m contour gets its own
# distinct color ('b') for emphasis.
clevs = (np.arange(0, 5400, 60),
         np.array([5400]),
         np.arange(5460, 7000, 60))
colors = ('tab:blue', 'b', 'tab:red')
kw_clabels = {'fontsize': 11, 'inline': True, 'inline_spacing': 5, 'fmt': '%i',
              'rightside_up': True, 'use_clabeltext': True}
for clevthick, color in zip(clevs, colors):
    cs = ax.contour(lons, lats, thickness_1000_500, levels=clevthick, colors=color,
                    linewidths=1.0, linestyles='dashed', transform=dataproj)
    plt.clabel(cs, **kw_clabels)
# Plot MSLP every 4 hPa
clevmslp = np.arange(800., 1120., 4)
cs2 = ax.contour(lons, lats, mslp, clevmslp, colors='k', linewidths=1.25,
                 linestyles='solid', transform=dataproj)
plt.clabel(cs2, **kw_clabels)
# Use definition to plot H/L symbols
plot_maxmin_points(lons, lats, mslp, 'max', 50, symbol='H', color='b', transform=dataproj)
plot_maxmin_points(lons, lats, mslp, 'min', 25, symbol='L', color='r', transform=dataproj)
# Put on some titles
plt.title('MSLP (hPa) with Highs and Lows, 1000-500 hPa Thickness (m)', loc='left')
plt.title(f'VALID: {vtime}', loc='right')
| pages/gallery/HILO_Symbol_Plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import time
import os, sys, traceback
import cPickle as pickle
# %matplotlib inline
import matplotlib.pylab as plt
import numpy as np
from sigvisa import Sigvisa
from sigvisa.infer.run_mcmc import run_open_world_MH
from sigvisa.infer.coarse_to_fine_init import ModelSpec, EventRunSpec, TimeRangeRunSpec, do_coarse_to_fine, initialize_from, do_inference, initialize_sg
from sigvisa.graph.sigvisa_graph import SigvisaGraph
from sigvisa.treegp.gp import GPCov
from sigvisa.graph.region import Region
from sigvisa.source.event import get_event, Event
from sigvisa.infer.event_birthdeath import ev_template_birth_helper, ev_template_death_helper, ev_bare_death_move
from sigvisa.models.ttime import tt_predict
# +
# build a timespec SG with the first hour of Wells validation data
def build_sg(stas, stime, etime):
    """Build a SigvisaGraph over [stime, etime] for the given stations.

    All inference settings (run id, sampling rate, band, phases, region
    bounds, rate-training window) are fixed to the values used throughout
    this notebook.
    """
    run_ids = (18,)
    sample_hz = 10.0
    ua_rate = 4e-6
    band_list = ["freq_0.8_4.5"]
    phase_list = ["Pn", "Sn", "Lg", "Pg"]

    rs = TimeRangeRunSpec(sites=stas, start_time=stime, end_time=etime)

    inference_region = Region(lons=(-126, -100), lats=(32, 49),
                              times=(rs.start_time, rs.end_time),
                              rate_bulletin="isc",
                              min_mb=2.0,
                              rate_train_start=1167609600,
                              rate_train_end=1199145600)

    spec = ModelSpec(template_model_type="gpparam",
                     #wiggle_family="db4_2.0_3_20.0",
                     wiggle_family="iid",
                     wiggle_model_type="gplocal+lld+none",
                     uatemplate_rate=ua_rate,
                     max_hz=sample_hz,
                     phases=phase_list,
                     bands=band_list,
                     runids=run_ids,
                     inference_region=inference_region,
                     dummy_fallback=False,
                     raw_signals=True,
                     hack_param_constraint=True,
                     vert_only=True)
    return rs.build_sg(spec)
#sg = build_sg(["NV01"], 1203625979, 1203626019)
#sg = build_sg(["NV01"], 1203626000, 1203626010)
sg = build_sg(["NV01"], 1203625000, 1203627010)
# +
def plot_wn(wn):
    """Plot a waveform node with predicted signal, arrival times, and stddev bands."""
    fig = plt.figure(figsize=(28, 4))
    axes = fig.add_subplot(111)
    wn.plot(ax=axes, plot_pred=True, plot_atimes=True, plot_stddev=True)  # , xlim=xlim
    axes.set_ylim([-2000, 2000])
# Grab the (first) waveform node for station NV01 from the graph.
wn = sg.station_waves["NV01"][0]
# Event of interest for the validation period.
target_evid = 1051106
target_ev = get_event(evid=target_evid)
#target_ev = Event(lon=-114.81 ,lat =41.17 , time= 1203628520.4, depth=1.6 , mb=2.8 )
plot_wn(wn)
# -
# Sanity-check the legacy vs. current log-probability implementations, and
# the cached stepwise likelihood terms, before adding any event...
print wn.log_p_old()
wn.cached_logp = None
print wn.log_p()
print np.sum(wn._cached_stepwise_ells)
# ...and again after adding the target event to the graph.
evnodes = sg.add_event(target_ev)
print wn.log_p()
print np.sum(wn._cached_stepwise_ells)
wn.cached_logp = None
print wn.log_p_old()
# Run MCMC with event birth/death disabled: event parameter moves and
# template birth/death + moves only.
run_open_world_MH(sg, steps=500,
                  enable_event_openworld=False,
                  enable_event_moves=True,
                  enable_phase_openworld=False,
                  enable_template_openworld=True,
                  enable_template_moves=True)
# +
from sigvisa.utils.array import index_to_time, time_to_index, time_to_index_offset
from sigvisa.ssms_c import TransientCombinedSSM, ARSSM, CompactSupportSSM
TSSM_NOISE_PADDING=1e-6
def transient_ssm(self, arrivals=None,
                  parent_values=None,
                  save_components=True):
    """Assemble the transient combined state-space model for a waveform node.

    Builds one component per signal source: the background-noise AR model,
    plus, for each arriving (eid, phase), some combination of wavelet SSM,
    iid multiplicative-noise SSM, and deterministic envelope/mean components.
    Returns a TransientCombinedSSM over those components.

    arrivals -- optional subset of (eid, phase) arrivals to include
                (defaults to all arrivals at this node)
    parent_values -- optional overrides for template parent values
    save_components -- if True, record the component layout on
                       self.tssm_components for later inspection
    """
    # we allow specifying the list of parents in order to generate
    # signals with a subset of arriving phases (used e.g. in
    # wiggle extraction)
    if arrivals is None:
        arrivals = self.arrivals()
    arrivals = list(arrivals)
    n = len(arrivals)
    sidxs = np.empty((n,), dtype=int)
    # NOTE(review): envs is allocated but never filled in this function.
    envs = [None] * n
    # Floor for the log-envelope; nm_env.c appears to be the noise-model
    # level -- confirm.
    min_logenv = max(-7.0, np.log(self.nm_env.c)-3)
    if self.wavelet_basis is not None:
        # The basis tuple has two historical formats; tolerate both.
        try:
            (start_idxs, end_idxs, identities, basis_prototypes, level_sizes, n_steps) = self.wavelet_basis
        except ValueError:
            (start_idxs, end_idxs, identities, basis_prototypes, level_sizes, n_steps), _ = self.wavelet_basis
        n_basis = len(start_idxs)
    else:
        n_steps = 0
        n_basis = 0
    # Background noise covers the full signal length.
    components = [(self.noise_arssm, 0, self.npts, None)]
    tssm_components = [(None, None, None, 0, self.npts, "noise"),]
    # TODO: can be smarter about this, and only regenerate the TSSM when arrival_time changes.
    # Any other template param change can be implemented by just updating the scale vector in
    # the current TSSM to the new envelope.
    for (i, (eid, phase)) in enumerate(arrivals):
        v, tg = self.get_template_params_for_arrival(eid=eid, phase=phase, parent_values=parent_values)
        start_idx, offset = time_to_index_offset(v['arrival_time'], self.st, self.srate)
        sidxs[i] = start_idx
        # Skip arrivals entirely outside the observed window.
        if start_idx >= self.npts:
            continue
        env = np.exp(tg.abstract_logenv_raw(v, idx_offset=offset, srate=self.srate, min_logenv=min_logenv))
        if start_idx + len(env) < 0:
            continue
        wssm = self.arrival_ssms[(eid, phase)]
        npts = min(len(env), n_steps)
        if self.is_env:
            try:
                wiggle_std = np.abs(v['mult_wiggle_std'])
            except KeyError:
                wiggle_std = 0.5
        else:
            # in the raw signal case, wiggle std is unidentifiable with coda_height.
            wiggle_std = 1.0
        if (wssm is not None) and self.hack_wavelets_as_iid:
            assert (not self.is_env)
            # Approximate the wavelet SSM by its marginal mean/std treated as iid.
            wavelet_mean = wssm.mean_obs(npts)
            wavelet_std = np.sqrt(wssm.obs_var(npts))
            # Debug visualization of the wavelet marginals.
            plt.figure()
            plt.plot(wavelet_mean)
            plt.plot(wavelet_mean + wavelet_std)
            plt.plot(wavelet_mean - wavelet_std)
            # NOTE(review): marginal_stds aliases env, so the in-place scaling
            # below also scales env[:npts]; pred_mean two lines down therefore
            # uses the *scaled* envelope. Confirm this is intended rather than
            # env.copy().
            marginal_stds = env
            marginal_stds[:npts] *= wavelet_std
            components.append((self.iid_arssm, start_idx, len(env), marginal_stds))
            tssm_components.append((eid, phase, marginal_stds, start_idx, len(env), "multnoise"))
            pred_mean = wavelet_mean[:npts] * env[:npts]
            components.append((None, start_idx, npts, pred_mean))
            tssm_components.append((eid, phase, pred_mean, start_idx, npts, "pred_mean"))
        else:
            if wssm is not None:
                components.append((wssm, start_idx, npts, env*wiggle_std))
                tssm_components.append((eid, phase, env*wiggle_std, start_idx, npts, "wavelet"))
            if len(env) > npts:
                # Envelope tail beyond the wavelet support: iid noise only.
                n_tail = len(env)-npts
                mn_scale = env[npts:] * wiggle_std
                components.append((self.iid_arssm, start_idx+npts, len(env)-npts, mn_scale))
                tssm_components.append((eid, phase, mn_scale, start_idx+npts, len(env)-npts, "multnoise"))
        if self.is_env:
            # Envelope signals additionally get the deterministic template mean.
            components.append((None, start_idx, len(env), env))
            tssm_components.append((eid, phase, env, start_idx, len(env), "template"))
    if save_components:
        self.tssm_components=tssm_components
    return TransientCombinedSSM(components, TSSM_NOISE_PADDING)
# +
# Enable the iid-wavelet hack and build the transient SSM with the function
# defined above (monkeypatch style: called with the node as `self`).
wn.hack_wavelets_as_iid=True
wn._set_cssm_priors_from_model()
tssm = transient_ssm(wn)
# -
#plot_wn(wn)
# Time a filtering pass.
# NOTE(review): `d` (presumably the observed signal vector) is not defined
# in this excerpt; it must come from an earlier cell -- confirm.
t0 = time.time()
lp = tssm.run_filter(d)
t1 = time.time()
print lp
print t1-t0
#plot_wn(wn)
# Second pass: also extract the predictive mean and std of the observations.
t0 = time.time()
lp = tssm.run_filter(d)
t1 = time.time()
pred_mean = tssm.mean_obs(wn.npts)
pred_std = np.sqrt(tssm.obs_var(wn.npts))
print lp
print t1-t0
plt.plot(pred_mean)
plt.plot(pred_mean + 2*pred_std)
# Reload a previously saved graph and inspect its log-probability breakdown.
with open("/home/dmoore/python/sigvisa/notebooks/thesis/corr_uatemplate_test.sg", 'rb') as f:
    sg = pickle.load(f)
sg.current_log_p_breakdown()
print target_ev
# Quick benchmark: invert a random 150x150 symmetric positive-definite matrix.
A = np.random.randn(150, 150)
M = np.dot(A, A.T)
t0 = time.time()
Z = np.linalg.inv(M)
t1 = time.time()
print t1-t0
| notebooks/thesis/accelerating_signal_likelihoods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:astroconda]
# language: python
# name: conda-env-astroconda-py
# ---
# # `pysynphot2d` Demo
#
# This notebook is a demonstration of the `pysynphot2d` wrapper which vectorizes the functionality of `pysynphot` so that it supports 2D spectra.
# %matplotlib inline
import numpy as np
import pysynphot as ps
import matplotlib.pyplot as plt
from pysynphot2d import psp2d
# ## `ArraySpectrum` to `ArraySpectra`
#
# Just pass a 1D wavelength array and 2D flux array to `psp2d.ArraySpectra` as you would pass a 1D flux array to `pysynphot.ArraySpectrum`.
#
# Here's a simple example with some toy data:
# +
# Dummy data
# 5 toy spectra, each sampled at the same 20 wavelengths from 4000 to 8000.
wave = np.linspace(4000,8000,20)
flux = np.arange(100).reshape(5,20)
toy2D = psp2d.ArraySpectra(wave, flux)
# Take a look
for spec in toy2D.spectra:
    plt.plot(spec.wave, spec.flux)
# -
# Now let's do some method calls as we would for a `pysynphot.ArraySpectrum` object.
# Sample each spectrum at the given wavelength
sampled_spectra = toy2D.sample(6789)
print('Samples:',sampled_spectra)
# +
# Renormalize each spectrum to the given magnitude (Vega magnitude 17 in
# the Johnson V band).
V_band = ps.ObsBandpass('johnson,v')
renormed_spectra = toy2D.renorm(17, 'vegamag', V_band)
# Take a look
for spec in renormed_spectra:
    plt.plot(spec.wave, spec.flux)
# -
# That's it! Try your favorite `pysynphot` methods on your new `psp2d.ArraySpectra` objects (and let me know if it breaks!).
# ## `Observation` to `Observations`
#
# Just pass your `psp2d.ArraySpectra` object to `psp2d.Observations` along with a `pysynphot.ObsBandpass` as you would pass `pysynphot.ArraySpectrum` to `pysynphot.Observation`:
# +
# Dummy observations: fold each toy spectrum through the Johnson V bandpass.
obs2d = psp2d.Observations(toy2D, V_band)
# Take a look
for spec in obs2d.spectra:
    plt.plot(spec.wave, spec.flux)
# -
# That's it! Enjoy and let me know when it breaks!
| psp2d_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3
# language: python
# name: python3
# ---
# # The Ax Benchmarking Suite
#
# Ax makes it easy to evaluate performance of Bayesian optimization methods on synthetic problems through the use of benchmarking tools. This notebook illustrates how the benchmark suite can be used to easy test new methods on custom problems.
# ## 1. Define a problem
#
# The first step is to define the benchmark problem. There are a collection of built-in useful benchmark problems, such as the classic Hartmann 6 optimization test problem:
from ax.benchmark.benchmark_problem import hartmann6
# Or you can create a new problem. Benchmark problems can be defined by creating a `BenchmarkProblem` object, as is done here for the constrained problem from Gramacy et al. (2016).
#
# This entails defining a search space, optimization config, and the true optimal value of the benchmark.
# +
import numpy as np
from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.metrics.noisy_function import NoisyFunctionMetric
# Create a Metric object for each function used in the problem
# Create a Metric object for each function used in the problem
class GramacyObjective(NoisyFunctionMetric):
    """Objective of the Gramacy et al. (2016) problem: f(x) = x1 + x2 (minimized)."""
    def f(self, x: np.ndarray) -> float:
        return x.sum()
class GramacyConstraint1(NoisyFunctionMetric):
    """Nonlinear constraint c1(x) = 1.5 - x1 - 2*x2 - 0.5*sin(2*pi*(x1^2 - 2*x2));
    used as an outcome constraint c1(x) <= 0."""
    def f(self, x: np.ndarray) -> float:
        return 1.5 - x[0] - 2 * x[1] - 0.5 * np.sin(2 * np.pi * (x[0] ** 2 - 2 * x[1]))
class GramacyConstraint2(NoisyFunctionMetric):
    """Quadratic constraint c2(x) = x1^2 + x2^2 - 1.5; used as an outcome
    constraint c2(x) <= 0."""
    def f(self, x: np.ndarray) -> float:
        return x[0] ** 2 + x[1] ** 2 - 1.5
# Create the search space and optimization config
# The problem is defined on the two-dimensional unit box.
search_space = SearchSpace(
    parameters=[
        RangeParameter(name="x1", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0),
        RangeParameter(name="x2", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0),
    ]
)
# When we create the OptimizationConfig, we can define the noise level for each metric.
optimization_config=OptimizationConfig(
    objective=Objective(
        metric=GramacyObjective(
            name="objective", param_names=["x1", "x2"], noise_sd=0.05
        ),
        minimize=True,
    ),
    # Both constraints are enforced as metric <= 0.
    outcome_constraints=[
        OutcomeConstraint(
            metric=GramacyConstraint1(name="constraint_1", param_names=["x1", "x2"], noise_sd=0.05),
            op=ComparisonOp.LEQ,
            bound=0,
            relative=False,
        ),
        OutcomeConstraint(
            metric=GramacyConstraint2(name="constraint_2", param_names=["x1", "x2"], noise_sd=0.2),
            op=ComparisonOp.LEQ,
            bound=0,
            relative=False,
        ),
    ],
)
# Create a BenchmarkProblem object; fbest is the true optimal value, used to
# score optimization performance.
gramacy_problem = BenchmarkProblem(
    name="Gramacy",
    fbest=0.5998,
    optimization_config=optimization_config,
    search_space=search_space,
)
# -
# ## 2. Define optimization methods
#
# The Bayesian optimization methods to be used in benchmark runs are defined as a `GenerationStrategy`, which is a list of model factory functions and a specification of how many iterations to use each model for.
#
# A GenerationStrategy can be defined using the built-in factory functions, like `get_sobol` and `get_GPEI`, or by constructing a custom model factory function. The factory function returns a ModelBridge object for the custom model (see documentation on creating custom models). Here we create a model factory function that returns a Botorch model:
# +
from ax.modelbridge.torch import TorchModelBridge
from ax.models.torch.botorch import BotorchModel
from ax.modelbridge.transforms.unit_x import UnitX
from ax.modelbridge.transforms.standardize_y import StandardizeY
def get_botorch_model(experiment, data, search_space):
    """Model factory: wrap a default BotorchModel in a TorchModelBridge."""
    botorch_model = BotorchModel()  # This can be any implementation of TorchModel
    bridge = TorchModelBridge(
        experiment=experiment,
        search_space=search_space,
        data=data,
        model=botorch_model,
        transforms=[UnitX, StandardizeY],
    )
    return bridge
# -
# We then construct a `GenerationStrategy` that begins with 10 points from a non-scrambled Sobol sequence (we disable scrambling so all methods begin with the same initialization) and then switches to Bayesian optimization (using the Botorch model default of GP with noisy expected improvement) for an additional 10 iterations.
# +
from ax.modelbridge.generation_strategy import GenerationStrategy, GenerationStep
def unscrambled_sobol(search_space):
    # NOTE(review): get_sobol is imported from ax.modelbridge.factory further
    # down in this file; the import executes before this factory is ever
    # called, so the name resolves at call time.
    return get_sobol(search_space, scramble=False)
# 10 unscrambled Sobol initialization arms (identical across methods),
# then 10 arms of Bayesian optimization with the Botorch model.
strategy1 = GenerationStrategy(
    name='GP+NEI',
    steps=[
        GenerationStep(model=unscrambled_sobol, num_arms=10),
        GenerationStep(model=get_botorch_model, num_arms=10),
    ],
)
# -
# The `get_botorch_model` factory function defined above is equivalent to using the built-in `get_GPEI` function, but was defined explicitly here to illustrate how custom models can be used in the benchmarking.
#
# We can also easily create purely (quasi-)random strategies for comparison:
# +
from ax.modelbridge.factory import get_sobol
# Purely quasi-random baseline: same unscrambled initialization, then
# scrambled Sobol for the remaining arms.
strategy2 = GenerationStrategy(
    name='Quasirandom',
    steps=[
        GenerationStep(model=unscrambled_sobol, num_arms=10),
        GenerationStep(model=get_sobol, num_arms=10),
    ],
)
# -
# ## 3. Run the benchmarks
#
# We now run the benchmarks, which using the BOBenchmarkingSuite object will run each of the supplied methods on each of the supplied problems. Note that this runs a real set of benchmarks and so will take several minutes to complete. Here we repeat each benchmark test 5 times; normally that would be increased to reduce variance in the results.
# +
from ax.benchmark.benchmark_suite import BOBenchmarkingSuite
b = BOBenchmarkingSuite()
# NOTE: runs every strategy on every problem (5 repeats each) -- this takes
# several minutes of real compute.
b.run(
    num_runs=5, # Each benchmark task is repeated this many times
    total_iterations=20, # The total number of iterations in each optimization
    batch_size=2, # Number of synchronous parallel evaluations
    bo_strategies=[strategy1, strategy2],
    bo_problems=[hartmann6, gramacy_problem],
)
# -
# ## 4. Generate Report
# Once the benchmark is finished running, we can generate a report that shows the optimization performance for each method, as well as the wall time spent in model fitting and in candidate generation by each method.
# +
from IPython.core.display import HTML
# Render the aggregate HTML report (optimization performance and wall time
# per method).
report = b.generate_report(include_individual=False)
HTML(report)
# -
# #### References
#
# Gramacy, <NAME>., Gray, <NAME>., Digabel, <NAME>., Lee, <NAME>., <NAME>., <NAME>., and <NAME>. Modeling an Augmented Lagrangian for Blackbox Constrained Optimization. _Technometrics_, 58(1): 1–11, 2016.
| tutorials/benchmarking_suite_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# ## __Data 5600: Hwk1b__
#
# <br>
#
# ### __Solutions__
#
#
# ---
#
# -
from scipy.stats import binom
# <br>
#
# #### <u>__Problem 3__</u>
#
# <br>
#
# <u>__Part a.__</u> We can use the CDF function to solve for this. Let $\theta$ be the probability that an engine works properly, then $q = 1 - \theta$.
#
# - If $X$ is the random variable that represents how many out of $n$ engines work properly, then $X \sim Binomial(n, \theta)$
#
# - For a four-engine plane this is:
#
# <br>
#
# $$
# \begin{align}
# P(X \ge 2) &= 1 - P(X < 2) \\
# &= 1 - P(X = 0) - P(X = 1) \\
# &= 1 - \binom{4}{0}\theta^{0} (1 - \theta)^{4} - \binom{4}{1}\theta(1 - \theta)^{3} \\
# &= 1 - (1-\theta)^{4} - 4 \theta (1 - \theta)^{3}
# \end{align}
# $$
#
# <br>
#
# If $\theta = 0.6$ then
#
# <br>
#
# $$
# \begin{align}
# 1 - P(X = 0) - P(X = 1) &= 1 - (0.4)^{4} - 4 (0.6) (0.4)^{3} \\
# &= 1 - 0.0256 - 4 \ast 0.6 \ast 0.064 \\
# &= 0.8208
# \end{align}
# $$
#
# <br>
# +
## Set theta (the probability of success)
θ = 0.8
## Calculate directly from above
# All three computations give P(X >= 2) = 0.9728 for θ = 0.8
# (the last row of the table in part c).
1. - (1-θ)**4 - 4 * (θ) * (1 - θ)**3
# -
## Calculate using the binomial pmf
1. - binom(n=4,p=θ).pmf(0) - binom(n=4, p=θ).pmf(1)
## Use the complement of the CDF
1. - binom(n=4, p=θ).cdf(1)
# <br>
#
# <u>__Part b.__</u> For a two-engine plane $X \sim Binomial(2, \theta)$
#
# <br>
#
# $$
# \begin{align}
# P(X \ge 1) &= 1 - P(X = 0) \\
# &= 1 - \binom{2}{0} (1 - \theta)^{2}
# \end{align}
# $$
#
# <br>
# +
## Set theta (the probability of success)
θ = 0.8
## Calculate directly from above
# All three computations give P(X >= 1) = 0.96 for θ = 0.8
# (the last row of the table in part c).
1. - (1 - θ)**2
# -
## Calculate using the binomial pmf
1 - binom(n=2, p=θ).pmf(0)
## Calculate using the complement of the CDF
1 - binom(n=2, p=θ).cdf(0)
# <br>
#
# <u>__Part c.__</u> Let's make a table to compare for different values of $\theta$
#
# <br>
#
# | <b>$\theta$</b> | 4 Engines | 2 Engines |
# |:---------------:|:---------:|:---------:|
# | 0.5 | 0.6875 | 0.7500 |
# | 0.6 | 0.8208 | 0.8400 |
# | 0.7 | 0.9163 | 0.9100 |
# | 0.8 | 0.9728 | 0.9600 |
#
# <br>
#
# ---
# #### <u>__Problem 6__</u>
#
# <br>
| Assignments/Hwk1/hwk1b-key.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xaKP5xD-2qch" colab_type="code" colab={}
import os
import time
import pandas as pd
# + id="KWi-Iplm3Kql" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="8cb55261-91e1-4bb8-ec49-aed3c666bc9d"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="I2sa2SOo8CC9" colab_type="code" colab={}
# Load the Jester annotation CSVs from Drive. Train/val are ';'-separated;
# the test file appears to list ids only. The "frames" column is filled in
# below from the extracted frame images.
train = pd.read_csv("/content/gdrive/My Drive/DashGC/Original/jester-v1-train.csv", sep = ';', names = ("video_id", "labels", "frames"))
val = pd.read_csv("/content/gdrive/My Drive/DashGC/Original/jester-v1-validation.csv", sep = ';', names = ("video_id", "labels", "frames"))
test = pd.read_csv("/content/gdrive/My Drive/DashGC/Original/jester-v1-test.csv", names = ("id", "frames"))
# + id="j1FGnawvALS6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6607f5b3-c173-45bf-ddaa-6bbc45001876"
# Decompress the compressed "Jester" dataset in Google Colab VM
start = time.time()
# !cat /content/gdrive/My\ Drive/DashGC/Original/20bn-jester-v1-* | tar -xz
stop = time.time()
print('Decompression took', round(((stop - start) / 60), 2), 'mins')
# + id="hecsRHj4Qe75" colab_type="code" colab={}
# Count the extracted frame images per video (one directory per video id).
train["frames"] = train["video_id"].map(lambda a: len(os.listdir("/content/20bn-jester-v1/" + str(a))))
val["frames"] = val["video_id"].map(lambda a: len(os.listdir("/content/20bn-jester-v1/" + str(a))))
test["frames"] = test["id"].map(lambda a: len(os.listdir("/content/20bn-jester-v1/" + str(a))))
# + id="JeJxvnsCQ7Xe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="cc613945-8cef-4afe-b2c6-0220de9f3b4f"
train.sort_values("video_id").head()
# + id="tM1aLi-eQ_oK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="7152b6cb-f639-420c-e16b-7631dbf23686"
val.sort_values("video_id").head()
# + id="_VOtR6pkSFx6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="fbcf6f1f-52a8-417a-f110-2b2870bfc95b"
test.sort_values("id").head()
# + id="I2GaM9DI808S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="6f4ae4e9-ae29-4e5a-f424-9485313360db"
train.describe()
# + id="580HRl-99hM6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="2eeffceb-d856-4381-b5e5-c9a1e8d5143f"
val.describe()
# + id="IawS52H29jHH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="782ec3a3-45c8-4f73-f77f-96548e5f8fcf"
test.describe()
# + id="9I-dpnIESHAi" colab_type="code" colab={}
# Persist the annotations (now including frame counts) back to Drive,
# sorted by id and without the pandas index column.
train.sort_values("video_id").to_csv("/content/gdrive/My Drive/DashGC/Reorganized/Train.csv", sep = ",", index = False, encoding = "utf-8")
val.sort_values("video_id").to_csv("/content/gdrive/My Drive/DashGC/Reorganized/Validation.csv", sep = ",", index = False, encoding = "utf-8")
test.sort_values("id").to_csv("/content/gdrive/My Drive/DashGC/Reorganized/Test.csv", sep = ",", index = False, encoding = "utf-8")
# + id="4gA4RyJIn_7z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 510} outputId="1f3ffd54-0f03-41a7-8116-fa5a60e51b23"
train.groupby(by = "labels").size()
# + id="gCkZqCo_oHf1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 510} outputId="d73c9b72-0524-4b8b-b397-75bc4febb479"
val.groupby(by = "labels").size()
# + id="99L6R_XGoOTY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 850} outputId="9d870c94-417d-40e5-d1ab-7a8cc8384d6d"
train.groupby(by = "frames").size()
# + id="LZtZsf-01tiT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="15fc21da-0049-499f-d975-5bc866eb942c"
val.groupby(by = "frames").size()
# + id="DqhZ6RI013qg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="72d11413-62db-45b1-9175-5a1eda56e6c8"
test.groupby(by = "frames").size()
# + id="HoVa_nkk15LC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="0df10355-eeef-4179-e235-07736801c259"
train.groupby(by = "frames").size().plot.bar()
# + id="avQ2ukd44bxp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="c115f49f-ee01-44aa-a3a8-e29ee3d4996b"
val.groupby(by = "frames").size().plot.bar()
# + id="ca1AZz9g4hyd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="fd8da1bc-5ebc-440d-dde6-6fb07e17fa82"
test.groupby(by = "frames").size().plot.bar()
# + id="51SYVmzT9Mbj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="35451767-10e6-4214-ee4c-7f410c48303a"
train.groupby(by = "frames").size().describe()
# + id="pm4XUrhr-aBO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="51326ea1-79c0-4a5a-8f9d-8bbc94c16a59"
val.groupby(by = "frames").size().describe()
# + id="ZEe0EEvg-elp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="3809dd8a-e576-425e-a088-59dfe5beec4f"
test.groupby(by = "frames").size().describe()
# + id="aF2djkvC-hak" colab_type="code" colab={}
| detector/notebooks/Explorer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
# # Functions
#
# Topics:
# 1. How to declare a function
# 2. Duck-typing in Julia
# 3. Mutating vs. non-mutating functions
# 4. Some higher order functions
# ## How to declare a function
# Julia gives us a few different ways to write a function. The first requires the `function` and `end` keywords
"""
    sayhi(name)

Print a greeting that interpolates `name` into the message.
"""
function sayhi(name)
    println("Hi $name, it's great to see you!")
end

"""
    f(x)

Return `x^2`; works for any `x` supporting `^` (numbers, matrices, strings).
"""
function f(x)
    x^2
end
# We can call either of these functions like this:
sayhi("C-3PO")
f(42)
# Alternatively, we could have declared either of these functions in a single line
# Single-line (assignment-form) definitions, equivalent to the ones above.
sayhi2(name) = println("Hi $name, it's great to see you!")
f2(x) = x^2
sayhi2("R2D2")
f2(42)
# Finally, we could have declared these as "anonymous" functions
# (the function object itself is unnamed; we simply bind it to a variable).
sayhi3 = name -> println("Hi $name, it's great to see you!")
f3 = x -> x^2
sayhi3("Chewbacca")
f3(42)
# ## Duck-typing in Julia
# *"If it quacks like a duck, it's a duck."* <br><br>
# Julia functions will just work on whatever inputs make sense. <br><br>
# For example, `sayhi` works on the name of this minor tv character, written as an integer...
sayhi(55595472)
# And `f` will work on a matrix.
A = rand(3, 3)
A
f(A)
# `f` will also work on a string like "hi" because `*` is defined for string inputs as string concatenation.
f("hi")
# On the other hand, `f` will not work on a vector. Unlike `A^2`, which is well-defined, the meaning of `v^2` for a vector, `v`, is not a well-defined algebraic operation.
v = rand(3)
# This won't work
f(v)
# ## Mutating vs. non-mutating functions
#
# By convention, functions followed by `!` alter their contents and functions lacking `!` do not.
#
# For example, let's look at the difference between `sort` and `sort!`.
#
v = [3, 5, 2]
sort(v)
v
# `sort(v)` returns a sorted array that contains the same elements as `v`, but `v` is left unchanged. <br><br>
#
# On the other hand, when we run `sort!(v)`, the contents of v are sorted within the array `v`.
sort!(v)
v
# ## Some higher order functions
#
# ### map
#
# `map` is a "higher-order" function in Julia that *takes a function* as one of its input arguments.
# `map` then applies that function to every element of the data structure you pass it. For example, executing
#
# ```julia
# map(f, [1, 2, 3])
# ```
# will give you an output array where the function `f` has been applied to all elements of `[1, 2, 3]`
# ```julia
# [f(1), f(2), f(3)]
# ```
map(f, [1, 2, 3])
# Here we've squared all the elements of the vector `[1, 2, 3]`, rather than squaring the vector `[1, 2, 3]`.
#
# To do this, we could have passed to `map` an anonymous function rather than a named function, such as
x -> x^3
# via
map(x -> x^3, [1, 2, 3])
# and now we've cubed all the elements of `[1, 2, 3]`!
# ### broadcast
#
# `broadcast` is another higher-order function like `map`. `broadcast` is a generalization of `map`, so it can do every thing `map` can do and more. The syntax for calling `broadcast` is the same as for calling `map`
broadcast(f, [1, 2, 3])
# and again, we've applied `f` (squared) to all the elements of `[1, 2, 3]` - this time by "broadcasting" `f`!
#
# Some syntactic sugar for calling `broadcast` is to place a `.` between the name of the function you want to `broadcast` and its input arguments. For example,
#
# ```julia
# broadcast(f, [1, 2, 3])
# ```
# is the same as
# ```julia
# f.([1, 2, 3])
# ```
f.([1, 2, 3])
# Notice again how different this is from calling
# ```julia
# f([1, 2, 3])
# ```
# We can square every element of a vector, but we can't square a vector!
# To drive home the point, let's look at the difference between
#
# ```julia
# f(A)
# ```
# and
# ```julia
# f.(A)
# ```
# for a matrix `A`:
A = [i + 3*j for j in 0:2, i in 1:3]
f(A)
# As before we see that for a matrix, `A`,
# ```
# f(A) = A^2 = A * A
# ```
#
# On the other hand,
B = f.(A)
# contains the squares of all the entries of `A`.
#
# This dot syntax for broadcasting allows us to write relatively complex compound elementwise expressions in a way that looks natural/closer to mathematical notation. For example, we can write
A .+ 2 .* f.(A) ./ A
# instead of
broadcast(x -> x + 2 * f(x) / x, A)
# and the two will perform exactly the same.
# ### Exercises
#
# #### 6.1
# Write a function `add_one` that adds 1 to its input.
"""
    add_one(x)

Return `x + 1`.
"""
add_one(x) = x + 1
# + deletable=false editable=false hide_input=true nbgrader={"checksum": "253b17dc2f3d3a58042fbc36042a0fd5", "grade": true, "grade_id": "cell-5119b9e9623c1cb7", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert add_one(1) == 2
# + deletable=false editable=false hide_input=true nbgrader={"checksum": "4e05440e19cd3606df11186d41d562bf", "grade": true, "grade_id": "cell-50f83d27187a2064", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert add_one(11) == 12
# -
# #### 6.2
# Use `map` or `broadcast` to increment every element of matrix `A` by `1` and assign it to a variable `A1`.
A1 = map(add_one, A)
@assert A1 == [2 3 4; 5 6 7; 8 9 10]
# #### 6.3
# Use the broadcast dot syntax to increment every element of matrix `A1` by `1` and store it in variable `A2`
A2 = add_one.(A1)
# + deletable=false editable=false hide_input=true nbgrader={"checksum": "3e3d797962df904deed0e7ee7782b69a", "grade": true, "grade_id": "cell-f3bd5479679a8fe1", "locked": true, "points": 0, "schema_version": 1, "solution": false}
@assert A2 == [3 4 5; 6 7 8; 9 10 11]
| 5 - Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variational Autoencoder
#
# In this exercise you will implement a variational autoencoder on the MNIST dataset.
# For that, we will use the Tensorflow library.
# So in the first step we will import tensorflow, numpy and matplotlib for plotting.
# This tutorial is inspired by https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/variational_autoencoder.py.
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
# To be reproducable, we will set the random seed for numpy and tensorflow:
np.random.seed(3234)
tf.set_random_seed(4345)
# Then we load the MNIST dataset and make the image binary (black or white).
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
images = (x_train >= 128).astype(int)
# The following function will create a fully connected layer.
# If you provide a weight dictionary, it will re-use the weights if the specified key is in the dictionary.
# If it is not yet present, it will add the weights.
def fully(x, neurons, weight_dict=None, weight_dict_key='fc'):
    """Apply a fully connected (dense) layer to `x`.

    Args:
        x: 2-D input tensor of shape (batch, features).
        neurons: number of output units.
        weight_dict: optional dict used to share parameters across calls; if
            '<key>_w' / '<key>_b' are present the stored variables are re-used,
            otherwise freshly created ones are stored under those keys.
        weight_dict_key: base name for the weight-dict entries.

    Returns:
        The pre-activation tensor `x @ w + b` of shape (batch, neurons).
    """
    w = b = None
    if weight_dict is not None:
        # Look up shared variables first.  The original always created fresh
        # tf.Variables and then discarded them when re-using, which left
        # dangling variables in the graph (still touched by
        # tf.global_variables_initializer()).
        w = weight_dict.get(weight_dict_key + '_w')
        b = weight_dict.get(weight_dict_key + '_b')
    if w is None:
        # Scaled random-normal initialization: stddev = sqrt(2 / fan_in).
        w = tf.Variable(tf.random_normal(shape=(int(x.shape[1]), int(neurons)), stddev=1. / tf.sqrt(int(x.shape[1]) / 2.)))
    if b is None:
        b = tf.Variable(tf.random_normal(shape=(neurons,), stddev=1. / tf.sqrt(neurons / 2.)))
    if weight_dict is not None:
        weight_dict[weight_dict_key + '_w'] = w
        weight_dict[weight_dict_key + '_b'] = b
    return tf.matmul(x, w) + b
# The following cells will create the architecture for the variational auto-encoder.
#
# We start by the encoder, which is basically a fully connected neural network with decreasing amount of neurons. In the current implementation, we have just one hidden layer with 500 neurons:
# +
# Widths of the encoder's hidden layers (one tanh layer of 500 units here).
hidden_neurons = [500]
# Encoder
# X holds a batch of binarized 28x28 MNIST images.
X = tf.placeholder(tf.float32, shape=[None, 28, 28])
with tf.name_scope("Encoder"):
    # Flatten to (batch, 784), then stack tanh fully connected layers.
    encoder = tf.reshape(X, [-1, 28*28])
    for neurons in hidden_neurons:
        encoder = tf.nn.tanh(fully(encoder, neurons))
# -
# The next part of the neural network are the latent variables.
# In the given example, we use a two dimensional normal distribution for our $q(z|x)$.
# For numerical stability, we learn the log of our $\sigma$ (z_log_std), since negative $\sigma$ values would result in nan values for the log of $\sigma$.
#
# Also note that we are using the reparametrization trick:
# +
# Dimensionality of the latent code z (2-D so it can be visualized directly).
latent_dim = 2
# Latent variable
with tf.name_scope("Normal_Sampling"):
    z_mean = fully(encoder, latent_dim)
    z_log_std = fully(encoder, latent_dim)
    # Reparametrization trick!
    eps = tf.random_normal(tf.shape(z_log_std), dtype=tf.float32, mean=0., stddev=1.0, name='epsilon')
    # z_log_std is log(sigma) -- the KL term in vae_loss uses exp(z_log_std)
    # as sigma -- so the sample is mean + sigma * eps.  BUG FIX: the original
    # computed tf.exp(z_log_std)/2 * eps, i.e. half the learned standard
    # deviation, which is inconsistent with the KL divergence term.
    z = z_mean + tf.exp(z_log_std) * eps
# -
# The following cell defines the decoder which is constructing from our random variable $z$ an $\hat{x}$ that should be close to the original $x$ input.
# We store the weight variables such that we can re-use them by the plot_generated function below.
# +
# Decoder
def construct_decoder(inp, decoder_weights):
    """Map latent codes `inp` back to flattened 28*28 images in [0, 1].

    Hidden-layer parameters are shared through `decoder_weights` (keyed by
    layer width), so repeated calls build decoders with identical weights.
    """
    hidden = inp
    for width in reversed(hidden_neurons):
        hidden = tf.nn.tanh(fully(hidden, width, decoder_weights, str(width)))
    return tf.sigmoid(fully(hidden, 28*28, decoder_weights, 'out'))
# Shared weight store: plot_generated() later rebuilds the same decoder on a
# different input tensor using these entries.
decoder_weights = {}
with tf.name_scope("Decoder"):
    decoder = construct_decoder(z, decoder_weights)
    # Reshape the flat (batch, 784) output back to image form.
    reconstructed = tf.reshape(decoder, [-1, 28, 28])
# -
# In the following function we define the loss of the variational autoencoder.
# As you have learned in the corresponding exercise sheet, the loss is defined by:
# $$E_{q(z)} \log p(x|z) - KL(q(z) || p(z))$$
# whereas the first part is the reconstruction loss and the second part is basically
# $$ KL(q||p_s) = -\log \sigma_1 + 0.5 * (\sigma_1^2 + \mu_1^2 -1) $$
#
# The reconstruction loss is the expectation of $\log p(x|z)$.
# Since the reconstructed value $\hat{x}$ is deterministically dependent on $z$, you can also see it as the expectation of $\log p(x|\hat{x})$.
# It can be shown that if we assume a Gaussian distribution, the reconstruction loss is basically the mean squared error (MSE).
# If you have a Bernoulli distribution, the reconstruction loss is the binary cross entropy.
# Since we have a binary input $x$ and we try to reconstruct a binary $\hat{x}$ such that it is close to $x$, we can assume that our distribution is Bernoulli.
#
# If you use gray scale or colored images as input, you should use the MSE instead.
# Define VAE Loss
def vae_loss(x_reconstructed, x_true, reconstruction_loss=True, kl_loss=True):
    """Build the VAE training loss (negative evidence lower bound).

    Args:
        x_reconstructed: decoder output in [0, 1], shape (batch, 784).
        x_true: binary ground-truth images, shape (batch, 784).
        reconstruction_loss: include the Bernoulli reconstruction term.
        kl_loss: include the KL(q(z|x) || N(0, 1)) regularizer.

    Returns:
        Scalar tensor: mean of the selected terms over the batch.
    """
    # Reconstruction loss
    # encode_decode_loss = tf.reduce_mean(tf.square(x_reconstructed-x_true), 1) # MSE (for gray-scale / color inputs)
    # 1e-10 guards the logs against exact 0/1 outputs.
    encode_decode_loss = x_true * tf.log(1e-10 + x_reconstructed) + (1 - x_true) * tf.log(1e-10 + 1 - x_reconstructed)
    encode_decode_loss = -tf.reduce_sum(encode_decode_loss, 1) # Binary Cross Entropy, summed over pixels
    # KL divergence between N(z_mean, exp(z_log_std)) and N(0, 1):
    #   -log(sigma) + 0.5 * (sigma^2 + mu^2 - 1)
    # BUG FIX: the original called np.square on z_mean, which is a graph
    # tensor; tf.square is the correct op here.
    kl_div_loss = -z_log_std + 0.5 * (tf.square(tf.exp(z_log_std)) + tf.square(z_mean) - 1)
    kl_div_loss = tf.reduce_sum(kl_div_loss, 1)
    result = 0
    if reconstruction_loss:
        result += encode_decode_loss
    if kl_loss:
        result += kl_div_loss
    return tf.reduce_mean(result)
# Start a tensorflow session:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Plot generated images by choosing z values in the range $[-3,3]^2$:
def plot_generated():
    """Decode a 10x10 grid of latent points in [-3, 3]^2 and show the canvas.

    NOTE(review): each call adds a new placeholder and decoder sub-graph to
    the default graph (fine for a few notebook calls, but would leak graph
    nodes if invoked in a loop).
    """
    noise_input = tf.placeholder(tf.float32, shape=[None, latent_dim])
    # Re-uses the trained decoder parameters via decoder_weights.
    decoder = construct_decoder(noise_input, decoder_weights)
    n = 10
    x_axis = np.linspace(-3, 3, n)
    y_axis = np.linspace(-3, 3, n)
    canvas = np.empty((28 * n, 28 * n))
    for i, yi in enumerate(x_axis):
        for j, xi in enumerate(y_axis):
            z_mu = np.array([[xi, yi]])
            x_mean = sess.run(decoder, feed_dict={noise_input: z_mu})
            # Fill rows bottom-up so the latent y axis increases upwards.
            canvas[(n - i - 1) * 28:(n - i) * 28, j * 28:(j + 1) * 28] = x_mean.reshape(28, 28)
    plt.figure(figsize=(8, 10))
    # (Removed an unused np.meshgrid(x_axis, y_axis) call from the original.)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.show()
# Plot mean values of the first 1000 datapoints of the dataset:
def plot_means():
    """Scatter the encoder means of the first 1000 images, one colour per digit class."""
    plt.figure()
    for digit in range(10):
        subset = images[:1000][y_train[:1000] == digit]
        latent_means = sess.run(z_mean, {X: subset})
        plt.scatter(latent_means[:, 0], latent_means[:, 1])
    plt.show()
# A method to train our model:
def train(epochs):
    """Run `epochs` full passes over `images`, printing the summed loss per epoch."""
    batch_size = 64
    num_batches = int(np.ceil(len(images) / batch_size))
    for epoch in range(epochs):
        # plot_generated()
        # plot_means()
        epoch_loss = 0
        for batch in range(num_batches):
            # BUG FIX: the original sliced up to min(..., len(images) - 1),
            # silently dropping the last image from every epoch.  Python
            # slicing already clamps the end index, so no min() is needed.
            x_batch = images[batch * batch_size:(batch + 1) * batch_size]
            _, l = sess.run([train_op, loss_op], {X: x_batch})
            epoch_loss += l
        print("Epoch %d: loss %.4f" % (epoch, epoch_loss))
# The following cell will train the autoencoder and generate hand-written digits.
# It is interesting to see how in the manifold the digits alter (almost) smoothly from one digit to another.
# +
loss_op = vae_loss(decoder, tf.reshape(X, [-1, 28*28]))
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op)
sess.run(tf.global_variables_initializer())
train(50)
plot_generated()
plot_means()
# -
# The variational autoencoder ensures that the distribution of the dataset is ${\cal N}(0, 1)$.
# If you do not enforce this with the kl loss (like a "normal" autoencoder) meaningful datapoints can be located anywhere and thus it is difficult to sample from it:
# +
loss_op = vae_loss(decoder, tf.reshape(X, [-1, 28*28]), reconstruction_loss=True, kl_loss=False)
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.0001)
train_op = optimizer.minimize(loss_op)
sess.run(tf.global_variables_initializer())
train(50)
plot_generated()
plot_means()
# -
# But if you just use the kl loss, the datapoints will be mapped to the standard normal distribution, but the decoder does not re-construct meaningful data points:
# +
loss_op = vae_loss(decoder, tf.reshape(X, [-1, 28*28]), reconstruction_loss=False, kl_loss=True)
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.00001)
train_op = optimizer.minimize(loss_op)
sess.run(tf.global_variables_initializer())
train(50)
plot_generated()
plot_means()
# -
sess.close()
| week9/vae.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cryptocurrency Proof-of-Work
#
# ### The purpose of this exercise is to explain how the SHA-256 algorithm is used to implement the adjustable difficulty protocol for Bitcoin.
#
# ### In this exercise we will:
#
# <ul>
# <li>Hash a number using SHA-256.</li>
# <li>Plot the (roughly) uniform distribution of the SHA-256 function.</li>
# <li>Find a value that, when hashed, passes the <a href="https://en.wikipedia.org/wiki/Proof_of_work">proof-of-work</a> difficulty criterion.</li>
# <li>Plot the likelihood of finding a successful nonce for several values.</li>
# </ul>
#
# Note: Displaying plots can take 10 to 15 seconds.
# In this exercise we examine a function crucial to the proof-of-work process in <a href="https://en.wikipedia.org/wiki/Bitcoin">Bitcoin</a>, the SHA-256 function. SHA-256 is a one-way hashing function, that is, given a number A, the function will produce a number B, but given number B, there is (currently) no way to determine number A (without trying all possible numbers from 0 to $2^{256}$).
#
# Try it yourself. Assign a value to _X_, and see what number you get out!
# +
from hashlib import sha256
#change the number you want to hash here!
X = 3
#change the number you want to hash here!
hashedWord = sha256(X.to_bytes(16,'little'));
print(int(hashedWord.hexdigest(),16))
# -
# If you only tried one number, go back and change the value of X and try it again. You'll see that a very small change produces a very different number (this is called the <a href="https://en.wikipedia.org/wiki/Avalanche_effect">Avalanche Effect</a>).
# Next, let's see if numbers that are hashed are evenly distributed. Trying 10,000 numbers by hand would take a long time, so let's use a loop.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
x = []
for i in range(0,10000):
    # Hash each integer's 16-byte little-endian representation with SHA-256.
    hashobj = sha256(i.to_bytes(16,'little'))
    val_hex = hashobj.hexdigest()
    val_int = int(val_hex, 16)
    # Dividing by 1 converts the 256-bit Python int to a float: matplotlib
    # cannot bin arbitrary-precision integers directly, so this is NOT a no-op.
    x.append(val_int / 1)
#plot a histogram of the hash values to see how evenly they cover the range
num_bins = 5
n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
plt.show()
# -
# This (roughly) uniform distribution means that when you pick a number, it has an equal likelihood of being any other number in the range 0 to $2^{256}$. The Bitcoin proof-of-work process has an adjustable difficulty based on the current amount of mining, so a Bitcoin miner's job is to find a number called, in this context, a _nonce_ to add to A to get a resulting B less than the difficulty value. We will set our difficulty value at $2^{253}$; the smaller the difficulty value, the harder it is to find a valid nonce, so this value is quite easy. Try out a few numbers by changing the value of X and see if you can find one that hashes to less than $2^{253}$!
# +
from hashlib import sha256
# %matplotlib inline
import matplotlib.pyplot as plt
#change the number you want to hash here!
X = 0
#change the number you want to hash here!
hashedWord = sha256(X.to_bytes(16,'little'));
val_hex = hashedWord.hexdigest()
val_int = int(val_hex, 16)
power=253
print("Your value: " + str(val_int))
print("Difficulty: " + str(2**power))
if( val_int < 2**power):
print( "SHA256(" + str(X) + ") is less than 2^" + str(power) + "!")
else:
print( "SHA256(" + str(X) + ") is not less than 2^" + str(power) + ".")
# -
# I bet it took you several attempts to find a number that generate a value less than $2^{253}$. Let's use a loop again to see how many attempts it takes to find a nonce for a random number to get a value less than $2^{250}$. This may take a few seconds to run.
# +
from hashlib import sha256
# %matplotlib inline
import matplotlib.pyplot as plt
import random
# A hash below this threshold counts as a valid proof-of-work.
difficulty = 2**250
x = []
for i in range(0,1000):
    counter = 0
    # Pick a random starting nonce in [0, 2**32).
    startingValue = 2**32
    startingValue *= random.random() # random.random() is uniform in [0, 1)
    startingValue = int(startingValue)
    while True:
        # SHA-256 output is effectively uniform over [0, 2**256), so each
        # attempt succeeds with probability difficulty / 2**256 = 2**-6,
        # i.e. roughly 1 in 64 tries.
        hashobj = sha256(startingValue.to_bytes(16,'little'))
        val_hex = hashobj.hexdigest()
        val_int = int(val_hex, 16)
        if( val_int < difficulty):
            break
        counter += 1
        startingValue += 1
    # counter holds the number of failed attempts before the winning nonce.
    x.append(counter)
#plot a histogram of number of attempts for each case
num_bins = 5
n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
plt.show()
# -
# You can see from this plot that most of the attempts took between 0 and 100 tries to find a valid nonce, but some took much longer. The current difficulty is around $2^{180}$, or around one sextillion times more difficult than what we just attempted. It takes all the Bitcoin miners in the world approximately 10 minutes to find the correct nonce.
| stage4/06-jupyter/files/04-Sha256.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # Transfer learning / fine-tuning
#
# This tutorial will guide you through the process of using _transfer learning_ to learn an accurate image classifier from a relatively small number of training samples. Generally speaking, transfer learning refers to the process of leveraging the knowledge learned in one model for the training of another model.
#
# More specifically, the process involves taking an existing neural network which was previously trained to good performance on a larger dataset, and using it as the basis for a new model which leverages that previous network's accuracy for a new task. This method has become popular in recent years to improve the performance of a neural net trained on a small dataset; the intuition is that the new dataset may be too small to train to good performance by itself, but we know that most neural nets trained to learn image features often learn similar features anyway, especially at early layers where they are more generic (edge detectors, blobs, and so on).
#
# Transfer learning has been largely enabled by the open-sourcing of state-of-the-art models; for the top performing models in image classification tasks (like from [ILSVRC](http://www.image-net.org/challenges/LSVRC/)), it is common practice now to not only publish the architecture, but to release the trained weights of the model as well. This lets amateurs use these top image classifiers to boost the performance of their own task-specific models.
#
# #### Feature extraction vs. fine-tuning
#
# At one extreme, transfer learning can involve taking the pre-trained network and freezing the weights, and using one of its hidden layers (usually the last one) as a feature extractor, using those features as the input to a smaller neural net.
#
# At the other extreme, we start with the pre-trained network, but don't freeze its weights, allowing them to be updated along with the new network. Another name for this procedure is called "fine-tuning" because we are slightly adjusting the pre-trained net's weights to the new task. We usually train such a network with a lower learning rate, since we expect the features are already relatively good and do not need to be changed too much.
#
# Sometimes, we do something in-between. Freeze just the early/generic layers, but fine-tune the later layers. Which strategy is best depends on the size of your dataset, the number of classes, and how much it resembles the dataset the previous model was trained on (and thus, whether it can benefit from the same learned feature extractors). A more detailed discussion of how to strategize can be found in [[1]](http://cs231n.github.io/transfer-learning/) [[2]](http://sebastianruder.com/transfer-learning/).
#
# ## Procedure
#
# In this guide will go through the process of loading a state-of-the-art, 1000-class image classifier, [VGG16](https://arxiv.org/pdf/1409.1556.pdf) which [won the ImageNet challenge in 2014](http://www.robots.ox.ac.uk/~vgg/research/very_deep/), and using it as a fixed feature extractor to train a smaller custom classifier on our own images, although with very few code changes, you can try fine-tuning as well.
#
# We will first load VGG16 and remove its final layer, the 1000-class softmax classification layer specific to ImageNet, and replace it with a new classification layer for the classes we are training over. We will then freeze all the weights in the network except the new ones connecting to the new classification layer, and then train the new classification layer over our new dataset.
#
# We will also compare this method to training a small neural network from scratch on the new dataset, and as we shall see, it will dramatically improve our accuracy. We will do that part first.
#
# As our test subject, we'll use a dataset consisting of around 6000 images belonging to 97 classes, and train an image classifier with around 80% accuracy on it. It's worth noting that this strategy scales well to image sets where you may have even just a couple hundred or less images. Its performance will be lesser from a small number of samples (depending on classes) as usual, but still impressive considering the usual constraints.
#
# #### Implementation details
#
# This guide requires you to install [keras](http://www.keras.io), if you have not done so already. It is highly recommended to make sure you are using the GPU to train these models, as it will otherwise take much longer to train (however it is still possible to use CPU). If you can use GPU and have Theano as your backend, you should run the following command _before_ importing keras, to ensure it uses the GPU.
#
# os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32"
#
# If you are using Tensorflow as the backend, this is unnecessary.
#
# Note this guide uses quite deep networks, much larger than the ones we trained in the [convnets guide](https://github.com/ml4a/ml4a-guides/blob/master/notebooks/convolutional_neural_networks.ipynb). If your system does not have enough memory, you may experience out-of-memory errors running this guide. A duplicate of this guide using smaller networks and smaller images is forthcoming soon -- in the meantime, you can try changing the architecture to suit your memory constraints.
#
# To start, make sure the following import statements all work.
# +
# %matplotlib inline
import os
#if using Theano with GPU
#os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32"
import random
import numpy as np
import keras
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model
# -
# ### Getting a dataset
#
# The first step is going to be to load our data. As our example, we will be using the dataset [CalTech-101](http://www.vision.caltech.edu/Image_Datasets/Caltech101/), which contains around 9000 labeled images belonging to 101 object categories. However, we will exclude 5 of the categories which have the most images. This is in order to keep the class distribution fairly balanced (around 50-100) and constrained to a smaller number of images, around 6000.
#
# To obtain this dataset, you can either run the download script `download.sh` in the `data` folder, or the following commands:
#
# wget http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz
# tar -xvzf 101_ObjectCategories.tar.gz
#
# If you wish to use your own dataset, it should be arranged in the same fashion as `101_ObjectCategories` with all of the images organized into subfolders, one for each class. In this case, the following cell should load your custom dataset correctly by just replacing `root` with your folder. If you have an alternate structure, you just need to make sure that you load the list `data` where every element is a dict where `x` is the data (a 1-d numpy array) and `y` is the label (an integer). Use the helper function `get_image(path)` to load the image correctly into the array, and note also that the images are being resized to 224x224. This is necessary because the input to VGG16 is a 224x224 RGB image. You do not need to resize them on your hard drive, as that is being done in the code below.
#
# If you have `101_ObjectCategories` in your data folder, the following cell should load all the data.
# +
root = '../data/101_ObjectCategories'
exclude = ['BACKGROUND_Google', 'Motorbikes', 'airplanes', 'Faces_easy', 'Faces']
train_split, val_split = 0.7, 0.15
categories = [x[0] for x in os.walk(root) if x[0]][1:]
categories = [c for c in categories
if c not in [os.path.join(root, e) for e in exclude]]
# helper function to load image and return it and input vector
def get_image(path):
    """Load the image at `path` resized to 224x224.

    Returns a tuple (PIL image, preprocessed float array with a leading
    batch axis) ready to feed to VGG16.
    """
    pil_img = image.load_img(path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(pil_img), axis=0)
    batch = preprocess_input(batch)
    return pil_img, batch
# load all the images from root folder
data = []
for c, category in enumerate(categories):
images = [os.path.join(dp, f) for dp, dn, filenames
in os.walk(category) for f in filenames
if os.path.splitext(f)[1].lower() in ['.jpg','.png','.jpeg']]
for img_path in images:
img, x = get_image(img_path)
data.append({'x':np.array(x[0]), 'y':c})
# count the number of classes
num_classes = len(categories)
# randomize the data order
random.shuffle(data)
# create training / validation / test split (70%, 15%, 15%)
idx_val = int(train_split * len(data))
idx_test = int((train_split + val_split) * len(data))
train = data[:idx_val]
val = data[idx_val:idx_test]
test = data[idx_test:]
# separate data for labels
x_train, y_train = np.array([t["x"] for t in train]), [t["y"] for t in train]
x_val, y_val = np.array([t["x"] for t in val]), [t["y"] for t in val]
x_test, y_test = np.array([t["x"] for t in test]), [t["y"] for t in test]
# normalize data
# NOTE(review): the arrays were already run through keras' preprocess_input
# (mean subtraction) inside get_image; dividing by 255 on top of that is
# unusual -- confirm this double processing is intended before reusing the
# pipeline elsewhere.
x_train = x_train.astype('float32') / 255.
x_val = x_val.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# convert labels to one-hot vectors
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# summary
print("finished loading %d images from %d categories"%(len(data), num_classes))
print("train / validation / test split: %d, %d, %d"%(len(x_train), len(x_val), len(x_test)))
print("training data shape: ", x_train.shape)
print("training labels shape: ", y_train.shape)
# -
# If everything worked properly, you should have loaded a bunch of images, and split them into three sets: `train`, `val`, and `test`. The shape of the training data should be (`n`, 224, 224, 3) where `n` is the size of your training set, and the labels should be (`n`, `c`) where `c` is the number of classes (97 in the case of `101_ObjectCategories`).
#
# Notice that we divided all the data into three subsets -- a training set `train`, a validation set `val`, and a test set `test`. The reason for this is to properly evaluate the accuracy of our classifier. During training, the optimizer uses the validation set to evaluate its internal performance, in order to determine the gradient without overfitting to the training set. The `test` set is always held out from the training algorithm, and is only used at the end to evaluate the final accuracy of our model.
#
# Let's quickly look at a few sample images from our dataset.
images = [os.path.join(dp, f) for dp, dn, filenames in os.walk(root) for f in filenames if os.path.splitext(f)[1].lower() in ['.jpg','.png','.jpeg']]
idx = [int(len(images) * random.random()) for i in range(8)]
imgs = [image.load_img(images[i], target_size=(224, 224)) for i in idx]
concat_image = np.concatenate([np.asarray(img) for img in imgs], axis=1)
plt.figure(figsize=(16,4))
plt.imshow(concat_image)
# ### First training a neural net from scratch
#
# Before doing the transfer learning, let's first build a neural network from scratch for doing classification on our dataset. This will give us a baseline to compare to our transfer-learned network later.
#
# The network we will construct contains 4 alternating convolutional and max-pooling layers, followed by a [dropout](https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf) after every other conv/pooling pair. After the last pooling layer, we will attach a fully-connected layer with 256 neurons, another dropout layer, then finally a softmax classification layer for our classes.
#
# Our loss function will be, as usual, categorical cross-entropy loss, and our learning algorithm will be [AdaDelta](https://arxiv.org/abs/1212.5701). Various things about this network can be changed to get better performance, perhaps using a larger network or a different optimizer will help, but for the purposes of this notebook, the goal is to just get an understanding of an approximate baseline for comparison's sake, and so it isn't neccessary to spend much time trying to optimize this network.
#
# Upon compiling the network, let's run `model.summary()` to get a snapshot of its layers.
# +
# build the network
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# compile the model to use categorical cross-entropy loss function and adadelta optimizer
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.summary()
# -
# We've created a medium-sized network with ~1.2 million weights and biases (the parameters). Most of them are leading into the one pre-softmax fully-connected layer "dense_5".
#
# We can now go ahead and train our model for 100 epochs with a batch size of 128. We'll also record its history so we can plot the loss over time later.
history = model.fit(x_train, y_train,
batch_size=128,
epochs=100,
validation_data=(x_val, y_val))
# Let's plot the validation loss and validation accuracy over time.
# +
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(121)
ax.plot(history.history["val_loss"])
ax.set_title("validation loss")
ax.set_xlabel("epochs")
ax2 = fig.add_subplot(122)
ax2.plot(history.history["val_acc"])
ax2.set_title("validation accuracy")
ax2.set_xlabel("epochs")
ax2.set_ylim(0, 1)
plt.show()
# -
# Notice that the validation loss begins to actually rise after around 16 epochs, even though validation accuracy remains roughly between 40% and 50%. This suggests our model begins overfitting around then, and best performance would have been achieved if we had stopped early around then. Nevertheless, our accuracy would not have likely been above 50%, and probably lower down.
#
# We can also get a final evaluation by running our model on the training set. Doing so, we get the following results:
loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', loss)
print('Test accuracy:', accuracy)
# Finally, we see that we have achieved a (top-1) accuracy of around 49%. That's not too bad for 6000 images, considering that if we were to use a naive strategy of taking random guesses, we would have only gotten around 1% accuracy.
#
# ## Transfer learning by starting with existing network
#
# Now we can move on to the main strategy for training an image classifier on our small dataset: by starting with a larger and already trained network.
#
# To start, we will load the VGG16 from keras, which was trained on ImageNet and the weights saved online. If this is your first time loading VGG16, you'll need to wait a bit for the weights to download from the web. Once the network is loaded, we can again inspect the layers with the `summary()` method.
vgg = keras.applications.VGG16(weights='imagenet', include_top=True)
vgg.summary()
# Notice that VGG16 is _much_ bigger than the network we constructed earlier. It contains 13 convolutional layers and two fully connected layers at the end, and has over 138 million parameters, around 100 times as many parameters than the network we made above. Like our first network, the majority of the parameters are stored in the connections leading into the first fully-connected layer.
#
# VGG16 was made to solve ImageNet, and achieves a [8.8% top-5 error rate](https://github.com/jcjohnson/cnn-benchmarks), which means that 91.2% of test samples were classified correctly within the top 5 predictions for each image. It's top-1 accuracy--equivalent to the accuracy metric we've been using (that the top prediction is correct)--is 73%. This is especially impressive since there are not just 97, but 1000 classes, meaning that random guesses would get us only 0.1% accuracy.
#
# In order to use this network for our task, we "remove" the final classification layer, the 1000-neuron softmax layer at the end, which corresponds to ImageNet, and instead replace it with a new softmax layer for our dataset, which contains 97 neurons in the case of the 101_ObjectCategories dataset.
#
# In terms of implementation, it's easier to simply create a copy of VGG from its input layer until the second to last layer, and then work with that, rather than modifying the VGG object directly. So technically we never "remove" anything, we just circumvent/ignore it. This can be done in the following way, by using the keras `Model` class to initialize a new model whose input layer is the same as VGG but whose output layer is our new softmax layer, called `new_classification_layer`. Note: although it appears we are duplicating this large network, internally Keras is actually just copying all the layers by reference, and thus we don't need to worry about overloading the memory.
# +
# Transfer learning, step 1: graft a fresh classification head onto VGG16.
# make a reference to VGG's input layer
inp = vgg.input
# make a new softmax layer with num_classes neurons
new_classification_layer = Dense(num_classes, activation='softmax')
# connect our new layer to the second to last layer in VGG, and make a reference to it
# (vgg.layers[-2] is the last layer before the original 1000-way ImageNet softmax)
out = new_classification_layer(vgg.layers[-2].output)
# create a new network between inp and out; Keras shares the layers by
# reference, so VGG's weights are not duplicated in memory
model_new = Model(inp, out)
# -
# We are going to retrain this network, `model_new` on the new dataset and labels. But first, we need to freeze the weights and biases in all the layers in the network, except our new one at the end, with the expectation that the features that were learned in VGG should still be fairly relevant to the new image classification task. Not optimal, but most likely better than what we can train to in our limited dataset.
#
# By setting the `trainable` flag in each layer false (except our new classification layer), we ensure all the weights and biases in those layers remain fixed, and we simply train the weights in the one layer at the end. In some cases, it is desirable to *not* freeze all the pre-classification layers. If your dataset has enough samples, and doesn't resemble ImageNet very much, it might be advantageous to fine-tune some of the VGG layers along with the new classifier, or possibly even all of them. To do this, you can change the below code to make more of the layers trainable.
#
# In the case of CalTech-101, we will just do feature extraction, fearing that fine-tuning too much with this dataset may overfit. But maybe we are wrong? A good exercise would be to try out both, and compare the results.
#
# So we go ahead and freeze the layers, and compile the new model with exactly the same optimizer and loss function as in our first network, for the sake of a fair comparison. We then run `summary` again to look at the network's architecture.
# +
# Freeze every layer except the new classification head, so only the head's
# weights are updated during training (pure feature extraction).
# Cleanup: the original looped with `enumerate` but never used the index, and
# looped over the one-element slice [-1:] to unfreeze a single layer.
for layer in model_new.layers[:-1]:
    layer.trainable = False

# ensure the last layer (the new softmax head) is trainable/not frozen
model_new.layers[-1].trainable = True

# Same loss/optimizer as the from-scratch model, for a fair comparison.
model_new.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

model_new.summary()
# -
# Looking at the summary, we see the network is identical to the VGG model we instantiated earlier, except the last layer, formerly a 1000-neuron softmax, has been replaced by a new 97-neuron softmax. Additionally, we still have roughly 134 million weights, but now the vast majority of them are "non-trainable params" because we froze the layers they are contained in. We now only have 397,000 trainable parameters, which is actually only a quarter of the number of parameters needed to train the first model.
#
# As before, we go ahead and train the new model, using the same hyperparameters (batch size and number of epochs) as before, along with the same optimization algorithm. We also keep track of its history as we go.
# Retrain only the unfrozen head on our dataset; the frozen layers keep VGG's
# ImageNet weights. Same batch size / epoch count as the from-scratch model.
history2 = model_new.fit(x_train, y_train,
                         batch_size=128,
                         epochs=100,
                         validation_data=(x_val, y_val))
# Our validation accuracy hovers close to 80% towards the end, which is more than 30% improvement on the original network trained from scratch (meaning that we make the wrong prediction on 20% of samples, rather than 50%).
#
# It's worth noting also that this network actually trains _slightly faster_ than the original network, despite having more than 100 times as many parameters! This is because freezing the weights negates the need to backpropagate through all those layers, saving us on runtime.
#
# Let's plot the validation loss and accuracy again, this time comparing the original model trained from scratch (in blue) and the new transfer-learned model in green.
# +
# Side-by-side validation curves: `history` is the model trained from scratch,
# `history2` the transfer-learned model.
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(121)
ax.plot(history.history["val_loss"])
ax.plot(history2.history["val_loss"])
ax.set_title("validation loss")
ax.set_xlabel("epochs")

ax2 = fig.add_subplot(122)
# NOTE(review): the history key is "val_acc" in Keras < 2.3; newer versions
# renamed it to "val_accuracy" -- confirm against the installed Keras version.
ax2.plot(history.history["val_acc"])
ax2.plot(history2.history["val_acc"])
ax2.set_title("validation accuracy")
ax2.set_xlabel("epochs")
ax2.set_ylim(0, 1)

plt.show()
# -
# Notice that whereas the original model began overfitting around epoch 16, the new model continued to slowly decrease its loss over time, and likely would have improved its accuracy slightly with more iterations. The new model made it to roughly 80% top-1 accuracy (in the validation set) and continued to improve slowly through 100 epochs.
#
# It's possible we could have improved the original model with better regularization or more dropout, but we surely would not have made up the >30% improvement in accuracy.
#
# Again, we do a final validation on the test set.
# +
# Final held-out evaluation of the transfer-learned model on the test split.
loss, accuracy = model_new.evaluate(x_test, y_test, verbose=0)
print('Test loss:', loss)
print('Test accuracy:', accuracy)
# -
# To predict a new image, simply run the following code to get the probabilities for each class.
# Predict class probabilities for one image. `get_image` (defined earlier in
# the notebook) returns the raw image and its preprocessed array `x`.
# NOTE(review): `x` is passed wrapped in a list -- confirm its shape matches
# the model's expected batched input.
img, x = get_image('../data/101_ObjectCategories/airplanes/image_0003.jpg')
probabilities = model_new.predict([x])
print(probabilities)
# ### Improving the results
#
# 78.2% top-1 accuracy on 97 classes, roughly evenly distributed, is a pretty good achievement. It is not quite as impressive as the original VGG16 which achieved 73% top-1 accuracy on 1000 classes. Nevertheless, it is much better than what we were able to achieve with our original network, and there is room for improvement. Some techniques that could possibly have improved our performance include:
#
# - Using data augmentation: augmentation refers to using various modifications of the original training data, in the form of distortions, rotations, rescalings, lighting changes, etc to increase the size of the training set and create more tolerance for such distortions.
# - Using a different optimizer, adding more regularization/dropout, and other hyperparameters.
# - Training for longer (of course)
#
# A more advanced example of transfer learning in Keras, involving augmentation for a small 2-class dataset, can be found in the [Keras blog](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
| deep-learning/DLforArtists/notebooks/transfer-learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
def gsjn1():
    """Getting Started with Jupyter Notebook -- objective 1.

    Prints a classic greeting to confirm the environment is set up.
    """
    print("Hello, World!")


# Run objective 1.
gsjn1()
# +
# Build a short Markdown-style usage note for gsjn1() and echo it to stdout.
documentation = """### Python Function gsjn1()
Function **gsjn1()** (_Getting Started with Jupyter Notebook_) completes the first objective for this project. You invoke it like this: `gsjn1()`
"""

print (documentation)
# -
#
#
| gsjn_objective-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pickle

# BUG FIX: pandas and numpy are used throughout this notebook (pd.read_pickle,
# pd.merge, np.where) but were never imported here; import them explicitly so
# the notebook runs from a clean kernel.
import numpy as np
import pandas as pd

# These are from Data preprocessing notebook and pickles
non_validation_data = pd.read_pickle(r'non_validation_data.p')
validation_data = pd.read_pickle(r'validation_data.p')
# + [markdown] heading_collapsed=true
# # Feature Engineering Functions
# + [markdown] hidden=true
# ## Feature - Total Buys and Ratios
# + hidden=true
# total buy features
#
# The five "recent n orders" variants below were five near-identical copies
# differing only in the constant n; they now share one parameterized helper.

def _total_buy_last_n(df, n):
    """Count purchases per (user, product) within the most recent *n* orders.

    Rows with ``order_number_reverse <= n - 1`` are the n most recent orders
    (0 is the latest).  Returns columns: user_id, product_id, total_buy_<n>,
    total_buy_ratio_<n> (count divided by n).
    """
    counts = df[df.order_number_reverse <= n - 1].groupby(['user_id', 'product_id']).size().reset_index()
    counts.columns = ['user_id', 'product_id', f'total_buy_{n}']
    counts[f'total_buy_ratio_{n}'] = counts[f'total_buy_{n}'] / n
    return counts

def generate_total_buy_7(df):
    """Purchase counts/ratios over the 7 most recent orders."""
    return _total_buy_last_n(df, 7)

def generate_total_buy_6(df):
    """Purchase counts/ratios over the 6 most recent orders."""
    return _total_buy_last_n(df, 6)

def generate_total_buy_5(df):
    """Purchase counts/ratios over the 5 most recent orders."""
    return _total_buy_last_n(df, 5)

def generate_total_buy_4(df):
    """Purchase counts/ratios over the 4 most recent orders."""
    return _total_buy_last_n(df, 4)

def generate_total_buy_3(df):
    """Purchase counts/ratios over the 3 most recent orders."""
    return _total_buy_last_n(df, 3)

def generate_total_buy_all(df):
    """Purchase counts per (user, product) over the entire order history."""
    total_buy_all = df.groupby(['user_id', 'product_id']).size().reset_index()
    total_buy_all.columns = ['user_id', 'product_id', 'total_buy_all']
    return total_buy_all
# + [markdown] hidden=true
# ## Feature - Repurchase Period
# + hidden=true
# longest days without repurchase category
#
# The three "longest period" variants and the median variant shared the same
# filter/groupby/aggregate/rename shape; they now use one parameterized helper.

def _days_between_purchases(df, agg, new_name, last_n=None):
    """Aggregate days_since_prior_order per (user, product).

    agg      -- pandas aggregation name, e.g. 'max' or 'median'
    new_name -- output column name for the aggregated value
    last_n   -- restrict to the last_n most recent orders (None = all history)
    """
    sub = df if last_n is None else df[df.order_number_reverse <= last_n - 1]
    out = sub.groupby(['user_id', 'product_id'])['days_since_prior_order'].agg(agg).reset_index()
    out.rename(columns={'days_since_prior_order': new_name}, inplace=True)
    return out

def generate_longest_period_wo_repurchase(df):
    """Longest gap (days) without repurchasing, over all orders."""
    return _days_between_purchases(df, 'max', 'longest_period_without_repurchasing_in_all_orders')

def generate_longest_period_wo_repurchase_3(df):
    """Longest gap (days) without repurchasing, over the 3 most recent orders."""
    return _days_between_purchases(df, 'max', 'longest_period_without_repurchasing_in_recent_3_orders', last_n=3)

def generate_longest_period_wo_repurchase_5(df):
    """Longest gap (days) without repurchasing, over the 5 most recent orders."""
    return _days_between_purchases(df, 'max', 'longest_period_without_repurchasing_in_recent_5_orders', last_n=5)

def median_days_wo_repurchase_n5(df):
    """Median gap (days) without repurchasing, over the 5 most recent orders."""
    return _days_between_purchases(df, 'median', 'median_without_repurchasing_in_recent_5_orders', last_n=5)
# + [markdown] hidden=true
# ## Feature - Chance
# + hidden=true
def _chance_and_ratio(df, suffix=''):
    """Per (user, product): the 'chance' count and the purchase ratio by chance.

    chance = number of orders the user placed from their first purchase of
    the product through their latest order (inclusive); the ratio is how many
    of those opportunities were actually taken.  *suffix* is appended to the
    output column names (e.g. '_n5' for the recent-5-orders variant).

    The two public wrappers below previously duplicated this entire body.
    """
    chance_col = 'chance' + suffix
    ratio_col = 'order_ratio_bychance' + suffix
    # cnt -> how many times the user bought the product
    cnt = df.groupby(['user_id', 'product_id']).size()
    cnt.name = 'cnt'
    cnt = cnt.reset_index()
    # The latest order number per user
    user_onb_max = df.groupby('user_id').order_number.max().reset_index()
    user_onb_max.columns = ['user_id', 'onb_max']
    # The first order containing the product
    user_item_min = df.groupby(['user_id', 'product_id']).order_number.min().reset_index()
    user_item_min.columns = ['user_id', 'product_id', 'onb_min']
    chance = pd.merge(user_item_min, user_onb_max, on='user_id', how='left')
    # Orders placed since (and including) the first order of this product
    chance[chance_col] = chance.onb_max - chance.onb_min + 1
    final = pd.merge(cnt, chance, on=['user_id', 'product_id'], how='left')
    final[ratio_col] = final.cnt / final[chance_col]
    return final.drop(['onb_max', 'onb_min', 'cnt'], axis=1)

def generate_chance_and_ratio(df):
    """'chance' and 'order_ratio_bychance' over the full order history."""
    return _chance_and_ratio(df)

def generate_chance_and_ratio_recent_5(df1):
    """'chance_n5' and 'order_ratio_bychance_n5' over the 5 most recent orders."""
    return _chance_and_ratio(df1[df1.order_number_reverse <= 4], suffix='_n5')
# + [markdown] hidden=true
# ## Feature - Days since first order
# + hidden=true
def generate_days_since_first_orders(df):
    """Days elapsed between each (user, product) pair's first and last purchase.

    BUG FIX: the original assigned a helper column directly into the caller's
    DataFrame, mutating the shared input; we now work on a copy.
    """
    tmp = df.copy()
    # Running total of inter-order gaps, accumulated in order_number order;
    # cumsum results align back to tmp's index.
    tmp['days_since_first_order'] = tmp.sort_values('order_number').groupby(
        ['user_id', 'product_id']).days_since_prior_order.cumsum()
    # The max of the running total is the span from first to last purchase.
    return tmp.groupby(['user_id', 'product_id'])['days_since_first_order'].max().reset_index(
        name='days_since_first_order')
# + [markdown] heading_collapsed=true
# # Feature Generation
# + hidden=true
# Assemble the (user, product) feature matrix: start from lifetime buy counts,
# then left-join each recent-window count/ratio feature on user_id/product_id.
total_buy = generate_total_buy_all(non_validation_data)
total_buy = pd.merge(left = total_buy,right=generate_total_buy_7(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_total_buy_6(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_total_buy_5(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_total_buy_4(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_total_buy_3(non_validation_data), how='left')
# + hidden=true
# Repurchase-gap features (longest/median days without repurchasing).
total_buy = pd.merge(left = total_buy,right=generate_longest_period_wo_repurchase(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_longest_period_wo_repurchase_3(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_longest_period_wo_repurchase_5(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=median_days_wo_repurchase_n5(non_validation_data), how='left')
# + hidden=true
# "Chance" features: rebuy opportunities vs. actual rebuys.
total_buy = pd.merge(left = total_buy,right=generate_chance_and_ratio(non_validation_data), how='left')
total_buy = pd.merge(left = total_buy,right=generate_chance_and_ratio_recent_5(non_validation_data), how='left')
# + hidden=true
days_since_first_orders = generate_days_since_first_orders(non_validation_data)
# + hidden=true
# Final feature table; NaNs (pairs absent from a given window) become 0.
features_all = pd.merge(left = total_buy,right=days_since_first_orders, how='left')
features_all = features_all.fillna(0)
# + [markdown] heading_collapsed=true
# # Label Generation - Reorder in Recent 5
# + [markdown] hidden=true
# ## Creating the Reordered Column for the recent 5 orders
# + hidden=true
# Label: did the user reorder the product within the most recent orders?
# Getting the recent 5 orders
# NOTE(review): the filter <= 5 keeps order_number_reverse 0..5, i.e. six
# values -- confirm whether order_number_reverse is 0- or 1-based here.
recent_5_orders = non_validation_data[non_validation_data["order_number_reverse"] <= 5][['order_id','product_id','user_id']]
# + hidden=true
# get number_of_order_in_recent_5 at the user-product level
recent_5_reordered = recent_5_orders.groupby(['user_id','product_id']).size().reset_index(name='number_of_order_in_recent_5')
# + hidden=true
# If number_of_order_in_recent_5 is one, the user ordered it only once, so reordered is 0;
# if it is more than one, a reorder took place (1).
recent_5_reordered['reordered_recent_5'] = np.where(recent_5_reordered['number_of_order_in_recent_5']==1 , 0, 1)
recent_5_reordered
# + hidden=true
# Drop the helper count column; only the binary label is kept.
recent_5_reordered = recent_5_reordered.drop(['number_of_order_in_recent_5'],axis=1)
# + hidden=true
# All (user, product) pairs ever purchased in the non-validation data; the
# throwaway size column 'no' is sliced off immediately.
user_purchased_product_non_validation = non_validation_data.groupby(['user_id','product_id']).size().reset_index(name='no')
user_purchased_product_non_validation = user_purchased_product_non_validation.iloc[:,:-1]
# + hidden=true
# Merge labels onto all pairs; NaN after the merge means the user never
# purchased this product in the recent orders, so the label becomes 0.
train_with_all_user_purchased_product = pd.merge(user_purchased_product_non_validation, recent_5_reordered,how="left",on=['user_id',"product_id"])
train_with_all_user_purchased_product = train_with_all_user_purchased_product.fillna(0)
# + [markdown] heading_collapsed=true
# # Merging Features and Labels for Validation
# + hidden=true
# Join features with labels to form the training table, then persist it.
to_train_1 = pd.merge(left=features_all, right=train_with_all_user_purchased_product,on=['user_id','product_id'],how='left')
to_train_1.to_pickle('to_train_1.p')
# + [markdown] heading_collapsed=true
# # Prepare test data at Validation
# + hidden=true
# for validation set
# test_data_1: user-product level matrix (~12.4M rows) covering orders up to
# and including the order immediately before the validation order.
# get all the user-product pairs from history
test_data_1 = non_validation_data.groupby(['user_id','product_id']).size().reset_index()
# remove the last column (the groupby size count; only the key columns are needed)
test_data_1 = test_data_1.iloc[:,:-1]
# merge with the latest order (validation data) to get the reordered label
test_data_1 = pd.merge(test_data_1,validation_data[['user_id','product_id','reordered']],on=['user_id','product_id'], how='left')
# NaN means the user never ordered the product in the validation order, hence 0 (no reorder)
test_data_1 = test_data_1.fillna(0)
test_data_1.to_pickle('to_test_1.p')
# + [markdown] heading_collapsed=true
# # Prepare for training and testing data after validation
# Repeat step 2 - 5 on order_master (include both validation and non-validation data) to get training and test data after validation
#
# They are saved to to_train_2.p and to_test_2.p
# + hidden=true
| model-1/2 Feature Engineering and Train and Test Set Preparation - Cleaned.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from pandas import DataFrame
import numpy as np
from datetime import datetime
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import warnings
import os.path
import info
#import lightgbm as lgb
from utils import *
warnings.filterwarnings("ignore")

today = datetime.today()
print("Today's date:", today.strftime("%Y-%m-%d"))

# Project folders; dated folder names use the date's last six digits (YYMMDD).
path_pc = 'C:/Users/admin/Desktop/AI Plan/Finance with AI/Notebooks/'
path_img = 'C:/Users/admin/Desktop/AI Plan/Finance with AI/PnL images/'+today.strftime("%Y%m%d")[-6:]+'/'

# Walk backwards from today (up to one week) to find the most recent non-empty
# scoreboard folder; `to_days` comes from utils via the star import --
# presumably a timedelta helper, confirm in utils.py.
count = 0
day = today
while True:
    path_scoreboard = 'C:/Users/admin/Desktop/AI Plan/Finance with AI/Notebooks/Score Boards individual symbol/'+day.strftime("%Y%m%d")[-6:]+'/'
    if os.path.exists(path_scoreboard+'3988.HK_scoreboard_knn.csv') and len(os.listdir(path_scoreboard)) > 0:
        print('\nLatest scoreboard folder: \n', path_scoreboard)
        break
    else:
        day = day - to_days(1)
        count += 1
        if count > 7:
            print("No valid scoreboard path. Please check.")
            break
# +
#path_scoreboard = 'C:/Users/admin/Desktop/AI Plan/Finance with AI/Notebooks/Score Boards individual symbol/200304/'
# -
# Load the most recent outcomes dataset and drop the 10-day-forward return
# columns; the commented entries show other horizons that were dropped before.
outcomes_new = load_latest(today, 'outcomes_', path_pc)
# outcomes_new = outcomes_new.drop(['log_return_10', 'return_10', 'log_return_5', 'return_5'], axis=1)
outcomes_new = outcomes_new.drop(['log_return_10', 'return_10'#,
                                  #'log_return_5', 'return_5',
                                  #'log_return_4', 'return_4',
                                  #'log_return_3', 'return_3',
                                  #'log_return_2', 'return_2'
                                  ], axis=1)
# Feature columns fed to the KNN models; commented-out entries are raw series
# deliberately excluded (their scaled/slope variants are used instead).
features_selected = ['(close/ema)-1',
                     '(close/sma)-1',
                     '1-lower/close',
                     #'bbands50_lower',
                     'bbands50_lower_slope',
                     #'bbands50_upper',
                     'bbands50_upper_slope',
                     #'close',
                     'close_pct_change_5_day',
                     'close_scaled50',
                     'close_slope',
                     #'ema50',
                     'ema50_scaled50',
                     'ema50_slope',
                     #'high',
                     'high_scaled50',
                     'high_slope',
                     'intraday_chg',
                     'log volume',
                     #'low',
                     'low_scaled50',
                     'low_slope',
                     #'macd',
                     #'macd_signal',
                     'macd_signal_pct_diff',
                     'momentum',
                     #'open',
                     'open_scaled50',
                     'open_slope',
                     'rsi14',
                     'rsi14_slope',
                     #'sma50',
                     'sma50_scaled50',
                     'sma50_slope',
                     'upper/close-1',
                     'volatility50',
                     'volatility50_ratio',
                     #'volume',
                     'volume_pct_change_1_day',
                     'volume_scaled50',
                     'volume_slope',
                     'past_return_1',
                     'past_return_2',
                     'past_return_3',
                     'past_return_4',
                     'past_return_5',
                     'past_return_10',
                     'past_log_return_1',
                     'past_log_return_2',
                     'past_log_return_3',
                     'past_log_return_4',
                     'past_log_return_5',
                     'past_log_return_10'
                     ]

# Last trading date available in the dataset (MultiIndex level 'date').
last_date = outcomes_new.index.get_level_values('date')[-1].to_pydatetime()
print ("Last date in dataset: ", last_date.strftime("%Y-%m-%d"))
# +
#master_scoreboard = load_latest(last_date, 'master_scoreboard_', path_pc)
# +
#scoreboard_last_date =
# -
outcomes_new_dropna = outcomes_new.dropna()

# Backtest configuration.
prob_threshold = 0.7  # minimum predicted probability for a trade signal
test_period = 100     # number of rolling backtest windows per symbol
test_size = 1         # trading days per test window
valid_size = 400      # validation window length (trading days)
training_size = 2000  # training window length (trading days)
valid_test_gap = 4    # gap between validation and test windows (trading days)
q_upper = 0.9         # upper return quantile defining the positive class
q_lower = 0.1         # lower quantile (not used below)

# Prediction horizon: the 5-day forward (log) return. Alternative horizons are
# kept commented out for quick switching.
# return_col = 'log_return_1'
# return_col_actual = 'return_1'
#return_col = 'log_return_2'
#return_col_actual = 'return_2'
# return_col = 'log_return_3'
# return_col_actual = 'return_3'
return_col = 'log_return_5'
return_col_actual = 'return_5'
# return_col = 'log_return_10'
# return_col_actual = 'return_10'

# Hyper-parameter search grids. max_depth/num_leaves look like LightGBM
# leftovers -- the KNN search below only uses p, leaf_size and n_neighbors.
max_depth_range = np.arange(2,5,1)
num_leaves_range = np.arange(9,15,1)
p_range = [1,2]
leaf_size_range= [1] #list(range(1,50))
n_neighbors_range = list(range(1,30))
# ## Back Test each symbol one by one, store results in master scoreboard
# +
from pathlib import Path

# Create today's scoreboard output folder (no error if it already exists).
path_scoreboard_new = 'C:/Users/admin/Desktop/AI Plan/Finance with AI/Notebooks/Score Boards individual symbol/'+today.strftime("%Y%m%d")[-6:]+'/'
print("Creating new folder: ", today.strftime("%Y%m%d")[-6:]+'/')
Path(path_scoreboard_new).mkdir(parents=True, exist_ok=True)
# +
# Per-symbol rolling backtest: for each symbol, load its existing scoreboard,
# backtest only the start dates not already scored, append the new rows, and
# accumulate a per-symbol summary into master_scoreboard.
master_scoreboard = pd.DataFrame()

for symbol_loop in list(info.multiplier.keys()):
    #print("-"*80)
    #print("\nWorking on ", symbol_loop)
    # Restrict the cleaned dataset to this symbol only.
    outcomes_new_dropna = outcomes_new.dropna()
    outcomes_new_dropna = outcomes_new_dropna.loc[outcomes_new_dropna.index.get_level_values('symbol')==symbol_loop]

    # Load the latest scoreboard for this symbol; skip symbols with no history.
    score_board_filename = symbol_loop + '_scoreboard_knn.csv'
    if os.path.exists(path_scoreboard + score_board_filename):
        score_board = pd.read_csv(path_scoreboard + score_board_filename)
    else:
        print("Skipping ", symbol_loop, " as previous data doesn't exist.")
        continue

    # All trading dates present for this symbol, as sorted datetime objects.
    data_dates = list(set(outcomes_new_dropna.index.get_level_values(0)))
    data_dates = sorted(data_dates)
    data_converted_dates = []
    for ts in data_dates:
        data_converted_dates.append(ts.to_pydatetime())

    # Training start dates: walk backwards one test window at a time, leaving
    # room for train + valid + gap windows before each test day.
    total_num_days = len(data_converted_dates)
    num_periods = int((total_num_days -(valid_size + training_size + valid_test_gap))/test_size)
    start_dates = []
    for i in range(1, num_periods):
        start_dates.append(data_converted_dates[(test_size*-i) - valid_test_gap - valid_size - training_size])
    start_dates = sorted(start_dates)

    # Skip symbols that are relatively new and lack enough trading days.
    if len(start_dates) < test_period:
        print("Skipped ", symbol_loop, " due to insuffient trading days.")
        continue

    # Only backtest start dates that are not already in the scoreboard.
    score_board_dates = []
    for date_str in score_board['01) start_date']:
        score_board_dates.append(datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S'))
    new_start_dates = list(set(start_dates[-test_period:]) - set(score_board_dates))
    new_start_dates = sorted(new_start_dates)

    # Next day's open price (shift -1 within the symbol group), used for P&L.
    next_day_open = lambda x: x.shift(-1)
    outcomes_new_dropna['next_day_open'] = outcomes_new_dropna.groupby(level='symbol').apply(next_day_open)['open']

    if new_start_dates == []:
        print("No new data are found for {}. No backtest is done.".format(symbol_loop))
    else:
        print("Training for ", symbol_loop)
        for start_date in new_start_dates:
            # Translate the start date into train/valid/test window boundaries.
            counter = data_converted_dates.index(start_date)
            start_date_valid = data_converted_dates[counter + training_size -1]
            end_date_valid = data_converted_dates[counter + training_size + valid_size -1]
            start_date_test = data_converted_dates[counter + training_size + valid_test_gap + valid_size-1]
            end_date_test = data_converted_dates[counter + training_size + valid_test_gap + valid_size + test_size-1]
            #print("start training date: ", start_date)
            #print("Valid dataset period: ", start_date_valid, " - ", end_date_valid)
            #print("Test datasset period: ", start_date_test, " - ", end_date_test)

            # Split into train, valid and test sets (helper from utils).
            X_y_train, X_y_valid, X_y_test = train_valid_test_split(outcomes_new_dropna, start_date,
                                                                    start_date_valid, end_date_valid, start_date_test, end_date_test)
            # Compute the upper return-quantile threshold on the training set,
            # then derive the binary 'target' for all three sets.
            X_y_train, X_y_valid, X_y_test = add_target_upper(X_y_train, X_y_valid,
                                                              X_y_test, q_upper, 'target', return_col)
            X_y_test['symbol'] = X_y_test.index.get_level_values('symbol')

            # Downsample the training set's negative class.
            X_y_train_resampled = downsample(X_y_train, 'target', test_ratio=0.11, random_seed=11)

            # Price frames used later to turn predictions into realized gains.
            X_valid_close = X_y_valid[['close',return_col_actual]]
            X_test_close = X_y_test[['close',return_col_actual, 'symbol','next_day_open']]

            # Position size per symbol: board lot x contract multiplier.
            num_shares = []
            for i, row in X_test_close.iterrows():
                num_shares.append(info.board_lots[row[2]] * info.multiplier[row[2]])
            X_test_close['num_shares'] = num_shares

            # Split into feature matrices and targets.
            X_train, y_train = feature_target_split(X_y_train_resampled, features_selected, 'target')
            X_valid, y_valid = feature_target_split(X_y_valid, features_selected, 'target')
            X_test, y_test = feature_target_split(X_y_test, features_selected, 'target')

            # Grid-search KNN on the validation set; returns both the
            # best-gain and the best-AUC models (helper from utils).
            (best_model, best_auc_model, max_auc, p_at_max_auc, leaf_size_at_max_auc,
             n_neighbors_at_max_auc, p_at_max_tt, leaf_size_at_max_tt, n_neighbors_at_max_tt,
             y_class_pred) = knn_train(X_train, y_train, X_valid, y_valid, X_valid_close, p_range,
                                       leaf_size_range, n_neighbors_range, return_col_actual, prob_threshold = 0.7, sign = 1)

            # Realized test gain for each of the two selected models.
            test_total_gain, total_amount_spent, y_class_pred = total_actual_gain_knn(best_model, X_test, X_test_close,
                                                                                      y_test, prob_threshold, return_col_actual)
            test_auc_total_gain, total_auc_amount_spent, y_class_auc_pred = total_actual_gain_knn(best_auc_model, X_test, X_test_close,
                                                                                                  y_test, prob_threshold, return_col_actual)

            # Append one scoreboard row per backtested start date.
            score_board = score_board.append({'01) start_date': start_date, '02) start_date_valid':start_date_valid, '03) start_date_test': start_date_test,
                                              '04a) test_total_gain': test_total_gain,
                                              '04b) total_amount_spent': total_amount_spent,
                                              '07) max_auc': max_auc,
                                              '08) test_auc_total_gain': test_auc_total_gain,
                                              #'09) optimal_precision_depth': optimal_precision_depth,
                                              #'10) optimal_precision_num_leaves': optimal_precision_num_leaves,
                                              '10a) total_auc_amount_spent': total_auc_amount_spent}, ignore_index=True)

    # Symbol-level summary stats over the (possibly extended) scoreboard.
    gain_loss_ratio = gain_vs_loss(score_board['04a) test_total_gain'])
    gain_loss_ratio_auc = gain_vs_loss(score_board['08) test_auc_total_gain'])

    ######################################################################################
    #        MAY NEED TO DROP FIRST FEW ROWS IN SCORE_BOARD TO MAINTAIN EXACTLY 100 ROWS #
    ######################################################################################

    # Persist the per-symbol scoreboard into today's folder.
    score_filename = symbol_loop + '_scoreboard_knn.csv'
    print("Saving score board for ", symbol_loop)
    save_csv(score_board, path_scoreboard_new, score_filename)

    # One summary row per symbol in the master scoreboard.
    master_scoreboard = master_scoreboard.append({'symbol': symbol_loop,
                                                  'Test Total Gain': score_board['04a) test_total_gain'].sum(),
                                                  'Gain Loss Ratio' : gain_loss_ratio,
                                                  'Gain Loss Ratio AUC' : gain_loss_ratio_auc,
                                                  'Test AUC Total Gain': score_board['08) test_auc_total_gain'].sum(),
                                                  'Profit Percentage': score_board['04a) test_total_gain'].sum()/(score_board['04b) total_amount_spent'].mean()*5),
                                                  'Profit Percentage Max AUC': score_board['08) test_auc_total_gain'].sum()/(score_board['10a) total_auc_amount_spent'].mean()*5),
                                                  'Total amount spent': score_board['04b) total_amount_spent'].mean(),
                                                  'Total AUC amount spent': score_board['10a) total_auc_amount_spent'].mean()}, ignore_index=True)

    # Cumulative-gain plots for both model selections; clf() so figures are
    # not reused across symbols.
    print("Saving plots for ", symbol_loop)
    plot_filename_total = symbol_loop + '_knn_total.png'
    plot_filename_pres = symbol_loop + '_knn_auc.png'
    fig_total = score_board['04a) test_total_gain'].cumsum().plot().get_figure()
    fig_total.savefig(path_img+plot_filename_total)
    fig_pres = score_board['08) test_auc_total_gain'].cumsum().plot().get_figure()
    fig_pres.savefig(path_img+plot_filename_pres)
    fig_pres.clf()
    fig_total.clf()
# -
# Top-5 symbols by gain/loss ratio, and symbols whose ratio is -inf
# (presumably gain_vs_loss's value when there are no losses -- confirm in utils).
master_scoreboard.sort_values(by=['Gain Loss Ratio'], ascending=False)[0:5]

master_scoreboard.loc[master_scoreboard['Gain Loss Ratio'] == float("-inf")]

# Persist the master scoreboard only when new backtest dates were processed.
# NOTE(review): `new_start_dates` here holds whatever the LAST loop iteration
# left behind, not an all-symbols summary -- verify this is the intent.
if new_start_dates == []:
    print('No master scoreboard is saved.')
else:
    save_csv(master_scoreboard, path_pc, 'master_scoreboard_knn_'+last_date.strftime("%Y-%m-%d")+'.csv')
    print('Master scoreboard is saved to ', 'master_scoreboard_knn_'+last_date.strftime("%Y-%m-%d")+'.csv')

# # Read Master Scoreboard

# Reload the saved master scoreboard and inspect it under several rankings.
master_scoreboard = pd.read_csv(path_pc+'master_scoreboard_knn_'+last_date.strftime("%Y-%m-%d")+'.csv')

master_scoreboard.sort_values(by=['Test Total Gain'], ascending=False)[0:5]

master_scoreboard.sort_values(by=['Profit Percentage'], ascending=False)[0:5]

master_scoreboard.sort_values(by=['Gain Loss Ratio AUC'], ascending=False)[0:5]

master_scoreboard.loc[master_scoreboard['Gain Loss Ratio'] == float("-inf")]
| Backtest with KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/ShopRunner/collie_recs/blob/main/tutorials/quickstart.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/ShopRunner/collie_recs#quick-start"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a>
# </td>
# <td>
# <a target="_blank" href="https://raw.githubusercontent.com/ShopRunner/collie_recs/main/tutorials/quickstart.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" /> Download notebook</a>
# </td>
# </table>
# for Collab notebooks, we will start by installing the ``collie_recs`` library
# !pip install collie_recs --quiet
# # ``collie_recs`` ``README`` Quickstart
#
# Below is the code snippet found in the [Collie ``README`` Quickstart](https://github.com/ShopRunner/collie_recs#quick-start).
# +
# Collie quick start: train and evaluate an implicit matrix-factorization
# recommender on the MovieLens 100K dataset.
from collie_recs.cross_validation import stratified_split
from collie_recs.interactions import Interactions
from collie_recs.metrics import auc, evaluate_in_batches, mapk, mrr
from collie_recs.model import MatrixFactorizationModel, CollieTrainer
from collie_recs.movielens import read_movielens_df
from collie_recs.utils import convert_to_implicit


# read in MovieLens 100K data
df = read_movielens_df()

# convert the explicit ratings data to implicit feedback
df_imp = convert_to_implicit(df)

# store data as ``Interactions``
interactions = Interactions(users=df_imp['user_id'],
                            items=df_imp['item_id'],
                            allow_missing_ids=True)

# perform a train/validation split stratified by user
train, val = stratified_split(interactions)

# train an implicit ``MatrixFactorization`` model
model = MatrixFactorizationModel(train=train,
                                 val=val,
                                 embedding_dim=10,
                                 lr=1e-1,
                                 loss='adaptive',
                                 optimizer='adam')
trainer = CollieTrainer(model, max_epochs=10)
trainer.fit(model)
model.eval()

# evaluate the model on the validation set
auc_score, mrr_score, mapk_score = evaluate_in_batches([auc, mrr, mapk], val, model)

print(f'AUC: {auc_score}')
print(f'MRR: {mrr_score}')
print(f'MAP@10: {mapk_score}')
| tutorials/quickstart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12,8)
# -

# Synthetic data: 150 points in 2-D drawn around 4 blob centres.
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=150, n_features=2, centers=4, random_state=1)

plt.scatter(X[:, 0], X[:, 1])

# Deliberately under-cluster with k=2 to illustrate a poor fit.
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2, init='k-means++', n_init=10, random_state=1)
kmeans.fit(X)
labels = kmeans.labels_
plt.scatter(X[:, 0], X[:, 1], c=labels)
# +
crit = []
for k in range(2, 8):
kmeans = KMeans(n_clusters=k, random_state=1)
kmeans.fit(X)
crit.append(kmeans.inertia_)
# -
plt.plot(range(2,8), crit)
kmeans = KMeans(n_clusters=4, init='k-means++', n_init=10, random_state=1)
kmeans.fit(X)
labels = kmeans.labels_
plt.scatter(X[:, 0], X[:, 1], c=labels)
| W4/KMeans.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# # Example: 2D Simulation
# ## Set up Julia environment
# +
import Pkg
# Work in a throw-away environment so the example does not touch the global one.
Pkg.activate(mktempdir())
Pkg.add("CSV")
# Pkg.add("Plots")
Pkg.add("NPZ")
# -
# NOTE(review): `using Plots` below assumes Plots is already installed — the
# corresponding Pkg.add is commented out above.
using UltraDark
using Test
using NPZ
using CSV
using Plots
Threads.nthreads()
# ## Define initial conditions
# Load a function that initialises solitons.
include(joinpath(@__DIR__, "init_soliton.jl"));
# Define a 2D grid.
# +
resol = 128
len = 10.0
# Effectively 2-D: only a single cell along the third direction.
grids = Grids((len, len, len/resol), (resol, resol, 1));
# -
# Add some solitons to the grid. Strictly speaking, these aren't solitonic solutions in 2D, but they'll do for demonstration purposes.
# +
mass = 10
position_1 = [-len/5, -len/5, 0]
position_2 = [-len/5, +len/5, 0]
velocity = [1, 0, 0]
# The two solitons are given opposite phases (0 and π).
phase_1 = 0
phase_2 = π
t0 = 0
add_soliton(grids, mass, position_1, velocity, phase_1, t0)
add_soliton(grids, mass, position_2, velocity, phase_2, t0)
# -
# ## Set options
# +
output_dir = joinpath(pwd(), "output", "2D")
output_times = 0:0.1:5
output_config = OutputConfig(output_dir, output_times);
# -
options = Config.SimulationConfig();
# ## Run simulation
@time simulate(grids, options, output_config)
# ## Plot output
summary = CSV.File(joinpath(output_config.directory, "summary.csv"));
rho_init = npzread("$(output_config.directory)/rho_1.npy");
rho_last = npzread("$(output_config.directory)/rho_$(length(output_times)).npy");
# Shared colour limits so initial and final frames are directly comparable.
lims = extrema(rho_last)
lims = (0, lims[2])
contour(rho_init[:, :, 1]; aspectratio=:equal, ticks=false, clim=lims)
xlims!(1, resol)
contour(rho_last[:, :, 1]; aspectratio=:equal, ticks=false, clim=lims)
xlims!(1, resol)
# Animate the density history, one frame per output time.
anim = @animate for i in 1:length(output_times)
    rho_box = npzread("$(output_config.directory)/rho_$i.npy");
    contour(
        rho_box[:, :, 1];
        aspectratio=:equal,
        clims=lims,
        axis=([], false),
        colorbar=:none,
        title="\$t=$(output_times[i])\$",
    )
end;
gif(anim, fps=10)
| examples/2d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ocp-models
# language: python
# name: ocp-models
# ---
# + active=""
# https://www.programmersought.com/article/84646175252/
# -
import lmdb

# --- Write example ----------------------------------------------------------
# LMDB stores raw bytes, so every key and value is encoded before put().
env = lmdb.open('test')
txn = env.begin(write=True)
txn.put(key=str(1).encode(), value=str(111).encode())
txn.put(key=str('2').encode(), value='bbb'.encode())
txn.put(key='3'.encode(), value='ccc'.encode())
txn.delete(key='1'.encode())
txn.put(key='3'.encode(), value='ddd'.encode())  # overwrites key '3'
# Renamed from `dict`: the original name shadowed the builtin.
records = {'6': 'eee', '4': 'fff', '5': 'ggg'}
for k, v in records.items():
    txn.put(key=k.encode(), value=v.encode())
txn.commit()
env.close()
# + active=""
#
# -
# Read example.  (The original had the bare text "Чтение lmdb" here, which is
# a SyntaxError — it is now a comment.)
import lmdb
env = lmdb.open('train_test/')
txn = env.begin()
# +
# print(txn.get(str(2).encode()))
# -
# BUG FIX: the original re-opened 'data_copy.txt' in text mode ('w') inside the
# loop, which raises TypeError (LMDB values are bytes) and would also truncate
# the file on every iteration, keeping only the last value.  Open once, in
# binary mode, and write every value.
with open('data_copy.txt', 'wb') as f:
    for k, v in txn.cursor():
        f.write(v)
# + active=""
# read pickle
# -
# Peek at the first 10 raw bytes of the pickle file.
i = 0
with open("/Users/korovin/Documents/GitHub/ocp_datasets/oc20_data_mapping.pkl", 'rb') as f:
    while (byte := f.read(1)):
        print(byte)
        i += 1
        if i == 10:
            break
# NOTE(review): `dic` was never defined in this notebook (the file above is
# read byte-by-byte, not unpickled), so the lookup below raised NameError.
# dic['random2181546']
| airi_utils/lmdb_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + code_folding=[0]
# import all relevant libraries dependencies and set up the backend
import time
import IPython
import numpy as np
import matplotlib
import math
import pandas
import pickle
import subprocess
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy
import scipy.special
import scipy.optimize
import scipy.sparse.linalg as LA
import scipy.sparse as sparse
from scipy.io import loadmat
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema
# add the ED.py functions to the notebook
import sys
import hubbard_1d as ED
from matplotlib import animation, rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'],'size':12})
# rc('font',**{'family':'serif','serif':['FreeSerif'],'size':12})
rc('text', usetex=True)
from cycler import cycler
# Custom colour palette used as the default colour cycle for all plots below.
c1_a = '#003AF0'
c2_a = '#008F24'
c3_a = '#FE7B22'
c4_a = '#FE000C'
c5_a = '#FB30F2'
c6_a = '#82E0AA' #'#FFE135'
c7_a = '#9966CC'
c8_a = '#7FFFD4'
c9_a = '#66FF00'
c10_a = '#8B008B'
c11_a = '#000000'
colours = [c1_a,c2_a,c3_a,c4_a,c5_a,c6_a,c7_a,c8_a,c9_a,c10_a,c11_a]
# NOTE(review): forcing the MacOSX backend makes this notebook macOS-only.
matplotlib.rcParams['backend']='MacOSX'
matplotlib.rcParams['savefig.dpi']=250
matplotlib.rcParams['text.usetex']=True
# matplotlib.rcParams['text.latex.unicode']=True
# matplotlib.rcParams['axes.color_cycle'] = [c1_ac2_ac3_ac4_ac5_a]
matplotlib.rcParams['axes.prop_cycle'] = cycler('color',colours)
# matplotlib.rcParams['axes.'] = \in\
# matplotlib.rcParams['legend.fancybox']=True
matplotlib.rcParams['legend.frameon']=False
# matplotlib.rcParams['legend.fontsize']=10
matplotlib.rcParams['figure.figsize'] = (10,8)
# matplotlib.rcParams['axes.color_cycle'] = colors
# matplotlib.rcParams['axes.prop_cycle'] = colors
# + code_folding=[]
# define system parameters for the two-species Hubbard model (dict consumed by hubbard_1d)
p = {}
p['L'] = 2 # system length
p['W'] = 3 # system width
p['N1'] = 3 # number of particles in 1
p['N2'] = 3 # number of particles in 2
p['t1'] = 1.0 # hopping for species 1
p['t2'] = 1.0 # hopping for species 2
p['U12'] = 0.0 # on-site interaction between 1 and 2
p['mu1'] = 0.0 # chemical potential for 1
p['mu2'] = 0.0 # chemical potential for 2
p['t_initial'] = 0.0 # begin of time evolution
p['dt'] = 0.1#1e-2 # time-step
p['t_final'] = 1.0 # end of time evolution
p['N'] = p['L'] * p['W'] # system size
# + code_folding=[0]
# get state table and dimension of Hilberspace
state_table = ED.generate_state_table(p)
print("dim(H) = %i" % (len(state_table)))
# look at particular states
i = 3
state_int = state_table[i]
state = ED.int_to_state(p, state_int)
print("state #%i has the unique id (%i)" % (i, state_int), \
"and looks like:\n", ED.reshape_state(p, state))
state_projection = ED.project_state_into_spinsectors(ED.int_to_state(p, state_table[i]))
print("Decomposed into its spin-compontents it reads: \n")
print("Spin-Up = 1 : ", state_projection[0], "\n")
print("Spin-Up = 2 : ", state_projection[1], "\n")
# construct a certain product state
statelist = [3,0,2,3,0,1]
state_int = ED.state_to_int(p, statelist)
i = state_table.index(state_int)
state = ED.int_to_state(p, state_int)
print("state #%i has the unique id (%i)" % (i,state_int), \
"and looks like:", ED.reshape_state(p, state))
state_projection = ED.project_state_into_spinsectors(ED.int_to_state(p, state_table[i]))
print("Decomposed into its spin-compontents it reads: \n")
print("Spin-Up = 1 : ", state_projection[0], "\n")
print("Spin-Up = 2 : ", state_projection[1], "\n")
# + code_folding=[0]
# Check if Fermi-Sign works properly
# It should count the occupied sites from (i,sigma) to (j, tau) (excluding both the former, and the latter)
# If the number is odd, we get a -ive sign, else +ive
# Why? Because we only have correlators of the form <c^{dagger}_{i,sigma}c_{j,tau}> (c.f. hopping and nk
# matrices) and rewriting this in terms of Jordan-Wigner strings gives <a^{dagger}_{i,sigma}(JW)a_{j,tau}>
# where JW = F_{i,alpha>=sigma}F_{i+1}...F_{j-1}F{j,beta<tau}, now at the beginning of the correlator we thus
# have a^{dagger}_{i,sigma}F_{i,sigma} and since a^{dagger} only gives sth. non-zero if it acts on an empty site
# |0>, F_{i,sigma}|0> = 1, so we can simplify this to get a^{dagger}_{i,sigma}F_{i,sigma} = a^{dagger}_{i,sigma}
# JW -> Thus F_{i,alpha>sigma}F_{i+1}...F_{j-1}F{j,beta<tau} and therefore ONLY includes F matrices which are
# strictly between (i,sigma) and (j,tau)
# select state
i = 25
state_int = state_table[i]
state = ED.int_to_state(p, state_int)
state_projection = ED.project_state_into_spinsectors(ED.int_to_state(p, state_int))
reshaped_state_proj = ED.reshape_projected_states(p, state_projection)
print("state id: ", state_int, "\n")
print("state = ", state, "\n")
print("spin1 = \n", reshaped_state_proj[0], "\n")
print("spin2 = \n", reshaped_state_proj[1], "\n")
print('For (spin, site)')
i = 0
sigma = 1
for tau in [1, 2]:
for j in range(p['N']):
print("b/w (%i,%i) and (%i,%i)" % (sigma, i, tau, j),
" => Fsgn = ", ED.fermisign(state_projection, i, j, sigma, tau))
# + code_folding=[0]
# do GS search
E0, GS, state_table = ED.calculate_gs(p)
print("E0 = %.6g" % (E0))
# + code_folding=[0]
# do GS evolution - should see no dynamics, all is constant in time (since we're in an eigenstate)
tic = time.time()
sim, state_table = ED.evolve(p, state_table, GS, kind='ket', correlation_measurement=True)
toc = time.time()
print("Elapsed time = ", toc-tic, "s")
# + code_folding=[0]
# Plot Results
fig = plt.figure(figsize=(10,8))
i = 3
plt.plot(sim['Time'], sim['Re(N1 Site %i)' % (i)], '-', label=r'$n_{i,1}$')
plt.plot(sim['Time'], sim['Re(N12 Site %i)' % (i)], '-', label=r'$n_{i,1}n_{i,2}$')
plt.plot(sim['Time'], sim['Re(N2 Site %i)' % (i)], '--', label=r'$n_{i,2}$')
plt.legend()
plt.title(r'$\textrm{Site} ~ i = %i$' % (i))
plt.ylim([0.0, 0.6])
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
if 'k' in sim:
fig = plt.figure(figsize=(10,8))
for m in range(p['N']):
plt.plot(sim['Time'], sim['nk1'][m,:], '-', label=r'$n_{k_{%i},1}$' % (m+1))
plt.legend()
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
fig = plt.figure(figsize=(10,8))
for i in range(0, len(sim['Time']), 25):
plt.plot(sim['k']/np.pi, sim['nk1'][:,i], '.-', label=r'$t=%.2f$' % (sim['Time'][i]))
plt.legend()
plt.xlabel(r'$k/\pi$')
plt.ylabel(r'$n_{k,1}$')
plt.show()
plt.close()
# + code_folding=[0]
# do evolution from a product state (not an eigenstate) - should see dynamics
tic = time.time()
state = ED.int_to_state(p, state_table[5])
# sim, state_table = ED.evolve(p, state_table, state_table[5], kind="int", correlation_measurement=True)
sim, state_table = ED.evolve(p, state_table, state, kind="list", correlation_measurement=False,
trotterised=False)
toc = time.time()
print("Elapsed time = ", toc-tic, "s")
# + code_folding=[0]
# Plot Results
fig = plt.figure(figsize=(10,8))
i = 3
plt.plot(sim['Time'], sim['Re(N1 Site %i)' % (i)], '-', label=r'$n_{i,1}$')
plt.plot(sim['Time'], sim['Re(N12 Site %i)' % (i)], '-', label=r'$n_{i,1}n_{i,2}$')
plt.plot(sim['Time'], sim['Re(N2 Site %i)' % (i)], '--', label=r'$n_{i,2}$')
plt.legend()
plt.title(r'$\textrm{Site} ~ i = %i$' % (i))
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
if 'k' in sim:
fig = plt.figure(figsize=(10,8))
for m in range(p['N']):
plt.plot(sim['Time'], sim['nk1'][m,:], '-', label=r'$n_{k_{%i},1}$' % (m+1))
plt.legend()
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
fig = plt.figure(figsize=(10,8))
for i in range(0, len(sim['Time']), 100):
plt.plot(sim['k']/np.pi, sim['nk1'][:,i], '.-', label=r'$t=%.2f$' % (sim['Time'][i]))
plt.legend()
plt.xlabel(r'$k/\pi$')
plt.ylabel(r'$n_{k,1}$')
plt.show()
plt.close()
# + code_folding=[0]
# same evolution but trotterised
tic = time.time()
state = ED.int_to_state(p, state_table[5])
# sim, state_table = ED.evolve(p, state_table, state_table[5], kind="int", correlation_measurement=True)
sim_trott, state_table = ED.evolve(p, state_table, state, kind="list", correlation_measurement=False,
trotterised=True)
toc = time.time()
print("Elapsed time = ", toc-tic, "s")
sim_errors = ED.calculate_average_errors(p, sim, sim_trott)
# + code_folding=[0, 13, 21]
# Plot the averaged errors between exact and trotterised evolution
# (sim_errors comes from ED.calculate_average_errors above).
fig = plt.figure(figsize=(10,8))
i = 3
plt.plot(sim_errors['Time'], sim_errors['Re(N1)'], '-', label=r'$n_1 err$')
plt.plot(sim_errors['Time'], sim_errors['Re(N12)'], '-', label=r'$n_1n_2 err$')
plt.plot(sim_errors['Time'], sim_errors['Re(N2)'], '--', label=r'$n_2 err$')
plt.legend()
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
# NOTE(review): the branch below tests `sim_errors` but then plots `sim['nk1']`
# — this looks like a copy-paste from the "Plot Results" cell; confirm whether
# the momentum-space *errors* were intended here instead.
if 'k' in sim_errors:
    fig = plt.figure(figsize=(10,8))
    for m in range(p['N']):
        plt.plot(sim['Time'], sim['nk1'][m,:], '-', label=r'$n_{k_{%i},1}$' % (m+1))
    plt.legend()
    plt.xlabel(r'$Jt/\hbar$')
    plt.show()
    plt.close()
    fig = plt.figure(figsize=(10,8))
    for i in range(0, len(sim['Time']), 100):
        plt.plot(sim['k']/np.pi, sim['nk1'][:,i], '.-', label=r'$t=%.2f$' % (sim['Time'][i]))
    plt.legend()
    plt.xlabel(r'$k/\pi$')
    plt.ylabel(r'$n_{k,1}$')
    plt.show()
    plt.close()
# + code_folding=[0]
# do Quench evolution - find GS to initial Hamiltonian and evolve with final Hamiltonian
# initial Hamiltonian U12 = 0.0
p['U12'] = 0.0
E0, GS, state_table = ED.calculate_gs(p)
# evolve with final Hamiltonian U12 = -2.0
p['U12'] = -2.0
tic = time.time()
sim, state_table = ED.evolve(p, state_table, GS, kind='ket', correlation_measurement=True)
toc = time.time()
print("Elapsed time = ", toc-tic, "s")
# + code_folding=[0, 14, 22]
# Plot Results
fig = plt.figure(figsize=(10,8))
i = 3
plt.plot(sim['Time'], sim['Re(N1 Site %i)' % (i)], '-', label=r'$n_{i,1}$')
plt.plot(sim['Time'], sim['Re(N12 Site %i)' % (i)], '-', label=r'$n_{i,1}n_{i,2}$')
plt.plot(sim['Time'], sim['Re(N2 Site %i)' % (i)], '--', label=r'$n_{i,2}$')
plt.legend()
plt.title(r'$\textrm{Site} ~ i = %i$' % (i))
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
if 'k' in sim:
fig = plt.figure(figsize=(10,8))
for m in range(p['N']):
plt.plot(sim['Time'], sim['nk1'][m,:], '-', label=r'$n_{k_{%i},1}$' % (m+1))
plt.legend()
plt.xlabel(r'$Jt/\hbar$')
plt.show()
plt.close()
fig = plt.figure(figsize=(10,8))
for i in range(0, len(sim['Time']), 50):
plt.plot(sim['k']/np.pi, sim['nk1'][:,i], '.-', label=r'$t=%.2f$' % (sim['Time'][i]))
plt.legend()
plt.xlabel(r'$k/\pi$')
plt.ylabel(r'$n_{k,1}$')
plt.show()
plt.close()
# -
| exactdiagonalisation/.ipynb_checkpoints/hubbard_1d-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pathlib
import pandas as pd
from sklearn.metrics import classification_report
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
# !pip install ktrain
import ktrain
from ktrain import text
# +
# Check that the paths for the input data are valid before doing anything else.
train_path="../Train.csv"
test_path="../Test.csv"
tr_path= pathlib.Path(train_path)
te_path=pathlib.Path(test_path)
if tr_path.exists ():
    print("Train data path set.")
else:
    raise SystemExit("Train data path does not exist.")
if te_path.exists ():
    print("Test data path set.")
else:
    raise SystemExit("Test data path does not exist.")
# -
# Load the train data as a raw NumPy array (UTF-16, ';'-separated, no header).
# Column layout assumed below: column 1 = label, column 2 = text — TODO confirm.
train_df=pd.read_csv(train_path, encoding='utf-16', sep=';', header=None).values
#train_df.head()  # note: train_df is an ndarray after .values, so .head() no longer applies
# Load the test data the same way.
test_df=pd.read_csv(test_path, encoding='utf-16', sep=';', header=None).values
#test_df.head()
# +
# Tokenise/encode the texts for BERT: column 2 holds the text, column 1 the label.
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_array(train_df[:,2], train_df[:,1], x_test=test_df[:,2], y_test=test_df[:,1],
                                                                      maxlen=500, preprocess_mode='bert')
# -
# Build a BERT text classifier and wrap it in a ktrain learner.
model = text.text_classifier('bert', (x_train, y_train) , preproc=preproc)
learner = ktrain.get_learner(model,
                             train_data=(x_train, y_train),
                             val_data=(x_test, y_test),
                             batch_size=6)
# Sweep the learning rate and plot loss vs. LR to pick a good value.
learner.lr_find()
learner.lr_plot()
# 2e-5 is one of the LRs recommended by Google and is consistent with the plot above.
learner.autofit(2e-5, early_stopping=5)
model.save("model.h5")
# Let's make some predictions on new data.
predictor = ktrain.get_predictor(learner.model, preproc)
data=test_df[:,2].tolist()
label=test_df[:,1].tolist()
# +
# Run the predictor over every test text and collect the misclassified
# examples so they can be saved for manual review.
correct = 0
wrong = 0
total = len(data)
true_lab = []
pred_lab = []
# Renamed from `text`: the original name shadowed the `ktrain.text` module
# imported at the top of the notebook.
wrong_texts = []
for i, dt in enumerate(data):
    result = predictor.predict(dt)
    if result != label[i]:
        wrong_texts.append(dt)
        pred_lab.append(result)
        true_lab.append(label[i])
        wrong += 1
    else:
        correct += 1
name_dict = {
    'Name': wrong_texts,
    'Gold Label' : true_lab,
    'Predicted Label': pred_lab
}
wrong_data = pd.DataFrame(name_dict)
wrong_data.to_csv("wrong_results.csv", sep=';')
# -
# Class names for the report; assumes the label set sorts to
# ['negative', 'neutral', 'positive'] — TODO confirm via preproc.get_classes().
names = ['negative', 'neutral', 'positive']
y_pred = predictor.predict(data)
# BUG FIX: test_df is a NumPy array, so the original `test_df[1]` selected
# *row* 1 rather than the label column; take column 1 (the labels) instead.
y_true = test_df[:, 1]
print(classification_report(y_true, y_pred, target_names=names))
print("Correct: ", correct,"/",total,"\nWrong: ", wrong,"/",total)
| BERT Sentiment Analysis for SE/notebook/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
'''
PaddlePaddle implementation of a simple fully-connected autoencoder.
'''
import paddle
from paddle import nn, optimizer
# Hyper-parameters.
INPUT_SIZE = 784  # 28x28 flattened Fashion-MNIST image
HIDDEN_SIZE = 256  # bottleneck size
EPOCHS = 5
BATCH_SIZE = 64
LEARNING_RATE = 1e-3
# Encoder (784 -> 256) followed by decoder (256 -> 784).
paddle_model = nn.Sequential(
    nn.Linear(INPUT_SIZE, HIDDEN_SIZE),
    nn.ReLU(),
    nn.Linear(HIDDEN_SIZE, INPUT_SIZE)
)
model = paddle.Model(paddle_model)
# Reconstruction objective: Adam optimiser + mean-squared-error loss.
model.prepare(optimizer=optimizer.Adam(learning_rate=LEARNING_RATE, parameters=model.parameters()),
              loss=nn.MSELoss())
# +
import pandas as pd
# Read the Fashion-MNIST train and test CSV files with pandas.
train_data = pd.read_csv('../datasets/fashion_mnist/fashion_mnist_train.csv')
test_data = pd.read_csv('../datasets/fashion_mnist/fashion_mnist_test.csv')
# Split the pixel features off the training data (column 0 is the class label).
X_train = train_data[train_data.columns[1:]]
# Same for the test data (re-uses train_data.columns — both files share a schema).
X_test = test_data[train_data.columns[1:]]
# +
from sklearn.preprocessing import StandardScaler
# Initialise the standardisation scaler.
ss = StandardScaler()
# Fit on and transform the training features.
X_train = ss.fit_transform(X_train)
# Apply the same scaling to the test features.
X_test = ss.transform(X_test)
# +
from paddle.io import TensorDataset
X_train = paddle.to_tensor(X_train.astype('float32'))
# Build the training dataset: an autoencoder's target is its own input.
train_dataset = TensorDataset([X_train, X_train])
# Train: training dataset, number of epochs, and per-step batch size.
model.fit(train_dataset, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)
# +
import numpy as np
# Reconstruct the first test image; reshape both to 28x28 for display.
test_sample = X_test[:1].reshape((28, 28))
reconstructed_features = model.predict(X_test[:1].astype('float32'))
reconstructed_sample = np.array(reconstructed_features[0]).reshape((28, 28))
# +
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 100
# Show the original image.
plt.imshow(test_sample)
plt.show()
# +
# Show the autoencoder's reconstruction.
plt.imshow(reconstructed_sample)
plt.show()
| Chapter_6/Section_6.5.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
plt.style.use('ggplot')
# Suppress warning messages.
import warnings
# Common matplotlib plotting configuration.
mpl.rcParams["font.family"] = "SimHei"
# SimHei font so the Chinese labels in this notebook render correctly.
mpl.rcParams["axes.unicode_minus"]=False
# Display minus signs correctly with the CJK font.
plt.rcParams['font.sans-serif']=['SimHei']
# Display Chinese labels correctly; show figures inline.
# %matplotlib inline
warnings.filterwarnings("ignore")
class Mycount(object):
    """Quick EDA helper: summarises and plots the object (categorical) and
    numeric columns of a pandas DataFrame supplied at construction time."""
    def __init__(self,df):
        # Column names split by dtype: object columns and numeric columns.
        self.onames=df.select_dtypes(include=['O']).columns.tolist()
        self.onum=len(self.onames)
        self.nnames=df.select_dtypes(include=['number']).columns.tolist()
        self.nnum=len(self.nnames)
        # Reports "<onum> object columns, <nnum> numeric columns".
        print('object 类型数据{}列,数字类型数据{}列'.format(self.onum,self.nnum))
    def odescribe(self,df):
        """Return a DataFrame listing the most/least frequent value (and its
        share) for every object column."""
        __deslis__=['最高','最低']  # row labels: "highest" / "lowest"
        count_name=[]
        count_rate=[]
        for i in self.onames:
            lis_ma=df[i].value_counts().idxmax()
            rate_ma=df[i].value_counts(normalize=True).max()
            lis_mi=df[i].value_counts().idxmin()
            rate_mi=df[i].value_counts(normalize=True).min()
            count_name+=[lis_ma,lis_mi]
            count_rate+=[rate_ma,rate_mi]
        # MultiIndex (column, highest/lowest); frame columns: name / proportion.
        __countIndex__=pd.MultiIndex.from_product([self.onames,__deslis__],names=('类别', '统计项'))
        re=pd.DataFrame({'名称':count_name,'比例':count_rate},index=__countIndex__)
        return re
    def plt_cov(self,df):
        """Heatmap of the covariance matrix of the numeric columns."""
        plt.figure(figsize=(6,6))
        plt.title('MY COV',y=1.0,size=16)
        sns.heatmap(df[self.nnames].cov(),square=True,linewidths=0.1,linecolor='white',annot=True)
        plt.show()
    def plt_corr(self,df):
        """Heatmap of the correlation matrix of the numeric columns."""
        plt.figure(figsize=(6,6))
        plt.title('MY CORR',y=1.0,size=16)
        sns.heatmap(df[self.nnames].corr(),square=True,linewidths=0.1,linecolor='white',annot=True)
        plt.show()
    def plt_violin_box(self,df):
        """Side-by-side violin plot and box plot of the numeric columns."""
        __plt_data__=df.select_dtypes(include=['number'])
        plt.figure(figsize = [10, 5])
        # left plot: violin plot
        plt.subplot(1, 2, 1)
        ax1 = sns.violinplot(data=__plt_data__)
        ax1.set_title('数据小提琴图',y=1.05,size=16)
        # right plot: box plot
        plt.subplot(1, 2, 2)
        sns.boxplot(data=__plt_data__)
        plt.title('数据箱图',y=1.05,size=16)
        plt.ylim(ax1.get_ylim()) # set y-axis limits to be same as left plot
        plt.show()
    def plt_crossbar(self,df,bywho):
        """Grouped bar chart of every other object column cross-tabulated
        against the column *bywho*."""
        __data_list__=[x for x in self.onames if x!=bywho]
        for i in __data_list__:
            pd.crosstab(index=df[i],columns=df[bywho]).plot(kind='bar')
            plt.xticks(rotation=0)
            plt.show()
    def plt_obar(self,df):
        """Bar chart of value counts for each object column (figure width
        scales with the number of distinct values)."""
        for i in self.onames:
            x=df[i].value_counts().index
            __n__=x.shape[0]
            y=df[i].value_counts()
            plt.figure(figsize=(__n__,4))
            ax = sns.barplot(x=x,y=y,palette="Blues_d")
            plt.show()
    def plt_opie(self,df):
        """Pie chart of value shares for each object column."""
        for i in self.onames:
            plt.figure(figsize=(5, 5)) # square canvas so the pie is a circle
            a=df[i].value_counts()
            b=a.index.tolist()
            plt.axes(aspect='equal')
            plt.pie(a,labels=b,autopct='%1.2f%%',pctdistance=0.6,labeldistance=1.1)
            plt.title(i+'分布') # "<column> distribution"
            plt.show() # render the figure
# -
path=r'E:\Machine Learning\pandas\joyful-pandas-master\data\Employee2.csv'
df1=pd.read_csv(path)
df1.sample(5)
plt.style.use('ggplot')
df1['Company'].value_counts().plot(kind='bar',title='Company')
df1['Company'].value_counts().plot(kind='pie',title='Company')
mc1=Mycount(df1)
mc1.plt_corr(df1)
mc=Mycount(df1)
mc.plt_obar(df1)
mc.plt_obar(data)
# +
plt.figure(figsize=(5, 5)) # 将画布设定为正方形,则绘制的饼图是正圆
a=data['年级'].value_counts()
b=a.index.tolist()
plt.axes(aspect='equal')
plt.pie(a,labels=b,autopct='%1.2f%%',pctdistance=0.6,labeldistance=1.1)
plt.title('课程类型分布') # title的使用
plt.legend()
plt.show() # 可视化呈现
# -
a.index
data['年级'].value_counts().name
a=data.groupby('生活区域')['消费用途'].value_counts()
a=pd.crosstab(index=data['生活区域'],columns=data['消费用途'])
a=pd.crosstab(index=data['消费用途'],columns=data['生活区域'])
pd.crosstab(index=data['消费用途'],columns=data['生活区域']).plot(kind='bar')
plt.xticks(rotation=0)
plt.show()
g = sns.catplot(x="消费用途", y="total_bill",hue="smoker",data=a)
g = sns.catplot(x="time", y="pulse", hue="kind", data=exercise)
g = sns.catplot(x="消费用途", y="生活区域", data=a)
plt.figure(figsize=(5,4))
x=data['消费用途'].value_counts().index
y=data['消费用途'].value_counts()
ax = sns.barplot(x=x,y=y,palette="Blues_d")
plt.show()
mc2=Mycount(data)
mc2.plt_obar(data)
data['消费用途'].value_counts().index
file_path='E:/data analy/客户数据/夏-交付文件/'
raw_data=pd.read_excel(file_path+'大数据.xlsx')
raw_data.shape
# +
mapping_dict={'年级':{1:'大一',
2:'大二',
3:'大三',
4:'大四'},
'生活区域':{
1:'城镇',
2:'农村'},
'消费来源':{
1:'父母',
2:'打工',
3:'奖学金',
4:'其他'},
'消费用途':{
1:'饮食方面',
2:'学习方面',
3:'服装方面',
4:'日化用品',
5:'其他'},
'零食花费':{
1:'50元以下',
2:'50—80元',
3:'80—100元',
4:'100—120元',
5:'120元以上'
},
'娱乐消费':{
1:'50元以下',
2:'50-100元',
3:'100-150元',
4:'150-200元',
5:'200元以上',
},
'影响购物因素':{
1:'质量',
2:'外表美观',
3:'价格',
4:'品牌',
5:'其他'
},
'消费看法':{
1:'消费过高',
2:'消费偏高',
3:'消费适中',
4:'比较节俭'
},
'增加消费一':{
1:'伙食',
2:'服装',
3:'学习费用',
4:'零食',
5:'娱乐',
6:'日用品',
7:'其他'
},
'增加消费二':{
1:'伙食',
2:'服装',
3:'学习费用',
4:'零食',
5:'娱乐',
6:'日用品',
7:'其他'
},
'增加消费三':{
1:'伙食',
2:'服装',
3:'学习费用',
4:'零食',
5:'娱乐',
6:'日用品',
7:'其他'
},
'消费观点':{
1:'以经济实惠为主',
2:'兼顾实惠和商标',
3:'尽量追求高标准',
4:'看心情'
},
'购物方式':{
1:'全网购',
2:'全线下购买',
3:'部分网上,部分线下'
},
'计划消费':{
1:'有计划',
2:'没有计划',
3:'有部分计划'
}
}
data=raw_data.replace(mapping_dict)
data.head()
# -
import numpy as np
import pandas as pd
# BUG FIX: Mycount.__init__ requires the dataframe argument, and the class
# exposes the numeric column names as the attribute `nnames` — there is no
# `nname()` method, so the original calls raised TypeError/AttributeError.
mc = Mycount(data)
mc.plt_crossbar(data, '生活区域')  # original passed '' (not a column); '生活区域' matches the crosstabs above — confirm intent
mc.nnames
# +
fig = plt.figure(figsize=(6, 4))
bplot = plt.boxplot(data[['月消费', '合理月消费']].values,
                    notch=False,
                    vert=True, # vertical box alignment
                    patch_artist=True) # fill with color
colors = ['pink', 'lightblue', 'lightgreen']
for patch, color in zip(bplot['boxes'], colors[:2]):
    patch.set_facecolor(color)
plt.xticks([y + 1 for y in range(2)], ['月消费', '合理月消费'])  # label the two boxes with their column names
plt.title('Box plot')
plt.show()
# +
fig, axes = plt.subplots(figsize=(12, 5))
all_data = data[['月消费', '合理月消费']].values
axes.violinplot(all_data,
showmeans=False,
showmedians=True
)
axes.set_title('violin plot')
# adding horizontal grid lines
axes.yaxis.grid(True)
axes.set_xticks([y + 1 for y in range(2)], )
axes.set_xlabel('xlabel')
axes.set_ylabel('ylabel')
plt.setp(axes, xticks=[y + 1 for y in range(2)],
xticklabels=['月消费', '合理月消费'],
)
plt.show()
# -
import seaborn as sns
sns.violinplot(data = data[mc.nname(data)])
plt.show()
# +
plt.figure(figsize = [10, 5])
# left plot: violin plot
plt.subplot(1, 2, 1)
ax1 = sns.violinplot(data=data[mc.nname(data)])
# right plot: box plot
plt.subplot(1, 2, 2)
sns.boxplot(data=data[mc.nname(data)])
plt.ylim(ax1.get_ylim()) # set y-axis limits to be same as left plot
plt.show()
# -
data[mc.nname(data)]
col=list('abcde')
data=pd.DataFrame(a,columns=col)
data
data.query('a>0.5')
data.max(axis=1)
data.mean(axis=1)
data.max(axis=0)
| Mycount.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip3 install networkx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import base
from iotfunctions import bif
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
from iotfunctions import estimator
import datetime as dt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# %matplotlib inline
import pprint as pprint
import json
import networkx as nx
# +
with open('credentials_as_monitor_demo.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db_schema=None
db = Database(credentials=credentials)
print (db)
# -
# load all entity types - similar to the explore tab on the dashboard
metadata = db.http_request(object_type='allEntityTypes', object_name='', request='GET', payload={}, object_name_2='')
pp = pprint.PrettyPrinter(depth = 3)
metadataJson = json.loads(metadata)
pp.pprint(metadataJson)
# get my entity type
et = db.http_request(object_type='entityType', object_name='Z_Robot_Function', request='GET', payload={}, object_name_2='')
# explore my data
etJson = json.loads(et)
pp.pprint(etJson)
engineInput = db.http_request(object_type='engineInput', object_name='z_robot_function', request='GET', payload={}, object_name_2='')
engineInputJson = json.loads(engineInput)
pp._depth = 5
pp.pprint(engineInputJson)
# +
# Build a directed dependency graph of the entity type's data items:
# an edge A -> B means derived metric B is computed from data item A.
dataItems = engineInputJson['dataItems']
G=nx.DiGraph()
for el in dataItems:
    # Only raw and derived metrics are of interest here.
    if el['type'] not in ('METRIC', 'DERIVED_METRIC'):
        continue
    G.add_node(el.get('name'))
    kpiFctDto = el['kpiFunctionDto']
    # `is not None` instead of `!= None` (idiomatic); skip plain-string payloads.
    if kpiFctDto is not None and not isinstance(kpiFctDto, str):
        kpiInput = kpiFctDto['input']
        for kpiInputEl in kpiInput:
            attr = kpiInput.get(kpiInputEl)
            #print (kpiInputEl, attr)
            # String-valued inputs reference other data items by name.
            if isinstance(attr, str):
                for el2 in dataItems:
                    if el2.get('name') == attr:
                        G.add_edge(el2.get('name'), el.get('name'))
# Drop metrics that feed nothing and depend on nothing.
G.remove_nodes_from(list(nx.isolates(G)))
#pp.pprint(engineInputJson['dataItems'])
plt.figure(figsize=(7,5))
pos = nx.spring_layout(G)
nx.draw(G, with_labels = True, font_size=14)
#for p in pos: # raise text positions
#    pos[p][1] += 0.07
#nx.draw_networkx_labels(G, pos)
plt.show()
# -
| WhatIsInMyPipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dbckz/dissertation/blob/master/notebooks/perspective_emoji_evaluation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="vyIcOpopc5Ni"
import pandas as pd
import ast
import os
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from tqdm import tqdm
from google.colab import drive
import plotly.graph_objects as go
import dateutil
# + colab={"base_uri": "https://localhost:8080/"} id="KMCfQm1Xc80Q" outputId="92fc5f49-f564-4fca-d864-a6e90aab4bbe"
# Mount Google Drive so the data under MyDrive is reachable.
drive.mount('/content/drive')
# + id="n9YlP0YlnCPw"
# Up the pandas display limits so printed dataframes aren't so truncated
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_info_rows', 100)
pd.set_option('display.max_info_columns', 100)
# + id="8MIF_TtWdDbv"
root_path = "/content/drive/MyDrive/University/Dissertation/data_collection"
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="f6_dp-R4eEOY" outputId="dcffc56c-28f7-4a15-9b1c-a1d0c149f72a"
# Manually-reviewed emoji tweets.
reviewed = pd.read_csv(root_path + "/emoji_tweets_reviewed.csv")
reviewed
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="4E6JXgCjfC_F" outputId="75966c3d-4888-4256-cb0c-d5f808914873"
# NOTE(review): this reads the *same* file as `reviewed` above and then
# displays `reviewed` — probably meant to load the original (pre-review)
# tweets file and display `original`; confirm the intended path.
original = pd.read_csv(root_path + "/emoji_tweets_reviewed.csv")
reviewed
# + id="16fH_stSf_VC"
# Perspective API results: keep only the tweet id and over-threshold flag.
persp_df = pd.read_csv(root_path + '/regression_tweets.csv',
                       usecols = [
                           'tweet_id',
                           'persp_over_threshold'
                       ])
# + id="2qpy1BgkjXHA"
# Keep only the tweets flagged over the Perspective score threshold.
persp_df = persp_df[persp_df['persp_over_threshold']]
# + colab={"base_uri": "https://localhost:8080/"} id="RVAK4ayNlFGF" outputId="28265eb0-80db-4b88-9cd4-8d685e527cac"
# Texts of reviewed tweets that Perspective also flagged.
persp_df.merge(reviewed, 'inner', 'tweet_id')['tweet_text']
| notebooks/perspective_emoji_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# **Chapter 15 – Autoencoders**
# + [markdown] deletable=true editable=true
# _This notebook contains all the sample code and solutions to the exercices in chapter 15._
# + [markdown] deletable=true editable=true
# # Setup
# + [markdown] deletable=true editable=true
# First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
# + deletable=true editable=true
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd
import os
import sys
# to make this notebook's output stable across runs
rnd.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "autoencoders"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as images/<CHAPTER_ID>/<fig_id>.png.

    Saves a 300-dpi PNG, optionally applying tight_layout first.
    """
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # Robustness fix: savefig fails if the target directory does not exist yet.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# + [markdown] deletable=true editable=true
# A couple utility functions to plot grayscale 28x28 image:
# + deletable=true editable=true
def plot_image(image, shape=[28, 28]):
    """Render *image* (flattened or 2-D) as a grayscale picture without axes."""
    pixels = image.reshape(shape)
    plt.imshow(pixels, cmap="Greys", interpolation="nearest")
    plt.axis("off")
# + deletable=true editable=true
def plot_multiple_images(images, n_rows, n_cols, pad=2):
    """Tile ``n_rows * n_cols`` grayscale images into one big padded image and show it.

    Parameters
    ----------
    images : ndarray of shape (n_images, height, width)
        Requires ``n_rows * n_cols <= len(images)``.
    n_rows, n_cols : int
        Grid layout.
    pad : int, optional
        Padding (in pixels) around each tile; shown white after normalization.
    """
    images = images - images.min()  # make the minimum == 0, so the padding looks white
    h, w = images.shape[1:]  # per-image height (rows) and width (cols)
    # BUG FIX: the original swapped height/width inside the slice offsets
    # (row offsets used the width, column offsets the height), which only
    # worked because MNIST images are square. Row slices now use h, column
    # slices use w, so rectangular images tile correctly too.
    image = np.zeros(((h + pad) * n_rows + pad, (w + pad) * n_cols + pad))
    for y in range(n_rows):
        for x in range(n_cols):
            image[(y * (h + pad) + pad):(y * (h + pad) + pad + h),
                  (x * (w + pad) + pad):(x * (w + pad) + pad + w)] = images[y * n_cols + x]
    plt.imshow(image, cmap="Greys", interpolation="nearest")
    plt.axis("off")
# + [markdown] deletable=true editable=true
# # PCA with a linear Autoencoder
# + [markdown] deletable=true editable=true
# Build 3D dataset:
# + deletable=true editable=true
# Build a small synthetic 3D dataset: m points on a noisy curve where the
# third coordinate is (approximately) a linear combination of the first two.
# NOTE: the random draws below happen in exactly the same order as before,
# so with seed 4 the resulting X_train is bit-for-bit identical.
rnd.seed(4)
m = 100
w1, w2 = 0.1, 0.3
noise = 0.1
theta = rnd.rand(m) * 3 * np.pi / 2 - 0.5
col0 = np.cos(theta) + np.sin(theta)/2 + noise * rnd.randn(m) / 2
col1 = np.sin(theta) * 0.7 + noise * rnd.randn(m) / 2
col2 = col0 * w1 + col1 * w2 + noise * rnd.randn(m)
X_train = np.column_stack([col0, col1, col2])
# + [markdown] deletable=true editable=true
# Normalize the data:
# + deletable=true editable=true
from sklearn.preprocessing import StandardScaler
# Standardize each of the 3 features to zero mean and unit variance
# (fit_transform both fits the scaler on X_train and transforms it in place).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# + [markdown] deletable=true editable=true
# Going to need TensorFlow...
# + deletable=true editable=true
import tensorflow as tf
# + [markdown] deletable=true editable=true
# Now let's build the Autoencoder...
# + [markdown] deletable=true editable=true
# Note: instead of using the `fully_connected()` function from the `tensorflow.contrib.layers` module (as in the book), we now use the `dense()` function from the `tf.layers` module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while `tf.layers` is part of the official API. As you will see, the code is mostly the same.
#
# The main differences relevant to this chapter are:
# * the `scope` parameter was renamed to `name`, and the `_fn` suffix was removed in all the parameters that had it (for example the `activation_fn` parameter was renamed to `activation`).
# * the `weights` parameter was renamed to `kernel` and the weights variable is now named `"kernel"` rather than `"weights"`,
# * the bias variable is now named `"bias"` rather than `"biases"`,
# * the default activation is `None` instead of `tf.nn.relu`
# + deletable=true editable=true
# Linear autoencoder (no activation): with an MSE objective this performs PCA,
# projecting the 3D input onto the 2D coding space.
tf.reset_default_graph()
n_inputs = 3
n_hidden = 2 # codings
n_outputs = n_inputs
learning_rate = 0.01
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# tf.layers.dense defaults to no activation, so both layers are linear.
hidden = tf.layers.dense(X, n_hidden)
outputs = tf.layers.dense(hidden, n_outputs)
mse = tf.reduce_mean(tf.square(outputs - X))
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# + deletable=true editable=true
# Full-batch training: every iteration feeds the whole X_train.
n_iterations = 10000
codings = hidden
with tf.Session() as sess:
    init.run()
    for iteration in range(n_iterations):
        training_op.run(feed_dict={X: X_train})
    codings_val = codings.eval(feed_dict={X: X_train})
# + deletable=true editable=true
# Scatter-plot the 2D codings (the PCA-like projection of the 3D data).
fig = plt.figure(figsize=(4,3))
plt.plot(codings_val[:,0], codings_val[:, 1], "b.")
plt.xlabel("$z_1$", fontsize=18)
plt.ylabel("$z_2$", fontsize=18, rotation=0)
save_fig("linear_autoencoder_pca_plot")
plt.show()
# + [markdown] deletable=true editable=true
# # Stacked Autoencoders
# + [markdown] deletable=true editable=true
# Let's use MNIST:
# + deletable=true editable=true
from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST (if needed) to /tmp/data/ and exposes train/test splits
# as flattened 784-float images.
mnist = input_data.read_data_sets("/tmp/data/")
# + [markdown] deletable=true editable=true
# ## Train all layers at once
# + [markdown] deletable=true editable=true
# Let's build a stacked Autoencoder with 3 hidden layers and 1 output layer (ie. 2 stacked Autoencoders). We will use ELU activation, He initialization and L2 regularization.
# + [markdown] deletable=true editable=true
# Note: since the `tf.layers.dense()` function is incompatible with `tf.contrib.layers.arg_scope()` (which is used in the book), we now use python's `functools.partial()` function instead. It makes it easy to create a `my_dense_layer()` function that just calls `tf.layers.dense()` with the desired parameters automatically set (unless they are overridden when calling `my_dense_layer()`).
# + deletable=true editable=true
# Stacked autoencoder (300-150-300) trained end-to-end with ELU activations,
# He initialization and L2 weight regularization.
tf.reset_default_graph()
from functools import partial
n_inputs = 28*28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
initializer = tf.contrib.layers.variance_scaling_initializer() # He initialization
#Equivalent to:
#initializer = lambda shape, dtype=tf.float32: tf.truncated_normal(shape, 0., stddev=np.sqrt(2/shape[0]))
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# partial() pre-binds the shared layer options so each call below stays short.
my_dense_layer = partial(
    tf.layers.dense,
    activation=tf.nn.elu,
    kernel_initializer=initializer,
    kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_reg))
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3 = my_dense_layer(hidden2, n_hidden3)
# Output layer is linear (activation=None): reconstructions are unbounded reals.
outputs = my_dense_layer(hidden3, n_outputs, activation=None)
mse = tf.reduce_mean(tf.square(outputs - X))
# The kernel_regularizer terms are collected here and added to the MSE.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([mse] + reg_losses)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + [markdown] deletable=true editable=true
# Now let's train it! Note that we don't feed target values (`y_batch` is not used). This is unsupervised training.
# + deletable=true editable=true
n_epochs = 4
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
        # MSE reported on the last batch of the epoch only.
        mse_train = mse.eval(feed_dict={X: X_batch})
        print("\r{}".format(epoch), "Train MSE:", mse_train)
        saver.save(sess, "./my_model_all_layers.ckpt")
# + [markdown] deletable=true editable=true
# This function loads the model, evaluates it on the test set (it measures the reconstruction error), then it displays the original image and its reconstruction:
# + deletable=true editable=true
def show_reconstructed_digits(X, outputs, model_path = None, n_test_digits = 2):
    """Plot the first MNIST test digits next to their reconstructions.

    Restores model variables from `model_path` (if given) via the
    module-level `saver`, evaluates `outputs` on the first
    `n_test_digits` test images, then draws each original/reconstruction
    pair on its own row.
    """
    with tf.Session() as sess:
        if model_path:
            saver.restore(sess, model_path)
        X_test = mnist.test.images[:n_test_digits]
        outputs_val = outputs.eval(feed_dict={X: X_test})
    fig = plt.figure(figsize=(8, 3 * n_test_digits))
    for row, (original, reconstruction) in enumerate(zip(X_test, outputs_val)):
        plt.subplot(n_test_digits, 2, 2 * row + 1)
        plot_image(original)
        plt.subplot(n_test_digits, 2, 2 * row + 2)
        plot_image(reconstruction)
# + deletable=true editable=true
# Visual sanity-check of the end-to-end-trained model, saved as a figure.
show_reconstructed_digits(X, outputs, "./my_model_all_layers.ckpt")
save_fig("reconstruction_plot")
# + [markdown] deletable=true editable=true
# ## Training one Autoencoder at a time in multiple graphs
# + [markdown] deletable=true editable=true
# There are many ways to train one Autoencoder at a time. The first approach is to train each Autoencoder using a different graph, then we create the Stacked Autoencoder by simply initializing it with the weights and biases copied from these Autoencoders.
# + [markdown] deletable=true editable=true
# Let's create a function that will train one autoencoder and return the transformed training set (ie. the output of the hidden layer) and the model parameters.
# + deletable=true editable=true
from functools import partial
def train_autoencoder(X_train, n_neurons, n_epochs, batch_size, learning_rate = 0.01, l2_reg = 0.0005, activation=tf.nn.elu):
    """Train a single one-hidden-layer autoencoder in its own graph.

    Builds an encoder (n_neurons units, `activation`) and a linear decoder
    back to the input size, with He init and L2 regularization, then trains
    it on random mini-batches of X_train.

    Returns a 5-tuple: (hidden-layer output on all of X_train,
    hidden kernel, hidden bias, output kernel, output bias) as numpy arrays.
    """
    # A dedicated Graph keeps this autoencoder's variables isolated from the
    # default graph, so the function can be called repeatedly.
    graph = tf.Graph()
    with graph.as_default():
        n_inputs = X_train.shape[1]
        X = tf.placeholder(tf.float32, shape=[None, n_inputs])
        my_dense_layer = partial(
            tf.layers.dense,
            activation=activation,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_reg))
        # Layer names matter: the params dict below is keyed on
        # "hidden/kernel:0" etc., so these names must stay fixed.
        hidden = my_dense_layer(X, n_neurons, name="hidden")
        outputs = my_dense_layer(hidden, n_inputs, activation=None, name="outputs")
        mse = tf.reduce_mean(tf.square(outputs - X))
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        loss = tf.add_n([mse] + reg_losses)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        training_op = optimizer.minimize(loss)
        init = tf.global_variables_initializer()
    with tf.Session(graph=graph) as sess:
        init.run()
        for epoch in range(n_epochs):
            n_batches = len(X_train) // batch_size
            for iteration in range(n_batches):
                print("\r{}%".format(100 * iteration // n_batches), end="")
                sys.stdout.flush()
                # Sample a random batch (with-replacement across iterations).
                indices = rnd.permutation(len(X_train))[:batch_size]
                X_batch = X_train[indices]
                sess.run(training_op, feed_dict={X: X_batch})
            mse_train = mse.eval(feed_dict={X: X_batch})
            print("\r{}".format(epoch), "Train MSE:", mse_train)
        # Snapshot trained weights by variable name, then compute the codings
        # for the full training set (used as input to the next autoencoder).
        params = dict([(var.name, var.eval()) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
        hidden_val = hidden.eval(feed_dict={X: X_train})
        return hidden_val, params["hidden/kernel:0"], params["hidden/bias:0"], params["outputs/kernel:0"], params["outputs/bias:0"]
# + [markdown] deletable=true editable=true
# Now let's train two Autoencoders. The first one is trained on the training data, and the second is trained on the previous Autoencoder's hidden layer output:
# + deletable=true editable=true
# First AE learns 784->300; its codings feed the second AE (300->150).
# W1/b1 and W4/b4 become the outer layers of the stack, W2/b2 and W3/b3 the inner ones.
hidden_output, W1, b1, W4, b4 = train_autoencoder(mnist.train.images, n_neurons=300, n_epochs=4, batch_size=150)
_, W2, b2, W3, b3 = train_autoencoder(hidden_output, n_neurons=150, n_epochs=4, batch_size=150)
# + [markdown] deletable=true editable=true
# Finally, we can create a Stacked Autoencoder by simply reusing the weights and biases from the Autoencoders we just trained:
# + deletable=true editable=true
tf.reset_default_graph()
n_inputs = 28*28
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# The pretrained numpy weights are baked in as constants (no Variables),
# so this graph needs no initialization or restore before evaluation.
hidden1 = tf.nn.elu(tf.matmul(X, W1) + b1)
hidden2 = tf.nn.elu(tf.matmul(hidden1, W2) + b2)
hidden3 = tf.nn.elu(tf.matmul(hidden2, W3) + b3)
outputs = tf.matmul(hidden3, W4) + b4
# + deletable=true editable=true
show_reconstructed_digits(X, outputs)
# + [markdown] deletable=true editable=true
# ## Training one Autoencoder at a time in a single graph
# + [markdown] deletable=true editable=true
# Another approach is to use a single graph. To do this, we create the graph for the full Stacked Autoencoder, but then we also add operations to train each Autoencoder independently: phase 1 trains the bottom and top layer (ie. the first Autoencoder) and phase 2 trains the two middle layers (ie. the second Autoencoder).
# + deletable=true editable=true
# One graph for the full stacked AE, plus two phase-specific training ops:
# phase 1 trains (weights1, weights4) with hidden2/hidden3 bypassed,
# phase 2 trains only the middle layers against hidden1's output.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights3_init = initializer([n_hidden2, n_hidden3])
weights4_init = initializer([n_hidden3, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
weights4 = tf.Variable(weights4_init, dtype=tf.float32, name="weights4")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
with tf.name_scope("phase1"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    phase1_outputs = tf.matmul(hidden1, weights4) + biases4 # bypass hidden2 and hidden3
    phase1_mse = tf.reduce_mean(tf.square(phase1_outputs - X))
    phase1_reg_loss = regularizer(weights1) + regularizer(weights4)
    phase1_loss = phase1_mse + phase1_reg_loss
    phase1_training_op = optimizer.minimize(phase1_loss)
with tf.name_scope("phase2"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # Phase 2 target is hidden1's output (reconstructed by hidden3).
    phase2_mse = tf.reduce_mean(tf.square(hidden3 - hidden1))
    phase2_reg_loss = regularizer(weights2) + regularizer(weights3)
    phase2_loss = phase2_mse + phase2_reg_loss
    # var_list restricts updates to the middle layers only.
    phase2_training_op = optimizer.minimize(phase2_loss, var_list=[weights2, biases2, weights3, biases3]) # freeze hidden1
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
# Run phase 1 then phase 2 in the same session (variables carry over).
training_ops = [phase1_training_op, phase2_training_op]
mses = [phase1_mse, phase2_mse]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
    init.run()
    for phase in range(2):
        print("Training phase #{}".format(phase + 1))
        for epoch in range(n_epochs[phase]):
            n_batches = mnist.train.num_examples // batch_sizes[phase]
            for iteration in range(n_batches):
                print("\r{}%".format(100 * iteration // n_batches), end="")
                sys.stdout.flush()
                X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
                sess.run(training_ops[phase], feed_dict={X: X_batch})
            # Phase-specific MSE on the last batch of the epoch.
            mse_train = mses[phase].eval(feed_dict={X: X_batch})
            print("\r{}".format(epoch), "Train MSE:", mse_train)
            saver.save(sess, "./my_model_one_at_a_time.ckpt")
    # Final test MSE uses the last phase's objective (phase 2 here).
    mse_test = mses[phase].eval(feed_dict={X: mnist.test.images})
    print("Test MSE:", mse_test)
# + deletable=true editable=true
show_reconstructed_digits(X, outputs, "./my_model_one_at_a_time.ckpt")
# + [markdown] deletable=true editable=true
# ## Cache the frozen layer outputs
# + deletable=true editable=true
# Same two-phase training, but in phase 2 hidden1 is frozen, so its output on
# the whole training set is computed once and cached (mnist_hidden1), then fed
# directly into the hidden1 placeholder-like tensor to skip recomputation.
training_ops = [phase1_training_op, phase2_training_op, training_op]
mses = [phase1_mse, phase2_mse, mse]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
    init.run()
    for phase in range(2):
        print("Training phase #{}".format(phase + 1))
        if phase == 1:
            # Cache the frozen layer's activations once, before phase 2.
            mnist_hidden1 = hidden1.eval(feed_dict={X: mnist.train.images})
        for epoch in range(n_epochs[phase]):
            n_batches = mnist.train.num_examples // batch_sizes[phase]
            for iteration in range(n_batches):
                print("\r{}%".format(100 * iteration // n_batches), end="")
                sys.stdout.flush()
                if phase == 1:
                    # Feed cached activations directly into hidden1.
                    indices = rnd.permutation(len(mnist_hidden1))
                    hidden1_batch = mnist_hidden1[indices[:batch_sizes[phase]]]
                    feed_dict = {hidden1: hidden1_batch}
                    sess.run(training_ops[phase], feed_dict=feed_dict)
                else:
                    X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
                    feed_dict = {X: X_batch}
                    sess.run(training_ops[phase], feed_dict=feed_dict)
            mse_train = mses[phase].eval(feed_dict=feed_dict)
            print("\r{}".format(epoch), "Train MSE:", mse_train)
            saver.save(sess, "./my_model_cache_frozen.ckpt")
    mse_test = mses[phase].eval(feed_dict={X: mnist.test.images})
    print("Test MSE:", mse_test)
# + deletable=true editable=true
show_reconstructed_digits(X, outputs, "./my_model_cache_frozen.ckpt")
# + [markdown] deletable=true editable=true
# ## Tying weights
# + [markdown] deletable=true editable=true
# It is common to tie the weights of the encoder and the decoder (`weights_decoder = tf.transpose(weights_encoder)`). Unfortunately this makes it impossible (or very tricky) to use the `tf.layers.dense()` function, so we need to build the Autoencoder manually:
# + deletable=true editable=true
# Tied-weights autoencoder: decoder weights are the transposes of the encoder
# weights, so only weights1/weights2 are trainable Variables; weights3/weights4
# are derived tensors (hence only weights1/weights2 are regularized).
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.transpose(weights2, name="weights3") # tied weights
weights4 = tf.transpose(weights1, name="weights4") # tied weights
# Biases are never tied; each layer keeps its own.
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
mse = tf.reduce_mean(tf.square(outputs - X))
reg_loss = regularizer(weights1) + regularizer(weights2)
loss = mse + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
        mse_train = mse.eval(feed_dict={X: X_batch})
        print("\r{}".format(epoch), "Train MSE:", mse_train)
        saver.save(sess, "./my_model_tying_weights.ckpt")
# + deletable=true editable=true
show_reconstructed_digits(X, outputs, "./my_model_tying_weights.ckpt")
# + [markdown] deletable=true editable=true
# # Unsupervised pretraining
# + deletable=true editable=true
# Supervised MNIST classifier (784 -> 300 -> 150 -> 10) whose first two layers
# can be initialized from the pretrained autoencoder (via pretrain_saver).
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150
n_outputs = 10
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.int32, shape=[None])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
# BUG FIX: the output layer must map to the n_outputs=10 classes. The original
# used `n_hidden3` here and in biases3 — a stale 300-unit value left over from
# the previous (autoencoder) cell — silently producing 300-way logits.
weights3_init = initializer([n_hidden2, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_outputs), name="biases3")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
logits = tf.matmul(hidden2, weights3) + biases3
# NOTE(review): cross_entropy is a per-example vector; minimize() sums its
# gradients over the batch. Consider tf.reduce_mean for a batch-size-invariant
# learning rate — left as-is to preserve existing training dynamics.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
reg_loss = regularizer(weights1) + regularizer(weights2) + regularizer(weights3)
loss = cross_entropy + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
# pretrain_saver restores only the first two (pretrained) layers; saver
# handles the full model.
pretrain_saver = tf.train.Saver([weights1, weights2, biases1, biases2])
saver = tf.train.Saver()
# + [markdown] deletable=true editable=true
# Regular training (without pretraining):
# + deletable=true editable=true
n_epochs = 4
batch_size = 150
# Only 20k of the 55k training images are used, to make the benefit of
# unsupervised pretraining visible in the next cell.
n_labeled_instances = 20000
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = n_labeled_instances // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            # Random batch from the labeled subset.
            indices = rnd.permutation(n_labeled_instances)[:batch_size]
            X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end=" ")
        saver.save(sess, "./my_model_supervised.ckpt")
    accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
    print("Test accuracy:", accuracy_val)
# + [markdown] deletable=true editable=true
# Now reusing the first two layers of the autoencoder we pretrained:
# + deletable=true editable=true
n_epochs = 4
batch_size = 150
n_labeled_instances = 20000
#training_op = optimizer.minimize(loss, var_list=[weights3, biases3]) # Freeze layers 1 and 2 (optional)
with tf.Session() as sess:
    init.run()
    # Overwrite layers 1-2 with the autoencoder weights saved earlier;
    # layer 3 keeps its fresh random initialization.
    pretrain_saver.restore(sess, "./my_model_cache_frozen.ckpt")
    for epoch in range(n_epochs):
        n_batches = n_labeled_instances // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            indices = rnd.permutation(n_labeled_instances)[:batch_size]
            X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end="\t")
        saver.save(sess, "./my_model_supervised_pretrained.ckpt")
    accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
    print("Test accuracy:", accuracy_val)
# + [markdown] deletable=true editable=true
# # Stacked denoising Autoencoder
# -
# Note: the book uses `tf.contrib.layers.dropout()` rather than `tf.layers.dropout()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dropout()`, because anything in the contrib module may change or be deleted without notice. The `tf.layers.dropout()` function is almost identical to the `tf.contrib.layers.dropout()` function, except for a few minor differences. Most importantly:
# * you must specify the dropout rate (`rate`) rather than the keep probability (`keep_prob`), where `rate` is simply equal to `1 - keep_prob`,
# * the `is_training` parameter is renamed to `training`.
# + deletable=true editable=true
# Denoising autoencoder: dropout corrupts the *input* during training, but the
# reconstruction target is the clean X. Weights are tied as before.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.00001
dropout_rate = 0.3
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# Defaults to False so evaluation runs without feeding this placeholder.
is_training = tf.placeholder_with_default(False, shape=(), name='is_training')
X_drop = tf.layers.dropout(X, dropout_rate, training=is_training)
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.transpose(weights2, name="weights3") # tied weights
weights4 = tf.transpose(weights1, name="weights4") # tied weights
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X_drop, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
optimizer = tf.train.AdamOptimizer(learning_rate)
# MSE compares against the *uncorrupted* input X, not X_drop.
mse = tf.reduce_mean(tf.square(outputs - X))
reg_loss = regularizer(weights1) + regularizer(weights2)
loss = mse + reg_loss
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            # Dropout active while training, disabled when measuring MSE.
            sess.run(training_op, feed_dict={X: X_batch, is_training: True})
        mse_train = mse.eval(feed_dict={X: X_batch, is_training: False})
        print("\r{}".format(epoch), "Train MSE:", mse_train)
        saver.save(sess, "./my_model_stacked_denoising.ckpt")
# + deletable=true editable=true
show_reconstructed_digits(X, outputs, "./my_model_stacked_denoising.ckpt")
# + [markdown] deletable=true editable=true
# ## Visualizing the extracted features
# + deletable=true editable=true
# Reload the denoising AE and visualize the first 5 learned features:
# each row of weights1.T is one hidden unit's 784 input weights, viewable
# as a 28x28 image.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_stacked_denoising.ckpt")
    weights1_val = weights1.eval()
# + deletable=true editable=true
for i in range(5):
    plt.subplot(1, 5, i + 1)
    plot_image(weights1_val.T[i])
save_fig("extracted_features_plot")
plt.show()
# + [markdown] deletable=true editable=true
# # Sparse Autoencoder
# + deletable=true editable=true
# Compare KL divergence vs MSE as sparsity penalties, as a function of the
# actual mean activation q, for a sparsity target p = 0.1.
p = 0.1
# NOTE: linspace includes q=0 and q=1, where log(p/q) and (1-p)/(1-q) are
# undefined — numpy emits divide warnings and plots inf/nan at the endpoints.
q = np.linspace(0, 1, 500)
kl_div = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))
mse = (p - q)**2
plt.plot([p, p], [0, 0.3], "k:")
plt.text(0.05, 0.32, "Target\nsparsity", fontsize=14)
plt.plot(q, kl_div, "b-", label="KL divergence")
plt.plot(q, mse, "r--", label="MSE")
plt.legend(loc="upper left")
plt.xlabel("Actual sparsity")
plt.ylabel("Cost", rotation=0)
plt.axis([0, 1, 0, 0.95])
save_fig("sparsity_loss_plot")
# + deletable=true editable=true
def kl_divergence(p, q):
    """Kullback-Leibler divergence between two Bernoulli distributions
    with parameters p (target) and q (actual mean activation)."""
    active_term = p * tf.log(p / q)
    inactive_term = (1 - p) * tf.log((1 - p) / (1 - q))
    return active_term + inactive_term
# + deletable=true editable=true
# Sparse autoencoder: one oversized (1000-unit) sigmoid coding layer, pushed
# towards a 10% mean activation via a KL-divergence sparsity penalty.
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 1000 # sparse codings
n_outputs = n_inputs
learning_rate = 0.01
sparsity_target = 0.1
sparsity_weight = 0.2
#activation = tf.nn.softplus # soft variant of ReLU
# Sigmoid keeps activations in (0, 1), as required by the KL penalty.
activation = tf.nn.sigmoid
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_outputs), name="biases2")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
outputs = tf.matmul(hidden1, weights2) + biases2
optimizer = tf.train.AdamOptimizer(learning_rate)
mse = tf.reduce_mean(tf.square(outputs - X))
# Mean activation of each hidden unit over the batch, compared to the target.
hidden1_mean = tf.reduce_mean(hidden1, axis=0) # batch mean
sparsity_loss = tf.reduce_sum(kl_divergence(sparsity_target, hidden1_mean))
loss = mse + sparsity_weight * sparsity_loss
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
# Large batches (1000) keep the batch-mean activation estimate stable,
# which the KL sparsity penalty depends on.
n_epochs = 100
batch_size = 1000
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
        mse_val, sparsity_loss_val, loss_val = sess.run([mse, sparsity_loss, loss], feed_dict={X: X_batch})
        print("\r{}".format(epoch), "Train MSE:", mse_val, "\tSparsity loss:", sparsity_loss_val, "\tTotal loss:", loss_val)
        saver.save(sess, "./my_model_sparse.ckpt")
# + deletable=true editable=true
show_reconstructed_digits(X, outputs, "./my_model_sparse.ckpt")
# + [markdown] deletable=true editable=true
# # Variational Autoencoder
# + deletable=true editable=true
# Variational autoencoder built with explicit weight matrices. The encoder
# outputs a mean and log-variance for the 20-dim coding; sampling uses the
# reparameterization trick (mean + sigma * standard-normal noise).
tf.reset_default_graph()
n_inputs = 28*28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
activation = tf.nn.elu
initializer = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG",
                                                             uniform=True)
X = tf.placeholder(tf.float32, [None, n_inputs])
weights1 = tf.Variable(initializer([n_inputs, n_hidden1]))
weights2 = tf.Variable(initializer([n_hidden1, n_hidden2]))
# Two parallel heads on hidden2: one for the coding mean, one for log(sigma^2).
weights3_mean = tf.Variable(initializer([n_hidden2, n_hidden3]))
weights3_log_sigma = tf.Variable(initializer([n_hidden2, n_hidden3]))
weights4 = tf.Variable(initializer([n_hidden3, n_hidden4]))
weights5 = tf.Variable(initializer([n_hidden4, n_hidden5]))
weights6 = tf.Variable(initializer([n_hidden5, n_inputs]))
biases1 = tf.Variable(tf.zeros([n_hidden1], dtype=tf.float32))
biases2 = tf.Variable(tf.zeros([n_hidden2], dtype=tf.float32))
biases3_mean = tf.Variable(tf.zeros([n_hidden3], dtype=tf.float32))
biases3_log_sigma = tf.Variable(tf.zeros([n_hidden3], dtype=tf.float32))
biases4 = tf.Variable(tf.zeros([n_hidden4], dtype=tf.float32))
biases5 = tf.Variable(tf.zeros([n_hidden5], dtype=tf.float32))
biases6 = tf.Variable(tf.zeros([n_inputs], dtype=tf.float32))
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3_mean = tf.matmul(hidden2, weights3_mean) + biases3_mean
hidden3_log_sigma = tf.matmul(hidden2, weights3_log_sigma) + biases3_log_sigma
# Reparameterization trick: sample coding = mean + sigma * N(0, 1).
noise = tf.random_normal(tf.shape(hidden3_log_sigma), dtype=tf.float32)
hidden3 = hidden3_mean + tf.sqrt(tf.exp(hidden3_log_sigma)) * noise
hidden4 = activation(tf.matmul(hidden3, weights4) + biases4)
hidden5 = activation(tf.matmul(hidden4, weights5) + biases5)
logits = tf.matmul(hidden5, weights6) + biases6
outputs = tf.sigmoid(logits)
# Reconstruction loss: per-pixel sigmoid cross-entropy, summed over the batch.
reconstruction_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits))
# KL divergence between the coding distribution and a standard Gaussian.
latent_loss = 0.5 * tf.reduce_sum(tf.exp(hidden3_log_sigma) + tf.square(hidden3_mean) - 1 - hidden3_log_sigma)
cost = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(cost)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
# Same VAE rebuilt with tf.layers.dense. Here the second encoder head outputs
# gamma = log(sigma^2) directly, so sigma = exp(gamma / 2).
tf.reset_default_graph()
from functools import partial
n_inputs = 28*28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
initializer = tf.contrib.layers.variance_scaling_initializer()
my_dense_layer = partial(
    tf.layers.dense,
    activation=tf.nn.elu,
    kernel_initializer=initializer)
X = tf.placeholder(tf.float32, [None, n_inputs])
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
# Linear heads for the coding mean and gamma = log(sigma^2).
hidden3_mean = my_dense_layer(hidden2, n_hidden3, activation=None)
hidden3_gamma = my_dense_layer(hidden2, n_hidden3, activation=None)
# Reparameterization trick: coding = mean + exp(gamma/2) * N(0, 1).
noise = tf.random_normal(tf.shape(hidden3_gamma), dtype=tf.float32)
hidden3 = hidden3_mean + tf.exp(0.5 * hidden3_gamma) * noise
hidden4 = my_dense_layer(hidden3, n_hidden4)
hidden5 = my_dense_layer(hidden4, n_hidden5)
logits = my_dense_layer(hidden5, n_outputs, activation=None)
outputs = tf.sigmoid(logits)
reconstruction_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits))
# KL divergence to the standard-normal prior, in terms of gamma.
latent_loss = 0.5 * tf.reduce_sum(tf.exp(hidden3_gamma) + tf.square(hidden3_mean) - 1 - hidden3_gamma)
cost = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(cost)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# + deletable=true editable=true
# Train the VAE on MNIST mini-batches, printing progress per batch and the
# three loss components after each epoch, then checkpoint the model.
n_epochs = 50
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            print("\r{}%".format(100 * iteration // n_batches), end="")
            sys.stdout.flush()
            # Labels are unused: the VAE is trained unsupervised on X only.
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
        # Losses are evaluated on the last batch of the epoch only.
        cost_val, reconstruction_loss_val, latent_loss_val = sess.run([cost, reconstruction_loss, latent_loss], feed_dict={X: X_batch})
        print("\r{}".format(epoch), "Train cost:", cost_val, "\tReconstruction loss:", reconstruction_loss_val, "\tLatent loss:", latent_loss_val)
        # Overwrites the same checkpoint every epoch.
        saver.save(sess, "./my_model_variational.ckpt")
# + [markdown] deletable=true editable=true
# Encode:
# + deletable=true editable=true
# Encode a batch of test images into their latent codings.
n_digits = 3
X_test, y_test = mnist.test.next_batch(batch_size)
codings = hidden3
with tf.Session() as sess:
    saver.restore(sess, "./my_model_variational.ckpt")
    codings_val = codings.eval(feed_dict={X: X_test})
# + [markdown] deletable=true editable=true
# Decode:
# + deletable=true editable=true
# Decode the codings back to images by feeding them straight into the
# latent tensor, bypassing the encoder.
with tf.Session() as sess:
    saver.restore(sess, "./my_model_variational.ckpt")
    outputs_val = outputs.eval(feed_dict={codings: codings_val})
# + [markdown] deletable=true editable=true
# Let's plot the reconstructions:
# + deletable=true editable=true
# Side-by-side grid: original test digit (left) vs. its reconstruction (right).
# NOTE(review): plot_image is a helper defined elsewhere in this notebook.
fig = plt.figure(figsize=(8, 2.5 * n_digits))
for iteration in range(n_digits):
    plt.subplot(n_digits, 2, 1 + 2 * iteration)
    plot_image(X_test[iteration])
    plt.subplot(n_digits, 2, 2 + 2 * iteration)
    plot_image(outputs_val[iteration])
# + [markdown] deletable=true editable=true
# ## Generate digits
# + deletable=true editable=true
# Sample random latent codings from the standard normal prior and decode
# them into brand-new digit images.
n_rows = 6
n_cols = 10
n_digits = n_rows * n_cols
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
with tf.Session() as sess:
    saver.restore(sess, "./my_model_variational.ckpt")
    outputs_val = outputs.eval(feed_dict={codings: codings_rnd})
# + deletable=true editable=true
plot_multiple_images(outputs_val.reshape(-1, 28, 28), n_rows, n_cols)
save_fig("generated_digits_plot")
plt.show()
# + deletable=true editable=true
# NOTE(review): this cell is an exact duplicate of the generation cell above
# (only the random sample differs); consider removing it.
n_rows = 6
n_cols = 10
n_digits = n_rows * n_cols
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
with tf.Session() as sess:
    saver.restore(sess, "./my_model_variational.ckpt")
    outputs_val = outputs.eval(feed_dict={codings: codings_rnd})
# + [markdown] deletable=true editable=true
# ## Interpolate digits
# + deletable=true editable=true
# Linearly interpolate in latent space between each random coding and the
# next one (np.roll pairs i with i+1), decoding and plotting each step.
n_iterations = 3
n_digits = 6
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
with tf.Session() as sess:
    saver.restore(sess, "./my_model_variational.ckpt")
    target_codings = np.roll(codings_rnd, -1, axis=0)
    # n_iterations + 1 steps so both endpoints (ratio 0 and 1) are shown.
    for iteration in range(n_iterations + 1):
        codings_interpolate = codings_rnd + (target_codings - codings_rnd) * iteration / n_iterations
        outputs_val = outputs.eval(feed_dict={codings: codings_interpolate})
        plt.figure(figsize=(11, 1.5*n_iterations))
        for digit_index in range(n_digits):
            plt.subplot(1, n_digits, digit_index + 1)
            plot_image(outputs_val[digit_index])
        plt.show()
# + [markdown] deletable=true editable=true
# # Exercise solutions
# + [markdown] deletable=true editable=true
# Coming soon...
# + deletable=true editable=true
| 15_autoencoders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
sys.path.append(os.path.dirname('./'))
print(sys.path)
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
from lovasz_loss import LovaszSoftmax
from md.mdpytorch.loss.multi_dice_loss import MultiDiceLoss
from utils import IoU
# +
# Load the T2-weighted MRI volume and its segmentation mask, then downscale
# both in-plane by `scale`. Volumes arrive as (slices, H, W); cv2.resize works
# on (H, W, channels), so we transpose around the resize and back.
image_path='./ProstateX-0002/t2w.mhd'
seg_path='./ProstateX-0002/seg.mhd'
itkimage=sitk.ReadImage(image_path)
image=sitk.GetArrayFromImage(itkimage)
itkimage=sitk.ReadImage(seg_path)
seg=sitk.GetArrayFromImage(itkimage)
scale=0.2
image=np.transpose(image,(1,2,0))
seg=np.transpose(seg,(1,2,0))
hei,wid,_=image.shape
# BUG FIX: cv2.resize's third *positional* argument is `dst`, not
# `interpolation`, so the flags were not applied; in particular the label
# mask fell back to the default bilinear interpolation, which corrupts
# integer labels. Pass the flags by keyword.
image=cv2.resize(image,(int(scale*wid),int(scale*hei)),interpolation=cv2.INTER_CUBIC)
# Nearest-neighbor keeps segmentation labels discrete.
seg=cv2.resize(seg,(int(scale*wid),int(scale*hei)),interpolation=cv2.INTER_NEAREST)
image=np.transpose(image,(2,0,1))
seg=np.transpose(seg,(2,0,1))
print(seg.shape)
# -
# -
# Standardize the image volume (zero mean, unit variance) and move both
# tensors to the GPU as (batch=1, channel=1, D, H, W) / (batch=1, D, H, W).
image=np.asarray(image,dtype=np.float32)
image=(image-np.mean(image))/np.std(image)
image=np.expand_dims(image,0)
seg=np.asarray(seg,np.int64)
data=torch.from_numpy(image)
target=torch.from_numpy(seg)
data=data.unsqueeze(0).cuda()
target=target.unsqueeze(0).cuda()
# +
# Visual sanity check: tile the slices of the input volume and of the
# segmentation mask into image grids.
tmp=data.squeeze(1)
tmp=tmp.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8,normalize=True)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp)
plt.show()
tmp=target.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
# Labels are 0/1, so scale by 255 for display.
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
# -
class Net(nn.Module):
    """Minimal 3D segmentation network.

    Two Conv3d + BatchNorm3d + ReLU stages (in_channels -> 32 -> 64) followed
    by a Conv3d that maps to ``out_channels`` classes, normalized with a
    softmax over the channel dimension. Spatial size is preserved throughout
    (3x3x3 kernels, padding 1).
    """

    def __init__(self, in_channels, out_channels):
        super(Net, self).__init__()
        # Stage 1: lift the input to 32 feature maps.
        self.Conv1 = nn.Sequential(
            nn.Conv3d(in_channels, 32, 3, padding=1),
            nn.BatchNorm3d(32),
            nn.ReLU(),
        )
        # Stage 2: 32 -> 64 feature maps.
        self.Conv2 = nn.Sequential(
            nn.Conv3d(32, 64, 3, padding=1),
            nn.BatchNorm3d(64),
            nn.ReLU(),
        )
        # Head: per-voxel class probabilities (softmax across channels).
        self.Conv3 = nn.Sequential(
            nn.Conv3d(64, out_channels, 3, padding=1),
            nn.Softmax(dim=1),
        )

    def forward(self, input):
        """Apply the three stages in sequence and return class probabilities."""
        features = self.Conv1(input)
        features = self.Conv2(features)
        return self.Conv3(features)
# Overfit the tiny Net on the single volume using the Lovász-Softmax loss,
# tracking per-iteration IoU and loss.
iters=100
model=Net(1,2).cuda()
optimizer=optim.Adam(model.parameters(),lr=0.001,betas=(0.9,0.99))
lovasz_loss=LovaszSoftmax()
ious=[]
losses=[]
for i in range(iters):
    optimizer.zero_grad()
    out1=model(data)
    loss=lovasz_loss(out1,target)
    loss.backward()
    optimizer.step()
    # Argmax over the class channel gives the hard prediction.
    _,pred1=out1.max(1)
    iou=IoU(pred1.cpu().numpy(),target.cpu().numpy())
    ious.append(iou)
    losses.append(loss.item())
plt.plot(ious)
plt.show()
print(np.min(losses))
# +
# Show the Lovász-trained prediction as a slice grid, then the absolute
# disagreement with the ground truth, and print the final IoU.
_,pred1=out1.max(1)
tmp=pred1.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
# Voxels where prediction and target differ (0 = agree, 1 = disagree).
diff=(pred1-target).abs()
tmp=diff.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
print('IoU: {}'.format(IoU(pred1.cpu().numpy(),target.cpu().numpy())))
# -
# -
iters=100
model=Net(1,2).cuda()
optimizer=optim.Adam(model.parameters(),lr=0.001,betas=(0.9,0.99))
dice_loss=MultiDiceLoss(weights=[0.5,0.5],num_class=2)
lovasz_loss=LovaszSoftmax()
losses=[]
ious=[]
for i in range(iters):
optimizer.zero_grad()
out2=model(data)
loss,_=dice_loss(out2,target)
loss.backward()
optimizer.step()
_,pred2=out2.max(1)
iou=IoU(pred2.cpu().numpy(),target.cpu().numpy())
ious.append(iou)
losses.append(loss.item())
plt.plot(ious)
plt.show()
print(np.min(losses))
# +
# Visualize the Dice-trained prediction, its disagreement with the target,
# and print the final IoU (mirrors the Lovász visualization cell above).
_,pred2=out2.max(1)
tmp=pred2.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
diff=(pred2-target).abs()
tmp=diff.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
print('IoU: {}'.format(IoU(pred2.cpu().numpy(),target.cpu().numpy())))
# -
# -
iters=100
model=Net(1,2).cuda()
optimizer=optim.Adam(model.parameters(),lr=0.001,betas=(0.9,0.99))
dice_loss=MultiDiceLoss(weights=[0.5,0.5],num_class=2)
lovasz_loss=LovaszSoftmax()
losses=[]
ious=[]
for i in range(iters):
optimizer.zero_grad()
out2=model(data)
loss,_=dice_loss(out2,target)
loss2=lovasz_loss(out2,target)
loss=0.9*loss.cuda()+0.1*loss2
loss.backward()
optimizer.step()
_,pred2=out2.max(1)
iou=IoU(pred2.cpu().numpy(),target.cpu().numpy())
ious.append(iou)
losses.append(loss.item())
plt.plot(ious)
plt.show()
print(np.min(losses))
# +
# Visualize the combined-loss prediction and its disagreement with the
# target, then print the final IoU (same layout as the previous two cells).
_,pred2=out2.max(1)
tmp=pred2.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
diff=(pred2-target).abs()
tmp=diff.permute(1,0,2,3)
grid=make_grid(tmp,nrow=8)
grid_tmp=grid.cpu().numpy()
grid_tmp=np.transpose(grid_tmp,(1,2,0))
plt.imshow(grid_tmp*255,cmap='gray')
plt.show()
print('IoU: {}'.format(IoU(pred2.cpu().numpy(),target.cpu().numpy())))
# -
# -
# Fix: this cell read `os.path.isdi`, a truncated `os.path.isdir` that raises
# AttributeError when executed. Corrected to the valid attribute (the bare
# reference is a no-op, like the original apparently intended scratch cell).
os.path.isdir
| LovaszSoftmax/lovasz_loss_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow IO Authors.
# + cellView="form" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="qFdPvlXBOdUN"
# # 音频数据准备和增强
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://tensorflow.google.cn/io/tutorials/audio"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td>
# <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a></td>
# <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/io/tutorials/audio.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
# </table>
# + [markdown] id="xHxb-dlhMIzW"
# ## 概述
#
# 自动语音识别面临的最大挑战之一是音频数据的准备和增强。音频数据分析可能涉及时域或频域,与图像等其他数据源相比,这提高了复杂性。
#
# 作为 TensorFlow 生态系统的一部分,`tensorflow-io` 软件包提供了不少与音频相关的 API。这些 API 非常有用,可简化音频数据的准备和增强。
# + [markdown] id="MUXex9ctTuDB"
# ## 设置
# + [markdown] id="upgCc3gXybsA"
# ### 安装要求的软件包,然后重新启动运行时
# + id="uUDYyMZRfkX4" colab={"base_uri": "https://localhost:8080/"} outputId="a207e94b-de5c-425a-c9b1-e6fc511cace9"
# !pip install tensorflow-io
# + [markdown] id="J0ZKhA6s0Pjp"
# ## 使用方法
# + [markdown] id="yZmI7l_GykcW"
# ### 读取音频文件
#
# 在 TensorFlow IO 中,利用类 `tfio.audio.AudioIOTensor` 可以将音频文件读取到延迟加载的 `IOTensor` 中:
# + id="nS3eTBvjt-O5" colab={"base_uri": "https://localhost:8080/"} outputId="b629c816-a34e-483c-b09f-be520bdbde45"
import tensorflow as tf
import tensorflow_io as tfio
# Lazily open a FLAC clip from a public GCS bucket; AudioIOTensor only reads
# metadata (shape, dtype, rate) until the samples are actually accessed.
audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac')
print(audio)
# + [markdown] id="z9GCyPWNuOm7"
# 在上面的示例中,Flac 文件 `brooklyn.flac` 来自 [Google Cloud](https://cloud.google.com/speech-to-text/docs/quickstart-gcloud) 中可公开访问的音频片段。
#
# 示例中直接使用 GCS 地址 `gs://cloud-samples-tests/speech/brooklyn.flac`,因为 TensorFlow 支持 GCS 文件系统。除了 `Flac` 格式,凭借自动文件格式检测,`AudioIOTensor` 还支持 `WAV`、`Ogg`、`MP3` 和 `MP4A` 格式。
#
# `AudioIOTensor` 是一个延迟加载张量,因此,刚开始只显示形状、dtype 和采样率。`AudioIOTensor` 的形状用 `[samples, channels]` 表示,这表示您加载的音频片段是单声道音频(`int16` 类型的 `28979` 个样本)。
# + [markdown] id="IF_kYz_o2DH4"
# 仅需要时才会读取该音频片段的内容。要读取音频片段的内容,可通过 `to_tensor()` 将 `AudioIOTensor` 转换为 `Tensor`,也可以通过切片读取。如果只需要一个大音频片段的一小部分,切片尤其实用:
# + id="wtM_ixN724xb" colab={"base_uri": "https://localhost:8080/"} outputId="e0f01caa-0240-4a38-9095-70c55dd64a7d"
# Slicing materializes only the requested samples; skip the first 100.
audio_slice = audio[100:]
# remove last dimension (mono audio: [samples, 1] -> [samples])
audio_tensor = tf.squeeze(audio_slice, axis=[-1])
print(audio_tensor)
# + [markdown] id="IGnbXuVnSo8T"
# 音频可通过以下方式播放:
# + id="0rLbVxuFSvVO" colab={"base_uri": "https://localhost:8080/", "height": 75} outputId="63a682b0-539c-445d-c23e-2991a459f0bc"
# Play the clip inline in the notebook at its native sample rate.
from IPython.display import Audio
Audio(audio_tensor.numpy(), rate=audio.rate.numpy())
# + [markdown] id="fmt4cn304IbG"
# 更方便的方式是,将张量转换为浮点数并在计算图中显示音频片段:
# + id="ZpwajOeR4UMU" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="476e61a2-8cad-4640-84da-b688c47ca7a6"
import matplotlib.pyplot as plt
# Convert int16 samples to floats in [-1, 1] (32768 = 2**15) and plot.
tensor = tf.cast(audio_tensor, tf.float32) / 32768.0
plt.figure()
plt.plot(tensor.numpy())
# + [markdown] id="86qE8BPl5rcA"
# ### 降噪
#
# 为音频降噪有时很有意义,这可以通过 API `tfio.audio.trim` 实现。从该 API 返回的是片段的一对 `[start, stop]` 位置:
# + id="eEa0Z5U26Ep3" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="bf188ab6-896d-4775-eb90-d6bd48448965"
import tensorflow_io as tfio
# trim() returns the [start, stop] sample positions of the non-silent
# segment (epsilon is the noise threshold).
position = tfio.audio.trim(tensor, axis=0, epsilon=0.1)
print(position)
start = position[0]
stop = position[1]
print(start, stop)
# Keep only the voiced portion of the waveform.
processed = tensor[start:stop]
plt.figure()
plt.plot(processed.numpy())
# + [markdown] id="ineBzDeu-lTh"
# ### 淡入和淡出
#
# 一种有用的音频工程技术是淡入淡出,也就是逐渐增强或减弱音频信号。这可以通过 `tfio.audio.fade` 实现。`tfio.audio.fade` 支持不同的淡入淡出形状,如 `linear`、`logarithmic` 或 `exponential`:
# + id="LfZo0XaaAaeM" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="67ef324d-1c7e-444c-d4ee-015504e94aaa"
# Apply a logarithmic fade-in over 1000 samples and fade-out over 2000.
fade = tfio.audio.fade(
    processed, fade_in=1000, fade_out=2000, mode="logarithmic")
plt.figure()
plt.plot(fade.numpy())
# + [markdown] id="7rhLvOSZB0k0"
# ### 声谱图
#
# 高级音频处理通常需要根据时间调整音频频率。在 `tensorflow-io` 中,可通过 `tfio.audio.spectrogram` 将波形图转换为声谱图。
# + id="UyFMBK-LDDnN" colab={"base_uri": "https://localhost:8080/", "height": 176} outputId="bf61e9d9-8294-4e54-871f-5adccdc43cf9"
# Convert to spectrogram (512-point FFT, 512-sample window, 256-sample hop);
# log-scale for display.
spectrogram = tfio.audio.spectrogram(
    fade, nfft=512, window=512, stride=256)
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
# + [markdown] id="pZ92HnbJGHBS"
# 也可以转换为其他不同的比例:
# + id="ZgyedQdxGM2y" colab={"base_uri": "https://localhost:8080/", "height": 536} outputId="cc59e5cd-66dc-42f7-a621-db32f72248c8"
# Convert to mel-spectrogram (128 mel bins over 0-8000 Hz at 16 kHz).
mel_spectrogram = tfio.audio.melscale(
    spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
plt.figure()
plt.imshow(tf.math.log(mel_spectrogram).numpy())
# Convert to db scale mel-spectrogram (clipped to an 80 dB dynamic range).
dbscale_mel_spectrogram = tfio.audio.dbscale(
    mel_spectrogram, top_db=80)
plt.figure()
plt.imshow(dbscale_mel_spectrogram.numpy())
# + [markdown] id="nXd776xNIr_I"
# ### SpecAugment
#
# 除上述数据准备和增强 API 外,`tensorflow-io` 软件包还提供了高级声谱图增强,最主要的是在 [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition (Park et al., 2019)](https://arxiv.org/pdf/1904.08779.pdf) 中讨论的频率掩蔽和时间掩蔽。
# + [markdown] id="dajm7k-2J5l7"
# #### 频率掩蔽
#
# 在频率掩蔽中,对频率通道 `[f0, f0 + f)` 进行掩蔽,其中 `f` 选自从 `0` 到频率掩蔽参数 `F` 的均匀分布,而 `f0` 则选自 `(0, ν − f)`,其中 `ν` 是频率通道的数量。
# + id="kLEdfkkoK27A" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="8f783480-0f9a-4a96-fc93-65527a3c56f7"
# Freq masking (SpecAugment): zero out a random band of up to 10 mel bins.
freq_mask = tfio.audio.freq_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(freq_mask.numpy())
# + [markdown] id="_luycpCWLe5l"
# #### 时间掩蔽
#
# 在时间掩蔽中,对 `t` 个连续时间步骤 `[t0, t0 + t)` 进行掩蔽,其中 `t` 选自从 `0` 到时间掩蔽参数 `T` 的均匀分布,而 `t0` 则选自 `[0, τ − t)`,其中 `τ` 是时间步数。
# + id="G1ie8J3wMMEI" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="a95d96a1-967c-44b8-bb0c-c494880f9169"
# Time masking (SpecAugment): zero out a random span of up to 10 time steps.
time_mask = tfio.audio.time_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(time_mask.numpy())
| site/zh-cn/io/tutorials/audio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["tag_a"] active=""
# Has tag_a
# + tags=["tag_b", "tag_c"] active=""
# Has tag_b and tag_c
# + tags=["tag_d"] active=""
# Has tag_d
# + active=""
# Has no tag
| jubox/test/test_files/nb_with_tags.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 1 - MNIST CLASSIFIER
# MNIST is a standard dataset used for classification task. This dataset consists of lots of images of handwritten digits. The task is to classify each image according to the number they represent
# ## Importing Dependencies
#import dependancies
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from matplotlib import pyplot as plt
# ## Setting the hyperparameters
# In the cell below, we shall set the different parameters of the model. These are user defined and you are encouraged to experiment with them
#config cell
num_classes = 10 #number of classes(0 to 9)
input_shape = (28, 28, 1) #shape of an image: 28x28 grayscale, 1 channel
batch_size = 128 #How many images to train in one iteration
epochs = 15 #Number of times the model gets trained on the entire dataset
# ## Data Loading, Visualization and Preprocessing
# +
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
#visualize the 79th image of the dataset. You can change this value to get an idea about different images
plt.imshow(x_train[79],cmap='gray')
# Scale images to the [0, 1] range (raw pixels are uint8 in [0, 255])
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1) by adding a channel dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices (one-hot encoding,
# required by the categorical_crossentropy loss used below)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# -
# ## Model Architecture
# +
#Build the model: two conv+maxpool stages, then dropout and a softmax head.
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        # Dropout regularizes the final classifier during training.
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
)
model.summary()
# -
# ## Training the model
#Compile the model with a loss function, optimizer and an evaluation metric
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
#Train the model, with 10% of the data being the validation set
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
# ## Testing
# Evaluate on the held-out test set; score is [loss, accuracy].
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
#view an image and the predicted label. Encouraged to experiment
test_img = x_test[129]
correct_label = y_test[129]
# Add a batch dimension: predict expects (batch, 28, 28, 1).
predicted_label = model.predict(np.expand_dims(test_img, 0))
plt.imshow(test_img[:,:,0],cmap='gray')
# argmax converts the one-hot / probability vectors back to digit labels.
print("Predicted Label: ",np.argmax(predicted_label))
print("Correct Label: ",np.argmax(correct_label))
| DeepLearningExercises/Exercise_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import infostop
import pandas as pd

# Placeholder paths — replace with the real input/output CSV locations.
MY_PATH_TO_INPUT_DATA = '....'
# Renamed from MY_PATH_TO_OUPUT_DATA: fixed the "OUPUT" typo (both the
# definition and its single use below are updated consistently).
MY_PATH_TO_OUTPUT_DATA = '....'
# +
# Read data (a dataframe containing latitude, longitude and timestamp records)
df = pd.read_csv(MY_PATH_TO_INPUT_DATA)
df = df[['latitude','longitude','time']]
# Sort by time (Infostop expects chronologically ordered records)
df = df.sort_values(by = 'time').reset_index(drop = True)
# Find stop locations: points within r1/r2 meters, visited for at least
# min_staying_time seconds, with gaps no longer than max_time_between.
model_infostop = infostop.Infostop(r1 = 30,
                                   r2 = 30,
                                   label_singleton=False,
                                   min_staying_time = 600,
                                   max_time_between = 86400,
                                   min_size = 2)
labels = model_infostop.fit_predict(df[['latitude','longitude','time']].values)
# Collapse per-point labels into (label, start, end, lat, lon) intervals.
trajectory = infostop.postprocess.compute_intervals(df[['latitude','longitude','time']].values,
                                                    labels)
#Write to file
pd.DataFrame(trajectory, columns = ['label','start','end','latitude','longitude']).to_csv(MY_PATH_TO_OUTPUT_DATA, index = False)
| 1_Pre_process_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Previous Page]() [Next Page]()
# ## Exercise: Some More Algorithms
# This notebook shows another class comparison between cuML and Scikit-learn: The `LogisticRegression`. The basic form of logistic regression is used to model the probability of a certain class or event happening based on a set of variables.
#
# We also use this as an example of how cuML can adapt to other GPU centric workflows, this time based on CuPy, a GPU centric NumPy like library for array manipulation: [CuPy](https://cupy.chainer.org)
#
# Thanks to the [CUDA Array Interface](https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html) cuML is compatible with multiple GPU memory libraries that conform to the spec, and tehrefore can use objects from libraries such as CuPy or Pytorch without additional memory copies!
#
#
# ## Here is the list of exercises and modules in the lab:
# <a href='#lr'>Logistic Regression</a><br>
# - <a href='#ex1'>Exercise 1</a><br>
# - <a href='#ex2'>Exercise 2</a><br>
#
# <a id='lr'></a>
# Lets begin by importing our needed libraries:
# +
import pandas as pd
# Lets use cupy in a similar fashion to how we use numpy
import cupy as cp
from sklearn import metrics, datasets
from sklearn.linear_model import LogisticRegression as skLogistic
from sklearn.preprocessing import binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Two-color map (red / blue) for plotting the two classes.
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
# -
# Once again, lets use Scikit-learn to create a dataset to use:
# Synthetic 2-feature binary classification set; fixed random_state makes it
# reproducible. Returns (features, labels) as NumPy arrays.
a = datasets.make_classification(10000, n_features=2, n_informative=2, n_redundant=0,
                                 n_clusters_per_class=1, class_sep=0.5, random_state=1485)
# Now lets create our `X` and `y` arrays in CuPy:
# order='F' (column-major) is the layout cuML estimators expect.
X = cp.array(a[0], order='F') # the API of CuPy is almost identical to NumPy
y = cp.array(a[1], order='F')
# Lets see how the dataset works:
# cp.asnumpy copies GPU arrays back to host memory for matplotlib.
plt.scatter(cp.asnumpy(X[:,0]), cp.asnumpy(X[:,1]), c=[cm_bright.colors[i] for i in cp.asnumpy(y)],
            alpha=0.1);
# Now lets divide our dataset into training and testing datasets in a simple manner:
# Split the data into a training and test set using NumPy like syntax
X_train = X[:8000, :].copy(order='F')
X_test = X[-2000:, :].copy(order='F')
y_train = y[:8000]
y_test = y[8000:10000]
# Note that the resulting objects are still CuPy arrays in GPU:
X_train.__class__
# ## Exercise: Fit the cuML and Scikit-learn `LogisticRegression` objects and compare them when they use as similar parameters as possible
#
# * Hint 1: the **default values** of parameters in cuML are **the same** as the default values for Scikit-learn most of the time, so we recommend to leave all parameters except for `solver` as the default
#
#
# * Hint 2: Remember the **solver can differ significantly between the libraries**, so look into the solvers offered by both libraries to make them match
#
#
# * Hint 3: Even though Scikit-learn expects Numpy objects, it **cannot** accept CuPy objects for many of its methods since it expects the memory to be on CPU (host), not on GPU (device)
#
# For convenience, the notebook offers a few cells to organize your work.
# <a id='ex1'></a>
#
# ### 1. Fit Scikit-learn LogisticRegression and show its accuracy
# <details><summary><b>Solution for Scikit-learn</b></summary>
# <pre>
#
# clf = skLogistic()
# clf.fit(X_train.get(), y_train.get())
# clf.score(X_test.get(), y_test.get())
# </pre>
# </details>
#
#
from sklearn.metrics import accuracy_score
# <a id='ex2'></a>
#
# ### 2. Fit cuML Regression and show its accuracy
#
# * Hint 1: Look at the data types expected by cuML methods: https://rapidsai.github.io/projects/cuml/en/stable/api.html#cuml.LogisticRegression.fit
# one or more of the input vectors might not be of the expected data type! You may need to typecast.
#
#
#
# * Hint 2: as mentioned above, cuML has native support for CuPy objects
# <details><summary><b>Solution for CuML</b></summary>
# <pre>
#
# reg = LogisticRegression()
# reg.fit(X_train,y_train)
#
# print("Coefficients:")
# print(reg.coef_)
# print("Intercept:")
# print(reg.intercept_)
#
# preds = reg.predict(X_test)
#
# print(preds)
#
# print('Scikit-learn accuracy: ' + str(reg.score(X_test, y_test)))
# </pre>
# </details>
from cuml import LogisticRegression
import numpy as np
# useful methods: cupy_array.astype(np_dtype) converts an array from one datatype to np_datatype, where np_datatype can be something like np.float32, np.float64, etc.
# useful methods: cudf_seris.to_array() converts a cuDF Series to a numpy array
# useful methods: cp.asnumpy(cupy_array) converts cupy to numpy
# **Expected accuracies for apples to apples comparison: 0.8025 vs 0.8695**
# Additional Exercise: Play with the different parameters, particularly the different Scikit-learn solvers to see how they differ in behavior even in the same library!
# ## Licensing
#
# This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
# [[1]]()
# [[2]]()
# [[3]]()
# [[4]]()
# [[5]]()
| ai/RAPIDS/English/Python/jupyter_notebook/CuML/Bonus_Lab-LogisticRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 007. Investigating a major flooding event: May-June 2013 - Danube in Lower Austria
# To get a feel how different variables correlate with each other for floodings, we look at one example for the Danube region.
#
# We specify the point of interest near the city Krems, which is located directly next to the Danube in Lower Austria and was heavily affected by the major flooding event.
# enable parent dir package loading
import link_src
# +
# %matplotlib inline
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from dask.distributed import Client
client = Client(processes=True)
import dask
#dask.config.set(scheduler='processes')
from dask.diagnostics import ProgressBar
# -
from python.aux.ml_flood_config import path_to_data
from python.aux.utils import open_data
from python.aux.plot import Map
from python.aux.utils import calc_stat_moments
from python.aux.utils import spatial_cov
# load dask client (displaying it in the notebook shows the dashboard link)
client
# define some vars
data_path = f'{path_to_data}danube/'
print(data_path)
# load ERA5 forcing data and the GLOFAS river-discharge reanalysis
era5 = open_data(data_path, kw='era5')
glofas = open_data(data_path, kw='glofas_ra')
# First of all, we need to specify the lat and lon values for Krems.
# Point of interest: Krems an der Donau, Lower Austria.
lat = 48.403
lon = 15.615
krems = dict(latitude=lat, longitude=lon)
# Let us take a look how the river discharge in the GLOFAS Reanalysis developed over time.
# Time window covering the May/June 2013 flood and its recession.
start_date = '2013-05-20'
end_date = '2013-07-20'
# Interpolate the gridded GLOFAS discharge to the Krems coordinates;
# dropping lat/lon leaves a pure time series.
dis_krems = glofas.sel(time=slice(start_date, end_date)).interp(krems).drop(['latitude', 'longitude'])['dis']
# Long-term mean over the full reanalysis period, for reference.
long_term_mean_krems = glofas.interp(krems).drop(['latitude', 'longitude'])['dis'].mean().values
print(dis_krems)
dis_krems.plot(label='discharge')
plt.gca().axhline(long_term_mean_krems, ls='--', color='r', label='long-term average')
plt.legend()
plt.title(f'GLOFAS river discharge at {krems}')
# We can see that the event hit Krems at the beginning of June, but the higher levels of discharge lasted for a very long term, as is usual for major flooding events (after a flash flooding event the discharge level would go back to more average levels much quicker.
#
# What is of big interest, is of course the beginning of the event, i.e. the sudden increase (almost trippling) of the river discharge.
#
# Going forward, we want to find to find and distinguish between two factors:
# - i. changes in discharge happening locally, where we suppose that mainly precipitation and runoff are the causing factors
# - ii. changes in discharge happening from mass influx on some location far away (far away corresponds - in our terms - to the approximate distance the mean flow transports a mass particle during one day: this, of course, varies heavily on the water level, and the location; we assume for the upstream area of Krems ~ 2.2m/s => which translates to about 190 km a day)
#
# This cascades into using a box about 35 grid points across latitude and longitude each to capture all potential 'local' effects.
#
# Hereafter, we distinguish between the two tasks and call the first one 'local model' and the second one 'transport model' (transport as in: effects changing the discharge level happen outside the 'local' region and thus, are 'transported' into the area of interest.).
# Now, lets have a look at at spatial correlations for the period in which the increase happened.
# Zoom in on the steep-rise period of the flood and plot both the absolute
# discharge and its day-to-day change (on a twin y-axis).
start_date_rise = '2013-05-31'
end_date_rise = '2013-06-06'
dis_krems_rise = glofas.sel(time=slice(start_date_rise, end_date_rise)).interp(krems).drop(['latitude', 'longitude'])['dis']
dis_krems_rise.plot(label='absolute discharge')
plt.legend()
# Second y-axis for the daily difference series.
plt.gca().twinx()
dis_krems_rise.diff(dim='time').plot(color='r', label='change per day')
plt.ylabel('change in discharge per day')
# Fixed typo in the displayed title: 'cahnge' -> 'change'.
plt.title(f'absolute and change per day discharge for {krems}')
plt.legend()
# Spatial covariances for some variables with local discharge.
# Map spatial covariance between the daily change in discharge at Krems and
# three ERA5 fields: convective precip, large-scale precip, and runoff.
m = Map(figure_kws=dict(figsize=(15,10)))
from python.aux.utils import spatial_cov_2var
dis_krems_rise_diff = dis_krems_rise.diff(dim='time')
# [1:] drops the first time step so the field aligns with the diff series.
cp_rise = era5['cp'].sel(time=slice(start_date_rise, end_date_rise))[1:]
spcov_2 = spatial_cov_2var(dis_krems_rise_diff, cp_rise)
m.plot(spcov_2)
m.plot_point(plt.gca(), lat, lon)
plt.title(f'spatial covariance between discharge at {krems} and convective precip')
lsp_rise = era5['lsp'].sel(time=slice(start_date_rise, end_date_rise))[1:]
spcov_2 = spatial_cov_2var(dis_krems_rise_diff, lsp_rise)
m.plot(spcov_2)
m.plot_point(plt.gca(), lat, lon)
plt.title(f'spatial covariance between discharge at {krems} and large-scale precip')
ro_rise = era5['ro'].sel(time=slice(start_date_rise, end_date_rise))[1:]
spcov_2 = spatial_cov_2var(dis_krems_rise_diff, ro_rise)
m.plot(spcov_2)
m.plot_point(plt.gca(), lat, lon)
plt.title(f'spatial covariance between discharge at {krems} and runoff')
# We can see, that especially the large-scale precipitation features high correlation with the discharge at Krems. The correlation far away is of course unrealistic from a physical point of view, but it indicates that maybe a synoptic event was responsible for the increase (which was indeed the case).
#
# This also highlights the significance of incorporating the whole catchment basin into the prediction for a downstream location. Using all values inside the catchment as features in ML models would technically be possible, but infeasible regarding computational resources.
# At the next step, we want to look at the 'local' vicinity around the location of interest.
#
# Note that 1.5 degrees is approximately the already mentioned range incorporated in the 'local model', due to the rivers not flowing in a straight fashion.
# 1.5 degree box around Krems; the latitude slice runs north-to-south
# (descending), matching the dataset's latitude ordering.
local_region = dict(latitude=slice(krems['latitude']+1.5,
                                   krems['latitude']-1.5),
                    longitude=slice(krems['longitude']-1.5,
                                    krems['longitude']+1.5))
start_date = '2013-05-20'
end_date = '2013-07-20'
# Predictor fields: ERA5 variables restricted to the local box and event period.
xds = era5.sel(local_region).sel(time=slice(start_date, end_date))
# Predictand: discharge at Krems (defined in an earlier cell).
yda = dis_krems.copy()
print(xds)
print(yda)
# Day-to-day change of discharge as an alternative target variable.
yda_change = yda.diff(dim='time')
yda_change.name = 'dis_change'
print(yda_change)
# Statistical moments of the ERA5 fields, aggregated over the spatial dims.
sm_era5 = calc_stat_moments(xds, dim_aggregator='spatial')
print(sm_era5)
# Plot coefficient of variation for the grid box around krems and compare the time development to absolute and changes in discharge values.
# +
fig, ax = plt.subplots(figsize=(12,7))
# Plot the coefficient of variation ('vc') for every ERA5 variable; variables
# with a vertical dimension are averaged over pressure levels first.
for var in sm_era5.variables:
    if not var in ['time', 'level', 'stat_moments']:
        da_iter = sm_era5[var].sel(stat_moments='vc')
        # Explicit dims check replaces a bare try/except that hid real errors.
        if 'level' in da_iter.dims:
            da_iter.mean(dim='level').plot(ax=ax, label=var)
        else:
            da_iter.plot(ax=ax, label=var)
plt.yscale('log')
plt.grid()
# Discharge and its change on a secondary axis for visual comparison.
ax2 = ax.twinx()
yda.plot(ax=ax2, ls='--', color='k', label='glofas-discharge')
yda_change.plot(ax=ax2, ls='--', color='grey', label='glofas-discharge change')
fig.legend()
# +
fig, ax = plt.subplots(figsize=(12,7))
# Plot mean/std (the inverse coefficient of variation) for every ERA5 variable.
for var in sm_era5.variables:
    if not var in ['time', 'level', 'stat_moments']:
        da_iter = sm_era5[var].sel(stat_moments='mean')/sm_era5[var].sel(stat_moments='std')
        # Explicit dims check replaces a bare try/except that hid real errors.
        if 'level' in da_iter.dims:
            da_iter.mean(dim='level').plot(ax=ax, label=var)
        else:
            da_iter.plot(ax=ax, label=var)
plt.yscale('log')
plt.grid()
ax2 = ax.twinx()
yda.plot(ax=ax2, ls='--', color='k', label='glofas-discharge')
yda_change.plot(ax=ax2, ls='--', color='grey', label='glofas-discharge change')
plt.title('mean/std')
fig.legend()
# -
| notebooks/1_data_download_analysis_visualization/1.07_investigate_a_major_flooding_event.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Telling Stories With Data
#
#
# **Version 0.1**
#
# ***
# By <NAME>
# 8 Jan 2021
# Find a dataset of your choice. This can be one you have visualized before, one you found online, or the sample data provided. Information is provided below for the sample data, but **you are encouraged to use your own data**. You are not required to complete the assignment in this Jupyter notebook, but can use any tool you are comfortable with.
#
# Use the lessons from the lecture to complete the assignment below.
# ## Sample Dataset (skip if using your own data)
#
# Download the [`hipgalv.LSR.csv`](https://northwestern.box.com/s/ykmt0tf4hdjbx61o6f83kv5qsyex91yb) file which includes the Hipparcos star catalog, processed and described below by Stuart Levy. The star positions/velocities are given in Galactic coordinates relative to the Sun.
#
# What the fields mean:
#
# * x, y, z -- star position, in parsecs. Sun = 0,0,0. z is approximately the altitude above/below the Galactic plane
# * colorb_v and colorv_i -- two measures of color, "B-V" (blue vs yellow) and "V-I" (yellow vs near-infrared). More positive => more red. A white star has values near zero, a yellow sunlike star around 0.5, a very red star 1.5 or more.
# * Mv -- estimated intrinsic luminosity in magnitude (logarithmic) units. (Note that Mv is different from lower-case mv.)
# * vx, vy, vz -- velocity. vz is the vertical (above/below Galactic plane) velocity.
# * speed -- magnitude of |vx, vy, vz| vector
#
# Things that might be neat to study:
# * Is the altitude above/below the plane related to the star's color?
# * What's the distribution of color vs Mv intrinsic luminosity?
# * A fairly-dim star might have Mv of +3 or +5 or so (the sun is about +5); a fairly bright star might have Mv around 0; a superluminous one, Mv ~ -5.
# * Do the B-V and V-I colors change their distribution for nearby stars vs more-distant ones? (more positive => more red) (They might adopt "nearby" as closer than 200 parsecs, say.)
# * Is the distribution of velocities in the xy plane different for nearby stars than for more-distant stars?
# * Are there noticeable clumps in the distribution of velocities in the xy plane? (Can you detect star clusters from their space velocities?)
# * Stars' orbits send them oscillating above and below the Galactic plane. We're catching them at arbitrary places in their orbits, but can use a combination of z and vz to estimate how high/low they'll go. This isn't an accurate formula, but each star's peak altitude might be something like 1.4*(vz^2) + abs(z), so that the Sun (with vz of about 7 km/sec) will have a peak altitude of about 80 parsecs.
# * Given that, is there a relation between star color and that derived peak altitude? (I'd expect that there'll be lower peak altitudes for bluer stars, higher ones for redder stars.)
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
# %matplotlib notebook
# -
# Here is a code sample showing how to read the data and draw a colored plot.
# +
# Read the star catalog; the first column is the index.
df = pd.read_csv('hipgalv.LSR.csv', index_col=0)

# Matplotlib's built-in "Reds" colormap. More colormaps and information here:
# https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
cmap = cm.get_cmap('Reds')

# Scatter altitude (z) vs. intrinsic luminosity (Mv), colored by B-V color.
ax = df.plot.scatter(x='z',
                     y='Mv',
                     c='colorb_v',
                     cmap=cmap,
                     title="Sample Visualization")

# Axis labels in one call.
ax.set(xlabel="Altitude", ylabel="Intrinsic Luminosity")

# Label the colorbar: pandas creates it as a second axes on the figure.
f = plt.gcf()
cax = f.get_axes()[1]
cax.set_ylabel('B-V Color')
# -
# ## Problem 1) Tell different stories to different audiences
#
# Create at least two different visualizations from your dataset to answer the following prompts. Vary the labels/captions/title if you use the same visualization for a different prompt.
#
# Note: You do not need to complete this entire problem in this Jupyter notebook. If you would like to add annotations, make a flow chart, show an overview + detail, or anything more complex than a basic chart, feel free to start your visualization here (or elsewhere), and complete it in PowerPoint, PhotoShop, Paint, GIMP, or whatever tool you are comfortable working with.
#
# **Prompt #1: Tell a story to your peers in astronomy.**
# **Prompt #2: Tell a story to a fifth grader.**
# **Prompt #3: Tell a story using the ten hundred most common words in the English language** (use this tool: https://xkcd.com/simplewriter/ )
#
# Note: This should only require a change in title/caption/labels, not in the visualization itself.
# **Prompt #4: Tell a story to a government policymaker who is considering cutting funding for your field.**
# ## Problem 2) Miscommunication
# Choose one of the following two assignments to complete. If you are feeling ambitious, complete both as an optional **Challenge Problem**.
# **Prompt Option A: Tell a false, but believable, story with your data**
# **Prompt Option B: Review the visualizations you created in Problem 1. How could they be misinterpreted?** Use the space below to write your answer in "markdown" mode.
#
# ## Problem 3) Audience Testing
# Show one or two of your visualizations to your friends, peers, family members, or other groups of your choice. Did they understand your story? Did they care? How would you change your visualization(s) based on this feedback? Use the space below to write your answer in "markdown" mode.
# ## Challenge Problem (Optional)
#
# Using what you learned in Problem 2 and Problem 3, revise your visualization(s).
| Sessions/Session12/Day2/TellingStoriesWithData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 사전 작업
# ## 모듈 로드
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import seaborn as sns
import gc
import lightgbm as lgb
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve
pd.set_option('display.max_columns', 400)
# # 데이터 로드
# Load the Kaggle train/test sets.
train = pd.read_csv('./data/train.csv')
test = pd.read_csv('./data/test.csv')
train.shape, test.shape
# Hold out 10% of the training data as an internal validation set.
train, test_cv = train_test_split(train, test_size=0.1)
train.shape, test.shape  # NOTE(review): displays test.shape, not test_cv.shape — confirm intent
# Feature columns: everything except the id and the label.
features = [c for c in train.columns if c not in ['ID_code', 'target']]
target = train['target']
# # LGB
# LightGBM hyper-parameters for the first-stage (AUC) model.
param = {
    'bagging_freq': 5,
    'bagging_fraction': 0.335,
    'boost_from_average': False,
    'boost': 'gbdt',
    'feature_fraction_seed': 47,
    'feature_fraction': 0.041,
    'learning_rate': 0.01,
    'max_depth': -1,              # no depth limit
    'metric':'auc',
    'min_data_in_leaf': 80,
    'min_sum_hessian_in_leaf': 10.0,
    'num_leaves': 13,
    'num_threads': 8,             # was listed twice; duplicate key removed
    'tree_learner': 'serial',
    'objective': 'binary',
    'verbosity': -1
}
# +
# %%time
# 5-fold stratified CV: train one LightGBM model per fold, collect
# out-of-fold predictions and average the test predictions over folds.
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
oof_lgb = np.zeros(len(train))
predictions_lgb = np.zeros(len(test))
feature_importance = pd.DataFrame()
train_columns = [c for c in train.columns if c not in ['ID_code', 'target']]
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train, target.values)):
    print("fold n°{}".format(fold_))
    trn_data = lgb.Dataset(train.iloc[trn_idx][train_columns], label=target.iloc[trn_idx])
    val_data = lgb.Dataset(train.iloc[val_idx][train_columns], label=target.iloc[val_idx])
    num_round = 60000
    # Early stopping on the validation fold; best_iteration is used for predicting.
    clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=3000, early_stopping_rounds = 200)
    oof_lgb[val_idx] = clf.predict(train.iloc[val_idx][train_columns], num_iteration=clf.best_iteration)
    predictions_lgb += clf.predict(test[train_columns], num_iteration=clf.best_iteration) / folds.n_splits
    # Per-fold feature importances, stacked for later inspection.
    fold_importance = pd.DataFrame()
    fold_importance["Feature"] = train_columns
    fold_importance["importance"] = clf.feature_importance()
    fold_importance["fold"] = fold_ + 1
    feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
    print("CV score: {:<8.5f}".format(roc_auc_score(target.values[val_idx], oof_lgb[val_idx])))
# Overall out-of-fold AUC across all folds.
print("CV score: {:<8.5f}".format(roc_auc_score(target.values, oof_lgb)))
# -
# Build a second-stage target: keep only rows the first model scored <= 0.1
# and mark which of those low-scored rows are actually positives.
temp = train.target.to_frame()
temp['predict'] = oof_lgb  # out-of-fold predictions from the first model
temp = temp.loc[(temp.predict <= 0.1)].reset_index()
temp['new_target'] = 0
temp.loc[temp.target == 1, 'new_target'] = 1
# Re-attach the original features to the filtered rows; drop id and old target.
train = temp[['index', 'new_target']].merge(train.reset_index(), on='index', how='left').drop(['index', 'ID_code', 'target'], axis=1)
# LightGBM hyper-parameters for the second-stage model (same settings as the
# first stage except the metric is binary_logloss instead of AUC).
param = {
    'bagging_freq': 5,
    'bagging_fraction': 0.335,
    'boost_from_average': False,
    'boost': 'gbdt',
    'feature_fraction_seed': 47,
    'feature_fraction': 0.041,
    'learning_rate': 0.01,
    'max_depth': -1,              # no depth limit
    'metric':'binary_logloss',
    'min_data_in_leaf': 80,
    'min_sum_hessian_in_leaf': 10.0,
    'num_leaves': 13,
    'num_threads': 8,             # was listed twice; duplicate key removed
    'tree_learner': 'serial',
    'objective': 'binary',
    'verbosity': -1
}
# Second-stage label: positives that the first model scored very low.
target = train['new_target']
# +
# Same 5-fold CV as the first stage, now on the filtered second-stage data.
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
oof_lgb = np.zeros(len(train))
# predictions_lgb = np.zeros(len(test))
feature_importance = pd.DataFrame()
train_columns = [c for c in train.columns if c not in ['ID_code', 'target', 'new_target']]
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train, target.values)):
    print("fold n°{}".format(fold_))
    trn_data = lgb.Dataset(train.iloc[trn_idx][train_columns], label=target.iloc[trn_idx])
    val_data = lgb.Dataset(train.iloc[val_idx][train_columns], label=target.iloc[val_idx])
    num_round = 60000
    clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=3000, early_stopping_rounds = 200)
    oof_lgb[val_idx] = clf.predict(train.iloc[val_idx][train_columns], num_iteration=clf.best_iteration)
#     predictions_lgb += clf.predict(test[train_columns], num_iteration=clf.best_iteration) / folds.n_splits
    # Per-fold feature importances, stacked for later inspection.
    fold_importance = pd.DataFrame()
    fold_importance["Feature"] = train_columns
    fold_importance["importance"] = clf.feature_importance()
    fold_importance["fold"] = fold_ + 1
    feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
    print("CV score: {:<8.5f}".format(roc_auc_score(target.values[val_idx], oof_lgb[val_idx])))
print("CV score: {:<8.5f}".format(roc_auc_score(target.values, oof_lgb)))
# -
# Inspect out-of-fold predictions of the second-stage model.
temp = train.new_target.to_frame()
temp['pred'] = oof_lgb
temp[temp.new_target == 1]  # show predictions for the true positives
oof_lgb
| Santander Customer Transaction Prediction/code/Kaggle-Santander-master/model/04 Modeling - BBiggu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Custom Entity detection with Textract and Comprehend
#
# ## Contents
# 1. [Background](#Background)
# 1. [Setup](#Setup)
# 1. [Data Prep](#Data-Prep)
# 1. [Textract OCR++](#Textract-OCR++)
# 1. [Amazon GroundTruth Labeling](#Amazon-GroundTruth-Labeling)
# 1. [Comprehend Custom Entity Training](#Comprehend-Custom-Entity-Training)
# 1. [Model Performance](#Model-Performance)
# 1. [Inference](#Inference)
# 1. [Results](#Results)
#
# ## Background
#
# In this notebook, we will cover how to extract and build a custom entity recognizer using Amazon Textract and Comprehend. We will be using Amazon Textract to perform OCR++ on scanned document, GroundTruth to label the interested entities, then passing the extracted documents to Amazon Comprehend to build and train a custom entity recognition model. No prior machine learning knowledge is required.
#
# In this example, We are using a public dataset from Kaggle: [Resume Entities for NER](https://www.kaggle.com/dataturks/resume-entities-for-ner?select=Entity+Recognition+in+Resumes.json). The dataset comprised 220 samples of candidate resumes in JSON format.
#
# ## Setup
# _This Notebook was created on ml.t2.medium notebook instances._
#
# Let's start by installing and importing all necessary libraries:
# Installing tqdm Python Library
# !pip install tqdm
# +
import sagemaker
import logging
import boto3
import glob
import time
import os
from tqdm import tqdm
import json
# AWS/SageMaker session context: region, execution role, default S3 bucket,
# and the key prefix under which all artifacts of this notebook are stored.
region = boto3.Session().region_name
role = sagemaker.get_execution_role()
bucket = sagemaker.Session().default_bucket()
prefix = 'textract_comprehend_NER'
# -
# ## Data Prep <a class="anchor" id="Data-Prep"></a>
#
# PDF and PNG are most common format for scanned documents within enterprises. We already converted these resumes into PDF format to emulate this. Let's upload all these PDF resumes onto S3 for Textract processing. Please note, there are only 220 samples of resume inside the dataset. By modern standards, this is a very small dataset. This dataset also come with few labeled custom entities. However, we will be running this dataset through Amazon GroundTruth to obtain a fresh copy of entity list.
# +
# Uploading PDF resumes to S3
pdfResumeFileList = glob.glob("./sales_resumes/*.pdf")
prefix_resume_pdf = prefix + "/sales_resumes/"

# Create the S3 resource once instead of once per file (perf fix: the original
# built a new boto3 Session/resource on every loop iteration).
s3_resource = boto3.Session().resource('s3')
for filePath in tqdm(pdfResumeFileList):
    file_name = os.path.basename(filePath)
    s3_resource.Bucket(bucket).Object(os.path.join(prefix_resume_pdf, file_name)).upload_file(filePath)

resume_pdf_bucket_name = 's3://'+bucket+'/'+prefix+'/'+'sales_resumes/'
print('Uploaded Resume PDFs :\t', resume_pdf_bucket_name)
# -
# ## Textract OCR++ <a class="anchor" id="Textract-OCR++"></a>
#
# Now these PDFs are ready for Textract to perform OCR++; you can kick off the process with the [StartDocumentTextDetection](https://docs.aws.amazon.com/textract/latest/dg/API_StartDocumentTextDetection.html) async API call. Here we only process 2 resume PDFs to demonstrate the process. To save time, we have all 220 resumes processed and available for you. See the textract_output directory for all the results.
# +
s3_client = boto3.client('s3')
pdf_object_list = []
# Getting a list of resume PDF files:
# NOTE(review): the prefix points at 'sales_resumes/text_output', not the PDF
# prefix uploaded above — confirm this is the intended listing.
# NOTE(review): list_objects returns at most 1000 keys per call; fine for 220
# files, but use a paginator if the dataset grows.
response = s3_client.list_objects(
    Bucket= bucket,
    Prefix= prefix+'/'+'sales_resumes/text_output'
)
for obj in response['Contents']:
    pdf_object_list.append(obj['Key'])
pdf_object_list[:5]
# +
from s3_textract_functions import *
import codecs

sample_to_process = 2

# We are only processing a few files as an example; there is no need to
# process all 220 files here (pre-processed outputs ship with the repo).
for file_obj in tqdm(pdf_object_list[:sample_to_process]):
    print('Textract Processing PDF: \t'+ file_obj)
    # Kick off the async Textract job, then poll until the result is ready.
    job_id = StartDocumentTextDetection(bucket, file_obj)
    print('Textract Job Submitted: \t'+ job_id)
    response = getDocumentTextDetection(job_id)

    # Derive the output file name: swap the extension and strip the S3 path.
    text_output_name = file_obj.replace('.pdf', '.txt')
    text_output_name = text_output_name[(text_output_name.rfind('/')+1):]
    print('Output Name:\t', text_output_name)

    output_dir = './textract_output/'
    # Write one text line per detected LINE block. The 'with' statement closes
    # the file, so the original's redundant explicit close() was removed.
    with codecs.open(output_dir + text_output_name, "w", "utf-8") as output_file:
        for item in response["Blocks"]:
            if item["BlockType"] == "LINE":
                print('\033[94m' + item["Text"] + '\033[0m')
                output_file.write(item["Text"]+'\n')
# +
from tqdm import tqdm

# Uploading Textract Output to S3
textract_output_filelist = glob.glob("./textract_output/*.txt")
prefix_textract_output = prefix + "/textract_output/"

# Create the S3 resource once instead of once per file (perf fix: the original
# built a new boto3 Session/resource on every loop iteration).
s3_resource = boto3.Session().resource('s3')
for filePath in tqdm(textract_output_filelist):
    file_name = os.path.basename(filePath)
    s3_resource.Bucket(bucket).Object(os.path.join(prefix_textract_output, file_name)).upload_file(filePath)

comprehend_input_doucuments = 's3://' + bucket+'/'+prefix_textract_output
print('Textract Output:\t', comprehend_input_doucuments)
# -
# ## Amazon GroundTruth Labeling <a class="anchor" id="Amazon-GroundTruth-Labeling"></a>
#
# Since we need to train a custom entity recognition model with Comprehend — and, as with any machine learning model, we need a large amount of training data — we are leveraging Amazon GroundTruth to label our entities. Amazon Comprehend by default can already recognize entities like [Person, Title, Organization, etc.](https://docs.aws.amazon.com/comprehend/latest/dg/how-entities.html). To demonstrate the custom entity recognition capability, we are focusing on Skill entities inside these resumes. We have labeled and cleaned the data with Amazon GroundTruth (see: entity_list.csv). If you are interested, you can follow this blog to [add a data labeling workflow for named entity recognition](https://aws.amazon.com/blogs/machine-learning/adding-a-data-labeling-workflow-for-named-entity-recognition-with-amazon-sagemaker-ground-truth/).
#
# Before we start training, let's upload the entity list onto S3
# +
# Uploading Entity List to S3
# The GroundTruth-labeled entity CSV becomes Comprehend's EntityList input.
entity_list_file = './entity_list.csv'
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix+'/entity_list/', 'entity_list.csv')).upload_file(entity_list_file)
comprehend_input_entity_list = 's3://' + bucket+'/'+prefix+'/entity_list/'+'entity_list.csv'
print('Entity List:\t', comprehend_input_entity_list)
# -
# ## Comprehend Custom Entity Training <a class="anchor" id="Comprehend-Custom-Entity-Training"></a>
#
# Now we have both raw and labeled data, and ready to train our model. You can kick off the process with create_entity_recognizer API call.
# +
comprehend_client = boto3.client('comprehend')
# Unique recognizer name (timestamp suffix avoids collisions on re-runs).
custom_recognizer_name = 'resume-entity-recognizer-'+ str(int(time.time()))
# Submit the custom entity recognizer training job: SKILLS entities, raw
# documents from Textract, entity list from GroundTruth labeling.
comprehend_custom_recognizer_response = comprehend_client.create_entity_recognizer(
    RecognizerName = custom_recognizer_name,
    DataAccessRoleArn=role,
    InputDataConfig={
        'EntityTypes': [
            {
                'Type': 'SKILLS'
            },
        ],
        'Documents': {
            'S3Uri': comprehend_input_doucuments
        },
        'EntityList': {
            'S3Uri': comprehend_input_entity_list
        }
    },
    LanguageCode='en'
)
print(json.dumps(comprehend_custom_recognizer_response, indent=2))
# -
# Once the training job is submitted, you can see the recognizer is being trained on Comprehend Console.
# This will take approximately 20 minutes to train
# +
# Poll the recognizer's training status (rerun this cell until TRAINED).
comprehend_model_response = comprehend_client.describe_entity_recognizer(
    EntityRecognizerArn= comprehend_custom_recognizer_response['EntityRecognizerArn']
)
print('ARN:\t', comprehend_model_response['EntityRecognizerProperties']['EntityRecognizerArn'])
print('Training Job Status:\t', comprehend_model_response['EntityRecognizerProperties']['Status'])
# ## Model Performance <a class="anchor" id="Model-Performance"></a>
#
# In the training, Comprehend will divide the dataset into training documents and test documents. Once the recognizer is trained, you can see the recognizer’s overall performance, as well as the performance for each entity.
# Report document counts and evaluation metrics once training has finished.
props = comprehend_model_response['EntityRecognizerProperties']
if props['Status'] != 'TRAINED':
    print('Please wait for previous step to be completed')
else:
    meta = props['RecognizerMetadata']
    print('Number of Document Trained:\t', meta['NumberOfTrainedDocuments'])
    print('Number of Document Tested:\t', meta['NumberOfTestDocuments'])
    print('\n-------------- Evaluation Metrics: ----------------')
    metrics = meta['EvaluationMetrics']
    print('Precision:\t', metrics['Precision'])
    print('ReCall:\t\t', metrics['Recall'])
    print('F1 Score:\t', metrics['F1Score'])
# ## Inference
#
# Next, we have prepared a small sample of text to test out our newly trained custom entity recognizer. First, we will upload the document onto S3 and start a custom recognizer job. Once the job is submitted, you can see the progress in console under Amazon Comprehend → Analysis Jobs.
# ### Uploading Test PDF resumes to S3 for OCR++
# +
# Upload the inference-time test PDFs to S3 (same pattern as the training set).
pdfResumeFileList = glob.glob("./test_document/*.pdf")
prefix_resume_pdf = prefix + "/test_document/"
for filePath in tqdm(pdfResumeFileList):
    file_name = os.path.basename(filePath)
    boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix_resume_pdf, file_name)).upload_file(filePath)
resume_pdf_bucket_name = 's3://'+bucket+'/'+prefix+'/'+'test_document/'
print('Uploaded Resume PDFs :\t', resume_pdf_bucket_name)
# -
# ### Performing OCR++ Using Textract
# +
# OCR the single test PDF with Textract, mirroring the training-data loop.
pdf_object_list = []
pdf_object_list.append(prefix_resume_pdf+"test_document.pdf")
output_dir = './test_document/'
for file_obj in tqdm(pdf_object_list):
    print('Textract Processing PDF: \t'+ file_obj)
    # Async Textract job; poll until the result is available.
    job_id = StartDocumentTextDetection(bucket, file_obj)
    print('Textract Job Submitted: \t'+ job_id)
    response = getDocumentTextDetection(job_id)
    # renaming .pdf to .text
    text_output_name = file_obj.replace('.pdf', '.txt')
    text_output_name = text_output_name[(text_output_name.rfind('/')+1):]
    print('Output Name:\t', text_output_name)
    # Writing Textract Output to Text Files:
    with codecs.open(output_dir + text_output_name, "w", "utf-8") as output_file:
        for item in response["Blocks"]:
            if item["BlockType"] == "LINE":
                print('\033[94m' + item["Text"] + '\033[0m')
                output_file.write(item["Text"]+'\n')
    # Redundant: the with-block above has already closed the file.
    output_file.close()
# -
# ### Uploading the Textract Result for Inference
# +
# Uploading test document onto S3:
# Input is the OCR'd text file; output goes to the same S3 prefix.
test_document = './test_document/test_document.txt'
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix+'/test_document/', 'test_document.txt')).upload_file(test_document)
s3_test_document = 's3://' + bucket+'/'+prefix+'/test_document/'+'test_document.txt'
s3_test_document_output = 's3://' + bucket+'/'+prefix+'/test_document/'
print('Test Document Input: ', s3_test_document)
print('Test Document Output: ', s3_test_document_output)
# +
# Start a recognizer Job:
# Async entities-detection job using the freshly trained custom recognizer;
# ONE_DOC_PER_LINE treats every line of the text file as its own document.
custom_recognizer_job_name = 'recognizer-job-'+ str(int(time.time()))
recognizer_response = comprehend_client.start_entities_detection_job(
    InputDataConfig={
        'S3Uri': s3_test_document,
        'InputFormat': 'ONE_DOC_PER_LINE'
    },
    OutputDataConfig={
        'S3Uri': s3_test_document_output
    },
    DataAccessRoleArn=role,
    JobName=custom_recognizer_job_name,
    EntityRecognizerArn=comprehend_model_response['EntityRecognizerProperties']['EntityRecognizerArn'],
    LanguageCode='en'
)
# -
# Use follow code to check if the Detection Job for completion
# Poll the detection job status (rerun this cell until COMPLETED).
job_response = comprehend_client.describe_entities_detection_job(
    JobId=recognizer_response['JobId']
)
print('Detection Job Name:\t', job_response['EntitiesDetectionJobProperties']['JobName'])
print('Detection Job ID:\t', job_response['EntitiesDetectionJobProperties']['JobId'])
print('Detection Job Status:\t', job_response['EntitiesDetectionJobProperties']['JobStatus'])
# S3 location of the job output (a .tar.gz archive).
output_url=job_response['EntitiesDetectionJobProperties']['OutputDataConfig']['S3Uri']
print('S3 Output URL:\t', output_url)
# ## Results
#
# Once the Analysis job is done, you can download the output and see the results. Here we converted the json result into table format.
# +
from urllib.parse import urlparse
# Downloading Output File
if job_response['EntitiesDetectionJobProperties']['JobStatus'] == 'COMPLETED':
    filename = './test_document_output/output.tar.gz'
    # Split the s3:// URL into bucket (netloc) and object key (path).
    output_url_o = urlparse(output_url, allow_fragments=False)
    s3_client.download_file(output_url_o.netloc, output_url_o.path.lstrip('/'), filename)
    # Extract the archive into the output directory (shell escape).
    # !cd test_document_output; tar -xvzf output.tar.gz
    print("Output downloaded ... ")
else:
    print("Please wait for the analysis job to be completed!")
# +
from IPython.display import HTML, display

output_file_name = './test_document_output/output'

# Table header followed by one row per detected entity.
data = [['Start Offset', 'End Offset', 'Confidence', 'Text', 'Type']]
with open(output_file_name, 'r', encoding='utf-8') as input_file:
    # Stream the file line by line instead of materializing readlines();
    # each line is one JSON document produced by the detection job.
    for line in input_file:
        json_line = json.loads(line)
        for entry in json_line['Entities']:
            data.append([entry['BeginOffset'], entry['EndOffset'], entry['Score'], entry['Text'], entry['Type']])

# Render the rows as a simple HTML table in the notebook.
display(HTML(
    '<table><tr>{}</tr></table>'.format(
        '</tr><tr>'.join(
            '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
    )
))
# -
| Textract_Comprehend_Custom_Entity_Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: IPython (Python 3)
# language: python
# name: python3
# ---
# This notebook reproduces (to rough visual accuracy), Figure 11 of [Frougier 2015]( https://doi.org/10.1109/VLSIT.2016.7573445)
# +
# %matplotlib inline
from ipywidgets import interact, FloatSlider, HTML
from IPython.display import display
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('font',size=18)
import numpy as np
import warnings
from hyperfet.devices import SCMOSFET,VO2,HyperFET
import hyperfet.approximations as appr
from hyperfet.references import si
# +
# VO2 selector parameters (unit strings parsed by `si`).
vo2_params={
    "rho_m":si("1e-4 ohm cm"),   # presumably metallic-phase resistivity — confirm against VO2 docs
    "rho_i":si("1e1 ohm cm"),    # presumably insulating-phase resistivity
    "J_MIT":si("10**-2.9 A/(70um)**2"),  # NOTE(review): transition current density (MIT) — verify units
    "J_IMT":si("10**-3.9 A/(70um)**2"),  # NOTE(review): transition current density (IMT) — verify units
    "v_met":0,
    "L":si("70um"),
    "W":si("70um"),
    "T":si("235nm")
}
vo2=VO2(**vo2_params)

# Supply voltage used as the slider default below.
VDD=.3
# -
# Module-level handles: `fet` exposes the last-built transistor for inspection,
# `out` is an HTML widget that shows the computed shift summary.
fet=None
out=HTML()
@interact(VT0=FloatSlider(value=.35,min=0,max=1,step=.05,continuous_update=False),
          W=FloatSlider(value=50,min=10,max=100,step=10,continuous_update=False),
          Cinv_vxo=FloatSlider(value=3000,min=1000,max=5000,step=400,continuous_update=False),
          SS=FloatSlider(value=.065,min=.05,max=.09,step=.005,continuous_update=False),
          alpha=FloatSlider(value=2.5,min=0,max=5,step=.5,continuous_update=False),
          beta=FloatSlider(value=1.8,min=0,max=4,step=.1,continuous_update=False),
          VDD=FloatSlider(value=.3,min=.3,max=1,step=.05,continuous_update=False),
          VDsats=FloatSlider(value=.1,min=.1,max=2,step=.1,continuous_update=False),
          delta=FloatSlider(value=.1,min=0,max=.5,step=.1,continuous_update=False),
          log10Gleak=FloatSlider(value=-12,min=-14,max=-5,step=1,continuous_update=False)
          )
def show_hf(VT0,W,Cinv_vxo,SS,alpha,beta,VDsats,VDD,delta,log10Gleak):
    """Interactive comparison of a bare FET vs. HyperFET transfer curves.

    Left subplot: FET (red) and HyperFET forward/backward branches (blue).
    Middle subplot: same, but with VT0 shifted by the approximation so the
    HyperFET's IOFF matches the bare FET's. All sliders are device parameters.
    """
    global fet
    plt.figure(figsize=(11.8,4.5))
    # Baseline transistor from the slider parameters (W given in nm).
    fet=SCMOSFET(
        W=W*1e-9,Cinv_vxo=Cinv_vxo,
        VT0=VT0,alpha=alpha,SS=SS,delta=delta,
        VDsats=VDsats,beta=beta,Gleak=10**log10Gleak)
    # Approximate threshold shift introduced by adding the VO2 element.
    shift=appr.shift(HyperFET(fet,vo2),VDD)
    # Second transistor with the shift folded into VT0 for comparison.
    fet2=SCMOSFET(
        W=W*1e-9,Cinv_vxo=Cinv_vxo,
        VT0=VT0+shift,alpha=alpha,SS=SS,delta=delta,
        VDsats=VDsats,beta=beta,Gleak=10**log10Gleak)
    hf2=HyperFET(fet2,vo2)
    VD=np.array(VDD)
    VG=np.linspace(0,VDD,100)
    plt.subplot(131)
    # Bare FET transfer curve (red).
    I=np.ravel(fet.ID(VD=VD,VG=VG))
    plt.plot(VG,I/fet.W,'r')
    hf=HyperFET(fet,vo2)
    # HyperFET forward/backward sweep branches (hysteresis), NaNs masked out.
    If,Ib=[np.ravel(i) for i in hf.I_double(VD=VD,VG=VG)]
    plt.plot(VG[~np.isnan(If)],If[~np.isnan(If)]/fet.W,'b')
    plt.plot(VG[~np.isnan(Ib)],Ib[~np.isnan(Ib)]/fet.W,'b')
    floor=10**log10Gleak*VD  # NOTE(review): computed but unused — leftover?
    plt.yscale('log')
    plt.ylim(1e-2,5e2)
    plt.xlabel("$V_{GS}\;\mathrm{[V]}$")
    plt.ylabel("$I/W\;\mathrm{[mA/mm]}$")
    plt.subplot(132)
    # Same comparison with the VT0-shifted device.
    plt.plot(VG,I/fet2.W,'r')
    If2,Ib2=[np.ravel(i) for i in hf2.I_double(VD=VD,VG=VG)]
    plt.plot(VG[~np.isnan(If2)],If2[~np.isnan(If2)]/fet2.W,'b')
    plt.plot(VG[~np.isnan(Ib2)],Ib2[~np.isnan(Ib2)]/fet2.W,'b')
    plt.yscale('log')
    plt.ylim(1e-2,5e2)
    plt.yticks([])
    #plt.subplot(133)
    #plt.plot(VG,I/fet.W,'r')
    #plt.plot(VG[~np.isnan(If)],If[~np.isnan(If)]/fet.W,'b')
    #plt.plot(VG[~np.isnan(Ib)],Ib[~np.isnan(Ib)]/fet.W,'b')
    #plt.xlim(.4,.5)
    #plt.ylim(400,600)
    #plt.tight_layout()
    # Summary of the shift and the resulting on-current change.
    out.value="Approx shift is {:.2g}mV, which equates the IOFF within {:.2g}%."\
    " This is expected to increase ION by {:.2g}% and actually increases it by {:.2g}%"\
    .format(shift*1e3,(If2[0]-I[0])/I[0]*100,appr.shiftedgain(hf,VDD)*100-100,(If2[-1]-I[-1])/I[-1]*100)
display(out)
| DRC2017/Exploration/Reproduce_Frougier_Fig11-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Acoplamento eletroacústico (microfone capacitivo de medição)
#
# O desenho esquemático do microfone simplificado é:
#
# 
#
# O circuito eletromecânico do sistema é dado na figura:
#
# 
#
# Vamos considerar uma pressão sonora de entrada e medir a tensão de saída do sensor.
# +
import numpy as np
from scipy import special
from matplotlib import pyplot as plt
plt.rcParams.update({'font.size': 14})
def ac_compliance(vol, rho0 = 1.21, c0 = 343):
    """Acoustic compliance Ca = V / (rho0 * c0^2) of a cavity of volume ``vol`` [m^3]."""
    return vol / (rho0 * c0 ** 2)
def ac_oo_smalltube(a, l, freq, flanged = False, rho0 = 1.21, c0 = 343, eta = 1.84e-5):
    """Lumped acoustic impedance of a narrow open tube.

    a: tube radius [m]; l: physical length [m]; freq: frequency [Hz];
    flanged: use the flanged (0.85*a) instead of unflanged (0.61*a) end
    correction. Returns (Ra, Ma, Za) — viscous resistance, acoustic mass
    and the series impedance Ra + j*omega*Ma.
    """
    # Effective length including the radiation end correction.
    ll = l + (0.85 * a if flanged else 0.61 * a)
    # Viscous resistance and acoustic mass of the air in the tube.
    Ra = 8 * eta * ll / (np.pi * a ** 4)
    Ma = 4 * rho0 * ll / (3 * np.pi * a ** 2)
    # Series R-M impedance at angular frequency 2*pi*freq.
    w = 2 * np.pi * freq
    return Ra, Ma, Ra + 1j * w * Ma
def ac_slit(h, b, l, freq, rho0 = 1.21, c0 = 343, eta = 1.84e-5):
    """Lumped acoustic impedance of a thin rectangular slit.

    Slit height ``h`` [m], width ``b`` [m], length ``l`` [m], evaluated at
    ``freq`` [Hz]. The viscous resistance Ra = 32*eta*l/(h^3*b) sits in
    series with the acoustic mass Ma = 6*rho0*l/(5*h*b).

    Returns (Ra, Ma, Za) with Za = Ra + j*omega*Ma.
    """
    Ra = 32*eta*l/((h**3)*b)
    Ma = 6*rho0*l/(5*h*b)
    omega = 2*np.pi*freq
    return Ra, Ma, Ra + 1j*omega*Ma
def zrad_cylinder(a, freq, rho0 = 1.21, c0 = 343):
    """Radiation impedance of a circular piston of radius ``a`` [m].

    Lumped equivalent circuit: two resistances Ra1/Ra2, a compliance Ca1
    and a mass Ma1; ``Zrad_eq`` is the input impedance of that network
    evaluated at ``freq`` [Hz].

    Returns (Ra1, Ra2, Ca1, Ma1, Zrad_eq).
    """
    # Element values of the equivalent circuit.
    Ra1 = (0.504 * rho0 * c0) / (np.pi * a**2)
    Ra2 = (rho0 * c0) / (np.pi * a**2)
    Ca1 = (5.44 * a**3) / (rho0 * c0**2)
    Ma1 = (0.1952 * rho0) / a
    # Network impedance, written as numerator / denominator for readability.
    omega = 2*np.pi*freq
    numerator = -(omega**2)*Ma1*Ra1*Ra2*Ca1 + 1j*omega*Ma1*(Ra1+Ra2)
    denominator = Ra1 + Ra2 - (omega**2)*Ma1*Ra1*Ca1 + 1j*omega*(Ra1*Ra2*Ca1 + Ma1)
    return Ra1, Ra2, Ca1, Ma1, numerator/denominator
# -
# # Parâmetros do microfone
# +
# Diaphragm parameters (SI units)
a = 4.45e-3; # diaphragm radius [m] (half-inch capsule)
S = np.pi*a**2 # diaphragm area [m^2]
E0 = 200 # polarization voltage [V]
h0 = 20.77e-6 # backplate-diaphragm distance [m] (alternative value: 19e-6)
Tm0 = 3160 # mechanical membrane tension -- presumably [N/m]; confirm units
rhom = 8900 # density of diaphragm material [kg/m^3]
tm = 5e-6 # thickness of diaphragm [m]
Mm = 4*rhom*tm*S/3 # effective moving mass of the diaphragm [kg]
Mad = Mm/S**2 # diaphragm mass referred to the acoustic side [kg/m^4]
Cm = (1/(8*np.pi*Tm0)) # mechanical compliance of the stretched membrane [m/N]
Cad = Cm*S**2 # diaphragm compliance referred to the acoustic side [m^3/Pa]
Rm = 0.4 # mechanical resistance [N s/m]
# Radiation load of the diaphragm modelled as a piston (evaluated at 1 Hz).
Ra1, Ra2, Ca1, Ma1, Zrad_eq = zrad_cylinder(a, 1, rho0 = 1.21, c0 = 343)
print(r'Mm é {} [kg]'.format(Mm))
print(r'Cm é {} [m/N]'.format(Cm))
print(r'Rm é {} [Ns/m]'.format(Rm))
#############################################
print(r'Ra1 é {} [Pa s/m^3]'.format(Ra1))
print(r'Ra2 é {} [Pa s/m^3]'.format(Ra2))
print(r'Ca1 é {} [m^3/Pa]'.format(Ca1))
print(r'Ma1 é {} [kg/m^4]'.format(Ma1))
# -
# # Slit
#
# Até aqui não haviamos falado sobre o slit, dado no esquema
#
# 
#
# Cuja impedância é dada por uma Massa acústica em série com uma Resistência acústica
#
# \begin{equation}
# Z_{A} = \frac{32 \eta l}{h^3 b} + \mathrm{j} \omega \frac{6 \rho_0 l}{5 h b}
# \end{equation}
#
# Podemos considerar
# - $b$ circunferência do sensor;
# - $h$ distância entre carcaça e início do backplate;
# - $l$ comprimento do backplate
# +
# Acoustic-circuit parameters (SI units)
# Backplate: the vent holes are modelled with ac_oo_smalltube and the
# result divided by 6 -- presumably 6 identical holes in parallel; confirm.
ab = 3.61e-3 # backplate radius [m]
afb = 0.51e-3 # radius of one backplate hole [m]
Sfb = np.pi*afb**2 # area of one backplate hole [m^2]
lfb = 0.84e-3 # hole length [m]
Rab_1f, Mab_1f, Zab_1f = ac_oo_smalltube(afb, lfb, 1, flanged = False)
#Mab = (4/3) * 1.21 * lfb/(np.pi*afb**2)
#Rab = 8*1.84e-5*lfb/(np.pi*afb**4);
Rab = Rab_1f/6 # parallel holes: resistance divides by the hole count
Mab = Mab_1f/6 # parallel holes: acoustic mass divides by the hole count
# Slit between the housing and the backplate edge
hs = a-ab # gap between housing and backplate [m]
ls = 0.3e-3; # slit length along the flow path [m]
bs = 2*np.pi*a # slit width: the sensor circumference [m]
Ras, Mas, Zas = ac_slit(hs, bs, ls, 1)
# Pressure-equalization tube, modelled as a flanged small tube
aT = 0.04e-3 # vent tube radius [m]
ST = np.pi*aT**2 # vent tube area [m^2]
lT = 2e-3 # vent tube length [m]
Rat, Mat, Zat = ac_oo_smalltube(aT, lT, 1, flanged = True)
# Cavity compliances
# NOTE(review): this Ca1 (air-gap compliance) overwrites the Ca1 returned
# by zrad_cylinder in the previous cell -- confirm that is intended.
V_A1 = S * h0 # air-gap volume between diaphragm and backplate [m^3]
Ca1 = ac_compliance(V_A1)
Hmic = 3.35e-2 # microphone body height [m]
V_A2 = 1.264e-7; # back-cavity volume [m^3]
Ca2 = ac_compliance(V_A2)
print(r'Rab é {} [Pa s/m^3]'.format(Rab))
print(r'Mab é {} [kg/m^4]'.format(Mab))
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print(r'Ras é {} [Pa s/m^3]'.format(Ras))
print(r'Mas é {} [kg/m^4]'.format(Mas))
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print(r'Rat é {} [Pa s/m^3]'.format(Rat))
print(r'Mat é {} [kg/m^4]'.format(Mat))
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print(r'Ca1 é {} [m^3/Pa]'.format(Ca1))
print(r'Ca2 é {} [m^3/Pa]'.format(Ca2))
# -
# # Referenciando ao lado acústico
# Refer mechanical and electrical elements to the acoustic side.
# The electroacoustic conversion factor used throughout is h0/(S*E0*Cm).
print(r'Mma é {} [kg/m^4] '.format(Mm/S**2)) # mechanical mass -> acoustic side
print(r'Cma é {} [m^3/Pa] '.format(Cm*S**2)) # mechanical compliance -> acoustic side
print(r'Rma é {} [Pas/m^3]'.format(Rm/S**2)) # mechanical resistance -> acoustic side
Ce0 = 17.9e-12 # static capacitance [F] -- presumably the capsule capacitance; confirm
print(r'Ce0 é {} [m^3/Pa]'.format(Ce0*(((S*E0*Cm)/h0)**2))) # capacitance referred to the acoustic side
Rout = 10e6 # output/preamp load resistance [ohm]
# NOTE(review): the message below says 'Rma' but the value printed is Rout
# referred to the acoustic side -- the label text reuses the wrong name.
print(r'Rma é {} [Pas/m^3]'.format(Rout*(h0/(S*E0*Cm))**2))
print('Fator de conversão {}'.format((h0/(S*E0*Cm))))
# # Sensibilidade
# +
# Load the circuit-simulation result (columns: freq, Re{V}, Im{V}) and
# convert the electrical output back to sensitivity [V/Pa] via the
# transduction factor h0/(S*E0*Cm).
sim_1_file = 'vout_bfore_corr.txt'
data = np.loadtxt(sim_1_file, skiprows=1)
freq_s = data[:,0]
V_s1 = (data[:,1] + 1j*data[:,2])/(h0/(S*E0*Cm))
###############################################################################
fig = plt.figure(figsize = (6,4.5))
ax = fig.add_axes([0, 0, 1, 1])
ax.semilogx(freq_s, 20*np.log10(np.abs(V_s1)), '-r', linewidth = 2,
            label = "Circuit")
ax.set_xlabel('Frequency [Hz]', labelpad=10)
ax.set_ylabel(r'$20 \mathrm{log}(|\tilde{V}/\tilde{P}|)$ [dB]', labelpad=10)
ax.legend(frameon=True, fontsize=16)
# BUGFIX: Axes.grid's first keyword was renamed b -> visible in matplotlib 3.5
# and `b` was removed in 3.6; passing only the styling kwargs is equivalent
# (the grid is enabled because styling kwargs are supplied).
ax.grid(which='both', axis='both', linestyle = '--')
ax.set_xlim((1,50000))
ax.set_ylim((-59,-38))
ax.set_xticks([1, 10, 100, 1000, 10000, 50000])
ax.set_xticklabels(('1', '10', '100', '1000', '10000', '50000'));
| u7_microfones/mic_pressao_capacitivo/.ipynb_checkpoints/mic_capacitivo_medicao-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Importing the dataset
# Wine.csv: feature columns 1-13 plus column 14 "Customer_Segment" (the class label).
dataset = read.csv('Wine.csv')
head(dataset)
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
# Stratified 80/20 split on the class label.
split = sample.split(dataset$Customer_Segment, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
# Standardize the feature columns; column 14 (the label) is left untouched.
training_set[-14] = scale(training_set[-14])
test_set[-14] = scale(test_set[-14])
# Applying PCA
# install.packages('caret')
library(caret)
# install.packages('e1071')
library(e1071)
# Fit PCA on the training features only (no test-set leakage); keep 2 components.
pca = preProcess(x = training_set[-14], method = 'pca', pcaComp = 2)
training_set = predict(pca, training_set)
# Reorder columns so the frame is (PC1, PC2, Customer_Segment).
training_set = training_set[c(2, 3, 1)]
test_set = predict(pca, test_set)
test_set = test_set[c(2, 3, 1)]
# Fitting SVM to the Training set
# install.packages('e1071')
library(e1071)
classifier = svm(formula = Customer_Segment ~ .,
                 data = training_set,
                 type = 'C-classification',
                 kernel = 'linear')
# Predicting the Test set results
# Column 3 holds the label, so drop it from the predictors.
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
table(test_set[, 3], y_pred)
# Visualising the Training set results
# NOTE(review): ElemStatLearn was archived from CRAN; install from the archive if needed.
library(ElemStatLearn)
set = training_set
# Dense grid over the two principal components (step 0.01) for decision regions.
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
     main = 'SVM (Training set)',
     xlab = 'PC1', ylab = 'PC2',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
# Background dots: predicted region colour; circles: actual class colour.
points(grid_set, pch = '.', col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set, pch = 21, bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
# Visualising the Test set results (same procedure on the held-out data)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'SVM (Test set)',
     xlab = 'PC1', ylab = 'PC2',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set, pch = 21, bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
| Dimensionality Reduction/PCA/R21.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# # Analyze each frame of a video for cars using a pretrained Neural Net
from keras.models import load_model
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
import pdb
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.ndimage.measurements import label as scipyLabel
# Load the trained classifier and sanity-check it on one known vehicle crop.
model = load_model('model.h5')
filename = './data/vehicles/3.png'
image = cv2.imread(filename)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # cv2 loads BGR; convert to RGB
plt.imshow(image)
# Predict on a batch of one (None adds the batch axis); yields a single score
# (the same score is thresholded at 0.7 by search_windows below).
float(model.predict(image[None, :, :, :], batch_size=1))
# +
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of ``img`` with every bounding box drawn on it.

    Each bbox is a ((x1, y1), (x2, y2)) corner pair; ``color`` is the box
    colour and ``thick`` the line width in pixels. The input image is left
    unmodified.
    """
    canvas = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(canvas, top_left, bottom_right, color, thick)
    return canvas
def draw_labeled_bboxes(img, labels):
    """Draw one bounding box per connected component onto ``img`` (in place).

    ``labels`` is the (label_array, n_labels) pair produced by
    scipy.ndimage.measurements.label. Each component's box spans the
    min/max pixel coordinates of that label. Returns the mutated image.
    """
    label_array, n_cars = labels
    for car_number in range(1, n_cars + 1):
        # Row/column indices of every pixel belonging to this component.
        ys, xs = (label_array == car_number).nonzero()
        ys = np.array(ys)
        xs = np.array(xs)
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
# -
def slide_window(img, x_start_stop=(None, None), y_start_stop=(None, None),
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Generate sliding-window positions over a region of ``img``.

    Args:
        img: image whose shape supplies default search bounds.
        x_start_stop, y_start_stop: (start, stop) pixel bounds of the search
            region; a ``None`` entry defaults to the image edge.
            (BUGFIX: the original crashed on the documented ``None`` default.)
        xy_window: window size (width, height) in pixels.
        xy_overlap: fractional overlap between adjacent windows in x and y.

    Returns:
        List of windows, each ((startx, starty), (endx, endy)).
    """
    # Default any missing bound to the corresponding image edge.
    x_start = 0 if x_start_stop[0] is None else x_start_stop[0]
    x_stop = img.shape[1] if x_start_stop[1] is None else x_start_stop[1]
    y_start = 0 if y_start_stop[0] is None else y_start_stop[0]
    y_stop = img.shape[0] if y_start_stop[1] is None else y_start_stop[1]
    # Span of the region to be searched.
    xspan = x_stop - x_start
    yspan = y_stop - y_start
    # Pixels per step and window counts.
    # BUGFIX: np.int was removed in NumPy 1.24 -- use the builtin int.
    nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
    nx_buffer = int(xy_window[0]*(xy_overlap[0]))
    ny_buffer = int(xy_window[1]*(xy_overlap[1]))
    nx_windows = int((xspan-nx_buffer)/nx_pix_per_step)
    ny_windows = int((yspan-ny_buffer)/ny_pix_per_step)
    window_list = []
    # Enumerate window positions row by row.
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            startx = xs*nx_pix_per_step + x_start
            endx = startx + xy_window[0]
            starty = ys*ny_pix_per_step + y_start
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list
window_dim = 50
class BigRect:
    """Axis-aligned rectangle that grows to cover overlapping windows.

    Windows are ((startx, starty), (endx, endy)) corner pairs; corners are
    kept as mutable [x, y] lists so they can be grown in place.
    """

    def __init__(self, window):  # Window is in ((startx, starty), (endx, endy)) form
        self.top_left = list(window[0])
        self.bottom_right = list(window[1])

    def add_rect(self, window):
        """Grow this rectangle so it also covers ``window``."""
        self.top_left[0] = min(self.top_left[0], window[0][0])
        self.top_left[1] = min(self.top_left[1], window[0][1])
        self.bottom_right[0] = max(self.bottom_right[0], window[1][0])
        self.bottom_right[1] = max(self.bottom_right[1], window[1][1])

    def is_touching(self, window):
        """Return True if ``window`` overlaps or abuts this rectangle.

        dx/dy are the widths of the overlap of the projected x/y intervals;
        both non-negative means the rectangles intersect.
        BUGFIX: the original also computed window_dim-padded bounds
        (tmp_TL/tmp_BR) that were never used -- that dead code is removed.
        """
        dx = min(self.bottom_right[0], window[1][0]) - max(self.top_left[0], window[0][0])
        dy = min(self.bottom_right[1], window[1][1]) - max(self.top_left[1], window[0][1])
        return dx >= 0 and dy >= 0
# +
def search_windows(img, windows):
    """Classify every sliding window with the global ``model`` and merge
    positive detections into clusters of touching BigRects.

    Returns a list of BigRect objects, one per cluster. Assumes the global
    Keras ``model`` scores a 64x64 crop; scores above 0.7 count as cars.
    """
    big_rects = []
    for window in windows:
        # Crop the window region and resize to the 64x64 input the model expects.
        small_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        label = float(model.predict(small_img[None, :, :, :], batch_size=1))
        if label > 0.7:
            # for/else: the else runs only when no existing cluster absorbed
            # the window (loop finished without break) -- seed a new BigRect.
            for big_rect in big_rects:
                if big_rect.is_touching(window):
                    big_rect.add_rect(window)
                    break
            else:
                big_rects.append(BigRect(window))
    return big_rects
def add_heat(heatmap, prev_frames):
    """Accumulate +1 into ``heatmap`` for every rectangle of every frame.

    ``prev_frames`` is a list of frames, each a list of objects exposing
    ``top_left``/``bottom_right`` [x, y] corners. Mutates ``heatmap`` in
    place and returns it.
    """
    for frame in prev_frames:
        for rect in frame:
            (x1, y1), (x2, y2) = rect.top_left, rect.bottom_right
            # Rows are y, columns are x.
            heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Zero every heatmap pixel at or below ``threshold`` (in place).

    Returns the same (mutated) array for call-chaining.
    """
    below = heatmap <= threshold
    heatmap[below] = 0
    return heatmap
# -
h = 720  # frame height [px]
w = 1280  # frame width [px]
HIST_LEN = 20  # number of past frames kept for heatmap smoothing
HEAT_THRESH = 10  # minimum accumulated detections for a pixel to survive
def process_image(img):
    """Detect cars in one video frame and return the annotated frame.

    Reads and updates the module-level history ``prev_big_rects`` so the
    detection heatmap is smoothed over the last HIST_LEN frames before
    thresholding and labeling connected regions.
    """
    windows = []
    # Search only the lower-right part of the frame (below the horizon).
    windows += slide_window(img, (int(w*.6), w), (int(h*.5), int(h*.9)), xy_window=(window_dim,window_dim))
    big_rects = search_windows(img, windows)
    # Update the history
    prev_big_rects.append(big_rects) # List of lists
    if len(prev_big_rects) > HIST_LEN: prev_big_rects.pop(0)
    # Create a heatmap over time to smooth the video.
    # BUGFIX: np.float was removed in NumPy 1.24 -- use the builtin float.
    heat = np.zeros_like(img[:,:,0]).astype(float)
    heat = add_heat(heat, prev_big_rects)
    heat = apply_threshold(heat, HEAT_THRESH)
    heatmap = np.clip(heat, 0, 255)
    labels = scipyLabel(heatmap)
    img_with_cars = draw_labeled_bboxes(np.copy(img), labels)
    return img_with_cars
# +
# Run the detector over the whole project video and write the annotated result.
prev_big_rects = [] # reset the detection history used by process_image
output_file = 'output_images/video.mp4'
# input_clip = VideoFileClip('project_video.mp4').subclip(27,35) # Subclip
input_clip = VideoFileClip('project_video.mp4') # Full video
output_clip = input_clip.fl_image(process_image) # NOTE: this function expects color images
# %time output_clip.write_videofile(output_file, audio=False)
# Explicitly release the moviepy readers so ffmpeg subprocesses terminate.
# NOTE(review): .audio is None for clips without an audio track, in which
# case the .audio.reader lines raise AttributeError -- confirm the source
# video actually carries audio.
input_clip.reader.close()
input_clip.audio.reader.close_proc()
output_clip.reader.close()
output_clip.audio.reader.close_proc()
del input_clip
del output_clip
| Video.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#def input에 대하여 output을 출력시켜줌
def summation(a, b):
return a+b
def dofun(n):
print("This is", n)
# -
dofun(10)
dofun(summation(3, 11))
# +
#print 와 return의 차이
def add1(a, b):
print(a + b)
def add2(a, b):
return a + b
a = int(input("Enter the number1 : "))
b = int(input("Enter the number2 : "))
# -
add1(a, b)
add2(a, b)
def mul(a, b):
return a * b
def fun(a, b):
c = a + b + mul(a,b)
print(c)
def max_num(a, b):
if a > b:
return a
else:
return b
# +
def starprint(n):
print(n * "*")
n = int(input("Enter the number: "))
# -
starprint(n)
def operator(a, b):
return [a + b, a - b, a * b]
print(operator(7,4))
# +
num_list = [5,1,7,2,6,3]
def swap(i, j):
temp = num_list[i]
num_list[i] = num_list[j]
num_list[j] = temp
i, j = 3, 5
swap(i, j)
print(num_list)
# -
def surf(r):
a = r ** 2 * 3.14
print("The surface is {}".format(a))
surf(5)
def wordlist(string):
return string.upper().split(" ")
string = input("Enter the sentence: ")
print(wordlist(string))
def fun(n):
sum_fun = 0
for i in range(8):
sum_fun += i % 5
return sum_fun
n = 8
print("fun{}의 return값은 {}이다.".format(n, fun(n)))
fun(8)
# +
#211(1)
# greatest common denominator = gcd
# least common multiple = lcm
# %time
def gcd(n, m):
for i in range(max(n,m), 0, -1):
if n % i ==0 and m% i ==0:
return i
def lcm(n, m):
"""Compute the lowest common multiple of n and m"""
return n * m / gcd(n, m)
# +
#211(2)
# greatest common denominator = gcd
# least common multiple = lcm
# %time
def gcd(n, m):
for i in range(max(n,m), 0, -1):
if n % i ==0 and m% i ==0:
return i
def lcm(n, m):
i = max(n, m)
while True:
if i % n ==0 and i % m == 0:
break
i += 1
return i
# -
# %time
lcm(52,36)
# +
n = input("insert number")
def rev_num(n):
return int(n[::-1])
print(rev_num(n))
# -
print(num)
# +
def add_all(n):
sum = 0
while n != 0:
sum += n % 10
print("+", n % 10, end= " ")
n = (n - (n % 10)) // 10
print("=", sum)
return sum
n = int(input("Enter the number : "))
while n // 10 != 0:
n = add_all(n)
# -
# +
def ismulThree(n):
return n % 3 == 0
def ismulFive(n):
return n % 5 == 0
sum = 0
for i in range(1, 100000):
if ismulFive(i) or ismulThree(i):
sum += i
print(sum)
# +
def allnum(n):
for i in range(0, 10):
case = 0
num = n
while num != 0:
if num % 10 == i:
case += 1
print("case:", case)
num = num //10
print("num:", num)
if case != 1:
return False
return True
n = int(input("Enter the number: "))
print(allnum(n))
# -
1256 % 10
1234567809 // 10
# +
def num_1(n):
case = 0
for i in range(1, n+1):
num = i
while num != 0:
if num % 10 == 1:
case += 1
num = num // 10
return case
n = int(input("Enter the number : "))
print(num_1(n))
# -
#217
def inf_d(n):
case = int(n)
for i in range(len(n)):
case += int(n[i])
print(case)
inf_d("1")
# +
#217
def d(n):
num = n
while n != 0:
num += n % 10
n = n //10
return num
def isSelfnum(n):
boolean = True
for i in range(1, n):
if n == d(i):
boolean = False
return boolean
n = int(input("Enter the number : "))
print(isSelfnum(n))
# -
22 % 10
# +
def fun_Double(N):
max = 1
while max ** 2 <= N:
max += 1
for i in range(max):
for j in range(max):
for k in range(max):
if(i**2 + j**2 + k**2 == N):
a = [i,j,k]
while 0 in a:
a.remove(0)
return a
print("Sorry")
print(fun_Double(14))
# -
def fun_recurring(N):
nstr = str(1/N)[2:]
if len(nstr) <= 10:
return 0
else:
rec = ""
for i in nstr:
rec += i
if rec == nstr[len(rec) : 2*len(rec)]:
return rec
fun_recurring(8)
1/17
# ####
| function (201~219).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Kubeflow pipelines
#
# This notebook goes through the steps of using Kubeflow pipelines using the Python3 interpreter (command-line) to preprocess, train, tune and deploy the babyweight model.
# ### 1. Create cluster
# + language="bash"
# gcloud config set compute/zone us-central1-b
# gcloud container clusters create lakpipeline \
# --zone us-central1-b \
# --scopes cloud-platform \
# --enable-cloud-logging \
# --enable-cloud-monitoring \
# --machine-type n1-standard-2 \
# --num-nodes 4
# kubectl create clusterrolebinding ml-pipeline-admin-binding --clusterrole=cluster-admin --user=$(gcloud config get-value account)
# -
# Go to the [Google Kubernetes Engine section of the GCP console](https://console.cloud.google.com/kubernetes) and make sure that the cluster is started and ready. This will take about 3 minutes.
# ### 2. Deploy Kubeflow pipeline to cluster
# + language="bash"
# PIPELINE_VERSION=0.1.2
# kubectl create -f https://storage.googleapis.com/ml-pipeline/release/$PIPELINE_VERSION/bootstrapper.yaml
# -
# ### 3. Install local interpreter
# + language="bash"
# PIPELINE_VERSION=0.1.2
# pip install python-dateutil https://storage.googleapis.com/ml-pipeline/release/$PIPELINE_VERSION/kfp.tar.gz --upgrade
# -
# After pip install, always <b>Reset Session</b> so that the new package gets picked up.
# ### 4. Set up port forward
# + language="bash"
# export NAMESPACE=kubeflow
# kubectl port-forward -n ${NAMESPACE} $(kubectl get pods -n ${NAMESPACE} --selector=service=ambassador -o jsonpath='{.items[0].metadata.name}') 8085:80
# -
# Now visit https://localhost:8085/pipeline
# ### 5. Do the DSL compile
# + language="bash"
# OUTDIR=pipelines/dsl
# rm -rf $OUTDIR
# mkdir -p $OUTDIR
# dsl-compile --py pipelines/mlp_babyweight.py --output $OUTDIR/mlp_babyweight.tar.gz
# ls -l $OUTDIR
# -
# ### 6. Upload and execute pipeline
#
# Start by navigating to localhost:8085 (as in port forward), create an experiment, upload the above pipeline and run it once.
# +
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| courses/machine_learning/deepdive/06_structured/7_pipelines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #!apt-get install -y libgl1-mesa-dev
# !pip3 install --upgrade pip
# !pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
# !pip3 install numpy
# !pip3 install pandas
# !pip3 install multiprocessing
# !pip3 install botocore
# !pip3 install boto3
# !pip3 install scikit-build
# !pip3 install cmake
# !pip3 install opencv-python
# !pip3 install tqdm
# Sanity check: confirm CUDA is visible and pick the compute device.
import os
os.environ.pop('CUDA_LAUNCH_BLOCKING', None) # drop synchronous-launch debug mode if set
import torch
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import cv2
import logging
import pandas as pd
import logging
import os
from multiprocessing import Pool, Manager
import functools
import tqdm
# Model factory: MobileNetV3-small SSD-Lite with the standard
# MobileNetV1-SSD prior-box configuration.
from vision.ssd.mobilenetv3_ssd_lite import create_mobilenetv3_small_ssd_lite
from vision.ssd.config import mobilenetv1_ssd_config
def create_net(num):
    """Return an ONNX-compatible MobileNetV3-small SSD-Lite with ``num`` classes."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return create_mobilenetv3_small_ssd_lite(num, onnx_compatible=True)
config = mobilenetv1_ssd_config
# +
# Training hyper-parameters.
# BUGFIX: the original dict listed "t_max" twice; a duplicate dict key is
# silently overwritten, so the duplicate entry is removed here.
Param = {
    "batch_size" : 8,
    "base_net_lr" : 0.001, #tune
    "num_epochs" : 200,
    "validation_epochs" : 5, # validate/checkpoint every N epochs
    "t_max" : 100, # CosineAnnealingLR period #tune
    "lr" : 0.01, #tune
    "scheduler" : "cosine", # may be changed #tune
    "freeze_base_net" : False, # set True when fine-tuning only the SSD layers
    "freeze_net" : False, # set True to train prediction heads only
    "num_workers" : 2, # parallel data loading (not supported on Windows)
    "momentum" : 0.9, #tune
    "weight_decay" : 5e-4, #tune
    "debug_steps" : 100, # log running losses every N steps
}
import multiprocessing
cpu_count = multiprocessing.cpu_count()
print(cpu_count)
# Never request more loader workers than available CPUs.
Param["num_workers"] = min(2, cpu_count)
# -
#prepare tranform
from vision.ssd.data_preprocessing import TrainAugmentation, TestTransform, orgAugmentation, cleanAugmentation3
from vision.ssd.ssd import MatchPrior
#全部color-shift入れる
train_transform = cleanAugmentation3(config.image_size, config.image_mean, config.image_std)
org_transform = cleanAugmentation3(config.image_size, config.image_mean, config.image_std)
target_transform = MatchPrior(config.priors, config.center_variance, config.size_variance, 0.5)
test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)
logging.info("Prepare training datasets.")
from vision.utils.misc import str2bool, Timer, freeze_net_layers, store_labels
from torch.utils.data import DataLoader, ConcatDataset
#TODO データセットのライブラリ作る、transformも -> OpenImagesDatasetをたたき台にする 最終的にsub-{}-annotation-bboxに統合する
from vision.datasets.open_images import OpenImagesDataset
from vision.datasets.open_images_v2 import OpenImagesDataset_v2
from vision.datasets.pseudo_dataset import pseudoDataset
from vision.datasets.pseudo_dataset_can import pseudoDatasetCan
import glob
check_point_path = "./models/gaku_cornv19_small_new_dataset"
os.makedirs(check_point_path, exist_ok=True)
datasets = []
valdatasets = []
# +
dataset_path_old = "./all_images"
dataset = OpenImagesDataset(dataset_path_old,transform=org_transform, target_transform=target_transform, dataset_type="train", balance_data=False)
datasets.append(dataset)
val_dataset = OpenImagesDataset(dataset_path_old, transform=test_transform, target_transform=target_transform, dataset_type="validation")
valdatasets.append(val_dataset)
# +
dataset_path_new = "./arliss_teacher2"
dataset = OpenImagesDataset_v2(dataset_path_new,transform=org_transform, target_transform=target_transform, dataset_type="train", balance_data=False)
datasets.append(dataset)
val_dataset = OpenImagesDataset_v2(dataset_path_new, transform=test_transform, target_transform=target_transform, dataset_type="validation")
valdatasets.append(val_dataset)
# +
# Persist the class-label list next to the checkpoints so inference can map
# class indices back to names.
label_file = os.path.join(check_point_path, "open-images-model-labels.txt")
if os.path.exists(label_file):
    os.remove(label_file) # overwrite any stale label file
store_labels(label_file, dataset.class_names)
logging.info(dataset)
# BUGFIX: num_classes was assigned twice with the same value; keep one.
num_classes = len(dataset.class_names)
print("class name : ", dataset.class_names)
print(label_file) # the target classes are listed in this file
# -
train_dataset = ConcatDataset(datasets)
print(len(train_dataset))
train_loader = DataLoader(train_dataset, Param["batch_size"],num_workers=Param["num_workers"],shuffle=True)
val_datasets = ConcatDataset(valdatasets)
print(len(val_datasets))
val_loader = DataLoader(val_datasets, Param["batch_size"],num_workers=Param["num_workers"],shuffle=False)
# +
#if fine tuning, change this cell
import itertools
logging.info("Build network.")
net = create_net(num_classes)
min_loss = -10000.0
last_epoch = -1 # -1 = train from scratch; set to a checkpoint epoch to resume
#model_path = os.path.join(check_point_path, "mbv3-Epoch-070-Loss-2.1637175174859853.pth") #no
model_path = "no"
if model_path != "no":
    net.init_from_pretrained_ssd(model_path)
    print("load : ", model_path)
base_net_lr = Param["base_net_lr"]
extra_layers_lr = Param["lr"]
if Param["freeze_base_net"] == True:
    # Freeze only the backbone; train SSD extra layers and prediction heads.
    # (A dead first assignment to `params` that was immediately overwritten
    # has been removed.)
    logging.info("Freeze base net.")
    freeze_net_layers(net.base_net)
    params = [
        {'params': itertools.chain(
            net.source_layer_add_ons.parameters(),
            net.extras.parameters()
        ), 'lr': extra_layers_lr},
        {'params': itertools.chain(
            net.regression_headers.parameters(),
            net.classification_headers.parameters()
        )}
    ]
elif Param["freeze_net"] == True:
    # BUGFIX: this branch previously re-tested "freeze_base_net", which made
    # it unreachable; it implements "freeze_net" (train prediction heads only).
    freeze_net_layers(net.base_net)
    freeze_net_layers(net.source_layer_add_ons)
    freeze_net_layers(net.extras)
    params = itertools.chain(net.regression_headers.parameters(), net.classification_headers.parameters())
    logging.info("Freeze all the layers except prediction heads.")
else:
    # Train everything; the backbone gets its own (smaller) learning rate.
    params = [
        {'params': net.base_net.parameters(), 'lr': base_net_lr},
        {'params': itertools.chain(
            net.source_layer_add_ons.parameters(),
            net.extras.parameters()
        ), 'lr': extra_layers_lr},
        {'params': itertools.chain(
            net.regression_headers.parameters(),
            net.classification_headers.parameters()
        )}
    ]
# -
#pre-train使うなら argsは頑張る
#if fine tuning, change this cell
"""
if args.resume:
logging.info(f"Resume from the model {args.resume}")
net.load(args.resume)
elif args.base_net:
logging.info(f"Init from base net {args.base_net}")
net.init_from_base_net(args.base_net)
elif args.pretrained_ssd:
logging.info(f"Init from pretrained ssd {args.pretrained_ssd}")
net.init_from_pretrained_ssd(args.pretrained_ssd)
logging.info(f'Took {timer.end("Load Model"):.2f} seconds to load the model.')
"""
net.to(DEVICE)
# +
from vision.nn.multibox_loss import MultiboxLoss
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
# SSD multibox loss with hard-negative mining at a 3:1 negative:positive ratio.
# NOTE(review): size_variance is hard-coded to 0.2 here while MatchPrior above
# was built with config.size_variance -- confirm the two are meant to match.
criterion = MultiboxLoss(config.priors, iou_threshold=0.5, neg_pos_ratio=3,
                         center_variance=0.1, size_variance=0.2, device=DEVICE)
# SGD over the parameter groups prepared in the freeze/param-groups cell.
optimizer = torch.optim.SGD(params, lr=Param["lr"], momentum=Param["momentum"],
                            weight_decay=Param["weight_decay"])
# Cosine-annealing LR schedule with period Param["t_max"].
scheduler = CosineAnnealingLR(optimizer, Param["t_max"], last_epoch=-1)
# -
"""
for _ in range(last_epoch):
print("skip")
scheduler.step()
"""
def train(loader, net, criterion, optimizer, device, debug_steps=100, epoch=-1):
    """Run one training epoch of the SSD detector.

    Args:
        loader: DataLoader yielding (images, boxes, labels) batches.
        net: SSD network returning (confidence, locations) for a batch.
        criterion: loss returning (regression_loss, classification_loss).
        optimizer: stepped once per batch.
        device: torch device the batch tensors are moved to.
        debug_steps: log running averages every `debug_steps` batches.
        epoch: epoch index, used only in the log message.
    """
    net.train(True)
    running_loss = 0.0
    running_regression_loss = 0.0
    running_classification_loss = 0.0
    for i, data in enumerate(loader):
        images, boxes, labels = data
        images = images.to(device)
        boxes = boxes.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        confidence, locations = net(images)
        #print(boxes.shape, labels.shape, confidence.shape, locations.shape)
        regression_loss, classification_loss = criterion(confidence, locations, labels, boxes)  # TODO CHANGE BOXES
        # Total loss is the unweighted sum of localization and classification terms.
        loss = regression_loss + classification_loss
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_regression_loss += regression_loss.item()
        running_classification_loss += classification_loss.item()
        # `i and ...` skips the log at step 0.
        if i and i % debug_steps == 0:
            avg_loss = running_loss / debug_steps
            avg_reg_loss = running_regression_loss / debug_steps
            avg_clf_loss = running_classification_loss / debug_steps
            logging.info(
                f"Epoch: {epoch}, Step: {i}, " +
                f"Average Loss: {avg_loss:.4f}, " +
                f"Average Regression Loss {avg_reg_loss:.4f}, " +
                f"Average Classification Loss: {avg_clf_loss:.4f}"
            )
            """
            print("epoch: ", epoch)
            print("Average Loss: ", avg_loss)
            print("Average Regression Loss: ", avg_reg_loss)
            print("Average Classification Loss: ", avg_clf_loss)
            """
            # Reset the running sums after each logging window.
            running_loss = 0.0
            running_regression_loss = 0.0
            running_classification_loss = 0.0
def test(loader, net, criterion, device):
    """Evaluate the network over a validation loader (no gradient updates).

    Returns per-batch averages as a tuple:
    (total_loss, regression_loss, classification_loss).
    """
    print("start test")
    net.eval()
    running_loss = 0.0
    running_regression_loss = 0.0
    running_classification_loss = 0.0
    num = 0  # number of batches seen, used as the averaging denominator
    for _, data in enumerate(loader):
        images, boxes, labels = data
        images = images.to(device)
        boxes = boxes.to(device)
        labels = labels.to(device)
        num += 1
        with torch.no_grad():  # inference only: skip building the autograd graph
            confidence, locations = net(images)
            #print("validation", boxes.shape, labels.shape, confidence.shape, locations.shape)
            regression_loss, classification_loss = criterion(confidence, locations, labels, boxes)
            loss = regression_loss + classification_loss
        running_loss += loss.item()
        running_regression_loss += regression_loss.item()
        running_classification_loss += classification_loss.item()
    return running_loss / num, running_regression_loss / num, running_classification_loss / num
# Main training loop: resumes at last_epoch + 1 (fresh run: epoch 0).
for epoch in range(last_epoch + 1, Param["num_epochs"]):
    print("from", last_epoch + 1, " now : ", epoch, ' last : ', Param["num_epochs"])
    # NOTE(review): since PyTorch 1.1 the recommended order is to call
    # scheduler.step() after the epoch's optimizer steps; calling it first
    # shifts the LR schedule by one epoch -- confirm this is intended.
    scheduler.step()
    train(train_loader, net, criterion, optimizer,
          device=DEVICE, debug_steps=Param["debug_steps"], epoch=epoch)
    # Validate (and checkpoint) every validation_epochs and on the final epoch.
    if epoch % Param["validation_epochs"] == 0 or epoch == Param["num_epochs"] - 1:
        val_loss, val_regression_loss, val_classification_loss = test(val_loader, net, criterion, device=DEVICE)
        logging.info(
            f"Epoch: {epoch}, " +
            f"Validation Loss: {val_loss:.4f}, " +
            f"Validation Regression Loss {val_regression_loss:.4f}, " +
            f"Validation Classification Loss: {val_classification_loss:.4f}"
        )
        # Checkpoint filename embeds the epoch number and validation loss.
        model_path = os.path.join(check_point_path, "mbv3-Epoch-" + str(epoch).zfill(3) + "-Loss-" + str(val_loss) + ".pth")
        net.save(model_path)
        logging.info(f"Saved model {model_path}")
| trainv11_newdata_small.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="Derh_z5xRc4H"
# Copyright 2019 <NAME>
# + [markdown] colab_type="text" id="Fm-HVp5SRDa7"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/mlai-demo/TextGen-tf2/blob/master/TextGen_tf2pub.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td><td>
# <a target="_blank" href="https://github.com/mlai-demo/TextGen-tf2/blob/master/TextGen_tf2pub.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
# + [markdown] colab_type="text" id="gXnXsnMyYPUV"
# ## Prep work
# + [markdown] colab_type="text" id="yUjjttgRYWI9"
# ### Download relevant libraries and check the setup
# + colab={"base_uri": "https://localhost:8080/", "height": 541} colab_type="code" id="RjnAX03nXbWn" outputId="42170cc6-8aef-4daa-e42f-3fb1df97da1f"
from __future__ import absolute_import, division, print_function, unicode_literals
#Need to use the latest Tensorflow version - can find it at https://www.tensorflow.org/install/
# !pip install tensorflow-gpu==2.0.0-alpha0
import tensorflow as tf
import numpy as np
import os
import datetime
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="asvI4tVjXob-" outputId="b837f485-c8ca-4512-bf61-92aa5e9b3ff6"
# Confirm which TensorFlow build is active in this runtime (must be 2.x).
print("TensorFlow version: ", tf.__version__)
# + [markdown] colab_type="text" id="5tSAEtZsZXlJ"
# Check if GPU is available - always good to double-check. When using Colab, I sometimes forget to change runtime type, so having this code will always catch it.
# + colab={"base_uri": "https://localhost:8080/", "height": 555} colab_type="code" id="He4OvefpZMr6" outputId="cb0510c2-5c8a-4e84-9f06-bfd0e0ed871f"
# List every device TF can see; a "GPU" entry confirms the accelerator is attached.
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# + [markdown] colab_type="text" id="MYMrkgt9X1t_"
# If need to remove logs from previous runs, uncomment and adjust the directory name:
# + colab={} colab_type="code" id="8xT0uOUoXued"
# #!rm -rf ./checkpoints_2019.04.21-20:48:58/ #if using Tensorboard or other logging
# + [markdown] colab_type="text" id="LamqnzOZYsu7"
# ### Download the dataset
# + [markdown] colab_type="text" id="5nU0wJvJlFGv"
# Check the current directory and upload the text file:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="MD49SegeY46C" outputId="e4b22ead-4e27-4cda-e3e3-7f4c3d8e87ed"
# NOTE(review): `os` was already imported above; this duplicate is harmless but redundant.
import os
# Working directory — uploaded files land here.
path = os.getcwd()
print(path)
# + colab={"base_uri": "https://localhost:8080/", "height": 37, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": "OK"}}} colab_type="code" id="3eflFyPIZ21J" outputId="da993ac5-ac2e-4544-aefb-a659f7126d0f"
# if using Google Colab: open the interactive file-upload widget.
from google.colab import files
uploaded = files.upload()
# Report each uploaded file and its size in bytes.
for fn in uploaded.keys():
  print('User uploaded file "{name}" with length {length} bytes'.format(
      name=fn, length=len(uploaded[fn])))
# Click Files tab - the uploaded file(s) will be there
# + [markdown] colab_type="text" id="dhmPv312ej5X"
# In case you have multiple files that need to be merged:
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="Diu-RxbycCnQ" outputId="4daabbb9-4a60-4b2c-d60d-8e8276422898"
# If using a directory with multiple files: read every *.txt under `path`
# (in sorted order) and concatenate them into one training string `text`.
import glob
import codecs

books = sorted(glob.glob(path + "/*.txt"))
print("Found {} books".format(len(books)))
text = ""
for filename in books:
    # BUG FIX: the original wrote `as books`, reusing the name of the list of
    # paths for the open file handle — after the loop, `books` no longer held
    # the book list. Use a distinct name for the handle.
    with codecs.open(filename, 'r', 'utf-8') as book_file:
        text += book_file.read()
print("Text is {} characters long".format(len(text)))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ITkDCoKndL3k" outputId="10914546-cce0-4189-cac5-160218516acd"
#If using a single file: read the whole book and decode as UTF-8.
# FIX: use a context manager; the original `open(...).read()` never closed the
# file handle (a resource leak, and a ResourceWarning under -W default).
with open(path + '/Iliad_v3.txt', 'rb') as book_file:
    text = book_file.read().decode(encoding='utf-8')
print("Text is {} characters long".format(len(text)))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="y9pruxZJXlkX" outputId="05abf976-6a69-44fc-e417-b4970f3248bc"
# Rough word count: split on single spaces, dropping empty fragments but
# keeping bare newlines as "words".
words = [w for w in text.split(' ') if w.strip() != '' or w == '\n']
print("Text is {} words long".format(len(words)))
# + [markdown] colab_type="text" id="6haqpbyNahWL"
# Make sure the text sample is what you expected:
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="gdrWeIZCac2b" outputId="28ef0be4-7ddb-4d2b-c11c-898374358bd6"
print(text[:100])
# + [markdown] colab_type="text" id="YKI0wsZne8o9"
# ## Prepare the text
# + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="AgcMBNHxfn4D" outputId="67774f25-be94-42bf-830a-948b6be5d35a"
# Build the character vocabulary and the two lookup tables the model needs:
# char2int (symbol -> integer id) and int2char (id -> symbol).
vocab = sorted(set(text))
print ('There are {} unique characters'.format(len(vocab)))
char2int = {symbol: idx for idx, symbol in enumerate(vocab)}
int2char = np.array(vocab)
print('Vector:\n')
# Show the full mapping so surprises (stray control chars etc.) are visible.
for symbol in vocab:
    print(' {:4s}: {:3d},'.format(repr(symbol), char2int[symbol]))
# + colab={"base_uri": "https://localhost:8080/", "height": 139} colab_type="code" id="ypjqjoHEfJXg" outputId="fe6a173b-3393-46c3-f993-e56f21348a23"
# Encode the entire corpus as int32 ids and preview the first 100 positions.
text_as_int = np.array([char2int[symbol] for symbol in text], dtype=np.int32)
print ('{}\n mapped to integers:\n {}'.format(repr(text[:100]), text_as_int[:100]))
# + colab={} colab_type="code" id="-FBpLzLYsqCb"
# Hold out the tail of the corpus for validation; 704000 is divisible by the
# batch size (64). NOTE(review): this split point is hard-coded — it assumes
# the corpus is longer than 704000 characters; confirm for new datasets.
tr_text = text_as_int[:704000] #text separated for training, divisible by the batch size (64)
val_text = text_as_int[704000:] #text separated for validation
# + [markdown] colab_type="text" id="2gB2GHvvga26"
# Confirm the shapes are what we expect:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1DU6Dwj8hEtI" outputId="54a9ee40-be9b-4570-e231-645b494f6b9e"
print(text_as_int.shape, tr_text.shape, val_text.shape)
# + [markdown] colab_type="text" id="uZZLCyTNHONf"
# ## Build the model
# + colab={} colab_type="code" id="Wy5yl7d2heq6"
# Populate the library of tunables - I like keeping them centralized in case I need to change things around:
batch_size = 64           # sequences per gradient step
buffer_size = 10000       # shuffle buffer for tf.data
embedding_dim = 256       # char embedding width
epochs = 50               # upper bound; early stopping may end sooner
seq_length = 200          # characters per training example
examples_per_epoch = len(text)//seq_length
#lr = 0.001 #will use default for Adam optimizer
rnn_units = 1024          # hidden units per LSTM layer
vocab_size = len(vocab)   # output logits dimension
# + colab={"base_uri": "https://localhost:8080/", "height": 454} colab_type="code" id="1e9_DLdGsqC0" outputId="e1760804-fb1a-4785-ee18-8744d56a5605"
# Wrap the encoded corpora as tf.data pipelines of single character ids.
tr_char_dataset = tf.data.Dataset.from_tensor_slices(tr_text)
val_char_dataset = tf.data.Dataset.from_tensor_slices(val_text)
print(tr_char_dataset, val_char_dataset)
# Group chars into chunks of seq_length+1: each chunk yields an input/target
# pair shifted by one character (hence the +1).
tr_sequences = tr_char_dataset.batch(seq_length+1, drop_remainder=True)
val_sequences = val_char_dataset.batch(seq_length+1, drop_remainder=True)
print(tr_sequences, val_sequences)
# Preview one chunk from each split, decoded back to text.
for item in tr_sequences.take(1):
    print(repr(''.join(int2char[item.numpy()])))
    print(item)
for item in val_sequences.take(1):
    print(repr(''.join(int2char[item.numpy()])))
    print(item)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="1Asy0RHPFzsc" outputId="c2869143-249e-4530-8a0f-a4ee23e6e0a4"
def split_input_target(chunk):
    """Split a length-(seq+1) chunk into an (input, target) pair.

    The target is the input shifted one step forward, so the model learns
    to predict the next character at every position.
    """
    return chunk[:-1], chunk[1:]
# Turn chunks into (input, target) pairs, shuffle, and batch. drop_remainder
# keeps the batch dimension fixed, which the stateful LSTMs require.
tr_dataset = tr_sequences.map(split_input_target).shuffle(buffer_size).batch(batch_size, drop_remainder=True)
val_dataset = val_sequences.map(split_input_target).shuffle(buffer_size).batch(batch_size, drop_remainder=True)
print(tr_dataset, val_dataset)
# + colab={} colab_type="code" id="7Ta6TWbuF7Fx"
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Assemble the char-level language model.

    Architecture: Embedding -> (Dropout -> stateful LSTM) x 2 -> Dropout
    -> Dense producing raw logits over the vocabulary. `batch_size` is
    baked into the input shape because the LSTMs are stateful.
    """
    layers = tf.keras.layers
    stack = [layers.Embedding(vocab_size, embedding_dim,
                              batch_input_shape=[batch_size, None])]
    # Two identical dropout + stateful-LSTM blocks.
    for _ in range(2):
        stack.append(layers.Dropout(0.2))
        stack.append(layers.LSTM(rnn_units,
                                 return_sequences=True,
                                 stateful=True,
                                 recurrent_initializer='glorot_uniform'))
    stack.append(layers.Dropout(0.2))
    stack.append(layers.Dense(vocab_size))
    return tf.keras.Sequential(stack)
# + colab={"base_uri": "https://localhost:8080/", "height": 89} colab_type="code" id="dXyjPJ1oG_N3" outputId="14ae9863-7a18-40fc-8169-b075c1705442"
# Instantiate the training model (full batch size).
model = build_model(
    vocab_size = len(vocab),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=batch_size)
# + [markdown] colab_type="text" id="6qLDewlkHcP4"
# ## Run the model
# + [markdown] colab_type="text" id="FwAn1CzeHu3m"
# Check the output shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nkUviDjkHHkl" outputId="a8512eb5-aaca-447c-bf8d-84bb19813e5e"
# Run one batch through the untrained model to verify output shape.
for input_example_batch, target_example_batch in tr_dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "respectively: batch_size, sequence_length, vocab_size")
# + colab={"base_uri": "https://localhost:8080/", "height": 399} colab_type="code" id="LtEcXnYhHyVz" outputId="1eb3e1f7-9da9-430a-c26c-bf0ede4aaa31"
model.summary()
# + [markdown] colab_type="text" id="tYGhrBXKIM4x"
# Untrained model output:
# + colab={"base_uri": "https://localhost:8080/", "height": 124} colab_type="code" id="qq-g-DmKIBkp" outputId="c27ff7e9-0db0-4f8f-c42f-cd4e649668b5"
# Sample one character id per timestep from the untrained logits — the
# decoded output should look like random noise at this point.
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()
print("Input: \n", repr("".join(int2char[input_example_batch[0]])))
print()
print("Predictions: \n", repr("".join(int2char[sampled_indices ])))
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="u744e_goIOzP" outputId="85524304-d144-4309-8260-99ee59f6ecf7"
def loss(labels, logits):
    """Sparse categorical cross-entropy computed on raw logits (the model's
    final Dense layer has no softmax, hence from_logits=True)."""
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def accuracy(labels, logits):
    """Per-position sparse categorical accuracy against integer labels."""
    return tf.keras.metrics.sparse_categorical_accuracy(labels, logits)
# Baseline loss/accuracy on one untrained batch; expected loss ~ln(vocab_size).
example_batch_loss = loss(target_example_batch, example_batch_predictions)
example_batch_acc = accuracy(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("Loss: ", example_batch_loss.numpy().mean())
print("Accuracy: ", example_batch_acc.numpy().mean())
# + colab={} colab_type="code" id="DEvIj65tIshz"
# Adam with default learning rate; loss is the from_logits cross-entropy above.
optimizer = tf.keras.optimizers.Adam()
model.compile(optimizer=optimizer, loss=loss)
# + colab={} colab_type="code" id="QDzNCe8YSeXY"
# Stop when validation loss has not improved for `patience` epochs.
patience = 10
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
# + colab={} colab_type="code" id="gEGtTQMKJdgo"
# Timestamped checkpoint directory, weights-only, one file per epoch.
# NOTE(review): the ':' characters in the timestamp make this path invalid on
# Windows filesystems — fine on Colab/Linux, confirm before porting.
checkpoint_dir = './checkpoints'+ datetime.datetime.now().strftime("_%Y.%m.%d-%H:%M:%S")
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 937} colab_type="code" id="m-o-W4MXI115" outputId="86f6a6ab-5a87-491c-d683-d85263c532db"
history = model.fit(tr_dataset, epochs=epochs, callbacks=[checkpoint_callback, early_stop] , validation_data=val_dataset)
# NOTE(review): this message prints even when training ran the full `epochs`
# without early stopping — it is not conditioned on the callback firing.
print ("Training stopped as there was no improvement after {} epochs".format(patience))
# + colab={"base_uri": "https://localhost:8080/", "height": 567} colab_type="code" id="_sluYs8sJW5Y" outputId="ee0297b6-743c-4863-ad45-62169d3700c3"
# Plot training vs. validation loss per epoch.
import matplotlib.pyplot as plt

plt.figure(figsize=(12,9))
plt.plot(history.history['loss'], 'g')
plt.plot(history.history['val_loss'], 'rx')  # present because fit() got validation_data
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
# FIX: the original called plt.legend twice in a row; the first call
# (['Train'] only) was dead code, immediately replaced by this one.
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
# + [markdown] colab_type="text" id="WWDBRn1rJ-FT"
# ## Generate text
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="u0UHhaktJ9Da" outputId="23bbd918-695d-4021-c32e-462f0edac422"
# Show which checkpoint will be restored (notebook display expression).
tf.train.latest_checkpoint(checkpoint_dir)
# + colab={"base_uri": "https://localhost:8080/", "height": 454} colab_type="code" id="GRp4fBGWKPew" outputId="33e032a6-be6c-450b-b9f9-1a86e36bcac0"
# Rebuild with batch_size=1 for generation: the stateful LSTMs fix the batch
# dimension at construction time, so the training model cannot be reused as-is.
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
#model.load_weights('./checkpoints_2019.04.29-00:31:15/ckpt_17') #if the latest checkpoint is not your preferred
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir)) #if the latest checkpoint is what you want
model.build(tf.TensorShape([1, None]))
model.summary()
# + colab={} colab_type="code" id="E4wEjZn6KYSZ"
def generate_text(model, start_string):
    """Sample 1000 characters from the trained model, seeded with start_string.

    Feeds the model its own previous prediction one character at a time,
    relying on the stateful LSTMs (batch_size=1) to carry context.
    """
    print('Generating with seed: "' + start_string + '"')
    num_generate = 1000
    temperature = 1.0  # <1.0 = more conservative, >1.0 = more surprising
    # Encode the seed and add the batch dimension expected by the model.
    model_input = tf.expand_dims([char2int[ch] for ch in start_string], 0)
    generated = []
    model.reset_states()
    for _ in range(num_generate):
        logits = tf.squeeze(model(model_input), 0) / temperature
        # Sample the next char id from the distribution at the last timestep.
        next_id = tf.random.categorical(logits, num_samples=1)[-1, 0].numpy()
        model_input = tf.expand_dims([next_id], 0)
        generated.append(int2char[next_id])
    return (start_string + ''.join(generated))
# + colab={"base_uri": "https://localhost:8080/", "height": 416} colab_type="code" id="XvBbck9MK0Cb" outputId="73041393-7680-4ecb-d925-9de810ccc2b5"
# Generate a sample to stdout.
print(generate_text(model, start_string="joy of gods"))
# + colab={} colab_type="code" id="ZiBHrzctK3Ap" outputId="d9721bbf-2bb0-4b2c-890b-86c7c9eecbfe"
# Generate a second, independent sample and persist it to disk.
with open('sampleTF2.txt', 'w') as f:
    sampleTF2 = generate_text(model, start_string="joy of gods")
    f.write(sampleTF2)
# + [markdown] colab_type="text" id="FQiEPW1usqD4"
# Free memory resources if needed:
# + colab={} colab_type="code" id="IbhTRpUsL3Bg"
import signal
# Hard-kill the kernel process to release GPU/RAM on Colab.
# NOTE(review): signal.SIGKILL is Unix-only — this line fails on Windows.
os.kill(os.getpid(), signal.SIGKILL)
| TextGen_tf2pub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from plot_graphs import plot_graphs
baseline_1 = [ '../experiments/exp_synthiaSF_baseline_128/']
baseline_2 = ['../experiments/exp_sun_baseline_128/']
depth = ['../experiments/exp_synthiaSF_depth_128_concatAtStart/',
'../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1/',
'../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1_allrelu/',
'../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1_relu/',
'../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1_SSMA_noBatchNorm/',
'../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1_f32_corr/']
#depth = ['../experiments/exp_synthiaSF_depth_128_fusionAfterGDN1_relu_depthWnoise/']
#weighted_loss = ['../experiments/exp_synthiaSF_128_laplacian_weighted_loss/']
""" ========================================================================================= """
exps_dir = '/datatmp/Experiments/belbarashy/exps/'
#cityscapes = ['../experiments/test_100ksteps_500_val/']
#cityscapes = ['../experiments/test_seg_comp/']
cityscapes_com_rgb = ['/datatmp/Experiments/belbarashy/exps/seg_arch_for_compAndReconsrurction/']
cityscapes_seg_rgb = ['/datatmp/Experiments/belbarashy/exps/seg_com_rgb/']
cityscapes_seg_depth = ['/datatmp/Experiments/belbarashy/exps/seg_com_depth/']
cityscapes_seg_rgb_d = ['/datatmp/Experiments/belbarashy/exps/seg_com_rgb_d/']
cityscapes_seg_rgb_d_by2 = ['/datatmp/Experiments/belbarashy/exps/seg_comp_rgb_d_downby2/']
seg_wocomp_rgb_d_SSMA = ['/datatmp/Experiments/belbarashy/exps/seg_wocomp_rgbd/']
seg_wocomp_rgb_d_conc = ['/datatmp/Experiments/belbarashy/exps/seg_wocomp_rgbd_concat/']
seg_wocomp_rgb_d_sum = ['/datatmp/Experiments/belbarashy/exps/seg_wocomp_rgbd_sum/']
cityscapes_seg_wocomp_depth = ['/datatmp/Experiments/belbarashy/exps/seg_wocomp_depth/']
seg_com_reco_rgb_fixedlmbda2048 = ['/datatmp/Experiments/belbarashy/exps/seg_com_reco_rgb_fixedlmbda2048/']
seg_com_reco_rgb_fixedlmbda512 = ['/datatmp/Experiments/belbarashy/exps/seg_com_reco_rgb_fixedlmbda512/']
seg_com_reco_rgb_fixedlmbda8192 = ['/datatmp/Experiments/belbarashy/exps/seg_com_reco_rgb_fixedlmbda8192/']
seg_com_reco_rgb_fixedlmbda128 = ['/datatmp/Experiments/belbarashy/exps/seg_com_reco_rgb_fixedlmbda128/']
seg_com_reco_rgb_fixedlmbda16384 = ['/datatmp/Experiments/belbarashy/exps/seg_com_reco_rgb_fixedlmbda16384/']
beta_0dot01 = ['/datatmp/Experiments/belbarashy/exps/beta_0.01/']
beta_0dot1 = ['/datatmp/Experiments/belbarashy/exps/beta_0.1/']
beta_1 = ['/datatmp/Experiments/belbarashy/exps/beta_1/']
beta_10 =['/datatmp/Experiments/belbarashy/exps/beta_10/']
beta_100 = ['/datatmp/Experiments/belbarashy/exps/beta_100/']
#depth = ['../experiments/exp_synthiaSF_depth_128/','../experiments/exp_synthiaSF_depth_192/']
#baseline = ['../experiments/exp_synthiaSF_baseline_128/','../experiments/exp_synthiaSF_baseline_192/']
#plot_graphs(expsDir= cityscapes_seg_rgb + cityscapes_seg_depth + cityscapes_seg_rgb_d + cityscapes_com_rgb,
# outDir='../experiments/')
#plot_graphs(expsDir= cityscapes_seg_rgb_d + cityscapes_seg_rgb_d_by2,
# outDir= '../experiments/')
"""
RGB-D reco
"""
#plot_graphs(expsDir=baseline_1+depth, outDir='../experiments/')
"""
RGB-D segmentation only Exp
"""
#plot_graphs(expsDir= cityscapes_seg_rgb_d+cityscapes_seg_rgb + cityscapes_seg_depth +cityscapes_seg_rgb_d_by2,
# outDir= '../experiments/')
"""
RGB-D segmentation & resonctruction Exp
"""
#
plot_graphs(expsDir= seg_com_reco_rgb_fixedlmbda2048+seg_com_reco_rgb_fixedlmbda512+
seg_com_reco_rgb_fixedlmbda8192+seg_com_reco_rgb_fixedlmbda128+
seg_com_reco_rgb_fixedlmbda16384+cityscapes_seg_rgb,
outDir='../experiments/')
"""
#cityscapes_com_rgb
plot_graphs(expsDir= beta_0dot01+beta_0dot1+beta_1+beta_10+beta_100+cityscapes_seg_rgb,
outDir='../experiments/')
"""
"""
RGB-D segmentation fusion methods
"""
#plot_graphs(expsDir= seg_wocomp_rgb_d_SSMA+seg_wocomp_rgb_d_conc+ seg_wocomp_rgb_d_sum,
# outDir='../experiments/')
# -
| code/exps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
from fast_rl.core.basic_train import AgentLearner
from fast_rl.agents.dqn import *
from fast_rl.agents.dqn_models import *
from fast_rl.core.train import AgentInterpretation, GroupAgentInterpretation
from fast_rl.core.data_block import MDPDataBunch
from fast_rl.core.agent_core import ExperienceReplay, GreedyEpsilon
from fastai.basic_data import DatasetType
from fast_rl.core.metrics import *
from fastai.gen_doc.nbdoc import *
# -
show_doc(DoubleDuelingModule.__init__)

import os

def _load_group_interp(model_dirs):
    """Collect every pickled run under `model_dirs` into one GroupAgentInterpretation.

    REFACTOR: the original copy-pasted this double loop three times verbatim.
    """
    interp = GroupAgentInterpretation()
    for model_dir in model_dirs:
        for file in os.listdir(model_dir):
            interp.add_interpretation(
                GroupAgentInterpretation.from_pickle(model_dir, file.replace('.pickle', '')))
    return interp

# Cartpole: DDQN vs dueling-double DQN.
group_interp = _load_group_interp(['data/cartpole_ddqn', 'data/cartpole_dddqn'])
group_interp.plot_reward_bounds(per_episode=True, show_average=True, hide_edges=True, smooth_groups=10)

# Cartpole: dueling DQN vs dueling-double DQN, then merged into the first group.
group_interp_2 = _load_group_interp(['data/cartpole_dueling dqn', 'data/cartpole_dddqn'])
group_interp_2.plot_reward_bounds(per_episode=True, show_average=True, hide_edges=True, smooth_groups=10)
group_interp.add_interpretation(group_interp_2)
# Notebook display expression: render the combined analysis.
group_interp.analysis

# LunarLander: four DQN variants, smoothed more heavily (noisier env).
group_interp_2 = _load_group_interp(['data/lunarlander_fixed target dqn', 'data/lunarlander_dddqn',
                                     'data/lunarlander_dueling dqn', 'data/lunarlander_ddqn'])
group_interp_2.plot_reward_bounds(per_episode=True, show_average=True, hide_edges=True, smooth_groups=20)
group_interp.add_interpretation(group_interp_2)
| docs_src/rl.agents.dddqn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%capture
# !pip install wikidataintegrator
from wikidataintegrator import wdi_core, wdi_login
from getpass import getpass
import pandas as pd
from copy import deepcopy
# +
# Wikibase (wbstack) endpoints for the target instance.
wbstack = "lux-saf-beta"
wikibase = "https://{}.wiki.opencura.com/".format(wbstack)
api = "https://{}.wiki.opencura.com/w/api.php".format(wbstack)
sparql = "https://{}.wiki.opencura.com/query/sparql".format(wbstack)
# Entity URIs use the http:// scheme in this instance's RDF output.
entityUri = wikibase.replace("https:", "http:")+"entity/"
WBUSER = "Andrawaag"
# FIX: restore the interactive password prompt — the literal `<PASSWORD>()`
# redaction placeholder was a SyntaxError. `getpass` is imported above.
# Never hard-code the credential here.
WBPASS = getpass()
login = wdi_login.WDLogin(WBUSER, WBPASS, mediawiki_api_url=api)
# -
# Load both source spreadsheets (authority records to be ingested).
data_load_10_dead = pd.read_excel("Export_SAF_LUX_10_dead can dance.xlsx", header=0)
data_load_anlux = pd.read_excel("20210315-initial export anlux-jt.xlsx", header=0)
# Item-engine class bound to this wikibase's API + SPARQL endpoints.
localEntityEngine = wdi_core.WDItemEngine.wikibase_item_engine_factory(api,sparql)
pd.set_option('display.max_columns', None)
# Notebook display expressions: columns present in one sheet but not the other.
data_load_10_dead.columns.difference(data_load_anlux.columns)
data_load_anlux.columns.difference(data_load_10_dead.columns)
# NOTE(review): this mints a brand-new "E21 Person" class item on EVERY run
# of the cell — re-running creates duplicates; confirm this is intentional.
person_item = localEntityEngine(new_item=True)
person_item.set_label("E21 Person", lang="en")
person_item.write(login)
# Build label -> QID lookup for every labelled entity in the wikibase.
qid = dict()
query = "PREFIX wdt: <http://{}.wiki.opencura.com/prop/direct/> SELECT ?item ?label WHERE {{?item rdfs:label ?label }}".format(wbstack)
# NOTE(review): the query is executed twice — this first call only renders a
# dataframe in the notebook and its result is discarded.
wdi_core.WDItemEngine.execute_sparql_query(query, as_dataframe = True, endpoint=sparql)
for index, row in wdi_core.WDItemEngine.execute_sparql_query(query, as_dataframe = True, endpoint=sparql).iterrows():
    qid[row["label"]] =row["item"].replace(entityUri, "")
# # Export_SAF_LUX_10_dead can dance
# FIX: the loop below iterates `data_load`; the original assigned to a
# misspelled `dataload`, so the first ingest pass crashed with NameError.
data_load = data_load_10_dead
# Ingest loop: one wikibase item per spreadsheet row (person authority record).
# NOTE(review): the loop variable `index` is shadowed further down by the
# alternative-name counter — the row index is unused after that point.
for index, row in data_load.iterrows():
    statements = []
    references = []
    # Accepted reference-property combinations (url and/or free-text source).
    good_reference = [{qid["source of information - url"]: None, qid["source of information - text"]: None},
                  {qid["source of information - text"]: None, qid["source of information - url"]: None},
                  {qid["source of information - url"]: None},
                  {qid["source of information - text"]: None}]
    if not pd.isnull(row["Sources of information:Url"]):
        if row["Sources of information:Url"].startswith("http"):
            references.append(wdi_core.WDUrl(value=row["Sources of information:Url"], prop_nr=qid["source of information - url"], is_reference=True))
        references.append(wdi_core.WDString(value=row["Sources of information:Text"], prop_nr=qid["source of information - text"], is_reference=True))
    # is a person
    statements.append(wdi_core.WDItemID(value=qid["E21 Person"], prop_nr=qid['instance of']))
    # name qualifiers
    qualifiers_name =[]
    ## type: "1" = indirect (e.g. "Surname, Given"), "0" = direct name order
    if row["Heading:Type"] == "1":
        qualifiers_name.append(wdi_core.WDItemID(value=qid["indirect name form"], prop_nr=qid["name format"], is_qualifier=True))
    elif row["Heading:Type"] == "0":
        qualifiers_name.append(wdi_core.WDItemID(value=qid["direct name form"], prop_nr=qid["name format"], is_qualifier=True))
    ## numeration
    if not pd.isnull(row["Heading:Numeration"]):
        qualifiers_name.append(wdi_core.WDString(value=row["Heading:Numeration"], prop_nr=qid["numeration"], is_qualifier=True))
    ## title
    if not pd.isnull(row["Heading:Title"]):
        qualifiers_name.append(wdi_core.WDString(value=row["Heading:Title"], prop_nr=qid["title"], is_qualifier=True))
    # add name
    statements.append(wdi_core.WDString(value=row["Heading:Name"], prop_nr=qid["name"], qualifiers=deepcopy(qualifiers_name), references=[deepcopy(references)]))
    # Alternative names arrive as ';'-separated parallel columns
    # (name / type / numeration / title aligned by position).
    if not pd.isnull(row["Alternative / other form(s) of name:Name"]):
        alternative_names = row["Alternative / other form(s) of name:Name"].split(";")
        print(row["Alternative / other form(s) of name:Type"])
        an_types = str(row["Alternative / other form(s) of name:Type"]).split(";")
        if not pd.isnull(row["Alternative / other form(s) of name:Numeration"]):
            an_numerations = row["Alternative / other form(s) of name:Numeration"].split(";")
        else:
            an_numerations = None
        if not pd.isnull(row["Alternative / other form(s) of name:Title"]):
            an_titles = row["Alternative / other form(s) of name:Title"].split(";")
            print(an_titles)
        else:
            an_titles = dict()
        # NOTE(review): `index` here clobbers the iterrows() row index above.
        index = 0
        for an in alternative_names:
            qualifiers_aname = []
            ## type
            if len(an_types)>0:
                if index < len(an_types):
                    if an_types[index] == "1":
                        qualifiers_aname.append(wdi_core.WDItemID(value=qid["indirect name form"], prop_nr=qid["name format"], is_qualifier=True))
                    elif an_types[index] == "0":
                        qualifiers_aname.append(wdi_core.WDItemID(value=qid["direct name form"], prop_nr=qid["name format"], is_qualifier=True))
            ## numeration
if not pd.isnull(an_numerations):
if index in an_numerastions.keys():
qualifiers_aname.append(wdi_core.WDString(value=an_numeration[index], prop_nr=qid["numeration"], is_qualifier=True))
            ## title (positional, parallel to the alternative names)
            if len(an_titles)>0:
                if index < len(an_titles):
                    qualifiers_aname.append(wdi_core.WDString(value=an_titles[index], prop_nr=qid["title"], is_qualifier=True))
            # add name
            statements.append(wdi_core.WDString(value=deepcopy(an), prop_nr=qid["alternative name"], qualifiers=deepcopy(qualifiers_aname), references=[deepcopy(references)]))
            index +=1
    #gender — spreadsheet uses ISO 5218 codes (0 unknown, 1 male, 2 female, 9 n/a)
    if not pd.isnull(row["Gender:Gender"]):
        if str(int(row["Gender:Gender"])) == "1":
            statements.append(wdi_core.WDItemID(value=qid["male"], prop_nr=qid["gender"], references=[deepcopy(references)]))
        elif str(int(row["Gender:Gender"])) == "2":
            statements.append(wdi_core.WDItemID(value=qid["female"], prop_nr=qid["gender"], references=[deepcopy(references)]))
        elif str(int(row["Gender:Gender"])) == "0":
            statements.append(wdi_core.WDItemID(value=qid["not known"], prop_nr=qid["gender"], references=[deepcopy(references)]))
        elif str(int(row["Gender:Gender"])) == "9":
            statements.append(wdi_core.WDItemID(value=qid["not applicable"], prop_nr=qid["gender"], references=[deepcopy(references)]))
    #birthdate
    #statements.append(wdi_core.WDEdtf(value=row["Associated identification information:Birth date"], prop_nr=qid["date of birth"]))
    #birthplace
    if not pd.isnull(row["Associated identification information:Birth place"]):
        statements.append(wdi_core.WDString(value=row["Associated identification information:Birth place"], prop_nr=qid["place of birth"], references=[deepcopy(references)]))
    # place of death
    if not pd.isnull(row["Associated identification information:Death place"]):
        statements.append(wdi_core.WDString(value=row["Associated identification information:Death place"], prop_nr=qid["place of death"], references=[deepcopy(references)]))
    # profession
    if not pd.isnull(row["Profession:Type"]):
        statements.append(wdi_core.WDString(value=row["Profession:Type"], prop_nr=qid["profession"], references=[deepcopy(references)]))
    # NOTE(review): activities_start is assigned (twice — End overwrites
    # Beginning) but never used, and `j` is a leftover counter; dates are
    # deferred until EDTF support lands (see TODO below).
    if not pd.isnull(row["Activity:Type"]):
        activities = row["Activity:Type"].split(";")
        if not pd.isnull(row["Activity:Beginning"]):
            activities_start = str(row["Activity:Beginning"]).split(";")
        if not pd.isnull(row["Activity:End"]):
            activities_start = str(row["Activity:End"]).split(";")
        j = 0
        for activity in activities:
            statements.append(wdi_core.WDString(value=activity, prop_nr=qid["activity"], references=[deepcopy(references)]))
            # TODO add qualifiers once EDTF is supported
    # internal identifier
    statements.append(wdi_core.WDExternalID(value=str(row["Administration fields:Internal standard identifier"]), prop_nr=qid["AFL identifier"], references=[deepcopy(references)]))
    # external identifiers — two ';'-separated parallel columns (id / source)
    identifiers = str(row["Administration fields:External standard identifier:ID"]).split(";")
    sources = str(row["Administration fields:External standard identifier:Name"]).split(";")
    if len(identifiers) != len(sources):
        print("ERROR: number of identifiers do not match number of sources")
        continue
    i = 0
    for identifier in identifiers:
        if sources[i] == "ARK":
            statements.append(wdi_core.WDUrl(value=identifier, prop_nr=qid["ARK"], references=[deepcopy(references)]))
        elif sources[i] == "VIAF":
            statements.append(wdi_core.WDExternalID(value=identifier, prop_nr=qid["VIAF"], references=[deepcopy(references)]))
        elif sources[i] == "ISNI":
            statements.append(wdi_core.WDExternalID(value=identifier, prop_nr=qid["ISNI"], references=[deepcopy(references)]))
        print(identifier, sources[i])
        i +=1
    print(row["Heading:Name"])
    # NOTE(review): as transcribed, the labels/write below run only for
    # existing items (new items are created but never labelled or written),
    # and the second `else:` further down has no matching `if` at this level
    # (it prints the "url does not contain header" message and presumably
    # paired the Sources-of-information URL check before notebook conversion
    # mangled the indentation). Confirm against the original .ipynb.
    if row["Heading:Name"] not in qid.keys():
        item = localEntityEngine(new_item=True, data=statements)
    else:
        item = localEntityEngine(wd_item_id=qid[row["Heading:Name"]], data=statements, good_refs=good_reference)
        item.set_label(row["Heading:Name"], lang="lb")
        item.set_label(row["Heading:Name"], lang="de")
        item.set_label(row["Heading:Name"], lang="en")
        item.set_label(row["Heading:Name"], lang="fr")
        qid[row["Heading:Name"]] = item.write(login)
        print(row["Heading:Name"], qid[row["Heading:Name"]])
    else:
        print(row["Heading:Name"], "url does not contain header")
# # ANLux data
# +
# FIX: the ANLux loop below calls re.search() but `re` was never imported
# anywhere in this notebook — add it here next to the other imports.
import re
import traceback

# Switch the ingest loop to the ANLux spreadsheet.
data_load = data_load_anlux

# Accepted reference-property combinations (url and/or free-text source).
good_reference = [{qid["source of information - url"]: None, qid["source of information - text"]: None},
                  {qid["source of information - text"]: None, qid["source of information - url"]: None},
                  {qid["source of information - url"]: None},
                  {qid["source of information - text"]: None}]
for index, row in data_load.iterrows():
statements = []
references = []
if not pd.isnull(row["Sources of information:Text"]):
for reference in row["Sources of information:Text"].split('\n'):
m = re.search(r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)', reference)
if m:
print(m.group(0))
references.append(wdi_core.WDUrl(value=m.group(0), prop_nr=qid["source of information - url"], is_reference=True))
else:
references.append(wdi_core.WDString(value=reference, prop_nr=qid["source of information - text"], is_reference=True))
# is a person
statements.append(wdi_core.WDItemID(value=qid["E21 Person"], prop_nr=qid['instance of']))
# name qualifiers
qualifiers_name =[]
## type
if row["Heading:Type"] == "1":
qualifiers_name.append(wdi_core.WDItemID(value=qid["indirect name form"], prop_nr=qid["name format"], is_qualifier=True))
elif row["Heading:Type"] == "0":
qualifiers_name.append(wdi_core.WDItemID(value=qid["direct name form"], prop_nr=qid["name format"], is_qualifier=True))
## numeration
if not pd.isnull(row["Heading:Numeration"]):
qualifiers_name.append(wdi_core.WDString(value=row["Heading:Numeration"], prop_nr=qid["numeration"], is_qualifier=True))
## title
if not pd.isnull(row["Heading:Title"]):
qualifiers_name.append(wdi_core.WDString(value=row["Heading:Title"], prop_nr=qid["title"], is_qualifier=True))
# add name
statements.append(wdi_core.WDString(value=row["Heading:Name"], prop_nr=qid["name"], qualifiers=deepcopy(qualifiers_name), references=[deepcopy(references)]))
if not pd.isnull(row["Alternative / other form(s) of name:Name"]):
alternative_name_statement = []
alternative_names = row["Alternative / other form(s) of name:Name"].split(";")
print(row["Alternative / other form(s) of name:Type"])
an_types = str(row["Alternative / other form(s) of name:Type"]).split(";")
if not pd.isnull(row["Alternative / other form(s) of name:Numeration"]):
an_numerations = row["Alternative / other form(s) of name:Numeration"].split(";")
else:
an_numerations = None
if not pd.isnull(row["Alternative / other form(s) of name:Title"]):
an_titles = row["Alternative / other form(s) of name:Title"].split(";")
print(an_titles)
else:
an_titles = dict()
index = 0
for an in alternative_names:
qualifiers_aname = []
## type
if len(an_types)>0:
if index < len(an_types):
if an_types[index].strip() == "1":
qualifiers_aname.append(wdi_core.WDItemID(value=qid["indirect name form"], prop_nr=qid["name format"], is_qualifier=True))
elif an_types[index].strip() == "0":
qualifiers_aname.append(wdi_core.WDItemID(value=qid["direct name form"], prop_nr=qid["name format"], is_qualifier=True))
## numeration
if not pd.isnull(an_numerations):
if index in an_numerastions.keys():
qualifiers_aname.append(wdi_core.WDString(value=an_numeration[index], prop_nr=qid["numeration"], is_qualifier=True))
if len(an_titles)>0:
if index < len(an_titles):
qualifiers_aname.append(wdi_core.WDString(value=an_titles[index], prop_nr=qid["title"], is_qualifier=True))
# add name
statements.append(wdi_core.WDString(value=deepcopy(an.strip()), prop_nr=qid["alternative name"], qualifiers=deepcopy(qualifiers_aname), references=[deepcopy(references)]))
index +=1
#gender
if not pd.isnull(row["Gender:Gender"]):
if str(int(row["Gender:Gender"])) == "1":
statements.append(wdi_core.WDItemID(value=qid["male"], prop_nr=qid["gender"], references=[deepcopy(references)]))
elif str(int(row["Gender:Gender"])) == "2":
statements.append(wdi_core.WDItemID(value=qid["female"], prop_nr=qid["gender"], references=[deepcopy(references)]))
elif str(int(row["Gender:Gender"])) == "0":
statements.append(wdi_core.WDItemID(value=qid["not known"], prop_nr=qid["gender"], references=[deepcopy(references)]))
elif str(int(row["Gender:Gender"])) == "9":
statements.append(wdi_core.WDItemID(value=qid["not applicable"], prop_nr=qid["gender"], references=[deepcopy(references)]))
#birthdate
#statements.append(wdi_core.WDEdtf(value=row["Associated identification information:Birth date"], prop_nr=qid["date of birth"]))
#birthplace
if not pd.isnull(row["Associated identification information:Birth place"]):
statements.append(wdi_core.WDString(value=row["Associated identification information:Birth place"], prop_nr=qid["place of birth"], references=[deepcopy(references)]))
# place of death
if not pd.isnull(row["Associated identification information:Death place"]):
statements.append(wdi_core.WDString(value=row["Associated identification information:Death place"], prop_nr=qid["place of death"], references=[deepcopy(references)]))
# profession
if not pd.isnull(row["Profession:Type"]):
statements.append(wdi_core.WDString(value=row["Profession:Type"], prop_nr=qid["profession"], references=[deepcopy(references)]))
activities = []
if not pd.isnull(row["Activity:Type"]):
activities = row["Activity:Type"].split(";")
if not pd.isnull(row["Activity:Beginning"]):
activities_start = str(row["Activity:Beginning"]).split(";")
if not pd.isnull(row["Activity:End"]):
activities_start = str(row["Activity:End"]).split(";")
j = 0
for activity in activities:
statements.append(wdi_core.WDString(value=activity, prop_nr=qid["activity"], references=[deepcopy(references)]))
# TODO add qualifiers once EDTF is supported
# internal identifier
statements.append(wdi_core.WDExternalID(value=str(row["Administration fields:Internal standard identifier"]), prop_nr=qid["AFL identifier"], references=[deepcopy(references)]))
# external identifiers
identifiers = str(row["Administration fields:External standard identifier:ID"]).split(";")
sources = str(row["Administration fields:External standard identifier:Name"]).split(";")
if len(identifiers) != len(sources):
print("ERROR: number of identifiers do not match number of sources")
continue
i = 0
for identifier in identifiers:
if sources[i] == "ARK":
statements.append(wdi_core.WDUrl(value=identifier, prop_nr=qid["ARK"], references=[deepcopy(references)]))
elif sources[i] == "VIAF":
statements.append(wdi_core.WDExternalID(value=identifier, prop_nr=qid["VIAF"], references=[deepcopy(references)]))
elif sources[i] == "ISNI":
statements.append(wdi_core.WDExternalID(value=identifier, prop_nr=qid["ISNI"], references=[deepcopy(references)]))
print(identifier, sources[i])
i +=1
print(row["Heading:Name"])
if row["Heading:Name"] not in qid.keys():
item = localEntityEngine(new_item=True, data=statements)
else:
item = localEntityEngine(wd_item_id=qid[row["Heading:Name"]], data=statements, good_refs=good_reference, keep_good_ref_statements=True)
item.set_label(row["Heading:Name"], lang="lb")
item.set_label(row["Heading:Name"], lang="de")
item.set_label(row["Heading:Name"], lang="en")
item.set_label(row["Heading:Name"], lang="fr")
try:
qid[row["Heading:Name"]] = item.write(login)
except:
print("Failed: "+row["Heading:Name"])
print("traceback: "+traceback.print_exc())
print(row["Heading:Name"], qid[row["Heading:Name"]])
# -
import re
# Ad-hoc exploration cells below: they rely on `row`, `data_load`, `item` and
# `good_reference` left over from earlier cells in this notebook run.
# Extract any URL from each line of the last processed row's free-text sources.
for reference in row["Sources of information:Text"].split("\n"):
    m = re.search(r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)', reference)
    if m:
        print(m.group(0))  # first URL match on this line, if any
# Echo the shared reference structure defined in an earlier cell.
good_reference
# Look up a single test record by heading name.
tjoepie = data_load[data_load["Heading:Name"] =="Willibrordus"]
# NOTE(review): this loop only prints blank lines and rebinds `row`;
# presumably leftover debugging — confirm before removing.
for index, row in data_load.iterrows():
    print()
# Dump the JSON representation of the last written Wikibase item.
item.get_wd_json_representation()
| import_scripts/example_data/.ipynb_checkpoints/Import example data-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Change visibility status of projects
# +
# #!pip install PyGithub
# -
from github import Github
import sys
import datetime
import pandas as pd
import numpy as np
import random
from collections import Counter
# +
# Load the GitHub personal access token from a local file.
# FIX: strip surrounding whitespace — token files routinely end with a
# newline, and passing "token\n" to Github() silently breaks authentication.
with open("/Users/jzk870/Documents/NumericalMethods - Harddisk/token/git_access_token.txt", mode = "r") as file:
    token = file.read().strip()

year = "2022"
class_name = "projects-" + year  # repo-name prefix, e.g. "projects-2022"
# -
# ### Load all repositories in this year's class room
# +
# a. Access github through access token. This one has been destroyed by Github for safety reasons.
gh = Github(token)
org = gh.get_organization('NumEconCopenhagen')
# Paginated listing of every repo in the organization; each full iteration
# below issues fresh API calls.
all_repos = org.get_repos()
# b. Locate all repositories in current class
# Keep only the repo names that contain e.g. "projects-2022".
current_class = [repo.name for repo in all_repos if class_name in repo.name]
# -
# See this years' repos
# Print each name with the class prefix stripped (str.removeprefix, Python 3.9+).
for r in current_class:
    print(r.removeprefix(class_name+"-"))
# ### Set visibility of all repositories
# +
# c. Use edit command and privat property to set visibility
is_private = False  # False => make every class repo public
# Set privacy status
# NOTE(review): re-iterates the paginated `all_repos` listing and issues one
# PATCH per matching repo; non-class repos are skipped by name.
for repo in all_repos:
    if repo.name not in current_class:
        continue
    # Update status
    repo.edit(private = is_private)
# +
#test = org.get_repo('projects-2022-crashtest')
#test.edit(private=True)
# -
# ### Check out visibility
# d. Detect status of all repositories
# Report the current privacy flag of every repo belonging to this class.
for repo in all_repos:
    if repo.name not in current_class:
        continue
    status = "private" if repo.private else "public "
    print(status + ": " + repo.name.removeprefix(class_name+"-"))
| projects/Change visibility of repos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
import sklearn.datasets
# Load the Boston housing dataset and standardize the feature matrix.
# NOTE(review): `sklearn.cross_validation` (imported above) was removed in
# modern scikit-learn (use `sklearn.model_selection`), and `load_boston` was
# removed in sklearn 1.2 — this cell only runs on an old sklearn version.
boston_data = sklearn.datasets.load_boston()
# NOTE(review): relies on `sklearn.preprocessing` being reachable as an
# attribute of `sklearn` without an explicit import — confirm it is pulled in
# transitively on this sklearn version.
X = sklearn.preprocessing.scale(boston_data.data)
y = boston_data.target  # regression target: median house value
# 5-fold CV splitter over all samples, shuffled with a fixed seed (old KFold API)
kf = KFold(len(X), n_folds=5, shuffle=True, random_state=42)
# candidate Minkowski exponents p to evaluate
p_values = np.linspace(1.0, 10.0, num=200)
# +
# Mean cross-validated score of a distance-weighted 5-NN regressor for each
# candidate Minkowski exponent p.
cv_accuracy = [cross_val_score(estimator=KNeighborsRegressor(n_neighbors=5, weights='distance', p=p_i, metric='minkowski'), X=X, y=y, cv=kf).mean() for p_i in p_values]
# FIX: the original used p_values[int(max(cv_accuracy))], which truncates the
# maximum *score* to an int (almost always 0) and so always picked p_values[0].
# We want the p whose score is highest, i.e. index with argmax.
best_p = p_values[int(np.argmax(cv_accuracy))]
# -
print(best_p)
| week_2/choose_point.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LogisticRegression with Cross Validation
# import libraries
import pandas as pd
from sklearn.model_selection import train_test_split
# the raw file ships without a header row, so supply column names explicitly
column_names = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'car']
# read in cars dataset
df = pd.read_csv(
    'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter07/Dataset/car.data',
    names=column_names,
    index_col=None,
)
df.info()
# one-hot encode every categorical feature column (all except the target 'car')
_df = pd.get_dummies(df, columns=column_names[:-1])
_df.head()
# split into feature matrix and label column as numpy arrays
features = _df.drop(['car'], axis=1).values
labels = _df[['car']].values
from sklearn.linear_model import LogisticRegressionCV
# Fit a logistic regression, letting 5-fold CV choose the regularization
# strength, then report training accuracy.
model = LogisticRegressionCV(max_iter=2000, multi_class='auto', cv=5)
target = labels.ravel()
model.fit(features, target)
print(model.score(features, target))
| Chapter07/Exercise7.06/Exercise7.06.ipynb |