code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from owslib.wps import WebProcessingService, monitorExecution, printInputOutput

# Connect to the local Web Processing Service (WPS) endpoint and show its title.
# Fix: the original used Python-2-only `print` statements; these %-formatted
# single-argument print(...) calls behave identically under Python 2 and 3.
# The loop variables were also renamed so they no longer shadow the builtin
# `input`.
wps = WebProcessingService(url="http://localhost:8094/wps", verbose=False)
print(wps.identification.title)

# List every process the service offers, with its short description.
for process in wps.processes:
    print('%s : \t %s' % (process.identifier, process.abstract, ))

# get infos about process inout
p = wps.describeprocess(identifier='inout')
for wps_input in p.dataInputs:
    printInputOutput(wps_input)

# Dump the raw metadata fields of every declared input of the process.
for wps_input in p.dataInputs:
    print('**************')
    print('identifier: %s' % (wps_input.identifier,))
    print('title: %s' % (wps_input.title,))
    print('abstract: %s' % (wps_input.abstract,))
    print('dataType: %s' % (wps_input.dataType,))
    print('defaultValue: %s' % (wps_input.defaultValue,))
    print('allowedValues: %s' % (wps_input.allowedValues,))
    print('supportedValues: %s' % (wps_input.supportedValues,))
    print('minOccurs: %s' % (wps_input.minOccurs,))
    print('maxOccurs: %s' % (wps_input.maxOccurs,))
| notebooks/pingutests/owslib.wsp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import numpy as np
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.model_selection import cross_validate, cross_val_score, train_test_split
from dsexamples.house_prices import data as df
# ### Question 1
#
# Display the data types of each column using the function dtypes, then take a screenshot and submit it, include your code in the image.
df.info()
# ### Question 2
#
# Drop the columns <code>"id"</code> and <code>"Unnamed: 0"</code> from axis 1 using the method <code>drop()</code>, then use the method <code>describe()</code> to obtain a statistical summary of the data. Take a screenshot and submit it, make sure the <code>inplace</code> parameter is set to <code>True</code>
# Fix: the question asks to drop "id" and "Unnamed: 0"; the original dropped
# "id" and "" (an empty string), and pandas raises KeyError for a column
# name that does not exist.
df.drop(columns=["id", "Unnamed: 0"], inplace=True)
df.describe()
# Count the missing values before imputation (filled in below).
print("number of NaN values for the column bedrooms :",
      df['bedrooms'].isnull().sum())
print("number of NaN values for the column bathrooms :",
      df['bathrooms'].isnull().sum())
# +
def replace_nan_by_mean(column):
    """
    Replace NaN entries of *column* (a pandas Series) with the column mean.

    The Series is modified in place; the mean is computed over the non-NaN
    values after casting to float. Returns None, matching the pandas
    convention for in-place mutators.
    """
    avg_col = column.astype("float").mean(axis=0)
    print("Average of {0}:".format(column.name), avg_col)
    # Fix: the original wrote `return column.replace(np.nan, avg_col,
    # inplace=True)`, which always returns None while looking like it
    # returns the Series. fillna() is the idiomatic NaN imputation.
    column.fillna(avg_col, inplace=True)
# Impute the two columns in place with their respective means.
replace_nan_by_mean(df['bedrooms'])
replace_nan_by_mean(df['bathrooms'])
# -
# Verify that the imputation removed every NaN from both columns.
print("number of NaN values for the column bedrooms :",
      df['bedrooms'].isnull().sum())
print("number of NaN values for the column bathrooms :",
      df['bathrooms'].isnull().sum())
# ### Question 3
#
# Use the method <code>value_counts</code> to count the number of houses with unique floor values, use the method <code>.to_frame()</code> to convert it to a dataframe.
# Count houses per distinct number of floors, returned as a DataFrame.
df[['floors']].value_counts().to_frame()
# ### Question 4
#
# Use the function <code>boxplot</code> in the seaborn library to determine whether houses with a waterfront view or without a waterfront view have more price outliers.
#
sns.boxplot(x="waterfront", y="price", data=df)
# ### Question 5
#
# Use the function <code>regplot</code> in the seaborn library to determine if the feature <code>sqft_above</code> is negatively or positively correlated with price.
#
sns.regplot(x="sqft_above", y="price", data=df)
# Correlation of every numeric column with price, sorted ascending.
df.corr()['price'].sort_values()
# ### Question 6
#
# Fit a linear regression model to predict the <code>'price'</code> using the feature <code>'sqft_living'</code> then calculate the R^2. Take a screenshot of your code and the value of the R^2.
#
# Single-feature linear regression of price on sqft_living.
X = df[['sqft_living']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)  # R^2 on the training data
# ### Question 7
#
# Fit a linear regression model to predict the <code>'price'</code> using the list of features:
#
# Feature list used by the multivariate models in Questions 7 and 8.
features = ["floors", "waterfront", "lat", "bedrooms", "sqft_basement",
            "view", "bathrooms", "sqft_living15", "sqft_above", "grade", "sqft_living"]
X = df[features]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)  # R^2 of the multivariate fit on the training data
# ### Question 8
#
# Use the list to create a pipeline object to predict the 'price', fit the object using the features in the list <code>features</code>, and calculate the R^2.
# +
# Pipeline: standard scaling -> polynomial expansion (default degree 2,
# no bias column) -> ordinary least squares.
Input = [('scale', StandardScaler()), ('polynomial', PolynomialFeatures(
    include_bias=False)), ('model', LinearRegression())]
p=Pipeline(Input, verbose=True)
p.fit(X, Y)
p.score(X,Y)  # R^2 on the training data
# -
# ### Question 9
#
# Create and fit a Ridge regression object using the training data, set the regularization parameter to 0.1, and calculate the R^2 using the test data.
#
# +
# Hold out 15% of the data as the test set (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.15, random_state=1)
print("number of test samples:", x_test.shape[0])
print("number of training samples:", x_train.shape[0])
# Ridge regression with regularization strength alpha=0.1.
lr = Ridge(alpha=0.1)
lr.fit(x_train, y_train)
lr.score(x_test, y_test)  # R^2 on the held-out test data
# -
# ### Question 10
#
# Perform a second order polynomial transform on both the training data and testing data. Create and fit a Ridge regression object using the training data, set the regularisation parameter to 0.1, and calculate the R^2 utilising the test data provided. Take a screenshot of your code and the R^2.
#
# +
# Degree-2 polynomial expansion followed by Ridge(alpha=0.1), as a pipeline
# so the same transform is applied to train and test data consistently.
r = Pipeline([
    ('poly', PolynomialFeatures(degree=2, include_bias=False)),
    ('ridge', Ridge(alpha=0.1))
], verbose=True)
r.fit(x_train, y_train)
r.score(x_test, y_test)  # R^2 on the test data
| dsexamples/courses/ibm_data_analysis_exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 29 - Diagnosing Breast Cancer
#
# ### The following topics are discussed in this notebook:
# * A complete example of using classificaiton models to diagnose potentially cancerous tumors.
# ## Wisconsion Breast Cancer Dataset
#
# In this example, we will be working with the Wisconsin Breast Cancer Dataset. Each of the 569 observations in this dataset contains 30 measurements taken from images of cell nuclei drawn from a potentially cancerous breast mass. Each observation is labeled as being benign (B) or malignant (M).
#
# Our goal will be to build a model for the purposes of predicting the diagnosis of the tutor using the 30 measurements as features.
# ## Import Packages and Tools
#
# We will begin by importing the packages and tools that we will use in this example.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# -
# ## Load the Data
#
# The data is stored in the coma-delimited file `breast_cancer.csv`. We will load that now.
# Read the comma-delimited dataset. Per the slicing below, column 1 holds
# the diagnosis label ('B' benign / 'M' malignant) and columns 2+ the
# 30 numeric features.
wbc = pd.read_csv('data/breast_cancer.csv', sep=',')
print(wbc.columns)
# ## Prepare the Data
#
# We will extract the feature and label arrays, and split the dataset into training, validation and testing sets using a 60/20/20 split.
# +
X = wbc.iloc[:,2:].values  # feature matrix (30 measurements)
y = wbc.iloc[:,1].values   # diagnosis labels
# First split holds out 40%; the second halves that holdout, giving a
# 60/20/20 train/validation/test split, stratified so every split keeps
# the same benign/malignant ratio.
X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=0.4, random_state=1, stratify=y)
X_valid, X_test, y_valid, y_test = train_test_split(X_hold, y_hold, test_size=0.5, random_state=1, stratify=y_hold)
print(y_train.shape)
print(y_valid.shape)
print(y_test.shape)
# -
# ## Create Logistic Regression Model
#
# In the cell below, we create a logistic regression model, and then calculate its training and validation accuracy.
# +
# Fit an (effectively unregularized) logistic-regression model.
# NOTE(review): `model_2` is an otherwise-unused alias; kept in case a
# cell not shown here references it.
logreg_mod = model_2 = LogisticRegression(solver='lbfgs', penalty='none', max_iter=5000)
logreg_mod.fit(X_train, y_train)
print('Training Accuracy: ', round(logreg_mod.score(X_train, y_train),4))
# Fix: score the *validation* split here. The original scored X_test/y_test
# under the "Validation Accuracy" label, leaking the test set into model
# selection; the tree and forest cells below correctly use X_valid/y_valid.
print('Validation Accuracy:', round(logreg_mod.score(X_valid, y_valid),4))
# -
# ## Create Decision Tree Model
#
# We will now perform hyperparameter tuning to select the optimal value for the `max_depth` parameter for a decision tree.
# +
# Hyperparameter sweep: train/validation accuracy for max_depth = 1..10.
tr_acc = []
va_acc = []
depth_list = range(1,11)
for d in depth_list:
    temp_mod = DecisionTreeClassifier(max_depth=d, random_state=1)
    temp_mod.fit(X_train, y_train)
    tr_acc.append(temp_mod.score(X_train, y_train))
    va_acc.append(temp_mod.score(X_valid, y_valid))
# Plot both accuracy curves against max_depth.
plt.figure(figsize=([6,4]))
plt.plot(depth_list, tr_acc, label='Training Accuracy')
plt.plot(depth_list, va_acc, label='Validation Accuracy')
plt.xlabel('Max Depth')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# -
# It appears that we get the best performance on the validation set when `max_depth=2`. We confirm this below.
# argmax picks the first index with the highest validation accuracy.
ix_best = np.argmax(va_acc)
best_md = depth_list[ix_best]
print('Optimal Value of max_depth:', best_md)
# We will now create and score our decision tree model.
# +
# Decision tree at the depth selected by the sweep above.
tree_mod = DecisionTreeClassifier(max_depth=best_md, random_state=1)
tree_mod.fit(X_train, y_train)
print('Training Accuracy: ', round(tree_mod.score(X_train, y_train),4))
print('Validation Accuracy:', round(tree_mod.score(X_valid, y_valid),4))
# -
# ## Create Random Forest Model
#
# We will now create a random forest model consisting of 500 trees, each with a `max_depth` of 32.
# +
forest_mod = RandomForestClassifier(n_estimators=500, max_depth=32, random_state=1)
forest_mod.fit(X_train, y_train)
print('Training Accuracy: ', round(forest_mod.score(X_train, y_train),4))
print('Validation Accuracy:', round(forest_mod.score(X_valid, y_valid),4))
# -
# ## Scoring Final Model
#
# The logistic regression model had the highest validation accuracy of any of our models, so we will select it to be our final model. We will now calculate this model's accuracy on the test set.
# Test-set accuracy of the selected (logistic regression) model.
print('Test Set Accuracy:', round(logreg_mod.score(X_test, y_test),4))
| files/notebooks/dsci_303_503/29 - Breast Cancer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia
# language: julia
# name: julia-1.5
# ---
# # Piecewise linear interpolation
#
# We generate a piecewise linear interpolant of $f(x)=e^{\sin 7x}$.
# Load the book's helper package (provides FNC.plinterp and re-exports Plots).
using FundamentalsNumericalComputation
# +
# The target function, plotted on [0, 1].
f = x -> exp(sin(7*x))
plot(f,0,1,label="function",xlabel="x",ylabel="f(x)")
# -
# First we sample the function to create the data.
# +
t = [0, 0.075, 0.25, 0.55, 0.7, 1] # nodes
y = f.(t) # function values (broadcast of f over the nodes)
scatter!(t,y,label="nodes")
# -
# Now we create a callable function that will evaluate the piecewise linear interpolant at any $x$.
# Overlay the piecewise linear interpolant on the previous plot.
p = FNC.plinterp(t,y)
plot!(p,0,1,label="interpolant",title="PL interpolation")
| book/localapprox/demos/pwlin-usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Convexity
#
# - convex and nonconvex
# - sets and functions
#
# +
import numpy as np
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# shape copied from matplotlib example
# Draw and save the non-convex example set as a filled Bezier-path patch.
fig, ax = plt.subplots()
Path = mpath.Path
path_data = [
    (Path.MOVETO, (1.58, -2.57)),
    (Path.CURVE4, (0.35, -1.1)),
    (Path.CURVE4, (-1.75, 2.0)),
    (Path.CURVE4, (0.375, 2.0)),
    (Path.LINETO, (0.85, 1.15)),
    (Path.CURVE4, (2.2, 3.2)),
    (Path.CURVE4, (3, 0.05)),
    (Path.CURVE4, (2.0, -0.5)),
    (Path.CLOSEPOLY, (1.58, -2.57)),
]
# Each tuple is (path command, vertex); unzip into parallel sequences.
codes, verts = zip(*path_data)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, facecolor='r', alpha=0.5)
ax.add_patch(patch)
ax.axis('equal')
plt.savefig('nonconvex-set.pdf')
# +
# Draw and save the convex example set with the same technique.
fig, ax = plt.subplots()
Path = mpath.Path
path_data = [
    (Path.MOVETO, (0,0)),
    (Path.CURVE4, (2,3)),
    (Path.CURVE4, (1,6)),
    (Path.CURVE4, (-2,5)),
    (Path.LINETO, (-3,1)),
    (Path.CLOSEPOLY, (0,0)),
]
codes, verts = zip(*path_data)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, facecolor='g', alpha=0.5)
ax.add_patch(patch)
ax.axis('equal')
plt.savefig('convex-set.pdf')
# -
| Loss/convex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setting oil prices in the 70s
# In the 1970s, the OPEP countries adopted a price-linking strategy and oil prices went from €3/barrel to a level above €30/barrel. Analysts foresaw that prices could rise to over €100/barrel by the end of the twentieth century. Unexpectedly however, the cartel failed and prices went down again. Therefore, what are the conditions for these cartels to survive? In other words, what determines the equilibrium between cooperation and competition? This problem is similar to the prisoner’s dilemma.
#
# To simplify matters, it is assumed that only two producers exist, Saudi Arabia and Iran, and that there are only three possible production levels: 2, 4 and 6 million barrels/day. So, depending on the two players’ decisions, total world production is 4, 6, 8, 10 and 12 million barrels/day. Accordingly, world prices are 35, 30, 25, 15 or only 10€/barrel. The production costs of one barrel are considered to be €2 for Saudi Arabia and €4 for Iran.
#
# **a)** The intention is to maximise the difference between the profit of a producer and that of its competitor. Consider and solve the case as if it were a zero-sum game with two players.
#
# **b)** If the idea is to seek OPEP’s prime objective by avoiding mutual competition and regulating oil production, sales and prices, what is the best option for Saudi Arabia and for Iran if they cooperate with each other and try to
# simultaneously obtain the best profit each?
# ## Solution
#
# **a)** Pay-off Matrix:
#
#
# | b) Iran <br> a) Saudi Arabia | 2 million barrels / day | 4 million barrels / day | 6 million barrels/ day | Saudi Arabia <br>Minimax |
# |------------------------- |------------------------- |------------------------- |------------------------ |-------------------------- |
# | 2 million barrels day | 4 | -48 | -80 | -80 |
# | 4 million barrels / day | 60 | 8 | -14 | -14 |
# | 6 million barrels / day | 96 | 34 | 12 | 12 <- |
# | Iran MiniMax | 96 | 34 | 12 <- | |
#
#
# This is a pure-strategy game: whether we eliminate dominated strategies or apply the MiniMax approach, a production level of 6 million barrels/day is always the most profitable choice for Saudi Arabia. When both players adopt their dominant strategy — producing 6 million barrels/day each — they earn profits of 48 and 36 million euros, respectively.
#
# **b)** OPEP's Pay-off Matrix:
#
#
# | b) Iran <br> a) Saudi Arabia | 2 million barrels / day | 4 million barrels / day | 6 million barrels/ day
# |------------------------- |------------------------- |------------------------- |------------------------ |
# | 2 million barrels day | 66/62 | 56/104 | 46/126 |
# | 4 million barrels / day | 112/52 | 92/84 | 52/66 |
# | 6 million barrels / day | 138/42 | 78/44 | 48/36 |
#
# The best option is the one that offers the best profit to both of them at the
# same time; that is, a production of 4 million barrels/day each, which implies profits of 92 and 84 million euros for Saudi Arabia and Iran, respectively.
| docs/source/Game Theory/Solved/Setting oil prices in the 70s (Solved).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#quite a good one - reaching about 0.935
# Three valid-padded conv layers interleaved with dropout, then two dense
# layers and a 10-way softmax head.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', padding='valid', input_shape=INPUT_SHAPE))
m.add(kl.Dropout(0.15))
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='valid' ))
m.add(kl.MaxPool2D())
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='valid' ))
m.add(kl.Dropout(0.3))
m.add(kl.Flatten())
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.5))
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.4))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=20)
# +
# crappy (.907)
# Deeper stack: six conv layers in three pooled groups, no dropout at all.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=5, padding='same', activation='relu', input_shape=INPUT_SHAPE))
m.add(kl.Conv2D(filters=30, kernel_size=5, padding='same', activation='relu'))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Conv2D(filters=20, kernel_size=5, padding='same', activation='relu'))
m.add(kl.Conv2D(filters=30, kernel_size=3, activation='relu'))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Conv2D(filters=20, kernel_size=3, padding='same', activation='relu'))
m.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu'))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Flatten())
m.add(kl.Dense(units=80, activation='relu'))
m.add(kl.Dense(units=80, activation='relu'))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=20)
# +
# looks good but seem have too small capacity (0.922)
# Same shape as the first model but with L2 weight decay on every conv layer.
# NOTE(review): `kl.regularizers.l2()` implies `kl` exposes `regularizers`
# here — confirm against the (unseen) import cell.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', kernel_regularizer=kl.regularizers.l2(), padding='valid', input_shape=INPUT_SHAPE))
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', kernel_regularizer=kl.regularizers.l2(), padding='valid' ))
m.add(kl.MaxPool2D())
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', kernel_regularizer=kl.regularizers.l2(), padding='valid' ))
m.add(kl.MaxPool2D())
m.add(kl.Dropout(0.3))
m.add(kl.Flatten())
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.5))
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.4))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=100)
# +
# very good (0.935)
# and still overfits - so finetunig the regularization might make it
# Same-padded variant of the first model, dropout only after the last pool.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', padding='same', input_shape=INPUT_SHAPE))
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m.add(kl.MaxPool2D())
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m.add(kl.MaxPool2D())
m.add(kl.Dropout(0.3))
m.add(kl.Flatten())
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.5))
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.4))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=40)
# +
# quite poor just about 0.915
# LeNet
# https://www.pyimagesearch.com/2016/08/01/lenet-convolutional-neural-network-in-python/
# Classic two-conv LeNet layout with an L2-regularized 500-unit dense layer.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=5, padding='same', activation='relu', input_shape=INPUT_SHAPE))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Conv2D(filters=50, kernel_size=5, padding='same', activation='relu'))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Flatten())
m.add(kl.Dense(units=500, activation='relu', kernel_regularizer=kl.regularizers.l2()))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=20)
# +
# just (0.925)
# LeNet
# https://www.pyimagesearch.com/2016/08/01/lenet-convolutional-neural-network-in-python/
# Same LeNet layout as above, with dropout after each conv and before dense.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=5, padding='same', activation='relu', input_shape=INPUT_SHAPE))
m.add(kl.Dropout(0.3))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Conv2D(filters=50, kernel_size=5, padding='same', activation='relu'))
m.add(kl.Dropout(0.3))
m.add(kl.MaxPool2D(pool_size=2, strides=2))
m.add(kl.Flatten())
m.add(kl.Dropout(0.3))
m.add(kl.Dense(units=500, activation='relu', kernel_regularizer=kl.regularizers.l2()))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# -
plot_history(history, start_x_from=20)
# +
# very good (0.935)
# and still overfits - so finetunig the regularization might make it
# Candidate "m" for the head-to-head comparison below: four conv layers,
# the last two followed by stride-1 pooling.
m = keras.Sequential()
m.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', padding='same', input_shape=INPUT_SHAPE))
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m.add(kl.Dropout(0.2))
m.add(kl.MaxPool2D())
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m.add(kl.Dropout(0.2))
m.add(kl.MaxPool2D(strides=1))
m.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m.add(kl.Dropout(0.2))
m.add(kl.MaxPool2D(strides=1))
m.add(kl.Flatten())
m.add(kl.Dropout(0.3))
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.5))
m.add(kl.Dense(units=100, activation='relu'))
m.add(kl.Dropout(0.4))
m.add(kl.Dense(units=10, activation='softmax'))
m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in m.metrics_names}
history.update({'val_'+k:[] for k in m.metrics_names})
# +
# very good (0.935)
# and still overfits - so finetunig the regularization might make it
# Candidate "m2" for the head-to-head comparison below: shallower variant
# with two default (stride-2) pooling stages.
m2 = keras.Sequential()
m2.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', padding='same', input_shape=INPUT_SHAPE))
m2.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m2.add(kl.Dropout(0.2))
m2.add(kl.MaxPool2D())
m2.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
m2.add(kl.Dropout(0.15))
m2.add(kl.MaxPool2D())
m2.add(kl.Flatten())
m2.add(kl.Dropout(0.3))
m2.add(kl.Dense(units=100, activation='relu'))
m2.add(kl.Dropout(0.5))
m2.add(kl.Dense(units=100, activation='relu'))
m2.add(kl.Dropout(0.4))
m2.add(kl.Dense(units=10, activation='softmax'))
m2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
m2.summary()
epoch2 = 0
# Fix: build m2's history from m2.metrics_names — the original read
# m.metrics_names (same values at runtime since both models compile the
# same metrics, but it was a latent copy-paste bug).
history2 = {k:[] for k in m2.metrics_names}
history2.update({'val_'+k:[] for k in m2.metrics_names})
# -
# Train the two candidates in alternation: keep fitting whichever model's
# rolling (last-6-epoch) validation accuracy trails the other by >= 0.01.
# The outer loop runs until interrupted; the plots below are re-run afterwards.
avg = avg2 = 0
while True:
    while (avg < avg2 + 0.01):
        # One extra epoch for m, resuming from the persistent epoch counter.
        last_history = m.fit(**FIT_SETS, epochs=epoch+1, initial_epoch=epoch, batch_size=500, verbose=0)
        for k, v in last_history.history.items():
            history[k].append(v[0])
        avg = np.mean(history['val_acc'][-6:])
        print((STATS_STR + "{nl:} m1 avg: {m1:.4f} | m2 avg: {m2:.4f} | current {curr:}").format(
            nl=' '*100, epoch=epoch, **last_history.history, m1=avg, m2=avg2, curr='m1'), end='\r')
        epoch += 1
    while (avg2 < avg + 0.01):
        # One extra epoch for m2, tracked by its own counter and history.
        last_history2 = m2.fit(**FIT_SETS, epochs=epoch2+1, initial_epoch=epoch2, batch_size=500, verbose=0)
        for k, v in last_history2.history.items():
            history2[k].append(v[0])
        avg2 = np.mean(history2['val_acc'][-6:])
        print((STATS_STR + "{nl:} m1 avg: {m1:.4f} | m2 avg: {m2:.4f} | current {curr:}").format(
            nl=' '*100, epoch=epoch2, **last_history2.history, m1=avg, m2=avg2, curr='m2'), end='\r')
        epoch2 += 1
plot_history(history, start_x_from=20)
plot_history(history2, start_x_from=20)
# +
# very good (0.935)
# and still overfits - so finetunig the regularization might make it
# "ms" model: adds dropout directly on the input and keeps the stride-1
# pooling stages of the earlier "m" candidate.
ms = keras.Sequential()
ms.add(kl.InputLayer(INPUT_SHAPE))
# 35
ms.add(kl.Dropout(0.1)) # 13 .34
ms.add(kl.Conv2D(filters=20, kernel_size=3, activation='relu', padding='same' ))
ms.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
ms.add(kl.Dropout(0.2))
ms.add(kl.MaxPool2D())
ms.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
ms.add(kl.Dropout(0.2))
ms.add(kl.MaxPool2D(strides=1))
ms.add(kl.Conv2D(filters=40, kernel_size=3, activation='relu', padding='same' ))
ms.add(kl.Dropout(0.2))
ms.add(kl.MaxPool2D(strides=1))
ms.add(kl.Flatten())
ms.add(kl.Dropout(0.35))
ms.add(kl.Dense(units=130, activation='relu'))
ms.add(kl.Dropout(0.5))
ms.add(kl.Dense(units=105, activation='relu'))
ms.add(kl.Dropout(0.4))
ms.add(kl.Dense(units=10, activation='softmax'))
ms.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
ms.summary()
# Reset the epoch counter and the per-metric history (train + 'val_' keys).
epoch = 0
history = {k:[] for k in ms.metrics_names}
history.update({'val_'+k:[] for k in ms.metrics_names})
# -
# Peek at the metric keys recorded by a fit.
# NOTE(review): `hist` presumably comes from running the baseline cell at
# the bottom of this notebook first — verify the intended cell order.
hist.history.keys()
# +
from keras.preprocessing.image import ImageDataGenerator
BS = 500
# Per-sample normalization (zero mean, unit std) for every split; training
# additionally gets horizontal-flip augmentation.
GEN_KWARGS = dict(samplewise_center=True, samplewise_std_normalization=True)
train_gen = ImageDataGenerator(**GEN_KWARGS, horizontal_flip=True)
train_data_flow = train_gen.flow(x_train, y_train, batch_size=BS)
valid_gen = ImageDataGenerator(**GEN_KWARGS)
valid_data_flow = valid_gen.flow(x_valid, y_valid, batch_size=BS)
# -
# Train "ms" one epoch at a time on the augmented flow, logging metrics.
# Fix: validate on valid_data_flow — the original passed train_data_flow
# as validation_data (valid_data_flow was created above but never used),
# so the reported val_* metrics were really training metrics.
while epoch < 1000:
    last_history = ms.fit_generator(generator=train_data_flow,
                                    validation_data=valid_data_flow,
                                    steps_per_epoch=len(x_train) // BS,
                                    epochs=epoch+1, initial_epoch=epoch,
                                    verbose=0)
    #ms.fit(**FIT_SETS, epochs=epoch+1, initial_epoch=epoch, batch_size=500, verbose=0)
    for k, v in last_history.history.items():
        history[k].append(v[0])
    print(STATS_STR.format(nl=' '*100, epoch=epoch, **last_history.history), end='\r')
    epoch += 1
plot_history(history, start_x_from=10)
# Evaluate on the test set. Fix: build the test flow from test_gen — the
# original created test_gen and then flowed from valid_gen by mistake
# (harmless here since both generators use identical GEN_KWARGS, but wrong).
test_gen = ImageDataGenerator(**GEN_KWARGS)
test_data_flow = test_gen.flow(x_test, y_test, batch_size=BS)
ms.evaluate_generator(test_data_flow)
# The cells below are verbatim re-runs of the evaluation above, kept to
# preserve the notebook's execution history.
plot_history(history, start_x_from=10)
test_gen = ImageDataGenerator(**GEN_KWARGS)
test_data_flow = test_gen.flow(x_test, y_test, batch_size=BS)
ms.evaluate_generator(test_data_flow)
plot_history(history, start_x_from=10)
test_gen = ImageDataGenerator(**GEN_KWARGS)
test_data_flow = test_gen.flow(x_test, y_test, batch_size=BS)
ms.evaluate_generator(test_data_flow)
plot_history(history, start_x_from=10)
test_gen = ImageDataGenerator(**GEN_KWARGS)
test_data_flow = test_gen.flow(x_test, y_test, batch_size=BS)
ms.evaluate_generator(test_data_flow)
# Two verbatim re-runs of the training loop (the notebook cell was executed
# again; `epoch` persists, so once it reaches 1000 later runs are no-ops).
# Fix in both: validate on valid_data_flow instead of train_data_flow.
while epoch < 1000:
    last_history = ms.fit_generator(generator=train_data_flow,
                                    validation_data=valid_data_flow,
                                    steps_per_epoch=len(x_train) // BS,
                                    epochs=epoch+1, initial_epoch=epoch,
                                    verbose=0)
    #ms.fit(**FIT_SETS, epochs=epoch+1, initial_epoch=epoch, batch_size=500, verbose=0)
    for k, v in last_history.history.items():
        history[k].append(v[0])
    print(STATS_STR.format(nl=' '*100, epoch=epoch, **last_history.history), end='\r')
    epoch += 1
while epoch < 1000:
    last_history = ms.fit_generator(generator=train_data_flow,
                                    validation_data=valid_data_flow,
                                    steps_per_epoch=len(x_train) // BS,
                                    epochs=epoch+1, initial_epoch=epoch,
                                    verbose=0)
    #ms.fit(**FIT_SETS, epochs=epoch+1, initial_epoch=epoch, batch_size=500, verbose=0)
    for k, v in last_history.history.items():
        history[k].append(v[0])
    print(STATS_STR.format(nl=' '*100, epoch=epoch, **last_history.history), end='\r')
    epoch += 1
plot_history(history, start_x_from=20)
def build_model(*layers, verbose=False,
                optimizer='adam', loss='categorical_crossentropy', metrics=None,
                compile_kwargs=None):
    """Build and compile a keras Sequential model from *layers*.

    Parameters
    ----------
    *layers : keras layers added to the model in order.
    verbose : if truthy, print the model summary.
    optimizer, loss, metrics : compile defaults; `metrics` defaults to
        ['acc'] (resolved inside the function — a list default argument
        would be shared and mutable across calls).
    compile_kwargs : optional dict of extra `model.compile` arguments;
        entries there override the three defaults above. The caller's dict
        is copied, never mutated (the original mutated the shared `{}`
        default and looked the defaults up via fragile `locals()[kw]`).

    Returns the compiled model.
    """
    if metrics is None:
        metrics = ['acc']
    compile_kwargs = dict(compile_kwargs or {})
    model = keras.models.Sequential()
    for layer in layers:
        model.add(layer)
    if verbose:
        print("Model summary:")
        model.summary()
    compile_kwargs.setdefault('optimizer', optimizer)
    compile_kwargs.setdefault('loss', loss)
    compile_kwargs.setdefault('metrics', metrics)
    model.compile(**compile_kwargs)
    return model
# +
# baseline (validation accuracy about 0.90)
# its siblings were reaching accuracy up to 0.92 (and blazingly fast to train)
# LeNet-like baseline assembled with the build_model helper above.
model_simple = build_model(
    kl.InputLayer(INPUT_SHAPE),
    kl.Conv2D(filters=6, kernel_size=5, activation='relu', padding='same'),
    kl.MaxPool2D(pool_size=2, strides=2),
    kl.Conv2D(filters=16, kernel_size=5, activation='relu', padding='valid'),
    kl.MaxPool2D(pool_size=2, strides=2),
    kl.Flatten(),
    kl.Dense(units=120, activation='relu'),
    kl.Dense(units=84, activation='relu'),
    kl.Dense(units=10, activation='softmax'),
    verbose=1
)
#$hist = model_simple.fit(**FIT_SETS, epochs=40, batch_size=500)
# Train for 40 epochs with a larger batch, then plot the learning curves.
hist = model_simple.fit(**FIT_SETS, epochs=40, batch_size=1000)
plot_history(hist, start_x_from=5)
| Intro_to_Deep_Learning/session04/Fashion-MNIST_convolutional-Roznovjak-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.1 64-bit
# language: python
# name: python3
# ---
from luwiji.regularized_regression import illustration, demo
# # Regularization
# Penalize the weights by adding them to the loss, so that:
# - large weights -> larger loss
# - the optimizer wants a small loss -> the weights are pushed to be small
demo.ridge_lasso()
# Judging from the demo, L1 can be used for feature selection: whichever
# coefficients vanish correspond to unimportant features. scikit-learn even
# ships L1-based feature selection.
# Also: optimizers can themselves be tuned — see
# https://scikit-optimize.github.io/stable/ ; tuning results can even be visualized.
# # Which one is better?
# Feature engineering is still the best: with Poly(3) and no regularization the
# score can reach 0.98. Most of the time, however, we do not know which Poly
# degree to use, so relying on regularization is more practical.
#
# L1 is more often used for feature selection (sparsity)<br>
# L2 is more often used for regularization (simplicity)
# ## Alternative: ElasticNet
illustration.elasticnet
demo.elastic_net()
# # Code in scikit-learn
#
# ### Regression + L1 regularization = LASSO Regression
#
# ```
# model = Pipeline([
# ("poly", PolynomialFeatures(10)),
# ("lr", Lasso(0.01))
# ])
# model.fit(X_train, y_train)
# ```
#
# ### Regression + L2 regularization = Ridge Regression
#
# ```
# model = Pipeline([
# ("poly", PolynomialFeatures(10)),
# ("lr", Ridge(0.01))
# ])
# model.fit(X_train, y_train)
# ```
#
# ### Regression + L1 + L2 = ElasticNet Regression
#
# ```
# model = Pipeline([
# ("poly", PolynomialFeatures(10)),
# ("lr", ElasticNet(0.01))
# ])
# model.fit(X_train, y_train)
# ```
| 08 - Regression Evaluation/Part 8 - Regression Coefficient and Regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import scipy as sc
import random as rand
from sklearn import preprocessing, linear_model
import matplotlib.pyplot as plt
from core.controllers import PDController
from core.dynamics import LinearSystemDynamics, ConfigurationDynamics
from koopman_core.controllers import OpenLoopController, MPCController,BilinearFBLinController, PerturbedController, LinearLiftedController
from koopman_core.dynamics import LinearLiftedDynamics, BilinearLiftedDynamics
from koopman_core.learning import Edmd, FlBilinearLearner
from koopman_core.basis_functions import PolySineBasis
from koopman_core.learning.utils import differentiate_vec
from koopman_core.systems import PlanarQuadrotorForceInput
class QuadrotorPdOutput(ConfigurationDynamics):
    """PD-tracking output: y = q, with a time-interpolated desired trajectory.

    Wraps a sampled desired state trajectory and exposes the proportional and
    derivative error terms consumed by PDController.
    """
    def __init__(self, dynamics, xd, t_d, n, m):
        """
        dynamics: system the output is attached to
        xd: desired state trajectory, shape (len(t_d), n)
        t_d: sample times of xd
        n: state dimension (first n/2 entries are configuration coordinates)
        m: input dimension
        """
        ConfigurationDynamics.__init__(self, dynamics, 1)
        self.xd = xd
        self.t_d = t_d
        self.xd_dot = differentiate_vec(self.xd, self.t_d)  # numerical time derivative of xd
        self.n = n
        self.m = m
    def proportional(self, x, t):
        """Position error y(q) - y_d(t)."""
        # Fix: split the state using self.n instead of the module-level global
        # `n` the original implicitly relied on.
        q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
        return self.y(q) - self.y_d(t)
    def derivative(self, x, t):
        """Velocity error dy/dq @ q_dot - y_d_dot(t)."""
        q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
        return self.dydq(q)@q_dot - self.y_d_dot(t)
    def y(self, q):
        """Output map: the full configuration."""
        return q
    def dydq(self, q):
        """Output Jacobian (identity)."""
        return np.eye(int(self.n/2))
    def d2ydq2(self, q):
        """Output Hessian (zero)."""
        return np.zeros((int(self.n/2), int(self.n/2), int(self.n/2)))
    def y_d(self, t):
        """Desired configuration at time t (linear interpolation)."""
        return self.desired_state_(t)[:int(self.n/2)]
    def y_d_dot(self, t):
        """Desired velocity at time t."""
        return self.desired_state_(t)[int(self.n/2):]
    def y_d_ddot(self, t):
        """Desired acceleration at time t."""
        return self.desired_state_dot_(t)[int(self.n/2):]
    def desired_state_(self, t):
        """Interpolate each state coordinate of xd at time t."""
        return [np.interp(t, self.t_d.flatten(), self.xd[:, ii].flatten()) for ii in range(self.xd.shape[1])]
    def desired_state_dot_(self, t):
        """Interpolate each state coordinate of xd_dot at time t."""
        return [np.interp(t, self.t_d.flatten(), self.xd_dot[:, ii].flatten()) for ii in range(self.xd_dot.shape[1])]
class QuadrotorTrajectoryOutput(ConfigurationDynamics):
    """Trajectory-tracking output for the lifted (Koopman) quadrotor models.

    Stores a desired output trajectory — and optionally its lifted-space
    counterpart — sampled every `dt`, and serves linearly interpolated
    references y_d, y_d_dot, y_d_ddot (and z_d, z_d_dot, z_d_ddot) at
    arbitrary query times.
    """
    def __init__(self, bilinear_dynamics, y_d, y_d_dot, y_d_ddot, dt, z_d=None, z_d_dot=None, z_d_ddot=None, C_h=None):
        ConfigurationDynamics.__init__(self, bilinear_dynamics, 2)
        self.bilinear_dynamics = bilinear_dynamics
        # Output-space references, each of shape (n_outputs, n_samples):
        self.ref, self.ref_dot, self.ref_ddot = y_d, y_d_dot, y_d_ddot
        # Optional lifted-space references:
        self.ref_z, self.ref_dot_z, self.ref_ddot_z = z_d, z_d_dot, z_d_ddot
        self.C_h = C_h  # projection from lifted state to outputs
        self.dt = dt
        # Sample times corresponding to the stored reference columns.
        self.t_d = self.dt * np.arange(0, self.ref.shape[1])

    def eval_z(self, x, t):
        """Lifted-state tracking error phi(x) - z_d(t)."""
        lifted = self.bilinear_dynamics.phi_fun(x.reshape(1, -1)).squeeze()
        return lifted - self.z_d(t)

    def y(self, q):
        """Output map: the configuration itself."""
        return q

    def dydq(self, q):
        """Output Jacobian (2x2 identity)."""
        return np.array([[1, 0], [0, 1]])

    def d2ydq2(self, q):
        """Output Hessian (zero)."""
        return np.zeros((1, 2, 2))

    def y_d(self, t):
        """Desired output at time t."""
        return self.interpolate_ref_(self.ref, t)

    def y_d_dot(self, t):
        """Desired output velocity at time t."""
        return self.interpolate_ref_(self.ref_dot, t)

    def y_d_ddot(self, t):
        """Desired output acceleration at time t."""
        return self.interpolate_ref_(self.ref_ddot, t)

    def z_d(self, t):
        """Desired lifted state at time t."""
        return self.interpolate_ref_(self.ref_z, t)

    def z_d_dot(self, t):
        """Desired lifted-state velocity at time t."""
        return self.interpolate_ref_(self.ref_dot_z, t)

    def z_d_ddot(self, t):
        """Desired lifted-state acceleration at time t."""
        return self.interpolate_ref_(self.ref_ddot_z, t)

    def interpolate_ref_(self, ref, t):
        """Linearly interpolate every row of `ref` at time t."""
        samples = []
        for row in ref:
            samples.append(np.interp(t, self.t_d, row))
        return np.array(samples)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Planar Quadrotor Example
# -
# Consider a planar quadrotor with states $\mathbf{x} = [y \, z \, \theta \, \dot{y} \, \dot{z} \, \dot{\theta}]^T$ and continuous-time dynamics
#
# \begin{equation}
# \begin{bmatrix} \ddot{y} \\ \ddot{z} \\ \ddot{\theta} \end{bmatrix}
# = \begin{bmatrix}
# 0\\-g\\0
# \end{bmatrix} +
# \begin{bmatrix}
# -\frac{1}{m}\text{sin}\theta & -\frac{1}{m}\text{sin}\theta\\
# \frac{1}{m}\text{cos}\theta & \frac{1}{m}\text{cos}\theta\\
# -\frac{l_{arm}}{I_{xx}} & \frac{l_{arm}}{I_{xx}}
# \end{bmatrix}
# \begin{bmatrix}
# T_1 \\ T_2
# \end{bmatrix}
# \end{equation}
#
# where $y,z$ describe the position of the vehicle in a fixed reference frame, $\theta$ is the orientation of the vehicle,
# $T_1, T_2$ are the thrust from each of the propellers, $g$ is the gravitational acceleration, $m$ is the vehicle mass,
# $l_{arm}$ is the distance from the vehicle's center of mass to the center of the propeller, and $I_{xx}$ is the inertia
# around the x-axis.
# + pycharm={"name": "#%%\n"}
# Planar quadrotor system parameters
# (NOTE(review): the original comment said "Cart pole"; this cell models the
# planar quadrotor described above)
mass = 2.
inertia = 1.
prop_arm = 0.2
gravity = 9.81
quadrotor = PlanarQuadrotorForceInput(mass, inertia, prop_arm, g=gravity)
# Linearized system specification:
n, m = 6, 2 # Number of states, number of control inputs
A_nom = np.array([[0., 0., 0., 1., 0., 0.], # Linearization of the true system around the origin
 [0., 0., 0., 0., 1., 0.],
 [0., 0., 0., 0., 0., 1.],
 [0., 0., -gravity, 0., 0., 0.],
 [0., 0., 0., 0., 0., 0.],
 [0., 0., 0., 0., 0., 0.]])
B_nom = np.array([[0., 0.], # Linearization of the true system around the origin
 [0., 0.],
 [0., 0.],
 [0., 0.],
 [1./mass, 1./mass],
 [-prop_arm/inertia, prop_arm/inertia]])
hover_thrust = mass*gravity/m # Thrust per propeller needed to hover (weight split over m inputs)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Collect data for learning
# + [markdown] pycharm={"name": "#%% md\n"}
# To collect data, a nominal controller is designed with LQR on the linearization of the dynamics around hover. However, any
# controller can be used and the method does not require knowledge of the model's linearization. In addition, an
# exploratory white noise is added to the controller to ensure that the data is sufficiently excited. Note that the system
# is underactuated and that trajectory optimization is necessary to control the position of the vehicle. We use a
# simplified trajectory generator based on a model predictive controller for the linearized dynamics. More careful design
# of the desired trajectory may be necessary for more demanding applications and this is readily compatible with our method.
#
#
# + pycharm={"name": "#%%\n"}
q_dc, r_dc = 1e2, 1 # State and actuation penalty values, data collection
Q_dc = q_dc * np.identity(n) # State penalty matrix, data collection
R_dc = r_dc*np.identity(m) # Actuation penalty matrix, data collection
P_dc = sc.linalg.solve_continuous_are(A_nom, B_nom, Q_dc, R_dc) # Algebraic Ricatti equation solution, data collection
K_dc = np.linalg.inv(R_dc)@B_nom.T@P_dc # LQR feedback gain matrix, data collection
K_dc_p = K_dc[:,:int(n/2)] # Proportional control gains, data collection
K_dc_d = K_dc[:,int(n/2):] # Derivative control gains, data collection
nominal_sys = LinearSystemDynamics(A=A_nom, B=B_nom)
# Data collection parameters:
dt = 1.0e-2 # Time step length
traj_length_dc = 2. # Trajectory length, data collection
n_pred_dc = int(traj_length_dc/dt) # Number of time steps, data collection
t_eval = dt * np.arange(n_pred_dc + 1) # Simulation time points
n_traj_dc = 50 # Number of trajectories to execute, data collection
noise_var = 1. # Exploration noise to perturb controller, data collection
xmax = np.array([2, 2, np.pi/3, 2.,2.,2.]) # State constraints, trajectory generation
xmin = -xmax
umax = np.array([50., 50.]) - hover_thrust # Actuation constraint, trajectory generation
umin = np.array([0., 0.]) - hover_thrust
x0_max = np.array([xmax[0], xmax[1], xmax[2], 1., 1., 1.]) # Initial value limits
Q_trajgen = sc.sparse.diags([0,0,0,0,0,0]) # State penalty matrix, trajectory generation
QN_trajgen = sc.sparse.diags([5e1,5e1,5e1,1e1,1e1,1e1]) # Final state penalty matrix, trajectory generation
R_trajgen = sc.sparse.eye(m) # Actuation penalty matrix, trajectory generation
sub_sample_rate = 5 # Rate to subsample data for training
model_fname = 'examples/planar_quad_models' # Path to save learned models
n_cols = 10 # Number of columns in training data plot
# + pycharm={"name": "#%%\n"}
# Buffers: desired (MPC-generated) trajectories, simulated states, applied inputs.
xd = np.empty((n_traj_dc, n_pred_dc + 1, n))
xs = np.empty((n_traj_dc, n_pred_dc + 1, n))
us = np.empty((n_traj_dc, n_pred_dc, m))
plt.figure(figsize=(12, 12 * n_traj_dc / (n_cols ** 2)))
for ii in range(n_traj_dc):
    # Sample a random initial state and set point, then generate a desired
    # trajectory between them with an MPC for the linearized dynamics.
    x0 = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
    set_pt_dc = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
    mpc_trajgen = MPCController(nominal_sys, n_pred_dc, dt, umin, umax, xmin, xmax, QN_trajgen, R_trajgen,
                                QN_trajgen, set_pt_dc)
    mpc_trajgen.eval(x0, 0)
    xd[ii, :, :] = mpc_trajgen.parse_result().T
    # Resample until the trajectory starts far enough from the origin and the
    # MPC returned a valid (NaN-free) solution.
    while abs(x0[0]) < 1.25 or np.any(np.isnan(xd[ii, :, :])):
        x0 = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
        set_pt_dc = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
        mpc_trajgen = MPCController(nominal_sys, n_pred_dc, dt, umin, umax, xmin, xmax, QN_trajgen, R_trajgen,
                                    QN_trajgen, set_pt_dc)
        mpc_trajgen.eval(x0, 0)
        xd[ii, :, :] = mpc_trajgen.parse_result().T
    # Track the desired trajectory with the noise-perturbed PD controller.
    output = QuadrotorPdOutput(quadrotor, xd[ii, :, :], t_eval, n, m)
    pd_controller = PDController(output, K_dc_p, K_dc_d)
    perturbed_pd_controller = PerturbedController(quadrotor, pd_controller, noise_var, const_offset=hover_thrust)
    xs[ii, :, :], us[ii, :, :] = quadrotor.simulate(x0, perturbed_pd_controller, t_eval)
    plt.subplot(int(np.ceil(n_traj_dc / n_cols)), n_cols, ii + 1)
    plt.plot(t_eval, xs[ii, :, 0], 'b', label='$y$')
    plt.plot(t_eval, xs[ii, :, 1], 'g', label='$z$')
    plt.plot(t_eval, xs[ii, :, 2], 'r', label='$\\theta$')
    plt.plot(t_eval, xd[ii, :, 0], '--b', label='$y_d$')
    plt.plot(t_eval, xd[ii, :, 1], '--g', label='$z_d$')
    plt.plot(t_eval, xd[ii, :, 2], '--r', label='$\\theta_d$')
plt.suptitle(
    'Training data \nx-axis: time (sec), y-axis: state value, $x$ - blue, $xd$ - dotted blue, $\\theta$ - red, $\\theta_d$ - dotted red',
    y=0.94)
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Learn a linear model with dynamic mode decomposition (DMD)
# + [markdown] pycharm={"name": "#%% md\n"}
# To compare our method with existing techniques, we first learn a linear state space model from data. This is dubbed
# dynamic mode decomposition. I.e. we use linear regression with LASSO regularization to learn an approximate linear model
# with model structure
#
# \begin{equation}
# \mathbf{\dot{x}} = A_{dmd}\mathbf{x} + B_{dmd}\mathbf{u}
# \end{equation}
# + pycharm={"name": "#%%\n"}
#DMD parameters:
alpha_dmd = 1.4e-2 # Regularization strength (LASSO) DMD
tune_mdl_dmd = False # If True, choose alpha by cross-validation
# + pycharm={"name": "#%%\n"}
basis = lambda x: x # Identity dictionary: DMD is EDMD with a linear basis
C_dmd = np.eye(n)
optimizer_dmd = linear_model.MultiTaskLasso(alpha=alpha_dmd, fit_intercept=False, selection='random')
cv_dmd = linear_model.MultiTaskLassoCV(fit_intercept=False, n_jobs=-1, cv=3, selection='random')
standardizer_dmd = preprocessing.StandardScaler(with_mean=False)
model_dmd = Edmd(n, m, basis, n, n_traj_dc, optimizer_dmd, cv=cv_dmd, standardizer=standardizer_dmd, C=C_dmd, first_obs_const=False)
# Inputs are shifted by the hover thrust so that u = 0 corresponds to hovering.
xdmd, y_dmd = model_dmd.process(xs, us-hover_thrust, np.tile(t_eval,(n_traj_dc,1)), downsample_rate=sub_sample_rate)
model_dmd.fit(xdmd, y_dmd, cv=tune_mdl_dmd, override_kinematics=True)
sys_dmd = LinearLiftedDynamics(model_dmd.A, model_dmd.B, model_dmd.C, model_dmd.basis)
if tune_mdl_dmd:
    print('$\\alpha$ DMD: ',model_dmd.cv.alpha_)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Learn a lifted linear model with extended dynamic mode decomposition (EDMD)
# + [markdown] pycharm={"name": "#%% md\n"}
# In addition, we compare our method with the current state of the art of Koopman based learning, the extended dynamic mode
# decomposition. We use a dictionary of nonlinear functions $\boldsymbol{\phi(x)}$ to lift the state variables and learn a lifted state space model
# of the dynamics. I.e. we first lift and then use linear regression with LASSO regularization to learn an approximate
# lifted linear model with model structure
#
# \begin{equation}
# \mathbf{\dot{z}} = A_{edmd}\mathbf{z} + B_{edmd}\mathbf{u}, \qquad \mathbf{z} = \boldsymbol{\phi(x)}
# \end{equation}
# + pycharm={"name": "#%%\n"}
#EDMD parameters:
alpha_edmd = 1.1e-1 # Regularization strength (LASSO) EDMD
tune_mdl_edmd = False
# + pycharm={"name": "#%%\n"}
# Polynomial + sine dictionary used to lift the state.
basis = PolySineBasis(n, poly_deg=2, cross_terms=False)
basis.construct_basis()
poly_sine_features = preprocessing.FunctionTransformer(basis.basis)
poly_sine_features.fit(np.zeros((1,n)))
n_lift_edmd = poly_sine_features.transform((np.zeros((1,n)))).shape[1]
# Projection matrix: the original states sit right after the constant observable.
C_edmd = np.zeros((n,n_lift_edmd))
C_edmd[:,1:n+1] = np.eye(n)
optimizer_edmd = linear_model.MultiTaskLasso(alpha=alpha_edmd, fit_intercept=False, selection='random')
cv_edmd = linear_model.MultiTaskLassoCV(fit_intercept=False, n_jobs=-1, cv=3, selection='random')
standardizer_edmd = preprocessing.StandardScaler(with_mean=False)
model_edmd = Edmd(n, m, basis.basis, n_lift_edmd, n_traj_dc, optimizer_edmd, cv=cv_edmd, standardizer=standardizer_edmd, C=C_edmd)
X_edmd, y_edmd = model_edmd.process(xs, us-hover_thrust, np.tile(t_eval,(n_traj_dc,1)), downsample_rate=sub_sample_rate)
model_edmd.fit(X_edmd, y_edmd, cv=tune_mdl_edmd, override_kinematics=True)
model_edmd.reduce_mdl()
sys_edmd = LinearLiftedDynamics(model_edmd.A, model_edmd.B, model_edmd.C, model_edmd.basis_reduced)
if tune_mdl_edmd:
    print('$\\alpha$ EDMD: ',model_edmd.cv.alpha_)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Learn a lifted bilinear model with bilinear extended mode decomposition (bEDMD)
# + [markdown] pycharm={"name": "#%% md\n"}
# Finally, we use the method developed in the paper to learn a lifted bilinear model of the dynamics, dubbed bilinear
# extended mode decomposition (bEDMD). I.e. we first lift and then use linear regression with LASSO regularization to learn an approximate
# lifted linear model with model structure
#
# \begin{equation}
# \mathbf{\dot{z}}=F\mathbf{z}+\sum_{i=1}^m G_i\mathbf{z}\mathbf{u}_i, \qquad \mathbf{z} = \boldsymbol{\phi(x)}
# \end{equation}
# + pycharm={"name": "#%%\n"}
#Bilinear EDMD parameters:
alpha_bedmd_init = 1.9e-2 # Regularization strength (LASSO) bEDMD
alpha_bedmd = 1.9e-2
tune_mdl_bedmd = False
# + pycharm={"name": "#%%\n"}
n_lift_bedmd = n_lift_edmd # Same dictionary as EDMD
output_inds = np.array([1, 2]) # Output states, feedback linearizing controller
C_x_bedmd = np.zeros((n, n_lift_bedmd))
C_x_bedmd[:, 1:n + 1] = np.eye(n)
C_h_bedmd = C_x_bedmd[output_inds, :] # Projection onto the controlled outputs
basis_bedmd = lambda x: poly_sine_features.transform(x)
optimizer_bedmd = linear_model.MultiTaskLasso(alpha=alpha_bedmd_init, fit_intercept=False, selection='random')
cv_bedmd = linear_model.MultiTaskLassoCV(fit_intercept=False, n_jobs=-1, cv=3, selection='random')
standardizer_bedmd = preprocessing.StandardScaler(with_mean=False)
model_bedmd = FlBilinearLearner(n, m, basis_bedmd, n_lift_bedmd, n_traj_dc, optimizer_bedmd, C_h_bedmd,
                                cv=cv_bedmd, standardizer=standardizer_bedmd, C=C_x_bedmd)
X_bedmd, y_bedmd = model_bedmd.process(xs, us - hover_thrust, np.tile(t_eval, (n_traj_dc, 1)),
                                       downsample_rate=sub_sample_rate)
model_bedmd.fit(X_bedmd, y_bedmd, cv=tune_mdl_bedmd, override_kinematics=True, l1_reg=alpha_bedmd)
sys_bedmd = BilinearLiftedDynamics(model_bedmd.n_lift, m, model_bedmd.A, model_bedmd.B, model_bedmd.C,
                                   model_bedmd.basis)
if tune_mdl_bedmd:
    print('$\\alpha$ bilinear EDMD: ', model_bedmd.cv.alpha_)
# -
# ### Evaluate open loop prediction performance
# + [markdown] pycharm={"name": "#%% md\n"}
# We first evaluate the open loop prediction performance of the proposed method.
# This is done by generating a new data set in the same way as the training set, predicting the evolution of the system
# with the control sequence of each trajectory executed in the data set with each of the models, and finally comparing
# the mean and standard deviation of the error between the true and predicted evolution over the trajectories. The
# experimental results support what is to be expected from the theory as the error in the $y$ and $z$ terms are
# significantly lower for the bEDMD method than both DMD and EDMD. The reason for this
# improvement is that the bEDMD method can capture the nonlinearities present in the actuation matrix of the
# $(y,z)$-dynamics.
# + pycharm={"name": "#%%\n"}
# Prediction performance evaluation parameters:
folder_plots = 'examples/figures/' # Path to save plots
n_traj_ol = 100 # Number of trajectories to execute, open loop
# + pycharm={"name": "#%%\n"}
# Buffers for the true test trajectories and each model's open-loop predictions.
xs_ol = np.empty((n_traj_ol, t_eval.shape[0], n))
xs_dmd_ol = np.empty((n_traj_ol, t_eval.shape[0]-1, n))
xs_edmd_ol = np.empty((n_traj_ol, t_eval.shape[0]-1, n))
xs_bedmd_ol = np.empty((n_traj_ol, t_eval.shape[0]-1, n))
us_test = np.empty((n_traj_ol, t_eval.shape[0]-1, m))
for ii in range(n_traj_ol):
    # Generate a fresh test trajectory the same way as the training data.
    x0 = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
    set_pt_dc = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
    mpc_trajgen = MPCController(nominal_sys, n_pred_dc, dt, umin, umax, xmin, xmax, QN_trajgen, R_trajgen,
                                QN_trajgen, set_pt_dc)
    mpc_trajgen.eval(x0, 0)
    xd = mpc_trajgen.parse_result().T
    # NOTE(review): the training loop rejects failed solves with
    # np.any(np.isnan(...)); `xd[0,0] is None` only triggers for object-dtype
    # results — confirm which failure mode parse_result() actually produces.
    while xd[0,0] is None:
        x0 = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
        set_pt_dc = np.asarray([rand.uniform(l, u) for l, u in zip(-x0_max, x0_max)])
        mpc_trajgen = MPCController(nominal_sys, n_pred_dc, dt, umin, umax, xmin, xmax, QN_trajgen, R_trajgen,
                                    QN_trajgen, set_pt_dc)
        mpc_trajgen.eval(x0, 0)
        xd = mpc_trajgen.parse_result().T
    output = QuadrotorPdOutput(quadrotor, xd, t_eval, n, m)
    pd_controller = PDController(output, K_dc_p, K_dc_d)
    # const_offset = mass*gravity/2 equals hover_thrust here (m = 2 inputs).
    perturbed_pd_controller = PerturbedController(quadrotor, pd_controller, noise_var, const_offset=mass * gravity / 2)
    xs_ol[ii,:,:], us_test[ii,:,:] = quadrotor.simulate(x0, perturbed_pd_controller, t_eval)
    # Replay the recorded (hover-shifted) inputs open loop through each model.
    ol_controller_nom = OpenLoopController(sys_bedmd, us_test[ii,:,:]-hover_thrust, t_eval[:-1])
    xs_dmd_ol[ii,:,:], _ = sys_dmd.simulate(x0, ol_controller_nom, t_eval[:-1])
    z_0_edmd = sys_edmd.phi_fun(np.atleast_2d(x0)).squeeze()
    zs_edmd_tmp, _ = sys_edmd.simulate(z_0_edmd, ol_controller_nom, t_eval[:-1])
    xs_edmd_ol[ii,:,:] = np.dot(model_edmd.C, zs_edmd_tmp.T).T
    z_0_bedmd = sys_bedmd.phi_fun(np.atleast_2d(x0)).squeeze()
    zs_bedmd_tmp, _ = sys_bedmd.simulate(z_0_bedmd, ol_controller_nom, t_eval[:-1])
    xs_bedmd_ol[ii,:,:] = np.dot(model_bedmd.C, zs_bedmd_tmp.T).T
# Mean/std of the prediction error over all test trajectories, per model:
error_dmd = xs_ol[:,:-1,:] - xs_dmd_ol
error_dmd_mean = np.mean(error_dmd, axis=0).T
error_dmd_std = np.std(error_dmd, axis=0).T
mse_dmd = np.mean(np.mean(np.mean(np.square(error_dmd))))
error_edmd = xs_ol[:,:-1,:] - xs_edmd_ol
error_edmd_mean = np.mean(error_edmd, axis=0).T
error_edmd_std = np.std(error_edmd, axis=0).T
mse_edmd = np.mean(np.mean(np.mean(np.square(error_edmd))))
error_bedmd = xs_ol[:,:-1,:] - xs_bedmd_ol
error_bedmd_mean = np.mean(error_bedmd, axis=0).T
error_bedmd_std = np.std(error_bedmd, axis=0).T
mse_bedmd = np.mean(np.mean(np.mean(np.square(error_bedmd))))
print('\nOpen loop performance statistics:')
print(' MSE DMD: ', "{:.3f}".format(mse_dmd),
      '\n MSE EDMD: ', "{:.3f}".format(mse_edmd),
      '\n MSE bEDMD: ', "{:.3f}".format(mse_bedmd))
print(' Improvement DMD -> EDMD: ', "{:.2f}".format((1 - mse_edmd / mse_dmd) * 100), ' %'
      '\n Improvement DMD -> bEDMD: ', "{:.2f}".format((1 - mse_bedmd / mse_dmd) * 100), ' %'
      '\n Improvement EDMD -> bEDMD: ', "{:.2f}".format((1 - mse_bedmd / mse_edmd) * 100), ' %')
# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
import matplotlib
# Shared plot settings:
figwidth = 12
lw = 2
fs = 16
y_lim_gain = 1.2
#Plot open loop results:
ylabels = ['$e_{y}$', '$e_z$', '$e_{\\theta}$']
plt.figure(figsize=(figwidth,3))
for ii in range(3):
    # Mean error with a +/- one standard deviation band, per output and model.
    plt.subplot(1,3,ii+1)
    plt.plot(t_eval[:-1], error_dmd_mean[ii,:], linewidth=lw, label='DMD')
    plt.fill_between(t_eval[:-1], error_dmd_mean[ii,:] - error_dmd_std[ii,:], error_dmd_mean[ii,:] + error_dmd_std[ii,:], alpha=0.2)
    plt.plot(t_eval[:-1], error_edmd_mean[ii, :], linewidth=lw, label='EDMD')
    plt.fill_between(t_eval[:-1], error_edmd_mean[ii, :] - error_edmd_std[ii, :],error_edmd_mean[ii, :] + error_edmd_std[ii, :], alpha=0.2)
    plt.plot(t_eval[:-1], error_bedmd_mean[ii, :], linewidth=lw, label='bEDMD')
    plt.fill_between(t_eval[:-1], error_bedmd_mean[ii, :] - error_bedmd_std[ii, :],error_bedmd_mean[ii, :] + error_bedmd_std[ii, :], alpha=0.2)
    # Scale each axis to the bEDMD error band.
    ylim = max(max(np.abs(error_bedmd_mean[ii, :] - error_bedmd_std[ii, :])), max(np.abs(error_bedmd_mean[ii, :] + error_bedmd_std[ii, :])))
    plt.ylim([-ylim * y_lim_gain, ylim * y_lim_gain])
    plt.xlabel('$t$ (sec)', fontsize=fs)
    plt.ylabel(ylabels[ii], fontsize=fs)
    plt.grid()
plt.legend(loc='upper left', fontsize=fs-4)
suptitle = plt.suptitle('Open loop prediction error of DMD, EDMD and bilinear EDMD models', y=1.05, fontsize=18)
matplotlib.rcParams['pdf.fonttype'] = 42 # Embed TrueType fonts in the exported PDF/PS
matplotlib.rcParams['ps.fonttype'] = 42
plt.tight_layout()
plt.savefig(folder_plots + 'planar_quad_prediction.pdf', format='pdf', dpi=2400, bbox_extra_artists=(suptitle,), bbox_inches="tight")
plt.show()
# -
# ### Evaluate closed loop performance
# + [markdown] pycharm={"name": "#%% md\n"}
# We now study the closed loop performance of the control design. A trajectory is designed
# to move the system from $(y_0,z_0) = (-1,0)$ to $(y_f,z_f) = (1,1)$. Then, the feedback linearizing controller is
# designed to track the trajectory in the $z,\theta$ coordinates. These coordinates are chosen to avoid singularities
# present in the $y,z$ coordinates. Comparing the closed loop performance of the feedback linearization based on the
# bEDMD with LQR controllers designed using the DMD and EDMD models, we observe that the trajectory tracking error is
# significantly reduced while expending only somewhat more control effort.
# + pycharm={"name": "#%%\n"}
#Closed loop performance evaluation parameters:
x0_cl = np.array([-1., 0., 0., 0., 0., 0.]) # Initial value, closed loop trajectory
set_pt_cl = np.array([1., 1., 0., 0., 0., 0.]) # Desired final value, closed loop trajectory
t_eval_cl = dt * np.arange(201) # Simulation time points, closed loop
Q_trajgen_cl = sc.sparse.diags([0,0,0,0,0,0]) # State penalty matrix, trajectory generation
QN_trajgen_cl = sc.sparse.diags([3e2,3e2,3e2,1e2,1e2,1e2]) # Final state penalty matrix, trajectory generation
R_trajgen_cl = sc.sparse.eye(m) # Actuation penalty matrix, trajectory generation
mpc_trajgen_cl = MPCController(nominal_sys,t_eval_cl.size,dt,umin,umax,xmin,xmax,QN_trajgen_cl,R_trajgen_cl,QN_trajgen_cl,set_pt_cl)
q_cl, r_cl = 2e1, 1 # State and actuation penalty values, closed loop
output_inds = np.array([1, 2]) # Output states (z, theta), feedback linearizing controller
# Generate trajectory:
mpc_trajgen_cl.eval(x0_cl, 0)
xr_cl = mpc_trajgen_cl.parse_result()[:,:-1]
ur_cl = mpc_trajgen_cl.get_control_prediction()
xr_cl_dot = nominal_sys.eval_dot(xr_cl,ur_cl,0.)
# Define outputs (positions and their first/second time derivatives):
y_d = xr_cl[output_inds,:]
y_d_dot = xr_cl[output_inds+int(n/2),:]
y_d_ddot = xr_cl_dot[output_inds+int(n/2),:]
# -
# ##### Design LQR controller based on DMD model
# + pycharm={"name": "#%%\n"}
Q_dmd = q_cl*np.identity(n)
R_dmd = r_cl*np.identity(m)
P_dmd = sc.linalg.solve_continuous_are(model_dmd.A, model_dmd.B, Q_dmd, R_dmd)
K_dmd = np.linalg.inv(R_dmd)@model_dmd.B.T@P_dmd
K_dmd_p, K_dmd_d = K_dmd[:,:int(n/2)], K_dmd[:,int(n/2):]
output_dmd = QuadrotorPdOutput(sys_dmd, xr_cl.T, t_eval_cl, n, m)
controller_dmd = PDController(output_dmd, K_dmd_p, K_dmd_d)
# Offset by the hover thrust and clip to the actuation limits.
controller_dmd = PerturbedController(sys_dmd,controller_dmd,0.,const_offset=hover_thrust, umin=umin, umax=umax)
# -
# ##### Design LQR controller based on EDMD model
# + pycharm={"name": "#%%\n"}
# Lifted reference trajectory and its numerical time derivatives:
z_d_edmd = np.array([sys_edmd.phi_fun(x.reshape(1,-1)).squeeze() for x in xr_cl.T]).T
z_d_dot_edmd = differentiate_vec(z_d_edmd.T, t_eval_cl).T
z_d_ddot_edmd = differentiate_vec(z_d_dot_edmd.T, t_eval_cl).T
output_edmd = QuadrotorTrajectoryOutput(sys_edmd, y_d, y_d_dot, y_d_ddot, dt, z_d_edmd, z_d_dot_edmd, z_d_ddot_edmd, model_edmd.C[output_inds,:])
Q_edmd = q_cl*np.identity(sys_edmd.n)
R_edmd = r_cl*np.identity(m)
P_edmd = sc.linalg.solve_continuous_are(sys_edmd.A, sys_edmd.B, Q_edmd, R_edmd)
K_edmd = np.linalg.inv(R_edmd)@model_edmd.B.T@P_edmd
controller_edmd = LinearLiftedController(output_edmd, K_edmd)
controller_edmd = PerturbedController(quadrotor, controller_edmd,0.,const_offset=hover_thrust, umin=umin, umax=umax)
# -
# ##### Design feedback linearizing controller based on bEDMD model
# + pycharm={"name": "#%%\n"}
k = m
n_lift_bedmd = sys_bedmd.n
Q_bedmd = q_cl*np.eye(int(2*n_lift_bedmd))
R_bedmd = r_cl*np.eye(n_lift_bedmd)
C_h = model_bedmd.C[output_inds,:]
# Lifted reference trajectory and its numerical time derivatives:
z_d_bedmd = np.array([sys_bedmd.phi_fun(x.reshape(1,-1)).squeeze() for x in xr_cl.T]).T
z_d_dot_bedmd = differentiate_vec(z_d_bedmd.T, t_eval_cl).T
z_d_ddot_bedmd = differentiate_vec(z_d_dot_bedmd.T, t_eval_cl).T
output_bedmd = QuadrotorTrajectoryOutput(sys_bedmd, y_d, y_d_dot, y_d_ddot, dt, z_d_bedmd, z_d_dot_bedmd, z_d_ddot_bedmd, C_h)
# Double-integrator structure in the lifted coordinates used for the LQR design:
f_eta = np.concatenate((np.zeros((n_lift_bedmd,n_lift_bedmd)), np.eye(n_lift_bedmd)), axis=1)
f_eta_dot = np.concatenate((sys_bedmd.F@sys_bedmd.F, np.zeros((n_lift_bedmd,n_lift_bedmd))), axis=1)
F_lin = np.concatenate((f_eta, f_eta_dot), axis=0)
G_lin = np.concatenate((np.zeros((n_lift_bedmd,n_lift_bedmd)), np.eye(n_lift_bedmd)), axis=0)
P_bedmd = sc.linalg.solve_continuous_are(F_lin, G_lin, Q_bedmd, R_bedmd)
K_bedmd = np.linalg.inv(R_bedmd)@G_lin.T@P_bedmd
controller_bedmd = BilinearFBLinController(sys_bedmd, output_bedmd, K_bedmd)
controller_bedmd = PerturbedController(sys_bedmd, controller_bedmd,0.,const_offset=hover_thrust, umin=umin, umax=umax)
# -
# ##### Compare closed loop performance
# + pycharm={"name": "#%%\n"}
# Simulate the system under closed loop control:
xs_cl_dmd, us_cl_dmd = quadrotor.simulate(x0_cl, controller_dmd, t_eval_cl)
xs_cl_edmd, us_cl_edmd = quadrotor.simulate(x0_cl, controller_edmd, t_eval_cl)
xs_cl_bedmd, us_cl_bedmd = quadrotor.simulate(x0_cl, controller_bedmd, t_eval_cl)
# Tracking error (squared Frobenius norm over output states) and control effort:
mse_cl_dmd = np.linalg.norm(xs_cl_dmd[1:,output_inds]-xr_cl[output_inds,1:].T, ord='fro')**2
mse_cl_edmd = np.linalg.norm(xs_cl_edmd[1:,output_inds]-xr_cl[output_inds,1:].T, ord='fro')**2
mse_cl_bedmd = np.linalg.norm(xs_cl_bedmd[1:,output_inds]-xr_cl[output_inds,1:].T, ord='fro')**2
ctrl_cost_dmd = np.linalg.norm(us_cl_dmd, ord='fro')**2
ctrl_cost_edmd = np.linalg.norm(us_cl_edmd, ord='fro')**2
ctrl_cost_bedmd = np.linalg.norm(us_cl_bedmd, ord='fro')**2
print('\nClosed loop performance statistics:')
print(' -Tracking error:')
print(' Tracking MSE DMD: ', "{:.3f}".format(mse_cl_dmd),
      '\n Tracking MSE EDMD: ', "{:.3f}".format(mse_cl_edmd),
      '\n Tracking MSE bEDMD: ', "{:.3f}".format(mse_cl_bedmd))
print(' Improvement DMD -> EDMD: ', "{:.2f}".format(100*(1-(mse_cl_edmd)/(mse_cl_dmd))), ' %'
      '\n Improvement DMD -> bEDMD: ', "{:.2f}".format(100*(1-(mse_cl_bedmd)/(mse_cl_dmd))), ' %'
      '\n Improvement EDMD -> bEDMD: ', "{:.2f}".format(100*(1-(mse_cl_bedmd)/(mse_cl_edmd))), ' %')
print(' -Control effort:')
print(' Control effort DMD: ', "{:.3f}".format(ctrl_cost_dmd),
      '\n Control effort EDMD: ', "{:.3f}".format(ctrl_cost_edmd),
      '\n Control effort bEDMD: ', "{:.3f}".format(ctrl_cost_bedmd))
print(' Improvement DMD -> EDMD: ', "{:.2f}".format(100*(1-(ctrl_cost_edmd)/(ctrl_cost_dmd))), ' %'
      '\n Improvement DMD -> bEDMD: ', "{:.2f}".format(100*(1-(ctrl_cost_bedmd)/(ctrl_cost_dmd))), ' %'
      '\n Improvement EDMD -> bEDMD: ', "{:.2f}".format(100*(1-(ctrl_cost_bedmd)/(ctrl_cost_edmd))), ' %')
# + pycharm={"name": "#%%\n"}
# Plot the tracked outputs (left column) and control inputs (right column).
x_index = output_inds[0]
y_index = output_inds[1]
plt.figure(figsize=(figwidth, 4))
plt.subplot(2, 2, 1)
plt.plot(t_eval_cl, xr_cl[x_index,:], '--r', linewidth=2, label='Reference')
plt.plot(t_eval_cl,xs_cl_dmd[:, x_index], linewidth=lw, label='DMD')
plt.plot(t_eval_cl,xs_cl_edmd[:, x_index], linewidth=lw, label='EDMD')
plt.plot(t_eval_cl,xs_cl_bedmd[:, x_index], linewidth=lw, label='bEDMD')
plt.ylabel('$z$', fontsize=fs)
plt.title('Output states', fontsize=fs)
plt.grid()
plt.subplot(2, 2, 3)
plt.plot(t_eval_cl, xr_cl[y_index, :], '--r', linewidth=2, label='Reference')
plt.plot(t_eval_cl, xs_cl_dmd[:, y_index], linewidth=lw, label='DMD')
plt.plot(t_eval_cl, xs_cl_edmd[:, y_index], linewidth=lw, label='EDMD')
plt.plot(t_eval_cl, xs_cl_bedmd[:, y_index], linewidth=lw, label='bEDMD')
plt.ylabel('$\\theta$', fontsize=fs)
plt.xlabel('Time (sec)')
plt.grid()
plt.subplot(2, 2, 2)
plt.plot(t_eval_cl[:-1], us_cl_dmd[:, 0], linewidth=lw, label='DMD')
plt.plot(t_eval_cl[:-1], us_cl_edmd[:, 0], linewidth=lw, label='EDMD')
plt.plot(t_eval_cl[:-1], us_cl_bedmd[:, 0], linewidth=lw, label='bEDMD')
plt.ylabel('$u_1$', fontsize=fs)
plt.title('Control action', fontsize=fs)
plt.grid()
plt.subplot(2, 2, 4)
plt.plot(t_eval_cl[:-1], us_cl_dmd[:, 1], linewidth=lw, label='DMD')
plt.plot(t_eval_cl[:-1], us_cl_edmd[:, 1], linewidth=lw, label='EDMD')
plt.plot(t_eval_cl[:-1], us_cl_bedmd[:, 1], linewidth=lw, label='bEDMD')
plt.xlabel('Time (sec)', fontsize=fs)
plt.ylabel('$u_2$', fontsize=fs)
plt.grid()
suptitle = plt.suptitle('Trajectory tracking based on DMD, EDMD and bilinear EDMD models', y=1.05,fontsize=18)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.tight_layout()
plt.savefig(folder_plots + 'planar_quad_closedloop.pdf', format='pdf', dpi=2400, bbox_extra_artists=(suptitle,),
            bbox_inches="tight")
plt.show()
# + pycharm={"name": "#%%"}
| working_files/planar_quadrotor_fl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# List the measurement CSV files available in the working directory.
[file for file in os.listdir() if file.endswith(".csv")]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Preview the ';'-separated measurement files ending in "one.csv".
for file in os.listdir():
    if file.endswith("one.csv"):
        df = pd.read_csv(file, sep=';')
        print(file, df.head())
# The remaining measurement files.
[file for file in os.listdir() if file.endswith(".csv") and not file.endswith("one.csv")]
def plot_data(path: str, plot_mean: bool = True, plot_data: bool = False, sensors: set = None):
    """Plot RSSI traces and/or their least-squares levels for each sensor.

    path: ';'-separated CSV with DEVICE and RSSI columns.
    plot_mean: draw a dashed horizontal line at the least-squares intercept of
        each sensor's trace (labelled per sensor).
    plot_data: draw the raw (negative dBm) RSSI samples.
    sensors: subset of sensor column names to plot; defaults to SmartParking1..4.
    """
    if sensors is None:
        sensors = {f'SmartParking{i}' for i in range(1, 5)}
    df = pd.read_csv(path, sep=';')
    # One column per sensor holding the magnitude of its RSSI readings.
    for i in range(1, 5):
        df[f"SmartParking{i}"] = df[df["DEVICE"] == f"SmartParking{i}"]["RSSI"].abs()
    colors = ['r', 'g', 'b', 'y']
    color_idx = 0
    for column in df.columns:
        if not column.startswith("SmartParking") or str(column) not in sensors:
            continue
        # RSSI is negative dBm; restore the sign after the abs() taken above.
        # (The original applied abs() and then negated — one negation suffices.)
        data = (-df[column].dropna().abs()).values.tolist()
        if plot_data:
            plt.plot(data, color=colors[color_idx], label=str(column))
        # Least-squares fit data ~ slope*t + intercept; only the intercept is
        # displayed (the original bound the slope to an unused variable `m`).
        A = np.vstack([np.arange(len(data)), np.ones(len(data))]).T
        _slope, intercept = np.linalg.lstsq(A, data, rcond=None)[0]
        if plot_mean:
            plt.axhline(y=intercept, color=colors[color_idx], linestyle='--', label=str(column))
        color_idx += 1
    plt.legend()
# So, the parking looked like:
# ```
# - (SP1) - (SP2) - (SP4)
#
# - (SP3)
# PC
# ```
# Plot only the least-squares levels for the first layout.
plot_data("parking.csv", True, False)
# Then the sensors were located in the following way:
# So, the parking looked like:
# ```
# - (SP1) - (SP2) - (SP4)
#
#
#
# PC
#
# - (SP3)
# ```
# Same plots for the second layout and for a run closer to the source.
plot_data("parking_other_lot.csv", True, False)
plot_data("parking_other_lot_closer_to_the_source.csv", True, False)
| proofs_of_concepts/rssi_localisation_proof/RSSI_from_several_nodes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="iSeZX-FqfHHA"
# # **<NAME> - O maior portal de Data Science do Brasil**
# www.minerandodados.com.br
# + id="MhrG9iS9fL4L" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9437d3e8-4407-4f4e-8473-341dbec8543f"
'''from google.colab import drive
drive.mount('/content/drive')'''
# + [markdown] id="Oks5bNFRfHHB"
# ## **Análise de Sentimentos usando Machine Learning**
# + [markdown] id="IJsB6eY0fHHC"
# * Criando modelos para análise de sentimentos de tweets
# + id="T4_K6NcqfHHD"
import nltk
import re
import pandas as pd
from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
# + [markdown] id="r1pxDwaHfHHF"
# **Ler arquivo de dados e conta a quantidade de linhas**
# + id="GOYpa0vtfHHG"
# Load the labeled tweets dataset (UTF-8 CSV).
df = pd.read_csv('/content/Tweets_Mg.csv', encoding='utf-8')
# + id="DzxUf5TOnF-4" colab={"base_uri": "https://localhost:8080/", "height": 581} outputId="ffad084c-af0e-46c7-b07c-1df3d35652dd"
df.head()
# + [markdown] id="S2VSXvUwfHHL"
# **Count the number of neutral, positive and negative tweets**
# + id="-8EpKNzMfwNH" colab={"base_uri": "https://localhost:8080/"} outputId="d09e798b-ceb4-40db-ade2-7a910a6d3d0e"
df['Classificacao'].value_counts()
# + id="aIj5Lin3fHHT" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="44a38047-e8c0-4924-958d-acc6a1d37eb6"
# %matplotlib inline
df.Classificacao.value_counts().plot(kind='bar')
# + id="fN7k0LCMfHHV" colab={"base_uri": "https://localhost:8080/"} outputId="4fc95934-9c08-4426-f41a-42064c15cf56"
df.count()
# + [markdown] id="IWenqwCnfHHZ"
# ## Data Pre-Processing
# + [markdown] id="2DuvWYo9fHHZ"
# * Remove duplicated rows from the dataset
#     - An artifact of the data collection.
# * Remove stopwords
# * Stemming or Lemmatization
# * Remove unwanted characters such as links etc.
# + id="m23ema2FfHHa"
# Drop tweets with duplicated text, in place.
df.drop_duplicates(['Text'], inplace=True)
# + id="SkPMW1UZfHHd" colab={"base_uri": "https://localhost:8080/"} outputId="099c1a86-f23a-464b-f95e-2b4946d4d022"
df.Text.count()
# + [markdown] id="J0uUvFajfHHf"
# ## **Separating tweets and their classes**
# + id="Uh7xRoHNfHHf"
tweets = df['Text']
classes = df['Classificacao']
# + [markdown] id="pdBk0tyefHHh"
# **Install libraries and download the NLTK corpora**
# + id="M_opr6SFfHHi" colab={"base_uri": "https://localhost:8080/"} outputId="e254eb4d-3204-4950-f81b-9aa9207c067c"
import nltk
nltk.download('stopwords')
nltk.download('rslp')
nltk.download('punkt')
nltk.download('wordnet')
# + [markdown] id="2NV3syRNfHHk"
# **Funções de Pre-processamento de dados**
# + id="M6HDfRzcfHHk"
def RemoveStopWords(instancia):
    """Strip Portuguese stopwords from a whitespace-tokenized sentence.

    NOTE: matching is case-sensitive, so capitalized stopwords ("Não")
    survive — the corpus stopword list is all lower-case.
    """
    vocabulario_parada = set(nltk.corpus.stopwords.words('portuguese'))
    tokens_mantidos = [
        token for token in instancia.split() if token not in vocabulario_parada
    ]
    return " ".join(tokens_mantidos)
# + id="q8FS-ZoJfHHn"
def Stemming(instancia):
    """Reduce every word to its stem with the RSLP Portuguese stemmer."""
    radicalizador = nltk.stem.RSLPStemmer()
    return " ".join(radicalizador.stem(palavra) for palavra in instancia.split())
# + id="NrLlbIaafHHp"
def Limpeza_dados(instancia):
    """Remove URLs and basic punctuation from a tweet, lower-casing it.

    BUG FIX: the original comment promised to remove commas, but the
    chained ``.replace`` calls never stripped ','. Commas are now removed
    together with '.', ';', '-', ':' and ')'.

    Args:
        instancia (str): raw tweet text.

    Returns:
        str: cleaned, lower-cased text.
    """
    # Drop links first so their punctuation does not survive the cleanup.
    instancia = re.sub(r"http\S+", "", instancia).lower()
    # One C-level translate pass instead of six chained .replace() calls.
    return instancia.translate(str.maketrans('', '', '.;-:),'))
# + id="it75oz2vhCyy"
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
def Lemmatization(instancia):
    """Lemmatize every whitespace-delimited word with the module-level
    WordNet lemmatizer (English rules)."""
    lemas = [wordnet_lemmatizer.lemmatize(termo) for termo in instancia.split()]
    return " ".join(lemas)
# + [markdown] id="TiATzqyZfHHs"
# **Entenda como funciona cada função**
# + id="Jn7uqh9KfHHs" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="87884fb6-1654-4a41-bf91-d87dd1ef6f09"
RemoveStopWords('Eu não gosto do partido, e também não votaria novamente nesse governante!')
# + id="YCEFse4ZfHHv" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="fc62da8f-2add-4ed1-daf8-2b0c4e69ce72"
Stemming('Eu não gosto do partido, e também não votaria novamente nesse governante!')
# + id="F3riOVgGfHHz" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="930ef7a8-757a-4c17-dd89-0453fb9be8d0"
Limpeza_dados('Assita aqui o video do Governador falando sobre a CEMIG https://www.uol.com.br :) ;)')
# + id="MEvt1kBghC9_" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="dfaccf40-9fb4-428b-8ba0-3bb1ef008206"
Lemmatization('Os carros são bonitos')
# + [markdown] id="s8JohAMlfHH3"
# **Aplica as 3 funções de Pre-processamento nos dados**
# + id="0C2bY97JfHH4"
def Preprocessing(instancia):
    """Full tweet pipeline: clean -> lower-case -> drop stopwords -> stem.

    Mirrors Limpeza_dados + RemoveStopWords + Stemming in a single pass.
    BUG FIX: commas are now stripped as well — the cleaning step was
    advertised as removing commas but ',' was missing from the chained
    ``.replace`` calls.

    Args:
        instancia (str): raw tweet text.

    Returns:
        str: space-joined stems of the non-stopword tokens.
    """
    stemmer = nltk.stem.RSLPStemmer()
    # Remove links first, then punctuation in one translate pass.
    instancia = re.sub(r"http\S+", "", instancia).lower()
    instancia = instancia.translate(str.maketrans('', '', '.;-:),'))
    stopwords = set(nltk.corpus.stopwords.words('portuguese'))
    return " ".join(
        stemmer.stem(palavra)
        for palavra in instancia.split()
        if palavra not in stopwords
    )
# Apply the full preprocessing function to every tweet.
# NOTE: Preprocessing rebuilds the stopword set on each call — acceptable
# for this dataset size, but hoist it for larger corpora.
tweets = [Preprocessing(i) for i in tweets]
# + id="D77vjoRCfHH6" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="86125556-d06f-4965-dbba-e1ba814dfc93"
Preprocessing('Eu não gosto do partido, e também não votaria novamente nesse governante. Assita o video aqui https:// :)')
# + [markdown] id="ncXEOQOCfHH8"
# **Visualize os dados e veja como ficou após o pré-processamento**
# + id="4Fg7uWnpfHH9" colab={"base_uri": "https://localhost:8080/"} outputId="e074918a-9b2d-439e-ef40-5824441df57c"
tweets[:50]
# + [markdown] id="N27Ueb96lu7W"
# **Tokenização**
# - Atente para o tipo de dados que você está trabalhando.
# + id="CZ0eZu0HEFO9"
df = pd.read_csv('/content/Tweets_Mg.csv', encoding='utf-8')
tweets = df['Text']
classes = df['Classificacao']
# + id="Dq6O2GxKlz-6"
from nltk.tokenize import word_tokenize
# + id="hfkgKqmEmupe"
frase = 'A live do @blogminerando é show! :) :-) ;) =D'
# + id="ixDJyhR7l3-S" colab={"base_uri": "https://localhost:8080/"} outputId="8c21acc9-14ec-45ac-c34f-dc95a4961a1b"
word_tokenize(frase)
# + id="uJeCv1E4lA7O"
from nltk.tokenize import TweetTokenizer
# + id="PsW12SRmle1u"
tweet_tokenizer = TweetTokenizer()
# + id="rlDjKW46mV51" colab={"base_uri": "https://localhost:8080/"} outputId="b732e6a1-bbcc-49e4-e6b0-56d6ed53ca71"
tweet_tokenizer.tokenize(frase)
# + [markdown] id="y_42starfHH_"
# ## Criando o modelo
# + [markdown] id="eGBtrI9zfHH_"
# **Instancia o objeto que faz a vetorização dos dados de texto**
# + id="VOZ0Nej6fHIA"
# Bag-of-words vectorizer with the tweet-aware tokenizer (keeps emoticons
# and @mentions as single tokens instead of splitting them apart).
vectorizer = CountVectorizer(analyzer="word", tokenizer=tweet_tokenizer.tokenize)
# vectorizer = CountVectorizer(analyzer="word", tokenizer=tweet_tokenizer.tokenize, max_features=1000) <-- bases muito grandes
# + [markdown] id="Kzs88XE1fHIC"
# **Apply the vectorizer to the text data**
# + id="bNmtvrUafHIC" colab={"base_uri": "https://localhost:8080/"} outputId="860f7884-8cc8-4ff6-dbaa-c16deecd052c"
# Sparse document-term matrix built from the raw tweet texts.
freq_tweets = vectorizer.fit_transform(tweets)
type(freq_tweets)
# + [markdown] id="06Qgq0EIfHIH"
# **Matrix shape (rows, columns)**
# + id="bEQi20ajfHII" colab={"base_uri": "https://localhost:8080/"} outputId="739449ba-48b6-442c-a485-edab81860b1d"
freq_tweets.shape
# + [markdown] id="lejKj5JCjKSx"
# **Train the Machine Learning model**
# + id="NY5FcgiCfHIE" colab={"base_uri": "https://localhost:8080/"} outputId="adeda658-b6a6-4a51-ea18-9c07d4ffff3f"
# Multinomial Naive Bayes — a standard baseline for word-count features.
modelo = MultinomialNB()
modelo.fit(freq_tweets,classes)
# + [markdown] id="r_g3jIwCfHIL"
# **Matriz**
# + id="T5t9uHX7fHIL" colab={"base_uri": "https://localhost:8080/"} outputId="bab3776b-3153-46f5-9c1a-c63ce4511439"
freq_tweets.A
# + [markdown] id="GIHIF5wafHIN"
# **Testando o modelo com algumas instâncias simples**
# + id="RSDjpEc7fHIO"
# Define a few test instances inside a list.
testes = ['Esse governo está no início, vamos ver o que vai dar',
'Estou muito feliz com o governo de Minas esse ano',
'O estado de Minas Gerais decretou calamidade financeira!!!',
'A segurança desse país está deixando a desejar',
'O governador de Minas é mais uma vez do PT']
# + [markdown] id="NhOg0rrQfHIP"
# **Apply the preprocessing function to the data**
# + id="9JLAwKhsfHIQ"
# NOTE(review): this line is commented out, so the test sentences are NOT
# preprocessed, while the training tweets were in the earlier section —
# confirm whether that mismatch is intentional for this demo.
#testes = [Preprocessing(i) for i in testes]
# + id="xX2jyVWnfHIS"
# Transform the test data into word-count vectors.
freq_testes = vectorizer.transform(testes)
# + id="i1Oe0lfcfHIU" colab={"base_uri": "https://localhost:8080/"} outputId="b00f4f83-a1e0-4137-c438-ce5a59913357"
# Classify each sentence with the trained model.
for t, c in zip (testes,modelo.predict(freq_testes)):
    print (t +", "+ c)
# + id="7BsTaayEfHIW" colab={"base_uri": "https://localhost:8080/"} outputId="e051fe94-ab9e-4106-ac5e-a4926a2e14ee"
# Per-class probabilities, rounded for readability.
print (modelo.classes_)
modelo.predict_proba(freq_testes).round(2)
# + [markdown] id="8lP-AkxtfHIZ"
# ## **Tags de Negações**
# + [markdown] id="hHCm2rqrfHIa"
# * Acrescenta uma tag _NEG encontrada após um 'não'.
# * Objetivo é dar mais peso para o modelo identificar uma inversão de sentimento da frase.
# * Exemplos:
# - Eu gosto de cachorros, positivo.
# - Eu **não** gosto de cachorros, negativo.
# + id="NRgqopgQfHIb"
def marque_negacao(texto):
    """Append a ``_NEG`` tag to every word that follows a negation term.

    Lower-cases every word; once 'não' or 'not' is seen, all subsequent
    words (including later negations) are suffixed with '_NEG', so the
    classifier can give extra weight to sentiment-inverting context.

    Args:
        texto (str): whitespace-tokenized sentence.

    Returns:
        str: lower-cased sentence with negation-scope words tagged.
    """
    negacoes = ('não', 'not')
    negacao_detectada = False
    resultado = []
    for palavra in texto.split():
        palavra = palavra.lower()
        if negacao_detectada:  # idiomatic truthiness instead of '== True'
            palavra = palavra + '_NEG'
        if palavra in negacoes:
            negacao_detectada = True
        resultado.append(palavra)
    return " ".join(resultado)
# + [markdown] id="4M4dM-IMfHId"
# **Exemplos de utilização da tag de negações**
# + id="wnpgrQRhfHId" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="da3fec28-8878-409a-a872-fa53d8285af8"
marque_negacao('Eu gosto do partido, votaria novamente nesse governante!')
# + id="8VAWpxI3fHIf" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c6623e4d-54a0-401f-9569-7772657f6c83"
marque_negacao('Eu Não gosto do partido e também não votaria novamente nesse governante!')
# + [markdown] id="2_7GZsu8fHIi"
# ## **Criando modelos com Pipelines**
# + [markdown] id="DDGtt4u0fHIj"
# * Pipelines são interessantes para reduzir código e automatizar fluxos
# + id="eQEWzXphfHIj"
from sklearn.pipeline import Pipeline
# + id="hbks3HWdfHIm"
pipeline_simples = Pipeline([
('counts', CountVectorizer()),
('classifier', MultinomialNB())
])
# + [markdown] id="Au6K0aJffHIs"
# * Pipeline que atribui tag de negacoes nas palavras
# + id="zn-x4LvLfHIt"
# BUG FIX: CountVectorizer's ``tokenizer`` must return a list of tokens.
# marque_negacao returns a single string, so the previous lambda made the
# analyzer iterate over *characters* instead of words, silently destroying
# the _NEG features.  Splitting the tagged sentence restores word-level
# tokens (with their _NEG suffixes).
pipeline_negacoes = Pipeline([
  ('counts', CountVectorizer(tokenizer=lambda text: marque_negacao(text).split())),
  ('classifier', MultinomialNB())
])
# + id="hRbBD5HlfHIx" colab={"base_uri": "https://localhost:8080/"} outputId="73b7ef77-8705-449d-a7b9-fe66a99602a7"
pipeline_simples.fit(tweets,classes)
# + id="X5IGLvsefHIy" colab={"base_uri": "https://localhost:8080/"} outputId="b77f4042-340d-4aa1-f981-4475caa16925"
pipeline_simples.steps
# + [markdown] id="LyE8h7lEfHI0"
# * Gera o modelo de negações
# + id="AJnMHmwSfHI1" colab={"base_uri": "https://localhost:8080/"} outputId="cf67ff62-c1ad-48d0-f2d1-bc56370a5cc7"
pipeline_negacoes.fit(tweets,classes)
# + [markdown] id="1js2hfBdfHI6"
# * Etapas do pipeline
# + id="ytZ7h4lXfHI7" colab={"base_uri": "https://localhost:8080/"} outputId="89fd46f0-8e57-47a7-b171-d8330849eb01"
pipeline_negacoes.steps
# + [markdown] id="99PR3Zkxymew"
# Modelo com SVM
# + id="8FIDTiQKyk5l"
pipeline_svm_simples = Pipeline([
('counts', CountVectorizer()),
('classifier', svm.SVC(kernel='linear'))
])
# + id="b3q4BrMey2zh"
# BUG FIX: same issue as pipeline_negacoes — the tokenizer must return a
# list of tokens, not the tagged string, otherwise the vectorizer counts
# individual characters.
pipeline_svm_negacoes = Pipeline([
  ('counts', CountVectorizer(tokenizer=lambda text: marque_negacao(text).split())),
  ('classifier', svm.SVC(kernel='linear'))
])
# + [markdown] id="SGfKFbZRfHI-"
# ## Validando os Modelos com Validação Cruzada
# + [markdown] id="t-E9PpV6fHI_"
# * Fazendo o cross validation do modelo
# + id="4Gk7uMEtfHJA"
# Out-of-fold predictions for every tweet via 10-fold cross-validation.
resultados = cross_val_predict(pipeline_simples, tweets, classes, cv=10)
# + [markdown] id="c8CqLggufHJD"
# * Measure the model's mean accuracy
# + id="y27p3EpxfHJD" colab={"base_uri": "https://localhost:8080/"} outputId="d824d5df-3b60-4f19-f70e-ed5b9304fd00"
metrics.accuracy_score(classes,resultados)
# + [markdown] id="049BZedMfHJG"
# * Model validation metrics
# + id="3DQOMkQZfHJH" colab={"base_uri": "https://localhost:8080/"} outputId="d8a0d980-0b8e-4da2-af36-ba88906f737b"
sentimento=['Positivo','Negativo','Neutro']
# NOTE(review): the third positional argument of classification_report is
# ``labels``; newer scikit-learn versions deprecate passing it positionally —
# prefer labels=sentimento.
print (metrics.classification_report(classes,resultados,sentimento))
# + [markdown] id="vQuKPwm-fHJI"
# * Confusion matrix
# + id="EKXS0gh-fHJI" colab={"base_uri": "https://localhost:8080/"} outputId="b9dbbc1e-6a59-4747-8ece-6fc778d4cfe9"
print (pd.crosstab(classes, resultados, rownames=['Real'], colnames=['Predito'], margins=True))
# + id="3mBacjlezjA1"
def Metricas(modelo, tweets, classes):
    """Report the 10-fold cross-validated accuracy of *modelo*."""
    predicoes = cross_val_predict(modelo, tweets, classes, cv=10)
    acuracia = metrics.accuracy_score(classes, predicoes)
    return 'Acurácia do modelo: {}'.format(acuracia)
# + id="YvChG4un0M_A" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="169ac940-ee41-4c8e-bac1-0dd0b400fb03"
# naive bayes simples
Metricas(pipeline_simples,tweets,classes)
# + id="S9_-UNWP1U4A" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="6f054672-f0fe-41c0-8e92-b14f41073ab9"
# naive bayes com tag de negacoes
Metricas(pipeline_negacoes,tweets,classes)
# + id="_tImcLHZ1VDF" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="657b14ad-0870-4f74-e342-e6c02fd93ab7"
# svm linear simples
Metricas(pipeline_svm_simples,tweets,classes)
# + id="SOPkPKnT1xVm" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5ca67463-8317-4c67-fab1-d12f93b0fb32"
# svm linear com tag de negacoes
Metricas(pipeline_svm_negacoes,tweets,classes)
# + [markdown] id="KhJiRyUmfHJK"
# ## **Modelo com a Tag de Negações**
# + id="pb284ewqfHJM"
resultados = cross_val_predict(pipeline_negacoes, tweets, classes, cv=10)
# + [markdown] id="yqD7zS18fHJO"
# * Medindo a acurácia média do modelo
# + id="Ozi38FIlfHJP" colab={"base_uri": "https://localhost:8080/"} outputId="17262495-290b-460a-f6e0-88a1b80404f3"
metrics.accuracy_score(classes,resultados)
# + id="IFH-DuBofHJU" colab={"base_uri": "https://localhost:8080/"} outputId="7d4b7967-bffa-45dc-f6ab-737903b10e07"
sentimento=['Positivo','Negativo','Neutro']
print (metrics.classification_report(classes,resultados,sentimento))
# + [markdown] id="jtDzIhrhfHJW"
# * Matriz de confusão
# + id="KDYrF9v_fHJW" colab={"base_uri": "https://localhost:8080/"} outputId="adf9f6dc-d7cc-4b48-adde-274dd7aeb22e"
print (pd.crosstab(classes, resultados, rownames=['Real'], colnames=['Predito'], margins=True))
# + [markdown] id="wfLQeW6yfHJZ"
# ## **Avaliando modelo com Bigrams**
# + [markdown] id="q9bd7jK13krf"
# Eu gosto do Brasil -------------> **'eu gosto', 'gosto do' , 'do brasil'**
# + id="tqdMpvcXfHJb" colab={"base_uri": "https://localhost:8080/"} outputId="b2f888a9-ffc9-4134-b37a-d201306a0595"
# Bigrams
vectorizer = CountVectorizer(ngram_range=(2,2))
freq_tweets = vectorizer.fit_transform(tweets)
modelo = MultinomialNB()
modelo.fit(freq_tweets,classes)
# + id="D71t-pPGfHJc"
resultados = cross_val_predict(modelo, freq_tweets, classes, cv=10)
# + id="Eqv_fetJfHJg" colab={"base_uri": "https://localhost:8080/"} outputId="b5a60c8d-b404-4bd6-c469-13617305a2d3"
metrics.accuracy_score(classes,resultados)
# + id="WzoMUniTfHJi" colab={"base_uri": "https://localhost:8080/"} outputId="37934363-2dfe-4065-854a-e3e47a136cff"
sentimento=['Positivo','Negativo','Neutro']
print (metrics.classification_report(classes,resultados,sentimento))
# + [markdown] id="WiC2Ns5-fHJp"
# ## **Considerações Finais**
# + [markdown] id="W_HRElekfHJq"
# * Considere aumentar a quantidade de dados de treino.
#
# * Pela sua simplicidade o Naive Bayes pode ser usado perfeitamente como um algoritmo de Baseline.
#
# * Considere alterar os parâmetros do algoritmo.
# + [markdown] id="q7IEqCXffHJq"
# **www.minerandodados.com.br** - *A maior comunidade de Data Science do Brasil*
| NLP Tweets/notebooks/Live_Analise_Sentimentos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `depiction` - A meta-interpretability toolbox
# ## Why interpretability?
# In the last decade, the application of deep neural networks to long-standing problems has brought a break-through in performance and prediction power. However, high accuracy, deriving from the increased model complexity, often comes at the price of loss of interpretability, i.e., many of these models behave as black-boxes and fail to provide explanations on their predictions. While in certain application fields this issue may play a secondary role, in high risk domains, e.g., health care, it is crucial to build trust in a model and being able to understand its behaviour.
# ## What is interpretability?
# The definition of the verb *interpret* is "to explain or tell the meaning of : present in understandable terms" ([Merriam-Webster 2019](https://www.merriam-webster.com/dictionary/interpret)). Despite the apparent simplicity of this statement, the machine learning research community is struggling to agree upon a formal definition of the concept of interpretability/explainability. In the last years, in the room left by this lack of formalism, many methodologies have been proposed based on different "interpretations" (pun intended) of the above defintion. While the proliferation of this multitude of disparate algorithms has posed challenges on rigorously comparing them, it is nevertheless interesting and useful to apply these techniques to analyze the behaviour of deep learning models.
# ## `depiction`
# The group of Cognitive Health Care and Life Sciences at IBM Research Zürich has opensourced a python toolbox, [`depiction`](https://github.com/IBM/depiction), with the aim of providing a framework to ease the application of explainability methods on custom models, especially for less experienced users. The module provide wrappers for multiple algorithms and is continously updated including the latest algorithms from [AIX360](https://github.com/IBM/AIX360.git). The core concept behind `depiction` is to allow users to seamlessly run state-of-art interpretability methods with minimal requirements in terms of programming skills. Below an example of how `depiction` can be used to analyze a pretrained model.
# ### A simple example - Wrapping a pretrained Keras Model
# Let's assume to have a fancy model for classification of tabular data pretrained in Keras and available at a public url. Explaining its predictions with `depiction` is easy as implementing a lightweight wrapper of `depiction.models.uri.HTTPModel` where its `predict` method is overridden.
# ```python
# from depiction.core import Task, DataType
# from depiction.models.uri import HTTPModel
#
#
# class FancyModel(HTTPModel):
# """A fancy classifier."""
#
#
# def __init__(self,
# filename='fancy_model.h5',
# origin='https://url/to/my/fancy_model.h5',
# cache_dir='/path/to/cache/models',
# *args, **kwargs):
# """Initialize the FancyModel."""
# super().__init__(
# uri=origin,
# task=Task.CLASSIFICATION,
# data_type=DataType.TABULAR,
# cache_dir=cache_dir,
# filename=filename
# )
# self.model = keras.models.load_model(self.model_path)
#
# def predict(self, sample, *args, **kwargs):
# """
# Run the fancy model for inference on a given sample and with the provided
# parameters.
#
# Args:
# sample (object): an input sample for the model.
# args (list): list of arguments.
# kwargs (dict): list of key-value arguments.
#
# Returns:
# a prediction for the model on the given sample.
# """
# return self.model.predict(
# sample,
# batch_size=None, verbose=0,
# steps=None, callbacks=None
# )
# ```
# Once `FancyModel` is implemented, using any of the `depiction.interpreters` available in the library, is as easy as typing:
# ```python
# fancy_model = FancyModel()
# # NOTE: interpreters are implemented inheriting from
# # depiction.interpreters.base.base_interpreter.BaseInterpreter
# # and they share a common interface.
# explanations = interpreter.interpret(example)
# ```
# ### Run an intepreter from AIX360 - Rule-based interpretable method
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ConvergenceWarning)
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', -1)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import neural_network
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_iris
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from depiction.core import Task, DataType
from depiction.models.base import BinarizedClassifier
from depiction.interpreters.aix360.rule_based_model import RuleAIX360
# Here we show as an example how *easy* is to use **Generalized linear rule models** in `depiction` [(Wei D. et al. 2019)](http://proceedings.mlr.press/v97/wei19a/wei19a.pdf).
# #### Get the data
# Let's get some data to play with.
# +
# Create a dataset.
dataset = load_iris()
# Rule-based explainers work only with binary classifiers.
# In this example, we want to explain class 'setosa'
X = pd.DataFrame(dataset.data, columns=dataset.feature_names)
y_one_hot = OneHotEncoder().fit_transform(dataset.target.reshape(-1, 1)).todense()
label = 'setosa'
label_index = np.where(dataset.target_names == label)[0].item()
y = np.array(dataset.target == label_index).astype(np.int).flatten()
# NOTE: for the purpose of this example, we avoid to generate a validation split.
X_train, X_test, y_train, y_test, y_train_one_hot, y_test_one_hot = train_test_split(
X, y, y_one_hot, test_size=0.33, random_state=42, stratify=y
)
# -
# #### Ante-hoc version
# In the following we are using the model as an *ante-hoc global interpretable method*.
# Create explainable model, interpreting the whole dataset
ante_hoc_model = RuleAIX360('glrm_linear', X=X, y=y)
# Explain model
ante_hoc_model.interpret();
# Since the *iris* dataset is a relatively easy one, i.e., it can be solved with a linear model, we can easily double check if the rules learned by the **interpretable model** make sense. In the following, we show the samples from the dataset on a 2D plot where the axes are the *sepal width* and *petal width*. The *setosa* samples are shown in blue, while the others are in red. The blue area denotes the region described by the most important rule inferred by the interpretable model (line 1 of the output).
sns.scatterplot(x='sepal width (cm)', y='petal width (cm)', data=X.iloc[y == 1], color='b')
sns.scatterplot(x='sepal width (cm)', y='petal width (cm)', data=X.iloc[y == 0], color='r')
ax = plt.gca()
_ = ax.axvspan(
xmin=2.70, xmax=ax.get_xlim()[1], ymin=0, ymax=(1.16 - ax.get_ylim()[0])/ax.get_ylim()[1],
facecolor='b', alpha=0.4
)
# 
# #### Post-hoc version
# In the following we are using the model as a *post-hoc global interpretable method*, by analysing a very bad MLP.
# train a dummy model to interpret
classifier = neural_network.MLPClassifier(hidden_layer_sizes=(8,), alpha=0.0001, random_state=84)
_ = classifier.fit(X_train, y_train_one_hot)
print("Accuracy: {}".format(accuracy_score(y_test_one_hot, classifier.predict(X_test))))
# This a pretty bad performance on the IRIS dataset. Let's assume that we are in a real word scenario, and we actually don't know that we are dealing with an MLP, i.e. let's assume we are dealing with a *black-box* model. Can we get a feeling of what is going wrong in the model? Let's use the Generalised Linear Rule Model in a post-hoc fashion!
# First, binarize the model to be compatible with the rule-based interpreter
model = BinarizedClassifier(
classifier, data_type=DataType.TABULAR,
label_index=label_index
)
# Create explainable model, interpreting the model
post_hoc_model = RuleAIX360(
'glrm_logistic', X=X_test,
# binarize the model to be compatible with the rule-based interpreter
model=model
)
# Explain model
post_hoc_model.interpret();
# Okay, our interpretability method unveiled the underlying classification rules of the MLP. Let's visualize them.
# Here we show the predictions of the MLP on the test samples (left) and the ground truth (right) on a 2D plot with *petal length* and *sepal width* on the axes. We use the same color coding as before: *setosa* samples are blue, the others are red, and the area described by the most important rule (line 1 of the rule list) is in blue.
# +
# Plot MLP predictions
ax = plt.subplot(1,2,1)
y_hat = model.predict(X_test)
sns.scatterplot(x="petal length (cm)", y="sepal width (cm)", data=X_test.iloc[y_hat == 1], color='b')
sns.scatterplot(x="petal length (cm)", y="sepal width (cm)", data=X_test.iloc[y_hat == 0], color='r')
_ = ax.axvspan(
xmin=ax.get_xlim()[0], xmax=5.23, ymin=(2.7 - ax.get_ylim()[0])/ax.get_ylim()[1], ymax=1.0,
facecolor='b', alpha=0.4
)
ax.set_title('Predictions for setosa')
# Plot ground truth
ax = plt.subplot(1,2,2)
sns.scatterplot(x="petal length (cm)", y="sepal width (cm)", data=X_test.iloc[y_test == 1], color='b')
sns.scatterplot(x="petal length (cm)", y="sepal width (cm)", data=X_test.iloc[y_test == 0], color='r')
_ = ax.axvspan(
xmin=ax.get_xlim()[0], xmax=5.23, ymin=(2.7 - ax.get_ylim()[0])/ax.get_ylim()[1], ymax=1.0,
facecolor='b', alpha=0.4
)
ax.set_title('Ground truth for setosa')
plt.subplots_adjust(wspace=0.5)
# -
# As we can see there are quite some samples that are misclassified. The interpretable method helps us see why: apparently the MLP is using the *petal length* in the "wrong way", by thresholding it incorrectly. This is evident from the misclassification of the points with *petal length* values between 3 and 5. We'd call this a successful debugging! *Hurray*!
# ## Want to know more?
# 
# This is the first post in a series from the Cognitive Health Care and Life Sciences at IBM Research Zürich and was authored by [<NAME>](http://researcher.watson.ibm.com/researcher/view.php?person=zurich-UYE) ([github](https://github.com/phineasng), [twitter](https://twitter.com/phineas_zu)), [<NAME>](https://researcher.watson.ibm.com/researcher/view.php?person=zurich-DOW) ([github](https://github.com/C-nit/), [twitter](https://twitter.com/tse_nit)), and [<NAME>](http://researcher.watson.ibm.com/researcher/view.php?person=zurich-TTE) ([github](https://github.com/drugilsberg), [twitter](https://twitter.com/drugilsberg)). If you found this blog post interesting and you want to know more about `depiction`. Just take a look at the repo, the examples and the workshop in our [repo](https://github.com/IBM/depiction).
| blogs/ibm_developer/01_depiction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Elfskot PyApi
# ## Implementation of PyApi
# The following code is the implementation of the Elfskot PyApi. It is advised to store the code in a file `elfskotapi.py`, which can then be included in your Python projects. The usage of the PyApi is described in the remainder of this document.
# +
# Required packages:
# pip3 install requests pandas
import requests
import json
from enum import Enum
import pandas as pd
import math
# Small functional-style sequence helpers used by the API client below.
def unpack(s):
    """Return *s* as a list.

    BUG FIX: the original ``type(s) != 'list'`` compared a type object to
    the *string* 'list', which is always true, so lists were always copied.
    An actual list is now returned unchanged.
    """
    return s if isinstance(s, list) else list(s)
def head(s):
    """First element of *s*, or [] when *s* is empty."""
    return s[0] if len(s) > 0 else []
def tail(s):
    """Everything but the first element."""
    return s[1:]
def reverse(s):
    """Reversed copy of *s*."""
    return s[::-1]
def last(s):
    """One-element slice holding the final element ([] when empty)."""
    return s[-1:]
def init(s):
    """Everything but the last element."""
    return s[:len(s)-1]
def take(s, n):
    """First *n* elements."""
    return s[:n]
def drop(s, n):
    """Everything after the first *n* elements."""
    return s[n:]
class TextType(Enum):
    """Kinds of multilingual text attached to an Elfskot entity."""
    Description = 0
    ExtendedDescription = 1
    MoreInfo = 2
class ElfskotApi():
    """HTTP client for the Elfskot REST API.

    Handles authentication (bearer token retrieval) and exposes thin
    CRUD-style helpers that delegate read queries to the ``Query`` builder.
    """

    base_address = 'https://api.elfskot.cloud/api/2/'

    def __init__(self, application_id, secret):
        """Store the credentials and fetch an access token immediately."""
        self.application_id = application_id
        self.secret = secret
        self.get_token()

    def get_token(self):
        """Exchange the appId/secret pair for a bearer token and cache it."""
        payload = { 'clientId': self.application_id, 'secret': self.secret}
        result = requests.post(self.get_url('auth/elfskotconnectlogin'), json=payload)
        self.check_result(result)
        self.token = json.loads(result.text)['accessToken']

    def check_result(self, result):
        """Raise ValueError unless the response status is exactly 200."""
        # NOTE(review): 201/204 responses would also raise here — confirm
        # the API always answers 200 on success.
        if result.status_code != 200:
            raise ValueError('Error: API returned status code {}'.format(result.status_code))
        return True

    def get_auth_header(self):
        """Authorization header carrying the cached bearer token."""
        return {'Authorization': 'bearer {}'.format(self.token)}

    def get_url(self, endpoint):
        """Absolute URL for a relative API endpoint."""
        return self.base_address + endpoint

    def http_return_if_valid(self, result):
        """Validate the response and decode its JSON body."""
        self.check_result(result)
        return json.loads(result.text)

    def http_get(self, endpoint):
        # Debug trace of every request path — presumably intentional;
        # remove if quieter output is wanted.
        print(endpoint)
        return self.http_return_if_valid(
            requests.get(self.get_url(endpoint), headers=self.get_auth_header())
        )

    def http_post(self, endpoint, o):
        """POST *o* as JSON and return the decoded response."""
        return self.http_return_if_valid(
            requests.post(self.get_url(endpoint), json=o, headers=self.get_auth_header())
        )

    def http_put(self, endpoint, o):
        """PUT *o* as JSON and return the decoded response."""
        return self.http_return_if_valid(
            requests.put(self.get_url(endpoint), json=o, headers=self.get_auth_header())
        )

    def http_delete(self, endpoint, key):
        """DELETE the object identified by *key* under *endpoint*."""
        result = requests.delete(self.get_url(endpoint) + '/{}'.format(key), headers=self.get_auth_header())
        # Reuse the shared status check instead of duplicating it inline.
        self.check_result(result)

    def query(self, endpoint): return Query(endpoint, self.http_get)
    def all(self, endpoint): return self.query(endpoint)
    def find(self, endpoint, p, v): return self.query(endpoint).filter(p, v)
    def get(self, endpoint, k): return self.find(endpoint, 'id', k)
    def new(self, endpoint, o): return self.http_post(endpoint, o)
    def update(self, endpoint, o): return self.http_put(endpoint, o)
    def delete(self, endpoint, k): self.http_delete(endpoint, k)

    def help(self, endpoint):
        """Print the column names of the first object in *endpoint*.

        BUG FIX: this used to call the non-existent ``self.first`` and
        always raised AttributeError; it now fetches the first object via
        the Query builder.
        """
        print('Model for {}:\r\n{}'
            .format(endpoint, list(self.all(endpoint).take(1).first().keys())))
class Query():
    """Lazy, chainable URL builder for Elfskot API list endpoints.

    Nothing is fetched until the query is iterated (or ``list``/``df``/
    ``first`` is called); the injected ``http`` callable performs the
    actual GET request with the composed relative URL.
    """

    def __init__(self, endpoint, http):
        self.http = http        # callable taking a relative URL, returning a list
        self.endpoint = endpoint
        self.parameters = {}    # query-string parameters accumulated so far
        self.data = []          # fetched results (lazily populated)
        self.index = 0          # cursor for the iterator protocol

    def raise_(self, t): raise ValueError(t)

    def skip(self, n):
        """Skip the first *n* results (may only be set once)."""
        if 'skip' in self.parameters: self.raise_('Skip already set.')
        self.parameters['skip'] = n
        return self

    def take(self, n):
        """Limit the query to *n* results (may only be set once)."""
        if 'limit' in self.parameters: self.raise_('Take already called.')
        self.parameters['limit'] = n
        return self

    # NOTE: only a single include is supported for now; calling this again
    # overwrites the previous value.
    def include(self, name):
        self.parameters['include'] = name
        return self

    def filter(self, property, value):
        """Filter on *property* equal to *value*."""
        self.parameters[property] = value
        return self

    def sort(self, property, descending = False):
        """Order the results by *property*.

        BUG FIX: the *descending* flag used to be accepted but silently
        ignored; it is now honoured (equivalent to chaining .descending()).
        """
        self.parameters['orderby'] = property
        if descending:
            self.parameters['descending'] = True
        return self

    def descending(self):
        """Order the results in descending order."""
        self.parameters['descending'] = True
        return self

    def url_qry_params(self):
        """Relative URL with the accumulated query string (lower-cased)."""
        return '{}?{}'.format(self.endpoint, '&'.join(['{}={}'.format(k,v)
            for k,v in dict(self.parameters).items()])).lower()

    def __next__(self):
        # First access triggers the (single) HTTP request.
        if self.data == []: self.data = self.http(self.url_qry_params())
        try: result = self.data[self.index]
        except IndexError: raise StopIteration
        self.index += 1
        return result

    def __iter__(self):
        return self

    def list(self): return list(self)
    def df(self): return pd.DataFrame(self.list())
    def first(self): return head(self.list())
# -
# ## PyApi reference
# ### ElfskotApi
#
# The `ElfskotApi` object is a HTTP client which also handles the authorization token. It allows the `GET`, `POST`, `PUT`, and `DELETE` HTTP methods. The `ElfskotApi` is instantiated with a `appId` and `secret` which are found in the integration section of the EMS. The following example shows how to instantiate the object:
#
# ```python
# db = ElfskotApi('appId', 'secret')
# ```
#
# The following methods are available for the `ElfskotApi` object:
#
# |Expression|Description|Example|
# |-|-|-|
# |`query(endpoint)`|Return the `Query` object for the endpoint.|`db.query('features')`|
# |`all(endpoint)`|Returns the `Query` object for the endpoint.|`db.all('features')`|
# |`find(endpoint, property, value)`|Finds all objects with a property equal to the value.|`db.find('features','name','tire')`|
# |`get(endpoint, id)`|Finds a single object, based on an id.|`db.get('features', ...)`|
# |`new(endpoint, object)`|Creates a new object.|`db.new('features', feature)`|
# |`update(endpoint, object)`|Updates an object, it matches it by id.|`db.update('features',feature)`|
# |`delete(endpoint, id)`|Deletes an object, based on the id.|`db.delete('features', ...)`|
# |`help(endpoint`)|Retrieves the first object in the endpoint, and displays all column names.|`db.help('features')`|
#
# ### Query
#
# The `Query` class allows you to compose HTTP requests for our API. It supports lazy evaluation, and the data is only requested when the data is enumerated, or accessed. The `Query` object helps with composing the url for the request. The `Query` object will allow you to use the query parameters that are supported by our API. The query parameters that are supported are: `skip`, `limit`, `orderby`, `descending`, `include`, and `filter`.
#
# The following methods are available in the `Query` object:
#
# |Expression|Description|Example|
# |-|-|-|
# |`skip(int)`|Skips the first $n$ results.|`db.all('features').skip(10)`|
# |`take(int)`|Limit the query to $n$ results.|`db.all('features').take(10)`|
# |`include(string`|Includes the objects in a list of objects, for example, feature texts.|`db.all('features').include('texts')`|
# |`filter(string,any)`|Filter on a property in the model.|`db.all('features').filter('name','S6000')`|
# |`sort(string)`|Order the results on a property.|`db.all('features').sort('name')`|
# |`descending()`|Order the results in descending order.|`db.all('features').sort('name').descending()`|
# |`list()`|Returns the results as a list, this will evaluate the query.|`db.all('features').list()`|
# |`df()`|Returns the results as a `DataFrame` (requires `pandas`), this will evaluate the query.|`db.all('features').df()`|
# |`first()`|Returns the first element in the results, this will evaluate the query.|`db.all('features').first()`|
#
# **Note**: If you want to retrieve a single object, use the `take(1)` expression with `first()` to evaluate the query. This ensures that the API is only processing a single record, which improves the speed. For example: `db.all('features').sort('name').take(1).first()`.
# # Usage demonstration
# ## Getting started
# First instantiate a new `ElfskotApi` object with the `appId` and `secret` which are found in the integrations tab in your EMS.
db = ElfskotApi('e8d964e4-29aa-4ba4-beca-fa21b690dbf0', 'ws3voxlq')
# While initializing the object, an authorization token is requested from the API. When the object is initialized, it is now possible to query our REST API.
#
# As an example, we will request a sorted list (by name) of features. Also, the texts of the feature should be included in the query. Finally, we take the first feature that is found and print it.
db.all('features').sort('name').include('texts').take(1).first()
# In the same way, it is possible to request all the categories from the API, and display them by id and name. Do note that the name is a multilingual field, therefore it returns a list with the names in each language. It is also required to include the texts, because this is a different object.
list(map(lambda c: {c['texts'][0]['value']: c['id']}, db.all('categories').include('texts')))
| Programming/PyAPI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''PythonData'': conda)'
# language: python
# name: python361064bitpythondataconda13c33e7d00474160b8916212f83fb86d
# ---
import requests
import pandas as pd
import io
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Fetch the open-data COVID-19 case CSV and load it into a DataFrame.
csv = requests.get("https://opendata.arcgis.com/datasets/37abda537d17458bae6677b8ab75fcb9_0.csv").content
main_df = pd.read_csv(io.StringIO(csv.decode('utf-8')))
# Keep only the hospitalized cases.
filt = main_df["Hospitalized"] == "YES"
df = main_df[filt].copy()  # .copy() avoids chained-assignment warnings below
# "Case1" holds a date-time string; keep just the leading date token.
df.Case1 = df.loc[:, "Case1"].str.split()
df.Case1 = df.Case1.map(lambda x: x[0])
df.rename(columns={"Case1": "CaseDate"}, inplace=True)
# Daily counts of hospitalized cases.
hospitalized_df = df.groupby("CaseDate").count().reset_index()[['CaseDate', 'Hospitalized']]
data = df.groupby("CaseDate").count()['Hospitalized']
# BUG FIX: the original wrote pd.DataFrame[{""}] (subscripting the class with a
# malformed set literal) and then plotted nonexistent 'x'/'y' columns.  Build a
# proper one-column DataFrame and plot that column.
data = pd.DataFrame({"Hospitalized": data})
plt.hist(data["Hospitalized"], alpha=0.5)
data
| Sandbox/Histograms, KDE, and densities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Building Your First Video On Demand Recommender
#
# This notebook will walk you through the steps to build a Domain dataset group and a
# recommender that returns movie recommendations based on data collected from the movielens data set. The goal is to recommend movies that are relevant based on a particular user.
#
# The data comes from the [MovieLens project](https://grouplens.org/datasets/movielens/). Follow the link to learn more about the data and potential uses.
#
# # How to Use the Notebook
#
# The code is broken up into cells like the one below. There's a triangular Run button at the top of this page that you can click to execute each cell and move onto the next, or you can press `Shift` + `Enter` while in the cell to execute it and move onto the next one.
#
# As a cell is executing, an `*` appears beside it while it runs; once it finishes, the marker updates to a number indicating the order in which that cell completed executing.
#
# Simply follow the instructions below and execute the cells to get started with Amazon Personalize using case optimized recommenders.
# ## Imports
# Python ships with a broad collection of libraries and we need to import those as well as the ones installed to help us like [boto3](https://aws.amazon.com/sdk-for-python/) (AWS SDK for python) and [Pandas](https://pandas.pydata.org/)/[Numpy](https://numpy.org/) which are core data science tools.
# Imports
import boto3
import json
import numpy as np
import pandas as pd
import time
import datetime
# Next you will want to validate that your environment can communicate successfully with Amazon Personalize, the lines below do just that.
# Configure the SDK to Personalize:
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
# ## Configure the data
# Data is imported into Amazon Personalize through Amazon S3, below we will specify a bucket that you have created within AWS for the purposes of this exercise.
# Below you will update the `bucket` variable to instead be set to the value that you created earlier in the CloudFormation steps, this should be in a text file from your earlier work. the `filename` does not need to be changed.
# ### Specify a Bucket and Data Output Location
# Update the `bucket` name to a unique name.
bucket_name = "CHANGE_BUCKET_NAME" # replace with the name of your S3 bucket
filename = "movie-lens-100k.csv"
# ## Download, Prepare, and Upload Training Data
# At present you do not have the MovieLens data loaded locally yet for examination, execute the lines below to download the latest copy and to examine it quickly.
#
# ### Download and Explore the Dataset
# !wget -N https://files.grouplens.org/datasets/movielens/ml-latest-small.zip
# !unzip -o ml-latest-small.zip
# !ls ml-latest-small
# !pygmentize ml-latest-small/README.txt
interactions_data = pd.read_csv('./ml-latest-small/ratings.csv')
pd.set_option('display.max_rows', 5)
interactions_data
interactions_data.info()
# ## Prepare the Data
#
# ### Interactions Data
# As you can see the data contains a UserID, ItemID, Rating, and Timestamp.
#
# We are now going to remove the items with low rankings, and remove the Rating column before we build our model.
#
# We are also adding the column EVENT_TYPE to all interactions.
interactions_data = interactions_data[interactions_data['rating'] > 3] # Keep only movies rated higher than 3 out of 5.
interactions_data = interactions_data[['userId', 'movieId', 'timestamp']]
interactions_data.rename(columns = {'userId':'USER_ID', 'movieId':'ITEM_ID',
'timestamp':'TIMESTAMP'}, inplace = True)
interactions_data['EVENT_TYPE']='watch' #Adding an EVENT_TYPE column that has the event type "watched" for all movies
interactions_data.head()
# ### Item Metadata
#
# Open the item data file and take a look at the first rows.
items_data = pd.read_csv('./ml-latest-small/movies.csv')
items_data.head(5)
items_data.info()
items_data['year'] = items_data['title'].str.extract('.*\((.*)\).*',expand = False)
items_data.head(5)
# Selecting a modern date as the creation timestamp for this example because the actual creation timestamp is unknown. In your use-case, please provide the appropriate creation timestamp.
ts= datetime.datetime(2022, 1, 1, 0, 0).strftime('%s')
print(ts)
items_data["CREATION_TIMESTAMP"] = ts
items_data
# +
# removing the title
items_data.drop(columns="title", inplace = True)
# renaming the columns to match schema
items_data.rename(columns = { 'movieId':'ITEM_ID', 'genres':'GENRES',
'year':'YEAR'}, inplace = True)
items_data
# -
# # User Metadata
#
# The dataset does not have any user metadata, so we will create a fake metadata field.
# +
# get user ids from the interaction dataset
user_ids = interactions_data['USER_ID'].unique()
user_data = pd.DataFrame()
user_data["USER_ID"]=user_ids
user_data
# -
# ## Adding Metadata
# The current dataset does not contain additional user information. For this example, we'll randomly assign a gender to the users with equal probability of male and female.
possible_genders = ['female', 'male']
random = np.random.choice(possible_genders, len(user_data.index), p=[0.5, 0.5])
user_data["GENDER"] = random
user_data
# ## Configure an S3 bucket and an IAM role
#
# So far, we have downloaded, manipulated, and saved the data onto the Amazon EBS instance attached to instance running this Jupyter notebook. However, Amazon Personalize will need an S3 bucket to act as the source of your data, as well as IAM roles for accessing that bucket. Let's set all of that up.
#
# The Amazon S3 bucket needs to be in the same region as the Amazon Personalize resources we have been creating so far. Simply define the region as a string below.
region = "eu-west-1" #Specify the region where your bucket will be domiciled
# +
s3 = boto3.client('s3')
account_id = boto3.client('sts').get_caller_identity().get('Account')
bucket_name = account_id + "-" + region + "-" + "personalizemanagedretailers"
print('bucket_name:', bucket_name)
try:
if region == "us-east-1":
s3.create_bucket(Bucket=bucket_name)
else:
s3.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': region}
)
except:
print("Bucket already exists. Using bucket", bucket_name)
# -
# ### Upload data to S3
# Now that your Amazon S3 bucket has been created, upload the CSV file of our user-item-interaction data.
# +
interactions_filename = "interactions.csv"
interactions_data.to_csv(interactions_filename, index=False)
boto3.Session().resource('s3').Bucket(bucket_name).Object(interactions_filename).upload_file(interactions_filename)
items_filename = "items.csv"
items_data.to_csv(items_filename, index=False)
boto3.Session().resource('s3').Bucket(bucket_name).Object(items_filename).upload_file(items_filename)
user_filename = "users.csv"
user_data.to_csv(user_filename, index=False)
boto3.Session().resource('s3').Bucket(bucket_name).Object(user_filename).upload_file(user_filename)
# -
# ## Set the S3 bucket policy
# Amazon Personalize needs to be able to read the contents of your S3 bucket. So add a bucket policy which allows that.
#
# Note: Make sure the role you are using to run the code in this notebook has the necessary permissions to modify the S3 bucket policy.
# +
s3 = boto3.client("s3")
policy = {
"Version": "2012-10-17",
"Id": "PersonalizeS3BucketAccessPolicy",
"Statement": [
{
"Sid": "PersonalizeS3BucketAccessPolicy",
"Effect": "Allow",
"Principal": {
"Service": "personalize.amazonaws.com"
},
"Action": [
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::{}".format(bucket_name),
"arn:aws:s3:::{}/*".format(bucket_name)
]
}
]
}
s3.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
# -
# ## Create and Wait for Dataset Group
# The largest grouping in Personalize is a Dataset Group, this will isolate your data, event trackers, solutions, and campaigns. Grouping things together that share a common collection of data. Feel free to alter the name below if you'd like.
#
# ### Create Dataset Group
# +
response = personalize.create_dataset_group(
name='personalize-video-on-demand',
domain='VIDEO_ON_DEMAND'
)
dataset_group_arn = response['datasetGroupArn']
print(json.dumps(response, indent=2))
# -
# Wait for Dataset Group to Have ACTIVE Status
# Before we can use the Dataset Group in any items below it must be active, execute the cell below and wait for it to show active.
# Poll the dataset group once a minute until it is ACTIVE (or creation fails),
# giving up after the 3-hour upper bound below.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_dataset_group_response = personalize.describe_dataset_group(
        datasetGroupArn = dataset_group_arn
    )
    status = describe_dataset_group_response["datasetGroup"]["status"]
    print("DatasetGroup: {}".format(status))
    # CREATE FAILED is terminal too -- stop polling on either outcome.
    if status == "ACTIVE" or status == "CREATE FAILED":
        break
    time.sleep(60)
# ## Create Interactions Schema
# A core component of how Personalize understands your data comes from the Schema that is defined below. This configuration tells the service how to digest the data provided via your CSV file. Note the columns and types align to what was in the file you created above.
# +
schema = {
"type": "record",
"name": "Interactions",
"namespace": "com.amazonaws.personalize.schema",
"fields": [
{
"name": "USER_ID",
"type": "string"
},
{
"name": "ITEM_ID",
"type": "string"
},
{
"name": "EVENT_TYPE",
"type": "string"
},
{
"name": "TIMESTAMP",
"type": "long"
}
],
"version": "1.0"
}
create_interactions_schema_response = personalize.create_schema(
name='personalize-demo-interactions-schema',
schema=json.dumps(schema),
domain='VIDEO_ON_DEMAND'
)
interactions_schema_arn = create_interactions_schema_response['schemaArn']
print(json.dumps(create_interactions_schema_response, indent=2))
# -
# # Create Items (movies) schema
# +
schema = {
"type": "record",
"name": "Items",
"namespace": "com.amazonaws.personalize.schema",
"fields": [
{
"name": "ITEM_ID",
"type": "string"
},
{
"name": "GENRES",
"type": [
"string"
],
"categorical": True
},
{
"name": "CREATION_TIMESTAMP",
"type": "long"
}
],
"version": "1.0"
}
create_items_schema_response = personalize.create_schema(
name='personalize-demo-items-schema',
schema=json.dumps(schema),
domain='VIDEO_ON_DEMAND'
)
items_schema_arn = create_items_schema_response['schemaArn']
print(json.dumps(create_items_schema_response, indent=2))
# -
# # Create Users schema
# +
schema = {
"type": "record",
"name": "Users",
"namespace": "com.amazonaws.personalize.schema",
"fields": [
{
"name": "USER_ID",
"type": "string"
},
{
"name": "GENDER",
"type": "string",
"categorical": True
}
],
"version": "1.0"
}
create_users_schema_response = personalize.create_schema(
name='personalize-demo-users-schema',
schema=json.dumps(schema),
domain='VIDEO_ON_DEMAND'
)
users_schema_arn = create_users_schema_response['schemaArn']
print(json.dumps(create_users_schema_response, indent=2))
# -
# ## Create Datasets
# After the group, the next thing to create is the actual datasets.
# ### Create Interactions Dataset
# +
dataset_type = "INTERACTIONS"
create_dataset_response = personalize.create_dataset(
name = "personalize-demo-interactions",
datasetType = dataset_type,
datasetGroupArn = dataset_group_arn,
schemaArn = interactions_schema_arn
)
interactions_dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))
# -
# ### Create Items Dataset
# +
dataset_type = "ITEMS"
create_dataset_response = personalize.create_dataset(
name = "personalize-demo-items",
datasetType = dataset_type,
datasetGroupArn = dataset_group_arn,
schemaArn = items_schema_arn
)
items_dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))
# -
# ### Create Users Dataset
# +
dataset_type = "USERS"
create_dataset_response = personalize.create_dataset(
name = "personalize-demo-users",
datasetType = dataset_type,
datasetGroupArn = dataset_group_arn,
schemaArn = users_schema_arn
)
users_dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))
# -
# ## Create Personalize Role
# Also Amazon Personalize needs the ability to assume Roles in AWS in order to have the permissions to execute certain tasks, the lines below grant that.
#
# Note: Make sure the role you are using to run the code in this notebook has the necessary permissions to create a role.
# +
iam = boto3.client("iam")
role_name = "PersonalizeRoleDemoRecommender"
assume_role_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "personalize.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
create_role_response = iam.create_role(
RoleName = role_name,
AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
)
# AmazonPersonalizeFullAccess provides access to any S3 bucket with a name that includes "personalize" or "Personalize"
# if you would like to use a bucket with a different name, please consider creating and attaching a new policy
# that provides read access to your bucket or attaching the AmazonS3ReadOnlyAccess policy to the role
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess"
iam.attach_role_policy(
RoleName = role_name,
PolicyArn = policy_arn
)
# Now add S3 support
iam.attach_role_policy(
PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess',
RoleName=role_name
)
time.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate
role_arn = create_role_response["Role"]["Arn"]
print(role_arn)
# -
# ## Import the data
# Earlier you created the DatasetGroup and Dataset to house your information, now you will execute an import job that will load the data from S3 into Amazon Personalize for usage building your model.
# ### Create Interactions Dataset Import Job
# +
create_interactions_dataset_import_job_response = personalize.create_dataset_import_job(
jobName = "personalize-demo-import-interactions",
datasetArn = interactions_dataset_arn,
dataSource = {
"dataLocation": "s3://{}/{}".format(bucket_name, interactions_filename)
},
roleArn = role_arn
)
dataset_interactions_import_job_arn = create_interactions_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_interactions_dataset_import_job_response, indent=2))
# -
# ### Create Items Dataset Import Job
# +
create_items_dataset_import_job_response = personalize.create_dataset_import_job(
jobName = "personalize-demo-import-items",
datasetArn = items_dataset_arn,
dataSource = {
"dataLocation": "s3://{}/{}".format(bucket_name, items_filename)
},
roleArn = role_arn
)
dataset_items_import_job_arn = create_items_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_items_dataset_import_job_response, indent=2))
# -
# ### Create Users Dataset Import Job
# +
create_users_dataset_import_job_response = personalize.create_dataset_import_job(
jobName = "personalize-demo-import-users",
datasetArn = users_dataset_arn,
dataSource = {
"dataLocation": "s3://{}/{}".format(bucket_name, user_filename)
},
roleArn = role_arn
)
dataset_users_import_job_arn = create_users_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_users_dataset_import_job_response, indent=2))
# -
# Wait for Dataset Import Job to Have ACTIVE Status
# It can take a while before the import job completes, please wait until you see that it is active below.
# +
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
describe_dataset_import_job_response = personalize.describe_dataset_import_job(
datasetImportJobArn = dataset_interactions_import_job_arn
)
status = describe_dataset_import_job_response["datasetImportJob"]['status']
print("DatasetImportJob: {}".format(status))
if status == "ACTIVE" or status == "CREATE FAILED":
break
time.sleep(60)
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
describe_dataset_import_job_response = personalize.describe_dataset_import_job(
datasetImportJobArn = dataset_items_import_job_arn
)
status = describe_dataset_import_job_response["datasetImportJob"]['status']
print("DatasetImportJob: {}".format(status))
if status == "ACTIVE" or status == "CREATE FAILED":
break
time.sleep(60)
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
describe_dataset_import_job_response = personalize.describe_dataset_import_job(
datasetImportJobArn = dataset_users_import_job_arn
)
status = describe_dataset_import_job_response["datasetImportJob"]['status']
print("DatasetImportJob: {}".format(status))
if status == "ACTIVE" or status == "CREATE FAILED":
break
time.sleep(60)
# -
# ## Choose a recommender use cases
#
# Each domain has different use cases. When you create a recommender you create it for a specific use case, and each use case has different requirements for getting recommendations.
#
available_recipes = personalize.list_recipes(domain='VIDEO_ON_DEMAND') # See a list of recommenders for the domain.
print (available_recipes["recipes"])
# We are going to create a recommender of the type "More like X". This type of recommender offers recommendations for videos that are similar to a video a user watched. With this use case, Amazon Personalize automatically filters videos the user watched based on the userId specified in the `get_recommendations` call. For better performance, record Click events in addition to the required Watch events.
create_recommender_response = personalize.create_recommender(
name = 'because_you_watched_x',
recipeArn = 'arn:aws:personalize:::recipe/aws-vod-more-like-x',
datasetGroupArn = dataset_group_arn
)
recommender_you_watched_x_arn = create_recommender_response["recommenderArn"]
print (json.dumps(create_recommender_response))
# We are going to create a second recommender of the type "Top picks for you". This type of recommender offers personalized streaming content recommendations for a user that you specify. With this use case, Amazon Personalize automatically filters videos the user watched based on the userId that you specify and `Watch` events.
#
# [More use cases per domain](https://docs.aws.amazon.com/personalize/latest/dg/domain-use-cases.html)
create_recommender_response = personalize.create_recommender(
name = 'top_picks_for_you',
recipeArn = 'arn:aws:personalize:::recipe/aws-vod-top-picks',
datasetGroupArn = dataset_group_arn
)
recommender_top_picks_arn = create_recommender_response["recommenderArn"]
print (json.dumps(create_recommender_response))
# We wait until the recommenders have finished creating and have status `ACTIVE`, checking their status periodically.
# +
max_time = time.time() + 10*60*60 # 10 hours
while time.time() < max_time:
version_response = personalize.describe_recommender(
recommenderArn = recommender_you_watched_x_arn
)
status = version_response["recommender"]["status"]
if status == "ACTIVE":
print("Build succeeded for {}".format(recommender_you_watched_x_arn))
elif status == "CREATE FAILED":
print("Build failed for {}".format(recommender_you_watched_x_arn))
if status == "ACTIVE":
break
else:
print("The solution build is still in progress")
time.sleep(60)
while time.time() < max_time:
version_response = personalize.describe_recommender(
recommenderArn = recommender_top_picks_arn
)
status = version_response["recommender"]["status"]
if status == "ACTIVE":
print("Build succeeded for {}".format(recommender_top_picks_arn))
elif status == "CREATE FAILED":
print("Build failed for {}".format(recommender_top_picks_arn))
if status == "ACTIVE":
break
else:
print("The solution build is still in progress")
time.sleep(60)
# -
# # Getting recommendations with a recommender
# Now that the recommenders have been trained, lets have a look at the recommendations we can get for our users!
# reading the original data in order to have a dataframe that has both movie_ids
# and the corresponding titles to make out recommendations easier to read.
items_df = pd.read_csv('./ml-latest-small/movies.csv')
items_df.sample(10)
def get_movie_by_id(movie_id, movie_df):
    """
    Look up a movie title by id.

    Parameters
    ----------
    movie_id : str or int
        Movie id; Personalize returns item ids as strings, so it is
        converted to int before the lookup.
    movie_df : pandas.DataFrame
        DataFrame with at least 'movieId' and 'title' columns.

    Returns
    -------
    str
        The movie title, or "Error obtaining title" when the id is
        malformed or not present in the DataFrame.
    """
    try:
        return movie_df.loc[movie_df["movieId"] == int(movie_id)]['title'].values[0]
    except (ValueError, TypeError, KeyError, IndexError):
        # ValueError/TypeError: id not convertible to int;
        # IndexError: no matching row; KeyError: missing column.
        # (Narrowed from the original bare except, which hid real bugs.)
        print(movie_id)
        return "Error obtaining title"
# ### Let us get some 'Because you Watched X' recommendations:
# +
# First pick a user
test_user_id = "1"
# Select a random item
test_item_id = "59315" #Iron Man 59315
# Get recommendations for the user for this item
get_recommendations_response = personalize_runtime.get_recommendations(
recommenderArn = recommender_you_watched_x_arn,
userId = test_user_id,
itemId = test_item_id,
numResults = 20
)
# Build a new dataframe for the recommendations
item_list = get_recommendations_response['itemList']
recommendation_list = []
for item in item_list:
movie = get_movie_by_id(item['itemId'], items_df)
recommendation_list.append(movie)
user_recommendations_df = pd.DataFrame(recommendation_list, columns = [get_movie_by_id(test_item_id, items_df)])
pd.options.display.max_rows =20
display(user_recommendations_df)
# -
# ### Get recommendations from the recommender returning "Top picks for you":
# Adding the user's metadata to our sample user, you can use this type of metadata to get insights on your users.
# +
users_data_df = pd.read_csv('./users.csv')
def get_gender_by_id(user_id, user_df):
    """
    Look up a user's gender by id.

    Parameters
    ----------
    user_id : str or int
        User id; converted to int before the lookup.
    user_df : pandas.DataFrame
        DataFrame with at least 'USER_ID' and 'GENDER' columns.

    Returns
    -------
    str
        The user's gender, or "Error obtaining gender" when the id is
        malformed or not present in the DataFrame.
    """
    # BUG FIX: the original returned unconditionally before its try/except,
    # leaving the error handling unreachable (and its message was a
    # copy-paste of "Error obtaining title").
    try:
        return user_df.loc[user_df["USER_ID"] == int(user_id)]['GENDER'].values[0]
    except (ValueError, TypeError, KeyError, IndexError):
        print(user_id)
        return "Error obtaining gender"
# +
# First pick a user
test_user_id = "111" # samples users: 55, 75, 76, 111
# Get recommendations for the user
get_recommendations_response = personalize_runtime.get_recommendations(
recommenderArn = recommender_top_picks_arn,
userId = test_user_id,
numResults = 20
)
# Build a new dataframe for the recommendations
item_list = get_recommendations_response['itemList']
recommendation_list = []
for item in item_list:
movie = get_movie_by_id(item['itemId'], items_df)
recommendation_list.append(movie)
column_name = test_user_id+" ("+get_gender_by_id(test_user_id, users_data_df)+")"
user_recommendations_df = pd.DataFrame(recommendation_list, columns = [column_name])
pd.options.display.max_rows =20
display(user_recommendations_df)
# -
# ## Review
# Using the code above you have successfully trained a deep learning model to generate movie recommendations based on prior user behavior. You have created two recommenders for two foundational use cases.
# Going forward, you can adapt this code to create other recommenders.
# ## Notes for the Next Notebook:
# There are a few values you will need for the next notebook, execute the cell below to store them so they can be used in the `Clean_Up_Resources.ipynb` notebook.
#
# This will overwrite any data stored for those variables and set them to the values specified in this notebook.
# store for cleanup
# %store dataset_group_arn
# %store role_name
# %store interactions_schema_arn
# %store items_schema_arn
# %store users_schema_arn
# %store region
# If you have run the `Building_Your_First_Recommender_Ecommerce.ipynb` notebook, please make sure you re-run the previous step in the `Building_Your_First_Recommender_Ecommerce.ipynb` notebook and re-run the `Clean_Up_Resources.ipynb` to remove the resources created in that notebook after you run the `Clean_Up_Resources.ipynb` with the resources created here.
| getting_started/notebooks_managed_domains/Building_Your_First_Recommender_Video_On_Demand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # README
# This notebook is shared FOR REFERENCE ONLY, as supporting material for the paper "Ubiquity of human-induced changes in climate variability", Rodger et al, Earth System Dynamics, 2021.
# (https://doi.org/10.5194/esd-2021-50)
#
# The notebook calculates the wavelet power spectrum of the Niño3.4 index, as part of Figure 3 in the main text.
#
# The calculations follow <NAME>., and <NAME>, 1998: A practical guide to wavelet analysis. Bull. Amer. Meteor. Soc., 79, 61–78.
#
# Wavelet analysis code translated to Python and provided here courtesy of:
#
# <NAME>
# predybaylo[DOT]evgenia[AT]gmail[DOT]com
# Earth Sciences and Engineering Program
# King Abdullah University of Science and Technology
# Kingdom of Saudi Arabia
#
# For questions regarding this notebook, please email the author, <NAME> at:
# iccp[DOT]stein[AT]gmail[DOT]com
from dask.distributed import Client
client = Client(scheduler_file='/proj/kstein/MPI/scheduler.json')
client
import numpy as np
import xarray as xr
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as matplotlib
import cartopy.crs as ccrs
import cartopy.feature as cf
import cmocean
import glob
import sys
sys.path.append('/proj/kstein/Testbed/wavelets')
from waveletFunctions import wavelet, wave_signif
from matplotlib.gridspec import GridSpec
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
def process_coords(ds, concat_dim='time', drop=True, extra_coord_vars=['time_bound']):
    """Preprocessor for open_mfdataset: drop (or demote to coordinates) all
    variables that do not vary along ``concat_dim``, which otherwise slow
    down concatenation.

    Parameters
    ----------
    ds : xr.Dataset
        Dataset being preprocessed.
    concat_dim : str
        Dimension along which datasets will be concatenated.
    drop : bool
        If True, drop the non-concat variables; otherwise set them as coords.
    extra_coord_vars : list of str
        Additional variable names to treat the same way when present.
        (Read-only here, so the mutable default is safe.)
    """
    coord_vars = [v for v in ds.data_vars if concat_dim not in ds[v].dims]
    for ecv in extra_coord_vars:
        # BUG FIX: the original did ``coord_vars += extra_coord_vars`` inside
        # this loop, appending the *entire* list (including absent variables)
        # once per match -- duplicating entries and making drop() fail on
        # names not in the dataset.  Append only the variable that is present.
        if ecv in ds and ecv not in coord_vars:
            coord_vars.append(ecv)
    if drop:
        return ds.drop(coord_vars)
    else:
        return ds.set_coords(coord_vars)
from cartopy.util import add_cyclic_point
def xr_add_cyclic_point(da, tname=None):
    """
    Append a cyclic (wraparound) point in longitude to a DataArray.

    Parameters
    ----------
    da : xr.DataArray
        Input with dimensions (tname, lat, lon) or (lat, lon).
    tname : str, optional
        Name of the leading time-like dimension, if any.

    Returns
    -------
    xr.DataArray
        Same data with the 'lon' coordinate extended by one cyclic point,
        preserving dims and attrs.
    """
    # Use cartopy's add_cyclic_point to wrap the data along the lon axis.
    lon_idx = da.dims.index('lon')
    wrap_data, wrap_lon = add_cyclic_point(da.values, coord=da.lon, axis=lon_idx)
    # Build the coordinate dict once instead of duplicating the constructor
    # in both branches (the original also compared tname != None; use
    # the idiomatic ``is not None``).
    coords = {'lat': da.lat, 'lon': wrap_lon}
    if tname is not None:
        coords = {tname: da[tname], **coords}
    return xr.DataArray(data=wrap_data, coords=coords, dims=da.dims, attrs=da.attrs)
def load_CESM_lens_ds(ensemble_names, domain, freq, var, ens_labels=None):
    """
    Open a CESM2-LE ensemble of monthly time-series files as one Dataset.

    Parameters
    ----------
    ensemble_names : list of str
        Case names, e.g. 'b.e21.BHISTcmip6.f09_g17.LE2-1001.001'.
    domain : str
        Model component directory, e.g. 'atm'.
    freq : str
        Output frequency directory, e.g. 'month_1'.
    var : str
        Variable name, e.g. 'TS'.
    ens_labels : list, optional
        Labels for the new 'ensemble' dimension.  Defaults to the
        module-level ``ens_numbers`` list for backward compatibility.
        NOTE(review): ensemble_names is sorted by the caller while
        ens_numbers is built unsorted -- confirm label/case ordering match.

    Returns
    -------
    xr.Dataset with an 'ensemble' dimension of len(ensemble_names).
    """
    if ens_labels is None:
        # Backward-compatible fallback to the global used by the original code.
        ens_labels = list(ens_numbers)
    ens_dir = '/proj/jedwards/archive/'
    ens_files = []
    for ens_name in ensemble_names:
        datadir = ens_dir + ens_name + '/' + domain + '/proc/tseries/' + freq + '/'
        ncfiles = glob.glob(datadir + ens_name + '.*.' + var + '.*.nc')
        ens_files.append(sorted(ncfiles))  # chronological file order
    ds = xr.open_mfdataset(ens_files,
                           combine='nested',
                           concat_dim=[ens_labels, 'time'],
                           preprocess=process_coords,
                           parallel=True,  # was the truthy string 'True'
                           decode_cf=False,
                           decode_times=False)
    return ds.rename({'concat_dim': 'ensemble'})
# +
proj_dir = '/proj/kstein/CESM2_LE/Presentation_paper/'
ens_dir = '/proj/jedwards/archive/'
ens_numbers = []
hist_ens_names = []
ssp_ens_names = []
# initial macro ensemble members
ic_times = range(1001,1202,20)
members = range(1,11)
macro_members = [str(ic_times[ind]) + '.' + str(members[ind]).zfill(3) for ind in range(10)]
ens_numbers.extend(macro_members)
hist_macro_names =['b.e21.BHISTcmip6.f09_g17.LE2-' + n for n in macro_members]
ssp_macro_names =['b.e21.BSSP370cmip6.f09_g17.LE2-' + n for n in macro_members]
hist_ens_names.extend(hist_macro_names)
ssp_ens_names.extend(ssp_macro_names)
# micro ensembles
ic_times = (1231,1251,1281,1301)
members = range(1,11)
cmip6_micro_members = [str(ict) + '.' + str(m).zfill(3) for ict in ic_times for m in members]
ens_numbers.extend(cmip6_micro_members)
hist_cmip6_ens_names = ['b.e21.BHISTcmip6.f09_g17.LE2-' + n for n in cmip6_micro_members]
ssp_cmip6_ens_names = ['b.e21.BSSP370cmip6.f09_g17.LE2-' + n for n in cmip6_micro_members]
hist_ens_names.extend(hist_cmip6_ens_names)
ssp_ens_names.extend(ssp_cmip6_ens_names)
members = range(11,21)
smbb_micro_members = [str(ict) + '.' + str(m).zfill(3) for ict in ic_times for m in members]
ens_numbers.extend(smbb_micro_members)
hist_smbb_ens_names = ['b.e21.BHISTsmbb.f09_g17.LE2-' + n for n in smbb_micro_members]
ssp_smbb_ens_names = ['b.e21.BSSP370smbb.f09_g17.LE2-' + n for n in smbb_micro_members]
hist_ens_names.extend(hist_smbb_ens_names)
ssp_ens_names.extend(ssp_smbb_ens_names)
# MOAR members
ic_times = range(1011,1201,20)
members = range(1,11)
moar_members = [str(ic_times[ind]) + '.' + str(members[ind]).zfill(3) for ind in range(10)]
ens_numbers.extend(moar_members)
hist_moar_names =['b.e21.BHISTsmbb.f09_g17.LE2-' + n for n in moar_members]
ssp_moar_names =['b.e21.BSSP370smbb.f09_g17.LE2-' + n for n in moar_members]
hist_ens_names.extend(hist_moar_names)
ssp_ens_names.extend(ssp_moar_names)
hist_ens_names = sorted(hist_ens_names)
ssp_ens_names = sorted(ssp_ens_names)
# +
# Load monthly surface temperature (TS) for all members, historical + SSP3-7.0.
domain = 'atm'
freq = 'month_1'
var = 'TS'
hist_ts_ds = load_CESM_lens_ds(hist_ens_names,domain,freq,var)
# Replace the time axis with an explicit no-leap monthly calendar.
hist_ts_ds['time'] = xr.cftime_range(start='1850-01-01',
                                     end = '2014-12-31',
                                     freq = 'M',
                                     calendar = 'noleap')
# Kelvin -> degrees Celsius.
hist_ts_da = hist_ts_ds[var] - 273.15
ssp_ts_ds = load_CESM_lens_ds(ssp_ens_names,domain,freq,var)
ssp_ts_ds['time'] = xr.cftime_range(start='2015-01-01',
                                    end = '2100-12-31',
                                    freq = 'M',
                                    calendar = 'noleap')
ssp_ts_da = ssp_ts_ds[var] - 273.15
# Concatenate historical (1850-2014) and SSP (2015-2100) along time.
ts_da = xr.concat([hist_ts_da,ssp_ts_da],dim = 'time')
ts_da
# +
# Same for total precipitation rate (PRECT).
domain = 'atm'
freq = 'month_1'
var = 'PRECT'
hist_p_ds = load_CESM_lens_ds(hist_ens_names,domain,freq,var)
hist_p_ds['time'] = xr.cftime_range(start='1850-01-01',
                                    end = '2014-12-31',
                                    freq = 'M',
                                    calendar = 'noleap')
hist_p_da = hist_p_ds[var]
ssp_p_ds = load_CESM_lens_ds(ssp_ens_names,domain,freq,var)
ssp_p_ds['time'] = xr.cftime_range(start='2015-01-01',
                                   end = '2100-12-31',
                                   freq = 'M',
                                   calendar = 'noleap')
ssp_p_da = ssp_p_ds[var]
# Unit conversion factor; presumably m/s -> mm/day (60*60*24 s/day * 1000 mm/m)
# — confirm PRECT's native units against the model output metadata.
unit_con = 60*60*24*1000
hist_p_da = hist_p_da * unit_con
ssp_p_da = ssp_p_da * unit_con
p_da = xr.concat([hist_p_da,ssp_p_da],dim = 'time')
p_da
# -
# Niño 3.4 box average (5S-5N, 190E-240E), restricted to 1960-2100.
ts_nino_da = ts_da.sel(time=slice('1960','2100'))
ts_nino_da = ts_nino_da.sel(lat=slice(-5,5),lon=slice(190,240))
ts_nino_da = ts_nino_da.mean(dim=['lat','lon'])
ts_nino_da = ts_nino_da.compute()
p_nino_da = p_da.sel(time=slice('1960','2100'))
p_nino_da = p_nino_da.sel(lat=slice(-5,5),lon=slice(190,240))
p_nino_da = p_nino_da.mean(dim=['lat','lon'])
p_nino_da = p_nino_da.compute()
# Anomalies relative to the ensemble mean at each time step.
ts_anom_da = ts_nino_da - ts_nino_da.mean(dim='ensemble')
ts_anom_da = ts_anom_da.compute()
p_anom_da = p_nino_da - p_nino_da.mean(dim='ensemble')
p_anom_da = p_anom_da.compute()
# Remove the slowly varying background per member: subtract the annual mean,
# linearly re-interpolated back to monthly resolution.
ts_nino_yrmean_da = ts_nino_da.resample(time='1Y').mean()
ts_nino_yrmean_da = ts_nino_yrmean_da.resample(time='1M').interpolate('linear')
ts_detrend_da = ts_nino_da - ts_nino_yrmean_da
ts_detrend_da = ts_detrend_da.compute()
# Trim the first year — presumably to discard interpolation edge effects; confirm.
ts_detrend_da = ts_detrend_da.sel(time=slice('1961','2100'))
p_nino_yrmean_da = p_nino_da.resample(time='1Y').mean()
p_nino_yrmean_da = p_nino_yrmean_da.resample(time='1M').interpolate('linear')
p_detrend_da = p_nino_da - p_nino_yrmean_da
p_detrend_da = p_detrend_da.compute()
p_detrend_da = p_detrend_da.sel(time=slice('1961','2100'))
# ### Calculate wavelet spectrum of single time series to determine size of the output array
# +
# Run the wavelet transform once (member 0) to learn the output dimensions.
ts = ts_detrend_da.isel(ensemble=0).values
ts = ts - np.mean(ts)
variance = np.std(ts, ddof=1) ** 2
n = len(ts)
dt = 1/12  # monthly sampling, in years
# NOTE(review): time starts at 1960.0 but the detrended data begin in 1961 —
# confirm the intended axis origin.
time = np.arange(len(ts)) * dt + 1960.0  # construct time array
xlim = ([1960, 2100])  # plotting range
pad = 1  # pad the time series with zeroes (recommended)
dj = 0.125  # this will do 8 sub-octaves per octave (1/dj)
s0 = 6 * dt  # this says start at a scale of 6 months
j1 = 5 / dj  # this says do 5 powers-of-two with dj sub-octaves each
lag1 = 0.72  # lag-1 autocorrelation for red noise background
print("lag1 = ", lag1)
mother = 'MORLET'
# -
wave, period, scale, coi = wavelet(ts, dt, pad, dj, s0, j1, mother)
power = (np.abs(wave)) ** 2  # compute wavelet power spectrum
global_ws = (np.sum(power, axis=1) / n)  # time-average over all times
# +
# Wavelet power for all 100 members, averaged and normalized by the mean
# member variance.  P = number of periods, T = number of time samples.
P,T = np.shape(wave)
wave_ens = np.zeros((100,P,T),dtype = 'complex_')
ens_std = np.zeros((100,))
for i in range(100):
    ts = ts_detrend_da.isel(ensemble=i).values
    ts = ts - np.mean(ts)
    ens_std[i] = np.std(ts, ddof=1)
    wave_i, period, scale, coi = wavelet(ts, dt, pad, dj, s0, j1, mother)
    wave_ens[i,:,:] = wave_i
ts_variance = np.mean(ens_std)**2
power_ens = (np.abs(wave_ens)) ** 2  # compute wavelet power spectrum
ts_power = np.mean(power_ens,axis=0)/ts_variance
#global_ws = (np.sum(power, axis=1) / n)  # time-average over all times
# signif = wave_signif(([variance]), dt=dt, sigtest=0, scale=scale,
#     lag1=lag1, mother=mother)
# sig95 = signif[:, np.newaxis].dot(np.ones(n)[np.newaxis, :])  # expand signif --> (J+1)x(N) array
# sig95 = power / sig95  # where ratio > 1, power is significant
# Same ensemble-average spectrum for precipitation.
wave_ens = np.zeros((100,P,T),dtype = 'complex_')
ens_std = np.zeros((100,))
for i in range(100):
    ts = p_detrend_da.isel(ensemble=i).values
    ts = ts - np.mean(ts)
    ens_std[i] = np.std(ts, ddof=1)
    wave_i, period, scale, coi = wavelet(ts, dt, pad, dj, s0, j1, mother)
    wave_ens[i,:,:] = wave_i
# Truncate the cone of influence to T samples — wavelet() appears to return
# one extra coi value; confirm against the wavelet library's return shapes.
coi = coi[:-1]
p_variance = np.mean(ens_std)**2
power_ens = (np.abs(wave_ens)) ** 2  # compute wavelet power spectrum
p_power = np.mean(power_ens,axis=0)/p_variance
# -
# Package both normalized spectra (transposed to [time, period]) plus the
# cone of influence into one dataset for archiving.
wave_ds = xr.Dataset(
    data_vars=dict(
        ts_power=(["time", "period"], ts_power.T),
        p_power=(["time", "period"], p_power.T),
        coi=(["time"], coi)
    ),
    coords=dict(
        time=time,
        period=period,
    ),
    attrs=dict(description="CESM2 LENS Niño3.4 TS,PRECT wavelets."),
)
wave_ds
# +
# NOTE(review): proj_dir must already be defined earlier in the notebook for
# this save cell; it is (re)assigned only in the reload cell below.
out_dir = proj_dir + 'output/Nino34/'
nc_file = 'CESM2_LENS_Nino34_TS_PRECT_wavelets.nc'
wave_ds.to_netcdf(out_dir + nc_file, mode = 'w', format='NETCDF4_CLASSIC')
# +
# Reload the archived spectra (restart-friendly entry point).
proj_dir = '/proj/kstein/CESM2_LE/Presentation_paper/'
out_dir = proj_dir + 'output/Nino34/'
nc_file = 'CESM2_LENS_Nino34_TS_PRECT_wavelets.nc'
wave_ds = xr.open_dataset(out_dir + nc_file)
wave_ds
# +
# Plot the ensemble-average TS wavelet spectrum from the archived dataset.
time = wave_ds.time.values
period = wave_ds.period.values
coi = wave_ds.coi.values
ts_power = wave_ds.ts_power.values
plt.rcParams.update({'font.size': 7})
fig1 = plt.figure(num=1,figsize=(5, 3), dpi=300,
                  #constrained_layout=True,
                  facecolor='w', edgecolor='k')
levs = np.arange(1,17)
ax1 = plt.subplot(111)
cl = ax1.contourf(time, period, ts_power.T,
                  levels = levs, cmap = plt.cm.Reds)
# Cone of influence: region where edge effects contaminate the transform.
ax1.plot(time, coi, 'k')
# FIX: the y-axis kwargs basey/subsy were deprecated in matplotlib 3.3 and
# removed in 3.5; base/subs are the supported names (requires mpl >= 3.3).
ax1.set_yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax1.invert_yaxis()
ax1.grid(True,alpha=.5)
ax = plt.gca().yaxis
ax.set_major_formatter(ticker.ScalarFormatter())
cb = plt.colorbar(cl,ax=ax1)
cb.ax.set_ylabel('Normalized variance', rotation=90)
ax1.set_title('Ensemble average Niño 3.4 SST Morlet wavelet spectrum')
ax1.set_ylabel('Period (years)')
ax1.set_xlabel('Time (year)')
# -
# Plot the ensemble-average PRECT wavelet spectrum.
plt.rcParams.update({'font.size': 7})
fig1 = plt.figure(num=1,figsize=(5, 3), dpi=300,
                  #constrained_layout=True,
                  facecolor='w', edgecolor='k')
levs = np.arange(1,16)
ax1 = plt.subplot(111)
# NOTE(review): p_power here is the in-memory array from the calculation cell
# (shape [period, time], hence no .T), not wave_ds.p_power; coi[:-1] likewise
# re-truncates the already-truncated in-memory coi.  This cell will not run
# correctly from the saved netCDF alone — confirm intent before reuse.
cl = ax1.contourf(time, period, p_power,
                  levels = levs, cmap = plt.cm.Reds)
ax1.plot(time, coi[:-1], 'k')
# FIX: basey/subsy were removed in matplotlib 3.5; use base/subs instead.
ax1.set_yscale('log', base=2, subs=None)
plt.ylim([np.min(period), np.max(period)])
ax1.invert_yaxis()
ax = plt.gca().yaxis
ax.set_major_formatter(ticker.ScalarFormatter())
cb = plt.colorbar(cl,ax=ax1)
ax1.grid(True,alpha=.5)
cb.ax.set_ylabel('Normalized variance',rotation=90)
ax1.set_title('Ensemble average Niño 3.4 PRECT Morlet wavelet spectrum')
ax1.set_ylabel('Period (years)')
ax1.set_xlabel('Time (year)')
| Fig3/CESM2_LENS_new_Fig3_wavelet_calc_for_Ryohei.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import re
from collections import Counter
import seaborn as sns
import matplotlib.pyplot as plt
# Load the 2020-election tweet dumps; lineterminator='\n' guards against
# stray carriage returns embedded inside tweet text.
biden_df = pd.read_csv('hashtag_joebiden.csv', lineterminator='\n')
biden_df.head()
trump_df = pd.read_csv('hashtag_donaldtrump.csv', lineterminator='\n')
trump_df.head()
# +
# Combine both hashtag datasets for whole-corpus analysis.
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent (same default: indexes preserved).
full = pd.concat([biden_df, trump_df])
# -
#most common hashtags in biden df (top 50 by frequency)
words = []
for tweet in biden_df['tweet']:
    words.extend(re.findall(r"#(\w+)", tweet))
Counter(words).most_common(50)
#most common hashtags in trump df
words = []
for tweet in trump_df['tweet']:
    words.extend(re.findall(r"#(\w+)", tweet))
Counter(words).most_common(50)
# +
#Election2020
#Elections2020
#ElectionDay
#USA
#USElection2020
#USAElections2020
#Elecciones2020
#ElectionNight
#Vote2020
#Pennsylvania
#Debates2020
# -
# Restrict to the three Midwest/Rust-Belt swing states of interest.
# BUG FIX: both filters misspelled 'Wisconsin' as 'Wisonsin', so no Wisconsin
# rows were ever selected (later cells use the correct spelling).
swing_states = ['Michigan', 'Pennsylvania', 'Wisconsin']
swing_trump_df = trump_df[trump_df['state'].isin(swing_states)]
#most common hashtags in swing_trump_df
words = []
for tweet in swing_trump_df['tweet']:
    words.extend(re.findall(r"#(\w+)", tweet))
Counter(words).most_common(50)
swing_biden_df = biden_df[biden_df['state'].isin(swing_states)]
#most common hashtags in swing_biden_df
words = []
for tweet in swing_biden_df['tweet']:
    words.extend(re.findall(r"#(\w+)", tweet))
Counter(words).most_common(50)
# +
#11/27
#get only swing states (correct 'Wisconsin' spelling used from here on)
swing_trump_df = trump_df[(trump_df['state'] == 'Michigan') | (trump_df['state'] == 'Pennsylvania') |
                          (trump_df['state'] == 'Wisconsin')]
#VADER lexicon: word -> sentiment polarity (columns 2,3 dropped)
sent = pd.read_csv('vader_lexicon.txt', sep = '\t', index_col = 0,
                   header = None).drop([2,3], axis = 1).rename(columns={1: 'polarity'})
#lowercase all tweets
swing_trump_df['tweet'] = swing_trump_df['tweet'].str.lower()
#Strip punctuation. NOTE(review): the class [^(\w)(\s)] also keeps literal
#parentheses — confirm that is intended.
punct_re = r'[^(\w)(\s)]'
swing_trump_df['no_punc'] = swing_trump_df['tweet'].str.replace(punct_re, ' ', regex = True)
#Tidy format: one row per (tweet index, word position, word)
tidy_format_trump_df = pd.DataFrame(swing_trump_df['no_punc'].str.split(expand = True).stack()).reset_index(level = 1).rename(columns = {'level_1' : 'num', 0 : 'word'})
tidy_format2_trump_df = tidy_format_trump_df
tidy_format2_trump_df['index'] = tidy_format_trump_df.index
#Tweet polarity = sum of per-word lexicon polarities (missing words -> 0).
#NOTE(review): assigning columns on a filtered frame can raise
#SettingWithCopyWarning; consider .copy() after the filter.
swing_trump_df['polarity'] = tidy_format2_trump_df.merge(sent, how = 'left', left_on = 'word', right_on = 0).fillna(0).groupby('index').sum()['polarity']
swing_trump_df.head()
# +
#11/27
#get only swing states — same VADER pipeline as the Trump frame.
swing_biden_df = biden_df[(biden_df['state'] == 'Michigan') | (biden_df['state'] == 'Pennsylvania') |
                          (biden_df['state'] == 'Wisconsin')]
#VADER lexicon: word -> sentiment polarity
sent = pd.read_csv('vader_lexicon.txt', sep = '\t', index_col = 0,
                   header = None).drop([2,3], axis = 1).rename(columns={1: 'polarity'})
#lowercase all tweets
swing_biden_df['tweet'] = swing_biden_df['tweet'].str.lower()
#Strip punctuation
punct_re = r'[^(\w)(\s)]'
swing_biden_df['no_punc'] = swing_biden_df['tweet'].str.replace(punct_re, ' ', regex = True)
#Tidy format: one row per (tweet index, word position, word)
tidy_format_biden_df = pd.DataFrame(swing_biden_df['no_punc'].str.split(expand = True).stack()).reset_index(level = 1).rename(columns = {'level_1' : 'num', 0 : 'word'})
tidy_format2_biden_df = tidy_format_biden_df
tidy_format2_biden_df['index'] = tidy_format_biden_df.index
#Tweet polarity = sum of per-word lexicon polarities (missing words -> 0)
swing_biden_df['polarity'] = tidy_format2_biden_df.merge(sent, how = 'left', left_on = 'word', right_on = 0).fillna(0).groupby('index').sum()['polarity']
swing_biden_df.head()
# +
#11/27
#get only swing states — VADER pipeline on the combined frame.
swing_full = full[(full['state'] == 'Michigan') | (full['state'] == 'Pennsylvania') |
                  (full['state'] == 'Wisconsin')]
#VADER lexicon: word -> sentiment polarity
sent = pd.read_csv('vader_lexicon.txt', sep = '\t', index_col = 0,
                   header = None).drop([2,3], axis = 1).rename(columns={1: 'polarity'})
#lowercase all tweets
swing_full['tweet'] = swing_full['tweet'].str.lower()
#Strip punctuation
punct_re = r'[^(\w)(\s)]'
swing_full['no_punc'] = swing_full['tweet'].str.replace(punct_re, ' ', regex = True)
#Tidy format: one row per (tweet index, word position, word)
tidy_format_full = pd.DataFrame(swing_full['no_punc'].str.split(expand = True).stack()).reset_index(level = 1).rename(columns = {'level_1' : 'num', 0 : 'word'})
tidy_format2_full = tidy_format_full
tidy_format2_full['index'] = tidy_format_full.index
#Tweet polarity = sum of per-word lexicon polarities (missing words -> 0)
swing_full['polarity'] = tidy_format2_full.merge(sent, how = 'left', left_on = 'word', right_on = 0).fillna(0).groupby('index').sum()['polarity']
swing_full.head()
# -
#divide swing_full by states for per-state analysis below
michigan = swing_full[swing_full['state'] == 'Michigan']
pennsylvania = swing_full[swing_full['state'] == 'Pennsylvania']
wisconsin = swing_full[swing_full['state'] == 'Wisconsin']
#most common hashtags in swing_full df
words = []
for tweet in swing_full['tweet']:
    words.extend(re.findall(r"#(\w+)", tweet))
Counter(words).most_common(50)
# Compare VADER polarity distributions for tweets mentioning each candidate.
# NOTE(review): 'no_punc' holds strings, so `!= 0` is always True and the
# filter is a no-op — the intent was probably `swing_full['polarity'] != 0`.
# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14);
# sns.histplot / sns.kdeplot are the successors.
pos = swing_full.loc[swing_full['no_punc'] != 0]
con_trump = pos[pos['no_punc'].str.contains('trump')]['polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#look at 'biden' vs 'trump' in michigan tweets
pos = michigan.loc[michigan['no_punc'] != 0]
con_trump = pos[pos['no_punc'].str.contains('trump')]['polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#look at 'biden' vs 'trump' in pennsylvania tweets
pos = pennsylvania.loc[pennsylvania['no_punc'] != 0]
con_trump = pos[pos['no_punc'].str.contains('trump')]['polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#look at 'biden' vs 'trump' in wisconsin tweets
pos = wisconsin.loc[wisconsin['no_punc'] != 0]
con_trump = pos[pos['no_punc'].str.contains('trump')]['polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
# ## Using TextBlob
#
# NOTE(review): the bare `pip install` below only works through IPython's
# automagic (%pip); in a plain script use `python -m pip install -U textblob`.
pip install -U textblob
# The sentiment property returns a namedtuple of the form Sentiment(polarity, subjectivity).
# The polarity score is a float within the range [-1.0, 1.0].
# The subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective.
from textblob import TextBlob
# Score each cleaned tweet with TextBlob and attach the polarity column.
textblob_polarity = []
for tweet in swing_full['no_punc']:
    testimonial = TextBlob(tweet)
    textblob_polarity.append(testimonial.sentiment.polarity)
swing_full['textblob_polarity'] = textblob_polarity
swing_full.head()
# Same candidate-mention comparisons as above, using TextBlob polarity.
# NOTE(review): `swing_full['no_punc'] != 0` compares strings with 0 and is
# always True (no-op filter); sns.distplot is deprecated — see seaborn docs.
pos = swing_full.loc[swing_full['no_punc'] != 0]
con_trump = pos[pos['no_punc'].str.contains('trump')]['textblob_polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['textblob_polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#Michigan - textblob polarity
pos = swing_full[swing_full['state'] == 'Michigan']
con_trump = pos[pos['no_punc'].str.contains('trump')]['textblob_polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['textblob_polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#Pennsylvania - textblob polarity
pos = swing_full[swing_full['state'] == 'Pennsylvania']
con_trump = pos[pos['no_punc'].str.contains('trump')]['textblob_polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['textblob_polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
#Wisconsin - textblob polarity
pos = swing_full[swing_full['state'] == 'Wisconsin']
con_trump = pos[pos['no_punc'].str.contains('trump')]['textblob_polarity']
con_biden = pos[pos['no_punc'].str.contains('biden')]['textblob_polarity']
sns.distplot(con_trump, label = 'trump')
sns.distplot(con_biden, label = 'biden')
plt.legend()
plt.title('distributions of sentiments for tweets containing trump and biden')
print("avg polarity for 'biden': ", np.mean(con_biden))
print("avg polarity for 'trump': ", np.mean(con_trump))
| exploration/explore_ai (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# using my version - getting color
# Demo: the bitranox fork of coloredlogs, installed on the root logger with
# stdout as the stream; isatty=True presumably forces TTY/color detection in
# the notebook — confirm against the package docs.
import bitranox_coloredlogs
import sys
import logging
logger=logging.getLogger()
bitranox_coloredlogs.install(logger=logger, stream=sys.stdout, isatty=True)
logger.error('some test')
# + pycharm={"name": "#%%\n"}
# using xolox original version . no color !
# !{sys.executable} -m pip install coloredlogs
# + pycharm={"name": "#%%\n"}
# Same call through the upstream coloredlogs package for comparison.
import coloredlogs
logger=logging.getLogger()
coloredlogs.install(logger=logger, stream=sys.stdout, isatty=True)
logger.error('some test')
| bitranox_coloredlogs.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .groovy
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Groovy
// language: groovy
// name: groovy
// ---
// ## Quantile
// +
//load ImageJ
// %classpath config resolver scijava.public https://maven.scijava.org/content/groups/public
// %classpath add mvn net.imagej imagej 2.0.0-rc-67
//create ImageJ object
ij = new net.imagej.ImageJ()
// -
// Given a decimal `x` such that `0 <= x < 1`, this `Op` provides the `x * 100`th [Quantile](https://en.wikipedia.org/wiki/Percentile) on any [`Iterable`](https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html). This `Op` is used by the [`percentile`](percentile.ipynb) `Op`, and most times it is preferable to use `percentile` instead.
// +
// Build a 150x100 test image filled by a sinusoidal intensity formula.
sinusoid32 = ij.op().run("create.img", [150, 100])
formula = "63 * (Math.cos(0.3*p[0]) + Math.sin(0.3*p[1])) + 127"
ij.op().image().equation(sinusoid32, formula)
ij.notebook().display(sinusoid32)
// -
// All `Img`s are `Iterable`s, so we can just pass through the `Img` to `quantile()`:
// +
// NOTE(review): this import appears unused by the loop below.
import net.imglib2.type.numeric.real.DoubleType
// Print a handful of quantiles of the pixel distribution.
percents = [0, 0.25, 0.5, 0.75, 0.999]
for(percent in percents)
    println(((percent * 100) as int) + "th quantile = " + ij.op().stats().quantile(sinusoid32, percent))
| notebooks/1-Using-ImageJ/Ops/stats/quantile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:stan2tfp-dev]
# language: python
# name: conda-env-stan2tfp-dev-py
# ---
# # Simple example
#
# To illustrate the basic functionallity, we'll fit the eight schools model (the "hello world" of bayesian models).
#
# We begin by importing the Stan2tfp object, and plotting functionality:
from stan2tfp import Stan2tfp
import seaborn as sns
import matplotlib.pyplot as plt
import arviz as az
# The model itself is written in Stan, and is specified as a multiline string:
stan_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
vector[J] theta_tilde;
}
transformed parameters {
vector[J] theta = mu + tau * theta_tilde;
}
model {
mu ~ normal(0, 5);
tau ~ normal(0, 5);
theta_tilde ~ normal(0, 1);
y ~ normal(theta, sigma);
}
"""
# The data is specified using a dictionary:
eight_schools_data_dict = dict(
J=8,
y=[28, 8, -3, 7, -1, 1, 18, 12],
sigma=[15, 10, 16, 11, 9, 11, 10, 18]
)
# Finally, the model object itself is created:
model = Stan2tfp(stan_model_code=stan_code, data_dict=eight_schools_data_dict)
# We can also instantiate the model using an external `.stan` file. Passing the data dict at this point is not necessary, as it's not needed for the translation of Stan code; we could've used:
model = Stan2tfp(stan_model_code=stan_code)
# # do whatever you want here
model.init_model(data_dict=eight_schools_data_dict)
# ... instead.
# Note that since this is the first time we're calling the compiler, `stan2tfp` automatically downloads the latest pre-compiled binary, and informs you about its path.
#
# To inspect the emitted TFP code, call `get_tfp_code()`:
print(model.get_tfp_code())
# Internally, this code is now `eval`ed by the interpreter and creates the necessary _python_ objects in the current namespace.
# To fit the model, we call the `sample` method; it's a wrapper around TFP's MCMC machinery, aiming for sensible defaults and ease-of-use:
mcmc_trace, kernel_results = model.sample()
# `model.sample` returns the actual samples (`mcmc_trace`) and TFP's `kernel_results` (holding important sampler diagnostics). Since we're in TFP-world now, we can use any tool from the TFP ecosystem we like; specifically, we can use the excellent [Arviz](https://arviz-devs.github.io) library for plotting:
# BUG FIX: 'trace' is not a parameter of this model — the Stan program above
# declares mu, tau and theta_tilde, so the middle name must be 'tau'.
data_for_az = az.from_tfp(posterior=mcmc_trace, var_names=['mu','tau','theta_tilde'])
data_for_az.posterior
az.plot_forest(data_for_az)
| examples/eight schools example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# Project-local EKF modules plus Bokeh for interactive plotting.
from kalmanfilter import KalmanFilter
from datapoint import DataPoint
from fusionekf import FusionEKF
from tools import get_RMSE, cartesian_to_polar
from helpers import parse_data, get_state_estimations, print_EKF_data
import numpy as np
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.models import HoverTool
from bokeh.models import ColumnDataSource
# Render Bokeh figures inline in the notebook.
output_notebook()
# + deletable=true editable=true
# Filter configuration. State is 4-dimensional (the matrices below are 4x4
# and lidar_H picks the first two components) — presumably [x, y, vx, vy];
# confirm against fusionekf.py.
# NOTE(review): np.matrix is deprecated in NumPy; np.array is preferred.
# Lidar measurement-noise covariance (2 measured components).
lidar_R = np.matrix([[0.01, 0],
                     [0, 0.01]])
# Radar measurement-noise covariance (3 measured components).
radar_R = np.matrix([[0.01, 0, 0],
                     [0, 1.0e-6, 0],
                     [0, 0, 0.01]])
# Lidar measurement matrix: selects the first two state components.
lidar_H = np.matrix([[1, 0, 0, 0],
                     [0, 1, 0, 0]])
# Initial state covariance: large uncertainty on the last two components.
P = np.matrix([[1, 0, 0, 0],
               [0, 1, 0, 0],
               [0, 0, 1000, 0],
               [0, 0, 0, 1000]])
Q = np.matrix(np.zeros([4, 4]))
F = np.matrix(np.eye(4))
d = {
    'number_of_states': 4,
    'initial_process_matrix': P,
    'radar_covariance_matrix': radar_R,
    'lidar_covariance_matrix': lidar_R,
    'lidar_transition_matrix': lidar_H,
    'inital_state_transition_matrix': F,
    'initial_noise_matrix': Q,
    'acceleration_noise_x': 5,
    'acceleration_noise_y': 5
}
EKF = FusionEKF(d)
# + deletable=true editable=true
# Parse the measurement log and run the filter over every measurement.
all_sensor_data, all_ground_truths = parse_data("data/data-1.txt")
all_state_estimations = get_state_estimations(EKF, all_sensor_data)
# + deletable=true editable=true
# Flatten sensor / truth / estimate triples into plottable coordinate lists;
# angles are velocity headings atan2(vy, vx).
lidar_xs, lidar_ys = [], []
radar_xs, radar_ys, radar_angles = [], [], []
truth_xs, truth_ys, truth_angles = [], [], []
state_xs, state_ys, state_angles = [], [], []
for s, t, p in zip(all_sensor_data, all_ground_truths, all_state_estimations):
    if s.get_name() == "lidar":
        # Lidar measures position only.
        x, y = s.get_raw()
        lidar_xs.append(x)
        lidar_ys.append(y)
    else:
        # Radar measurement converted to cartesian state incl. velocity.
        x, y, vx, vy = s.get()
        angle = np.arctan2(vy, vx)
        radar_xs.append(x)
        radar_ys.append(y)
        radar_angles.append(angle)
    x, y, vx, vy = t.get()
    t_angle = np.arctan2(vy, vx)
    truth_xs.append(x)
    truth_ys.append(y)
    truth_angles.append(t_angle)
    x, y, vx, vy = p.get()
    p_angle = np.arctan2(vy, vx)
    state_xs.append(x)
    state_ys.append(y)
    state_angles.append(p_angle)
# + deletable=true editable=true
# Bundle the extracted series into Bokeh data sources for the plots below.
radar_source = ColumnDataSource(data = {
    'x' : radar_xs,
    'y' : radar_ys,
    'angle': radar_angles,
})
truth_source = ColumnDataSource(data = {
    'x' : truth_xs,
    'y' : truth_ys,
    'angle': truth_angles,
})
state_source = ColumnDataSource(data = {
    'x' : state_xs,
    'y' : state_ys,
    # BUG FIX: this was truth_angles, so every "state prediction" glyph was
    # drawn with the ground-truth heading; state_angles was computed above
    # but never used.
    'angle': state_angles,
})
lidar_source = ColumnDataSource(data = {
    'x' : lidar_xs,
    'y' : lidar_ys,
})
hover = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
# + deletable=true editable=true
###################################################################
# RADAR MEASUREMENTS WITH ORIENTATION
###################################################################
# NOTE(review): the `legend=` keyword was removed in Bokeh 2.0 in favor of
# `legend_label=` — confirm the pinned Bokeh version before upgrading.
hover2 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover2])
p.triangle(
    'x', 'y', size = 10,
    fill_color = "firebrick",
    line_color = "orange",
    fill_alpha = 0.2,
    angle = 'angle',
    line_width = 1,
    legend = "radar measurements",
    source = radar_source)
p.cross(
    'x', 'y', size = 5,
    line_color = "grey",
    angle = 'angle',
    line_width = 1,
    legend = "radar measured velocity direction",
    source = radar_source)
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
###################################################################
# LIDAR MEASUREMENTS
###################################################################
hover3 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover3])
p.circle(
    'x', 'y', size = 10,
    fill_color = "navy",
    line_color = "teal",
    fill_alpha = 0.2,
    line_width = 1,
    legend = "lidar measurements",
    source = lidar_source)
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
###################################################################
# GROUND TRUTH WITH ORIENTATION
###################################################################
hover4 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover4])
p.triangle(
    'x', 'y', size = 15,
    fill_color = "red",
    line_color = "white",
    fill_alpha = 0.2,
    angle = 'angle',
    line_width = 1,
    legend = "ground truth",
    source = truth_source)
p.cross(
    'x', 'y', size = 5,
    line_color = "grey",
    angle = 'angle',
    line_width = 1,
    legend = "velocity direction",
    source = truth_source)
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
###################################################################
# STATE WITH ORIENTATION
###################################################################
# NOTE(review): state_source's 'angle' column is built from truth_angles (see
# its construction above) — likely intended to be state_angles.
hover5 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover5])
p.triangle(
    'x', 'y', size = 20,
    fill_color = "teal",
    line_color = "white",
    fill_alpha = 0.2,
    angle = 'angle',
    line_width = 1,
    legend = "state prediction: location",
    source = state_source)
p.x(
    'x', 'y', size = 5,
    line_color = "grey",
    angle = 'angle',
    line_width = 1,
    legend = "state prediction: velocity directions",
    source = state_source)
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
# Overlay all four series (radar, lidar, truth, state) in a single figure.
hover6 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover6])
p.square(
    'x', 'y', size = 3,
    fill_color = "orange",
    line_color = "orange",
    fill_alpha = 1,
    angle = 'angle',
    line_width = 1,
    legend = "radar measurements",
    source = radar_source)
p.circle(
    'x', 'y', size = 15,
    fill_color = "green",
    line_color = "white",
    fill_alpha = 0.2,
    line_width = 1,
    legend = "lidar measurements",
    source = lidar_source)
p.triangle(
    'x', 'y', size = 7,
    fill_color = "violet",
    line_color = "violet",
    fill_alpha = 1,
    angle = 'angle',
    line_width = 0.5,
    legend = "ground truth",
    source = truth_source)
p.triangle(
    'x', 'y', size = 2,
    fill_color = "black",
    line_color = "black",
    fill_alpha = 1,
    angle = 'angle',
    line_width = 1,
    legend = "state predictions",
    source = state_source)
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
# Same overlay, but truth and state tracks drawn as continuous lines.
hover7 = HoverTool( tooltips = [
    ("index", "$index"),
    ("x , y", "$x, $y"),
    ("angle radians", "@angle")])
p = figure(plot_width = 1000, plot_height = 700, tools = [hover7])
p.square(
    'x', 'y', size = 3,
    fill_color = "violet",
    line_color = "violet",
    fill_alpha = 1,
    angle = 'angle',
    line_width = 1,
    legend = "radar measurements",
    source = radar_source)
p.circle(
    'x', 'y', size = 10,
    fill_color = "green",
    line_color = "white",
    fill_alpha = 0.4,
    line_width = 1,
    legend = "lidar measurements",
    source = lidar_source)
p.line(state_xs, state_ys, line_width = 2, color='orange', legend = "state predictions")
p.line(truth_xs, truth_ys, line_dash = "4 4", line_width = 1, color='navy', legend = "ground truth")
p.legend.location = "bottom_right"
show(p)
# + deletable=true editable=true
| Fusion-EKF-Sample-Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:generalmachinelearningforcpusvyes]
# language: python
# name: conda-env-generalmachinelearningforcpusvyes-py
# ---
# ### OCI Data Science - Useful Tips
# <details>
# <summary><font size="2">Check for Public Internet Access</font></summary>
#
# ```python
# import requests
# response = requests.get("https://oracle.com")
# assert response.status_code==200, "Internet connection failed"
# ```
# </details>
# <details>
# <summary><font size="2">Helpful Documentation </font></summary>
# <ul><li><a href="https://docs.cloud.oracle.com/en-us/iaas/data-science/using/data-science.htm">Data Science Service Documentation</a></li>
# <li><a href="https://docs.cloud.oracle.com/iaas/tools/ads-sdk/latest/index.html">ADS documentation</a></li>
# </ul>
# </details>
# <details>
# <summary><font size="2">Typical Cell Imports and Settings for ADS</font></summary>
#
# ```python
# # %load_ext autoreload
# # %autoreload 2
# # %matplotlib inline
#
# import warnings
# warnings.filterwarnings('ignore')
#
# import logging
# logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)
#
# import ads
# from ads.dataset.factory import DatasetFactory
# from ads.automl.provider import OracleAutoMLProvider
# from ads.automl.driver import AutoML
# from ads.evaluations.evaluator import ADSEvaluator
# from ads.common.data import ADSData
# from ads.explanations.explainer import ADSExplainer
# from ads.explanations.mlx_global_explainer import MLXGlobalExplainer
# from ads.explanations.mlx_local_explainer import MLXLocalExplainer
# from ads.catalog.model import ModelCatalog
# from ads.common.model_artifact import ModelArtifact
# ```
# </details>
# <details>
# <summary><font size="2">Useful Environment Variables</font></summary>
#
# ```python
# import os
# print(os.environ["NB_SESSION_COMPARTMENT_OCID"])
# print(os.environ["PROJECT_OCID"])
# print(os.environ["USER_OCID"])
# print(os.environ["TENANCY_OCID"])
# print(os.environ["NB_REGION"])
# ```
# </details>
# +
# Imports and display/plot defaults for the data-cleaning notebook.
# NOTE(review): several imports below (os, StandardScaler, stats, random,
# time, datetime) appear unused in this notebook.
import os
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None, 'display.max_rows', 100)
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_context('notebook')
sns.set_style('whitegrid')
sns.set_palette('Blues_r')
from sklearn.preprocessing import StandardScaler
from scipy import stats
import random
import time
from datetime import datetime
import warnings
# warnings.filterwarnings('ignore')
# -
# Reading the CSV file; work on a copy so the raw data stays untouched.
data= pd.read_csv('../weather.csv')
df = data.copy()
df.shape
# Columns with fewer than 2 distinct values carry no information -> drop.
drop_col = []
for col in df.columns:
    if df[col].nunique()<2:
        drop_col.append(col)
print(drop_col)
# Also drop columns that are more than 30% missing.
# NOTE(review): a column can be appended twice (constant AND >30% null).
for col in df.columns:
    if df[col].isnull().sum() > 0.3*df.shape[0]:
        drop_col.append(col)
print(drop_col)
# NOTE(review): 974274 is a magic number — presumably the full row count
# (i.e. entirely-null columns); confirm.  drop_row is never used below.
drop_row =[]
for col in df.columns:
    if df[col].isnull().sum() == 974274:
        drop_row.append(col)
print(drop_row)
# Columns missing between 1 row and <1% of rows get imputed instead of dropped.
fill_col = []
for col in df.columns:
    if df[col].isnull().sum() in range(1, int(0.01*df.shape[0])):
        fill_col.append(col)
print(fill_col)
# Drop the flagged columns; de-duplicate first since a column can appear in
# drop_col twice (constant AND >30% null).
df.drop(columns=list(dict.fromkeys(drop_col)), inplace=True)
df['GAMEHOST'] = df['GAMEHOST'].fillna('Unknown')
for col in fill_col:
    # BUG FIX: Series.mode() returns a Series; fillna(mode_series) aligns on
    # the row index and effectively fills nothing beyond row 0/1.  Impute
    # with the first modal value instead.  (Assignment also avoids the
    # chained-inplace fillna, which is unreliable under pandas copy-on-write.)
    mode = df[col].mode()
    if not mode.empty:
        df[col] = df[col].fillna(mode.iloc[0])
# Any remaining rows with missing values are discarded.
df.dropna(inplace=True)
# Sanity check (should be all zeros after dropna) and export the clean set.
df.isnull().sum()
df.shape
df.to_csv('clean_data.csv', encoding='utf-8', index=False)
| earliest experiments/notebooks/F1_Data Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python3
# ---
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import random
def pps(n, ND):
    """Print a one-line console progress bar for step n out of ND.

    Rewrites the current line with a percentage and a 50-cell bar of
    green/white squares; when n == ND a blue completion banner is printed.
    """
    pct = "{:.2f}".format(n * 100 / ND)
    filled = int(50 * n / ND) + 1  # green cells (always at least one)
    print("\r", end="")
    print("processes", pct, "% completed: ", end="")
    print("🟩" * filled, end="")
    print("⬜️" * (50 - filled), end="")
    if n == ND:
        print("\r", "computing successfully completed! ", 50 * "🟦")
def initial_condition(I0):
    """Load the stored contact graph and seed the epidemic.

    Every node starts susceptible ('x' = 's') with its exposed-time counter
    'tpe' reset to 0, then becomes infected ('i') independently with
    probability I0.

    NOTE(review): assumes 'Graph.gpickle' exists in the working directory
    and was produced by the graph-generation notebook -- confirm.
    """
    G = nx.read_gpickle('Graph.gpickle')
    for n_0 in G.nodes:
        G.nodes[n_0]['x'] = 's'
        G.nodes[n_0]['tpe'] = 0
        # Independent Bernoulli(I0) seeding of initial infections.
        if random.random() < I0:
            G.nodes[n_0]['x'] = 'i'
    return G
def Count_nodes(Graph):
    """Collect the infected ('i') and exposed ('e') nodes of Graph.

    Returns a tuple (infected_nodes, exposed_nodes), preserving the
    graph's node iteration order.
    """
    nodes = Graph.nodes
    exposed = [node for node in nodes if nodes[node]['x'] == 'e']
    infected = [node for node in nodes if nodes[node]['x'] == 'i']
    return (infected, exposed)
def SEIR_dynamic(I0, beta, gamma, t_e, P1):
    """Run one 80-step SEIR epidemic with per-step vaccination on the stored graph.

    I0    -- initial infection probability per node (see initial_condition)
    beta  -- per-contact transmission probability per time step
    gamma -- per-step recovery probability for an infected node
    t_e   -- mean of the Poisson-distributed incubation period (time steps)
    P1    -- per-step vaccination probability for a susceptible node

    Returns the final number of recovered ('r') nodes, i.e. the epidemic size.
    """
    G = initial_condition(I0)
    for dp in range(80):
        # Snapshot of infected / exposed nodes at the start of this step so
        # state changes made below do not affect this step's iteration.
        (inode, enode) = Count_nodes(Graph=G)
        for n_2 in G.nodes:
            # Susceptible nodes get vaccinated ('v') with probability P1.
            if G.nodes[n_2]['x'] == 's' and random.random() < P1:
                G.nodes[n_2]['x'] = 'v'
            # Exposed nodes accumulate time spent in the exposed state.
            if G.nodes[n_2]['x'] == 'e':
                G.nodes[n_2]['tpe'] += 1
            # NOTE(review): nothing in this function ever assigns state 'q'
            # (quarantine) and 'tpq' is never initialised, so this branch
            # appears dead here -- confirm against the quarantine variant
            # of this notebook.
            if G.nodes[n_2]['x'] == 'q':
                G.nodes[n_2]['tpq'] += 1
        # Exposed nodes become infectious once their exposed time reaches a
        # freshly drawn Poisson(t_e) incubation period.
        for n_3 in enode:
            if G.nodes[n_3]['tpe'] >= np.random.poisson(t_e, size=1)[0]:
                G.nodes[n_3]['x'] = 'i'
        for n_6 in inode:
            # Infected nodes recover with probability gamma ...
            if random.random() < gamma:
                G.nodes[n_6]['x'] = 'r'
            # ... and expose each susceptible neighbour with probability
            # beta this step. NOTE(review): neighbour infection is applied
            # whether or not the node just recovered -- confirm this nesting
            # is intended.
            for n_7 in G.adj[n_6]:
                if ((G.nodes[n_7]['x'] == 's') and (random.random() < beta)):
                    G.nodes[n_7]['x'] = 'e'
    # Final size of the epidemic = number of recovered nodes.
    rnum = 0
    for i in G.nodes:
        if G.nodes[i]['x'] == 'r':
            rnum += 1
    return rnum
# Sweep vaccination probability P1 (rows) against transmission probability
# beta (columns) on a 50 x 50 grid, running 20 stochastic replicates per
# cell and averaging the final epidemic size.
P1_list = np.linspace(0, 0.2, 50)
beta_list = np.linspace(0, 1, 50)
rdata = np.zeros((50, 50, 20))  # raw final sizes: (P1 index, beta index, replicate)
rmean = np.zeros((50, 50))      # replicate mean per (P1, beta) cell
m = 0
for p in P1_list:
    n = 0
    for b in beta_list:
        for i in range(20):
            rdata[m][n][i] = SEIR_dynamic(I0=0.01, beta=b, gamma=1, t_e=5, P1=p)
        rmean[m][n] = np.mean(rdata[m][n])
        n += 1
    m += 1
    # Update the console progress bar after each completed P1 row.
    pps(m, 50)
# Plot the mean epidemic size as a heat map and persist the raw data.
plt.figure(figsize=(15, 7))
c = plt.pcolormesh(beta_list, P1_list, rmean)
plt.ylabel('P1')
plt.xlabel('Transition probability')
plt.colorbar(c)
plt.savefig('Fig1-2')
np.save('1-2-(50-50-20).npy', rdata)
| SEIR-on-network-with-Vac-Quar/barabasi_albert_graph/Contour-Map/Code/SEIR-Network 1.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas
# Read the text file of volcano records (CSV-formatted) into a dataframe.
df = pandas.read_csv("Volcanoes.txt")
df
# Extract the latitude, longitude, names and elevations of the volcanoes
# into plain lists so they can be zipped together when building map markers.
lat = list(df["LAT"])
long = list(df["LON"])
names = list(df["NAME"])
elev = list(df["ELEV"])
# +
#Function to give markers colours based on elevations
#Call function when adding colour to markers
def color_generator(elevation):
    """Map a volcano elevation (metres) to a marker colour.

    Below 1000 m -> green; 1000 m up to (but excluding) 3000 m -> purple;
    3000 m and above -> red.
    """
    if elevation >= 3000:
        return "red"
    if elevation >= 1000:
        return "purple"
    return "green"
# FOR HELP -> dir(folium)
# ADDITIONAL HELP -> help(folium.characteristic) where characteristic is Marker, CircleMarker etc
import folium
# Allows HTML code to be inserted in the popup for each marker -> for formatting text type, font, adding urls etc
# html = """
# Volcano name:<br>
# <a href="https://www.google.com/search?q=%%22%s%%22" target="_blank">%s</a><br>
# Height: %s m
# """
# Create a map object with a base location, a specific zoom and tile type.
# NOTE(review): "Stamen Terrain" tiles require attribution / a Stadia account
# in newer folium releases -- verify this tile set still loads.
mp = folium.Map(location=[40.57,-115.49], zoom_start=6, tiles = "Stamen Terrain")
# Instead of adding characteristics/layers to the map separately -> we add them to a FeatureGroup.
# This allows us to organise the characteristics/layers more easily.
# FeatureGroup for volcanoes.
fgv = folium.FeatureGroup(name="Volcanoes")
# Add a point layer for volcano locations to the feature group.
# zip allows us to traverse the 4 lists simultaneously.
for la,lo,na,el in zip(lat,long,names,elev):
    # The IFrame function allows us to implement the html code into the map for the popup option.
    # iframe = folium.IFrame(html=html % (na, na, el), width=200, height=100)
    # We then add a marker with specific data for location, text, colour etc
    # fg.add_child(folium.Marker(location=[la,lo], popup=folium.Popup(iframe), parse_html=True,
    #     icon=folium.Icon(color=color_generator(el))))
    # fg.add_child(folium.Marker(location=[la,lo], popup=folium.Popup(str(na)+" ("+str(el)+" mtrs)"),
    #     parse_html=True, icon=folium.Icon(color='red')))
    # Circle marker coloured by elevation via color_generator().
    fgv.add_child(folium.CircleMarker(location=[la,lo], popup=str(na)+" ("+str(el)+" mtrs)", radius=10,
                                      fill_color=color_generator(el), color='grey', fill_opacity=0.7))
# Feature group for mapping countries and population.
fgp = folium.FeatureGroup(name="Population")
# Add a polygon layer for mapping countries to the feature group.
# It adds the data stored in the world.json file to the map.
# The json file stores data like the coordinates of each country's shape and its population.
# Colour the countries based on their population using a lambda style function.
fgp.add_child(folium.GeoJson(data=open("world.json", "r", encoding = "utf-8-sig").read(),
                             style_function = lambda x: {'fillColor':'green' if x['properties']['POP2005']
                             < 10000000 else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000
                             else 'red'}))
# We then add each FeatureGroup with its characteristics/layers to the map object, building the final map.
mp.add_child(fgv)
mp.add_child(fgp)
# Add layer control to the map object, but only after adding the feature groups --
# otherwise it won't have any layers to control.
mp.add_child(folium.LayerControl())
# Save the map as an html file.
mp.save("Map1.html")
| Population & Volcano Web Map/Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from haven import haven_jupyter as hj
from haven import haven_results as hr
from haven import haven_utils as hu

# path to where the experiments got saved
savedir_base = '/mnt/home/covid19_weak_supervision/results/experiments'
exp_list = None

# Optionally load the experiment list from a python config file; with
# exp_config_fname = None the ResultManager discovers experiments itself.
exp_config_fname = None
if exp_config_fname:
    config = hu.load_py(exp_config_fname)
    exp_list = []
    for exp_group in [
        "example"
    ]:
        exp_list += config.EXP_GROUPS[exp_group]

# filter exps, e.g.
# filterby_list =[{'dataset':'mnist'}]
filterby_list = None

# get experiments
rm = hr.ResultManager(exp_list=exp_list,
                      savedir_base=savedir_base,
                      filterby_list=filterby_list,
                      verbose=0,
                      exp_groups=None,
                      job_scheduler='toolkit'
                      )

# specify display parameters
filterby_list = None
legend_list = ['model.name', 'model.loss']
title_list = ['dataset.name']
y_metrics = ['train_loss', 'test_iou']
x_metric = 'epoch'

# launch dashboard
hj.get_dashboard(rm, vars(), wide_display=False, enable_datatables=False)
# +
# get table (first few rows of the per-experiment score dataframe)
rm.get_score_df().head()

# get latex
# print(rm.get_latex_table(legend=['dataset'], metrics=['train_loss'], decimals=1, caption="Results", label='tab:results'))

# get custom plots; the commented keyword arguments document the available
# display options of get_plot_all.
fig = rm.get_plot_all(
    # order='metrics_by_groups',
    # avg_across='runs',
    y_metric_list=y_metrics,
    x_metric=x_metric,
    # legend_fontsize=18,
    # x_fontsize=20,
    # y_fontsize=20,
    # xtick_fontsize=20,
    # ytick_fontsize=20,
    # title_fontsize=24,
    # legend_list=['model],
    # title_list = ['dataset'],
    # title_format='Dataset:{}',
    # log_metric_list = ['train_loss'],
    # groupby_list = ['dataset'],
    # map_ylabel_list=[{'train_loss':'Train loss'}],
    # map_xlabel_list=[{'epoch':'Epoch'}],
    # figsize=(15,5),
    # plot_confidence=False,
    # savedir_plots='%s' % (name)
)
# -
# !pip install --upgrade git+https://github.com/haven-ai/haven-ai
| results/results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (tensorflow)
# language: python
# name: tensorflow
# ---
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_1_overview.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # T81-558: Applications of Deep Neural Networks
# **Module 1: Python Preliminaries**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 1 Material
#
# * **Part 1.1: Course Overview** [[Video]](https://www.youtube.com/watch?v=taxS7a-goNs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_1_overview.ipynb)
# * Part 1.2: Introduction to Python [[Video]](https://www.youtube.com/watch?v=czq5d53vKvo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_2_intro_python.ipynb)
# * Part 1.3: Python Lists, Dictionaries, Sets and JSON [[Video]](https://www.youtube.com/watch?v=kcGx2I5akSs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_3_python_collections.ipynb)
# * Part 1.4: File Handling [[Video]](https://www.youtube.com/watch?v=FSuSLCMgCZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_4_python_files.ipynb)
# * Part 1.5: Functions, Lambdas, and Map/Reduce [[Video]](https://www.youtube.com/watch?v=jQH1ZCSj6Ng&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_5_python_functional.ipynb)
#
# Watch one (or more) of these depending on how you want to setup your Python TensorFlow environment:
# * [How to Submit a Module Assignment locally](https://www.youtube.com/watch?v=hmCGjCVhYNc)
# * [How to Use Google CoLab and Submit Assignment](https://www.youtube.com/watch?v=Pt-Od-oBgOM)
# * [Installing TensorFlow, Keras, and Python in Windows CPU](https://www.youtube.com/watch?v=RgO8BBNGB8w&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
# * [Installing TensorFlow, Keras, and Python in Mac](https://www.youtube.com/watch?v=MpUvdLD932c&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
# * [Installing TensorFlow, Keras, and Python in Windows GPU, warning, somewhat complex](https://www.youtube.com/watch?v=qrkEYf-YDyI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN)
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
# Detect whether this notebook is running inside Google CoLab: importing
# google.colab only succeeds in the CoLab runtime.
try:
    from google.colab import drive
    # %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except ImportError:
    # Narrowed from a bare `except:` so that unrelated errors (e.g.
    # KeyboardInterrupt or real bugs) are not silently swallowed.
    print("Note: not using Google CoLab")
    COLAB = False
# # Part 1.1: Course Overview
#
# Deep learning is a group of exciting new technologies for neural networks. [[Cite:lecun2015deep]](https://www.nature.com/articles/nature14539) By using a combination of advanced training techniques neural network architectural components, it is now possible to train neural networks of much greater complexity. This course introduces the student to deep belief neural networks, regularization units (ReLU), convolution neural networks, and recurrent neural networks. High-performance computing (HPC) aspects demonstrate how deep learning can be leveraged both on graphical processing units (GPUs), as well as grids. Deep learning allows a model to learn hierarchies of information in a way that is similar to the function of the human brain. The focus is primarily upon the application of deep learning, with some introduction to the mathematical foundations of deep learning. Students make use of the Python programming language to architect a deep learning model for several real-world data sets and interpret the results of these networks. [[Cite:goodfellow2016deep]](https://www.deeplearningbook.org/)
# # Assignments
#
# Your grade is calculated according to the following assignments:
#
# Assignment |Weight|Description
# --------------------|------|-------
# Ice Breaker | 5%|Post a short get to know you discussion topic (individual)
# Group Selection | 5%|Choose groups for Kaggle final projects
# Class Assignments | 50%|10 small programming assignments (5% each, individual)
# Kaggle Project | 20%|"Kaggle In-Class" competition (team)
# Final Project | 20%|Deep Learning Implementation Report (team)
#
# The 10 class assignments correspond with each of the first 10 modules. Generally, each module assignment is due just before the following module date. Refer to the syllabus for exact due dates. The 10 class assignments are submitted using the Python submission script. Refer to assignment 1 for details.
#
# The Kaggle and Final Projects are completed in teams. The same teams will complete each of these.
#
# * **Module 1 Assignment**: [How to Submit an Assignment](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
# * **Module 2 Assignment**: [Creating Columns in Pandas](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class2.ipynb)
# * **Module 3 Assignment**: [Data Preparation in Pandas](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class3.ipynb)
# * **Module 4 Assignment**: [Classification and Regression Neural Networks](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class4.ipynb)
# * **Module 5 Assignment**: [Predict Home Price](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class5.ipynb)
# * **Module 6 Assignment**: [Image Processing](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class6.ipynb)
# * **Module 7 Assignment**: [Computer Vision](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class7.ipynb)
# * **Module 8 Assignment**: [Feature Engineering](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
# * **Module 9 Assignment**: [Transfer Learning](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class9.ipynb)
# * **Module 10 Assignment**: [Time Series Neural Network](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class10.ipynb)
#
# # Your Instructor: <NAME>
#
# <NAME>, pictured in Figure 1.JH is the author of this book and developer of this course. A brief summary of his credentials is given here:
#
# * Master of Information Management (MIM), Washington University in St. Louis, MO
# * PhD in Computer Science, Nova Southeastern University in Ft. Lauderdale, FL
# * [Vice President and Data Scientist](http://www.rgare.com/knowledge-center/media/articles/rga-where-logic-meets-curiosity), Reinsurance Group of America (RGA)
# * Senior Member, IEEE
# * jtheaton at domain name of this university
# * Other industry certifications: FLMI, ARA, ACS
#
# Social media:
#
# * [Homepage](http://www.heatonresearch.com) - My home page. Includes my research interests and publications.
# * [YouTube Channel](https://www.youtube.com/user/HeatonResearch) - My YouTube Channel. Subscribe for my videos on AI and updates to this class.
# * [Discord Server](https://discord.gg/3bjthYv) - To discuss this class and AI topics.
# * [GitHub](https://github.com/jeffheaton) - My GitHub repositories.
# * [LinkedIn](https://www.linkedin.com/in/jeffheaton) - My Linked In profile.
# * [Twitter](https://twitter.com/jeffheaton) - My Twitter feed.
# * [Google Scholar](https://scholar.google.com/citations?user=1jPGeg4AAAAJ&hl=en) - My citations on Google Scholar.
# * [Research Gate](https://www.researchgate.net/profile/Jeff_Heaton) - My profile/research at Research Gate.
# * [Others](http://www.heatonresearch.com/about/) - About me and other social media sites that I am a member of.
#
# **Figure 1.JH: <NAME> Recording a Video**
# 
# # Course Resources
#
# * [Google CoLab](https://colab.research.google.com/) - Free web-based platform that includes Python, Juypter Notebooks, and TensorFlow [[Cite:GoogleTensorFlow]](http://download.tensorflow.org/paper/whitepaper2015.pdf). No setup needed.
# * [Python Anaconda](https://www.continuum.io/downloads) - Python distribution that includes many data science packages, such as Numpy, Scipy, Scikit-Learn, Pandas, and much more.
# * [Juypter Notebooks](http://jupyter.org/) - Easy to use environment that combines Python, Graphics and Text.
# * [TensorFlow](https://www.tensorflow.org/) - Google's mathematics package for deep learning.
# * [Kaggle](https://www.kaggle.com/) - Competitive data science. Good source of sample data.
# * [Course GitHub Repository](https://github.com/jeffheaton/t81_558_deep_learning) - All of the course notebooks will be published here.
# # What is Deep Learning
#
# The focus of this class is deep learning, which is a prevalent type of machine learning that builds upon the original neural networks popularized in the 1980s. There is very little difference between how a deep neural network is calculated compared with the first neural network. We've always been able to create and calculate deep neural networks. A deep neural network is nothing more than a neural network with many layers. While we've always been able to create/calculate deep neural networks, we've lacked an effective means of training them. Deep learning provides an efficient means to train deep neural networks.
#
# ## What is Machine Learning
#
# If deep learning is a type of machine learning, this begs the question, "What is machine learning?" Figure 1.ML-DEV illustrates how machine learning differs from traditional software development.
#
# **Figure 1.ML-DEV: ML vs Traditional Software Development**
# 
#
# * **Traditional Software Development** - Programmers create programs that specify how to transform input into the desired output.
# * **Machine Learning** - Programmers create models that can learn to produce the desired output for given input. This learning fills the traditional role of the computer program.
#
# Researchers have applied machine learning to many different areas. This class explores three specific domains for the application of deep neural networks, as illustrated in Figure 1.ML-DOM.
#
# **Figure 1.ML-DOM: Application of Machine Learning**
# 
#
# * **Predictive Modeling** - Several named input values allow the neural network to predict another named value that becomes the output. For example, using four measurements of iris flowers to predict the species. This type of data is often called tabular data.
# * **Computer Vision** - The use of machine learning to detect patterns in visual data. For example, is an image a picture of a cat or a dog.
# * **Time Series** - The use of machine learning to detect patterns in time. Typical applications of time series are financial applications, speech recognition, and even natural language processing (NLP).
#
# ### Regression
#
# Regression is when a model, such as a neural network, accepts input, and produces a numeric output. Consider if you must to write a program that predicted how many miles per gallon (MPG) a car could achieve. For the inputs, you would probably want such features as the weight of the car, the horsepower, how large the engine is, and other values. Your program would be a combination of math and if-statements.
#
# Machine learning lets the computer learn the "formula" for calculating the MPG of a car, using data. Consider [this](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/data/auto-mpg.csv) dataset. We can use regression machine learning models to study this data and learn how to predict the MPG for a car.
#
# ### Classification
#
# The output of a classification model is what class the input most closely resembles. For example, consider using four measurements of an iris flower to determine the likely species that the flower. The [iris dataset](https://data.heatonresearch.com/data/t81-558/iris.csv) is a typical dataset for machine learning examples.
#
# ### Beyond Classification and Regression
#
# One of the most potent aspects of neural networks is that a neural network can be both regression and classification at the same time. The output from a neural network could be any number of the following:
#
# * An image
# * A series of numbers that could be interpreted as text, audio, or another time series
# * A regression number
# * A classification class
#
# ## What are Neural Networks
#
# Neural networks are one of the earliest examples of a machine learning model. Neural networks were initially introduced in the 1940s and have risen and fallen several times in popularity. The current generation of deep learning began in 2006 with an improved training algorithm by <NAME>. [[Cite:hinton2006fast]](https://www.mitpressjournals.org/doi/abs/10.1162/neco.2006.18.7.1527) This technique finally allowed neural networks with many layers (deep neural networks) to be efficiently trained. Four researchers have contributed significantly to the development of neural networks. They have consistently pushed neural network research, both through the ups and downs. These four luminaries are shown in Figure 1.LUM.
#
# **Figure 1.LUM: Neural Network Luminaries**
# 
#
# The current luminaries of artificial neural network (ANN) research and ultimately deep learning, in order as appearing in the above picture:
#
# * [<NAME>](http://yann.lecun.com/), Facebook and New York University - Optical character recognition and computer vision using convolutional neural networks (CNN). The founding father of convolutional nets.
# * [<NAME>](http://www.cs.toronto.edu/~hinton/), Google and University of Toronto. Extensive work on neural networks. Creator of deep learning and early adapter/creator of backpropagation for neural networks.
# * [<NAME>](http://www.iro.umontreal.ca/~bengioy/yoshua_en/index.html), University of Montreal. Extensive research into deep learning, neural networks, and machine learning. He has so far remained entirely in academia.
# * [<NAME>](http://www.andrewng.org/), Baidu and Stanford University. Extensive research into deep learning, neural networks, and their application to robotics.
#
# <NAME>, <NAME>, and <NAME> won the [Turing Award](https://www.acm.org/media-center/2019/march/turing-award-2018) for their contributions to deep learning.
#
# ## Why Deep Learning?
#
# For predictive modeling, neural networks are not that different than other models, such as:
#
# * Support Vector Machines
# * Random Forests
# * Gradient Boosted Machines
#
# Like these other models, neural networks can perform both **classification** and **regression**. When applied to relatively low-dimensional predictive modeling tasks, deep neural networks do not necessarily add significant accuracy over other model types. <NAME> describes the advantage of deep neural networks over traditional model types as illustrated by Figure 1.DL-VS.
#
# **Figure 1.DL-VS: Why Deep Learning?**
# 
#
# Neural networks also have two additional significant advantages over other machine learning models:
#
# * **Convolutional Neural Networks** - Can scan an image for patterns within the image.
# * **Recurrent Neural Networks** - Can find patterns across several inputs, not just within a single input.
#
# Neural networks are also very flexible in the types of data that are compatible with the input and output layers. A neural network can take tabular data, images, audio sequences, time series tabular data, and text as its input or output.
# # Python for Deep Learning
#
# Python 3.x is the programming language that will be used for this class. Python, as a programming language, has the widest support for deep learning. The three most popular frameworks for deep learning in Python are:
#
# * [TensorFlow](https://www.tensorflow.org/) (Google)
# * [MXNet](https://github.com/dmlc/mxnet) (Amazon)
# * [CNTK](https://cntk.ai/) (Microsoft)
#
# Some references on popular programming languages for AI/Data Science:
#
# * [Popular Programming Languages for AI](https://en.wikipedia.org/wiki/List_of_programming_languages_for_artificial_intelligence)
# * [Popular Programming Languages for Data Science](http://www.kdnuggets.com/2014/08/four-main-languages-analytics-data-mining-data-science.html)
# # Software Installation
# This class is technically oriented. A successful student needs to be able to compile and execute Python code that makes use of TensorFlow for deep learning. There are two options for you to accomplish this:
#
# * Install Python, TensorFlow and some IDE (Jupyter, TensorFlow, and others)
# * Use Google CoLab in the cloud
#
# Near the top of this document, there are links to videos that describe how to use Google CoLab. There are also videos explaining how to install Python on your local computer. The following sections take you through the process of installing Python on your local computer. This process is essentially the same on Windows, Linux, or Mac. For specific OS instructions, refer to one of the tutorial YouTube videos earlier in this document.
#
# To install Python on your computer complete the following instructions:
#
# * [Installing Python and TensorFlow](./install/tensorflow-install-jul-2020.ipynb)
# # Python Introduction
#
#
# * [Anaconda v3.6](https://www.continuum.io/downloads) Scientific Python Distribution, including: [Scikit-Learn](http://scikit-learn.org/), [Pandas](http://pandas.pydata.org/), and others: csv, json, numpy, scipy
# * [Jupyter Notebooks](http://jupyter.readthedocs.io/en/latest/install.html)
# * [PyCharm IDE](https://www.jetbrains.com/pycharm/)
# * [Cx_Oracle](http://cx-oracle.sourceforge.net/)
# * [MatPlotLib](http://matplotlib.org/)
#
# ## Jupyter Notebooks
#
# Space matters in Python, indent code to define blocks
#
# Jupyter Notebooks Allow Python and Markdown to coexist.
#
# Even LaTeX math:
#
# $ f'(x) = \lim_{h\to0} \frac{f(x+h) - f(x)}{h}. $
#
# ## Python Versions
#
# * If you see `xrange` instead of `range`, you are dealing with Python 2
# * If you see `print x` instead of `print(x)`, you are dealing with Python 2
# * This class uses Python 3.6!
# +
# What version of Python do you have?
import sys

import tensorflow.keras
import pandas as pd
import sklearn as sk
import tensorflow as tf

# Report the versions of the core libraries used throughout the course.
print(f"Tensor Flow Version: {tf.__version__}")
print(f"Keras Version: {tensorflow.keras.__version__}")
print()
print(f"Python {sys.version}")
print(f"Pandas {pd.__version__}")
print(f"Scikit-Learn {sk.__version__}")
# tf.test.is_gpu_available() is deprecated in TF 2.x; list the physical GPU
# devices instead (an empty list means no GPU is visible).
print("GPU is", "available" if tf.config.list_physical_devices('GPU')
      else "NOT AVAILABLE")
# -
# Software used in this class:
#
# * **Python** - The programming language.
# * **TensorFlow** - Googles deep learning framework, must have the version specified above.
# * **Keras** - [Keras](https://github.com/fchollet/keras) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. [[Cite:franccois2017deep]](https://www.manning.com/books/deep-learning-with-python)
# * **Pandas** - Allows for data preprocessing. Tutorial [here](http://pandas.pydata.org/pandas-docs/version/0.18.1/tutorials.html)
# * **Scikit-Learn** - Machine learning framework for Python. Tutorial [here](http://scikit-learn.org/stable/tutorial/basic/tutorial.html).
# # Module 1 Assignment
#
# You can find the first assignment here: [assignment 1](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
| t81_558_class_01_1_overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo MRR-NN
# I will use the MNIST dataset to demonstrate the usage of Lorentzian.py. While the MNIST task isn't the most suitable case for the application of a microring-resonator neural network, it is a well-known, intuitive benchmark task.
import tensorflow as tf
import lorentzian as lz
import numpy as np
from matplotlib import pyplot as plt
# +
# load mnist data and normalise it
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale the 8-bit pixel intensities to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
mnist_input_shape = (28, 28)
# -
# build model using MRRelu layers
model_mrrelu = tf.keras.Sequential()
model_mrrelu.add(tf.keras.layers.Flatten(input_shape=mnist_input_shape))
# the MRRelu should function just like a typical Dense layer with
# restrictions to ensure MRR possible behavior
model_mrrelu.add(lz.MMRelu(10, output_splitter=10))
model_mrrelu.add(lz.MMRelu(10))
model_mrrelu.summary()
# define loss function and compile the model
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model_mrrelu.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
# Train with a 10% validation split.
history = model_mrrelu.fit(
    x_train, y_train, validation_split=0.1, epochs=25, batch_size=32
)
# Plot the training / validation accuracy curves.
fig = plt.figure()
fig.set_size_inches(12, 8)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="upper left")
plt.show()
# Convert the trained MRRelu model into its Lorentzian equivalent and
# fine-tune that model for a few more epochs.
model_lorz = lz.create_lorentz_from_mrrelu(model_mrrelu)
model_lorz.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
model_lorz.summary()
model_lorz.evaluate(x_train, y_train)
history = model_lorz.fit(
    x_train, y_train, validation_split=0.1, epochs=10, batch_size=32
)
# Plot the fine-tuning accuracy curves.
fig = plt.figure()
fig.set_size_inches(12, 8)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="upper left")
plt.show()
# Final held-out evaluation.
model_lorz.evaluate(x_test, y_test)
# ## Determine the necessary ring specifications and write them to file
#
# In this example we want the rings to be centered around 1550nm with a gamma of 60pm. One could also supply a sensitivity for shifting the ring via a microheater. The default sensitivity is 1nm per 5mW supplied to the microheater.\
#
# The printed format is the following:\
# $(n_L , n_{L+1} )$, center_wavelength, new_center_wavelength, gamma, delta_wavelength(shift), heating_power
lz.get_ring_specs(model_lorz, 1550, 60e-12, filename="Demo_MRR-NN.txt")
| Demo_MRR-NN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numberplate Detection using OPenCV
# - Project as a Part of Learning OpenCV and is used to Develop Further
# - Anyone can contribute to this project and use this project as well
# ## Install the Dependencies
# !pip install easyocr
# !pip install imutils
import cv2
from matplotlib import pyplot as plt
import numpy as np
# ## Reading in Image, Grayscale and Blur
# Load the input photo and make a greyscale copy for edge detection / OCR.
img = cv2.imread('image4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.imshow(cv2.cvtColor(gray, cv2.COLOR_BGR2RGB))
plt.show()
# ## Applying filter and find edges for localization
# - Most number plates are printed on white, flat surfaces of the car. So, converting to greyscale and identifying the edges is very helpful for recognising the text on the number plate.
# Bilateral filter reduces noise while keeping edges sharp.
bfilter = cv2.bilateralFilter(gray, 11, 17, 17) #Noise reduction
edged = cv2.Canny(bfilter, 30, 200) #Edge detection
plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))
plt.show()
# ## Finding Contours and Apply Mask
import imutils
# Find contours in the edge map and keep the 10 largest by area; the number
# plate is expected to be among the biggest roughly rectangular shapes.
keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(keypoints)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
# Take the first contour whose polygon approximation is a quadrilateral.
location = None
for contour in contours:
    approx = cv2.approxPolyDP(contour, 10, True)
    if len(approx) == 4:
        location = approx
        break
# ROBUSTNESS FIX: previously a missing quadrilateral left location = None
# and cv2.drawContours crashed with an opaque error; fail fast instead.
if location is None:
    raise ValueError("No quadrilateral (plate-like) contour found in the image")
location
# Mask everything except the detected plate region, then crop the greyscale
# image to the plate's bounding box for OCR.
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [location], 0, 255, -1)
new_image = cv2.bitwise_and(img, img, mask=mask)
plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))
(x, y) = np.where(mask == 255)
(x1, y1) = (np.min(x), np.min(y))
(x2, y2) = (np.max(x), np.max(y))
cropped_image = gray[x1:x2+1, y1:y2+1]
plt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))
plt.show()
# ## Using Easy OCR To Read Text
import easyocr
# OCR restricted to English, applied to the cropped plate image.
reader = easyocr.Reader(['en'])
result = reader.readtext(cropped_image)
result
# ## Rendering the Result
# result entries are (bbox, text, confidence); take the text of the first hit.
text = result[0][-2]  # NOTE(review): IndexError if OCR found nothing -- consider guarding
font = cv2.FONT_HERSHEY_SIMPLEX
# Draw the recognised text near the plate and a green rectangle around it.
res = cv2.putText(img, text=text, org=(approx[0][0][0], approx[1][0][1]+60), fontFace=font, fontScale=1, color=(0,255,0), thickness=2, lineType=cv2.LINE_AA)
res = cv2.rectangle(img, tuple(approx[0][0]), tuple(approx[2][0]), (0,255,0),3)
plt.imshow(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))
plt.show()
# Need to be implemented: recognise multiple number plates, not only one car
# ## Limitations & Further Improvements
# - Doesn't work on Two Car Images in One Image [ Bound Errors ]
# - Have to work More to take realtime-traffic video as input.
# - implementing this concept in Deep Neural Networks gives Best Result. [ Pytorch & Scikit Learn Libraries ]
#
# ### Learning Journey Goes on...
| Number-Plate-Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Introduction
#
# This notebook demonstrates how to use Geometric Median for time dimension reduction. Geomedian computation is quite expensive in terms of memory, data bandwidth and cpu usage. We use Dask to perform data loading and computation in parallel across many threads to speed things up. In this notebook a local Dask cluster is used, but the same approach should work using a larger, distributed Dask cluster.
# %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import xarray as xr
# ## Install missing requirements
#
# ```
# pip install --user --extra-index-url="https://packages.dea.ga.gov.au" hdstats
# pip install --user --extra-index-url="https://packages.dea.ga.gov.au" odc-algo
# ```
#
# Verify install worked by importing the libraries
import hdstats
import odc.algo
# ## Setup local dask cluster
# +
from datacube.utils.rio import configure_s3_access
from datacube.utils.dask import start_local_dask
import os
import dask
from dask.utils import parse_bytes
# configure dashboard link to go over proxy
dask.config.set({"distributed.dashboard.link":
                 os.environ.get('JUPYTERHUB_SERVICE_PREFIX', '/')+"proxy/{port}/status"});
# Figure out how much memory/cpu we really have (those are set by jupyterhub)
cpu_limit = float(os.environ.get('CPU_LIMIT', '0'))
cpu_limit = int(cpu_limit) if cpu_limit > 0 else 4  # fall back to 4 threads when no limit is set
# close previous client if any, so that one can re-run this cell without issues
client = locals().get('client', None)
if client is not None:
    client.close()
    del client
# Single worker with several threads (see notebook intro for the rationale).
client = start_local_dask(n_workers=1,
                          threads_per_worker=cpu_limit,
                          mem_safety_margin='4G')
display(client)
# Configure GDAL for s3 access
configure_s3_access(aws_unsigned=True, # works only when reading public resources
                    client=client);
# -
# ## Setup Datacube and data source
#
# In this notebook we are using `ls8_ard` and will be computing Geomedian for one Landsat scene (96, 74) using all available observations for the year 2016. To limit computation and memory this example uses only three optical bands (red, green, blue) and we limit computation to a 2K by 2K block of pixels roughly in the middle of the scene.
#
# Cell below finds all the datasets of interest. These all should be in the same projection.
# +
from datacube import Datacube
from odc.algo import fmask_to_bool, to_f32, from_float, xr_geomedian
dc = Datacube()
# One Landsat-8 ARD scene (region code 96074), all observations from 2016.
product = 'ls8_ard'
region_code, year = '96074', 2016
dss = dc.find_datasets(product=product,
                       region_code=region_code,
                       time=str(year))
len(dss)
# -
# ## Do native load (lazy version with Dask)
# +
# Three optical bands plus the fmask band used for cloud masking.
data_bands = ['red', 'green', 'blue']
mask_bands = ['fmask']
# Lazy (dask-backed) load in the CRS of the first dataset, grouped by
# solar day and chunked 1000x1000 pixels.
xx = dc.load(product=dss[0].type.name,
             output_crs=dss[0].crs,
             resolution=(-30, 30),
             align=(15, 15),
             measurements=data_bands + mask_bands,
             group_by='solar_day',
             datasets=dss,
             dask_chunks=dict(
                 x=1000,
                 y=1000)
             )
# -
# Select a 2k by 2k subsection, to speed up testing
xx = xx.isel(x=np.s_[4000:6000], y=np.s_[4000:6000])
# ## Compute Geomedian on data_bands
# 1. Convert fmask to boolean: `True` - use, `False` - do not use
# 2. Apply masking in native dtype for data bands only
# 3. Convert to `float32` with scaling
# 4. Reduce time dimension with geometric median
# 5. Convert back to native dtype with scaling
#
# All steps are dask operations, so no actual computation is done until `.compute()` is called.
# +
scale, offset = (1/10_000, 0) # differs per product, aim for 0-1 values in float32
# Pixels classed as valid/snow/water are kept; everything else is masked out.
no_cloud = fmask_to_bool(xx.fmask, ('valid', 'snow', 'water'))
xx_data = xx[data_bands]
# Apply the mask in the native dtype, then scale to float32.
xx_clean = odc.algo.keep_good_only(xx_data, where=no_cloud)
xx_clean = to_f32(xx_clean, scale=scale, offset=offset)
# Reduce the time dimension with the geometric median (still lazy).
yy = xr_geomedian(xx_clean,
                  num_threads=1,  # disable internal threading, dask will run several concurrently
                  eps=0.2*scale,  # 1/5 pixel value resolution
                  nocheck=True)   # disable some checks inside geomedian library that use too much ram
# Convert back to int16 with -999 as the nodata marker.
yy = from_float(yy,
                dtype='int16',
                nodata=-999,
                scale=1/scale,
                offset=-offset/scale)
# -
# ## Now we can run the computation
# %%time
yy = yy.compute()
# ## Convert to RGBA and display
# +
from odc.ui import to_png_data
from IPython.display import Image
rgba = odc.algo.to_rgba(yy, clamp=3000)
Image(data=to_png_data(rgba.data))
# -
# ------------------------------------------------------------------
| notebooks/geomedian-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the sample regression dataset (columns x and y).
df = pd.read_csv ("regrex1.csv")
print (df)
# +
import matplotlib.pyplot as plt
# Scatter plot of the raw data.
x = df.x
y = df.y
plt.scatter(x,y)
plt.title('Regrex Data')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# -
import numpy as np
from sklearn.linear_model import LinearRegression
# sklearn expects 2-D feature/target arrays, hence the reshape to (n, 1).
X = df.x.to_numpy()
X = X.reshape(-1, 1)
y = df.y.to_numpy ()
y = y.reshape(-1,1)
# Fit ordinary least squares and inspect R^2, slope and intercept.
reg = LinearRegression().fit(X, y)
reg.score(X, y)
reg.coef_
reg.intercept_
# Overlay the fitted line on the scatter plot.
y_predict = reg.predict(X)
plt.scatter(x,y)
plt.plot(X,y_predict)
plt.title('Regrex Data')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| Linear Model- Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Load the Superstore sales sample and get a first overview.
df=pd.read_csv("SampleSuperstore.csv")
df
df.info()
df.describe()
# # Data Analysis
sns.set_style("whitegrid")
sns.pairplot(df)
df['Ship Mode'].unique()
# Count of orders per customer segment.
segment=df.Segment.value_counts().reset_index()
segment.columns=("Segment","Count")
segment
# # Visual representation
# Pie chart of the segment distribution (column names resolved via data=).
plt.pie(x="Count",labels="Segment",data=segment,radius=3,autopct="%.2f",pctdistance=0.4)
plt.show()
# Category counts broken down by region.
plt.figure(figsize=(9,5))
plt.title('CATEGORIES VS REGION')
sns.countplot(x=df['Category'],hue=df['Region'],palette='rocket')
plt.xticks()
plt.show()
# # Columns with high sales
# Orders with sales above 3000.
high_sale=df[df["Sales"]>3000]
high_sale.head()
# # States with high sales
plt.figure(figsize=(10,6))
sns.barplot(x=high_sale["Sales"],y=high_sale["State"],data=df)
plt.show()
# # Maximum sales
df.Sales.max()
# # Minimum sales
df.Sales.min()
# # States with low sales
plt.figure(figsize=(9,5))
low_sale=df[df["Sales"]<1]
sns.barplot(y=low_sale["State"],x=low_sale["Sales"],data=low_sale,palette='rocket')
plt.show()
# # Low profit
low_profit=df[df["Profit"]<1]
low_profit.head()
# # Correlation
# Correlation heatmap over the numeric columns of interest.
data=['Sales','Quantity','Discount','Profit']
plt.figure(figsize=(9,5))
sns.heatmap(df[data].corr(),annot=True)
plt.show()
# # Conclusion
# # As per the correlation matrix presented above we can say that
# 1.sales and profit are positively correlated,
# 2.profit and discount are negatively correlated
# # Thank you
| Task 3/Exploratory Data Analysis - Retail.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="http://akhavanpour.ir/notebook/images/srttu.gif" alt="SRTTU" style="width: 150px;"/>
#
# [](https://notebooks.azure.com/import/gh/Alireza-Akhavan/class.vision)
#
# <img src="lecture_images/fully_connected.jpg
# " alt="HTML5 Icon" style="width:600px;">
# <div style="text-align:center">
# <div style="direction:rtl;font-family:tahoma">
# مثال عملی ساده
# ، ساده ترین شبکه عصبی، ورودی در وزن ها ضرب شده و پس از افزودن بایاس به یک تابع فعالیت غیر خطی داده میشوند.
# </div>
# </div>
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with one-hot encoded labels.
# NOTE(review): this is the TensorFlow 1.x API; it does not exist in TF 2.x.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# ### Create an interactive session
sess = tf.Session()
# ### <div style="text-align: right;direction:rtl;font-family:tahoma">ایجاد placeholder ها (ورودی ها)</div>
# <hr>
# __Placeholder 'X':__
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# تصاویر ورودی
# * هر تصویر ورودی اندازه ی 28 در 28 دارد ؛ در مجموع هر تصویر 784 پیکسل دارد
# * آرگومان 'shape' اندازه تنسور را مشخص می کند.
# * بعد اول = None. اندازه ی batch size یا سایز دسته را مشخص می کند. None یعنی هر تعدادی میتواند باشد.
# * بعد دوم = 784. تعداد پیکسل ها در هر تصویر در مجموعه داده ی Mnist را مشخص میکند. تصاویر reshape شده اند.
# <hr>
# __Placeholder 'Y':__
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# خروجی نهایی یا label ها.
# * خروجی 10 کلاسه است (0,1,2,3,4,5,6,7,8,9)
# * آرگومان 'shape' با ابعادش اندازه ی تنسور را مشخص میکند.
# * بعد اول = None. اندازه ی batch size یا سایز دسته را مشخص می کند. None یعنی هر تعدادی میتواند باشد.
# * بعد دوم = 10. تعداد target ها
# <hr>
#
# x: flattened 28x28 input images (784 pixels); None = any batch size.
x = tf.placeholder(tf.float32, shape=[None, 784])
# y_: one-hot ground-truth labels over the 10 digit classes.
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### ایجاد variable ها (متغیرها)
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# برای نگه داری وزن ها و بایاس شبکه، variable تعریف میکنیم.
# <br>
# در اینجا این متغیرها با 0 مقداردهی اولیه شده اند.
# </div>
# Weight tensor (784 inputs x 10 classes), zero-initialised.
W = tf.Variable(tf.zeros([784,10],tf.float32))
# Bias tensor (one per class), zero-initialised.
b = tf.Variable(tf.zeros([10],tf.float32))
# ### Running the initialisation operation
# TensorFlow 1.x variables must be explicitly initialised before use.
sess.run(tf.global_variables_initializer())
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### اضافه کردن وزن ها و بایاس به ورودی
# <img src="lecture_images/mod_ID_2_final.png" alt="HTML5 Icon" style="width:400px;height:350px;">
# <div style="text-align:center">
# <div style="direction:rtl;font-family:tahoma">
#
# تصویر فوق نحوه ی اضافه شدن وزن ها و بایاس به ورودی را نمایش میدهد.
# </div>
# </div>
# Mathematical operation to add weights and biases to the inputs
# (the linear layer producing pre-softmax scores).
tf.matmul(x,W) + b
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### تابع فعالیت softmax
# <div style="text-align: right;direction:rtl;font-family:tahoma">
#
# Softmax یک تابع فعالیت است که به طور معمول در مسائل طبقه بندی استفاده می شود.
# <br>
# این تابع یک سری احتمالات به عنوان خروجی باز میگرداند.
# برای مثال مدل ما هیچ وقت 100٪ مطمئن نخواهد بود که یک رقم عدد نه است.
# این تابع فعالیت احتمالاتی را به عنوان خروجی شبکه اعلام میکند ، هنگامی مدل مناسب است که احتمال مربوط به عدد نه بزرگترین احتمال در بین سایر احتمالات باشد.
# <hr>
# برای مقایسه، در زیر one-hot vector برای حالتی که خروجی عدد 9 باشد آورده شده است:
#
# </div>
#
# + active=""
# 0 --> 0
# 1 --> 0
# 2 --> 0
# 3 --> 0
# 4 --> 0
# 5 --> 0
# 6 --> 0
# 7 --> 0
# 8 --> 0
# 9 --> 1
# -
# <div style="text-align: right;direction:rtl;font-family:tahoma">
#
# هیچ وقت الگوریتم به همچین یقینی دست نخواهد یافت. در عوض میخواهیم بدانیم بالا ترین احتمال مربوط به کدام عدد است. همچنین دومین بالاترین احتمال کدام عدد خواهد بود. و ...
# <hr>
# در زیر یک مثال از یک توزیع فرضی برای رقم 9 آورده شده است:
# </div>
# + active=""
# 0 -->.0.1%
# 1 -->...2%
# 2 -->...3%
# 3 -->...2%
# 4 -->..12%
# 5 -->..10%
# 6 -->..57%
# 7 -->..20%
# 8 -->..55%
# 9 -->..80%
# -
# <img src="lecture_images/fullysoft.png" alt="HTML5 Icon" style="width:560px;">
# <div style="text-align:center">
# <div style="direction:rtl;font-family:tahoma">
# تابع softmax خط به خط اعمل میشود.
# </div>
# </div>
# Logits from the linear layer, then softmax to per-class probabilities.
logits = tf.matmul(x,W) + b
y = tf.nn.softmax(logits)
# <div style="text-align: right;direction:rtl;font-family:tahoma">
#
# این تابع برای به حداقل رساندن تفاوت بین پاسخ درست (label) و خروجی تخمین زده شده توسط شبکه استفاده میشود.
# </div>
# <img src="lecture_images/cross_entropy.png?a=1" alt="HTML5 Icon">
# <div style="text-align:center">
# Cross Entropy
# </div>
# +
# Cross-entropy between one-hot labels y_ and the predicted distribution y.
# NOTE(review): tf.log(y) yields -inf/NaN when a predicted probability reaches 0;
# the commented-out softmax_cross_entropy_with_logits_v2 form is the numerically
# stable alternative (and should take labels=y_ there -- confirm).
cross_entropy = -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])
#cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=y)
cost = tf.reduce_mean(cross_entropy)
# -
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### الگوریتم بهینه سازی: Gradient Descent
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# این بخش مربوط به پیکربندی بهینه ساز شبکه عصبی شما است.
# چندین روش بهینه سازی در تنسورفلو پیاده سازی شده و در دسترس است،
# در اینجا از گرادیان نزولی یا Gradient Descent استفاده خواهیم کرد.
# </div>
# Gradient-descent optimizer with learning rate 0.5, minimising the cost.
'''
optimizer = tf.train.GradientDescentOptimizer(0.5)
train_step = optimizer.minimize(cost)
'''
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cost)
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### آموزش دسته ها(batch ها)
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# در عمل، Batch Gradient Descent (محاسبه ی گرادیان با تمام داده های آموزشی به صورت یکجا) استفاده نمیشود، چرا که معمولا این کار از لحاظ محاسباتی بسیار پرهزینه خواهد بود.
# مزیت این روش محاسبه ی گرادیان واقعی مجموعه داده ی آموزشی است، اما به دلیل محاسبات پیچیده معملا در شبکه های عصبی از minibatch استفاده میشود؛ یعنی در هر بار، تعدادی از مجموعه ی آموزشی و نه تمام داده ها برای محاسبه ی گرادیان استفاده میشوند.
# </div>
# <hr>
# <a href="http://qa.deeplearning.ir/1078/%D9%85%D8%B4%DA%A9%D9%84-%D8%AF%D8%B1-%D8%A7%D8%AC%D8%B1%D8%A7%DB%8C-%D8%AA%D9%86%D8%B3%D9%88%D8%B1%D9%81%D9%84%D9%88-%D8%A8%D8%A7-gpu" target="_blank">http://qa.deeplearning.ir/1078/مشکل-در-اجرای-تنسورفلو-با-gpu</a>
#Load 50 training examples for each training iteration
# 1000 mini-batch SGD steps of 50 examples each.
for _ in range(1000):
    batch = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
# <div style="text-align: right;direction:rtl;font-family:tahoma">
# ### فاز آزمایش یا Test
# Fraction of test images whose arg-max prediction matches the label, as a percentage.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}) * 100
print("The final accuracy for the simple ANN model is: {} % ".format(acc) )
sess.close() #finish the session
# <div class="alert alert-block alert-info">
# <div style="direction:rtl;text-align:right;font-family:B Lotus, B Nazanin, Tahoma"> دانشگاه تربیت دبیر شهید رجایی<br>مباحث ویژه 2 - یادگیری عمیق پیشرفته<br>علیرضا اخوان پور<br>97-98<br>
# </div>
# <a href="https://www.srttu.edu/">SRTTU.edu</a> - <a href="http://class.vision">Class.Vision</a> - <a href="http://AkhavanPour.ir">AkhavanPour.ir</a>
# </div>
| 37-FullyConnectedNetwork-mnist-tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Analyzing COVID-19 Data in Greece
# ## COVID-19 Background
# ### COVID-19 originated in 2019 and has been declared a pandemic. Since then, all nations around the world have been infected with the virus. This notebook will be analyzing COVID-19 data in Greece.
#
# ## Author information
# #### Author: <NAME>, JMU Intelligence Analyst
# ## Data Source
# The data is from [European Centre for Diease Prevention and Control](https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide)
# %matplotlib inline
import pandas
# ## The data
# ### Here is a general overview of the COVID-19 data in my S3 bucket
# Read the ECDC worldwide dataset straight from S3 and keep only Greece.
df = pandas.read_excel('s3://lambo-ia241-bucket/covid_data.xls')
greece_data = df.loc[df['countriesAndTerritories'] == 'Greece']
greece_data[:10] #top 10 rows
# ## The top 3 questions I will be answering today are:
# ### 1. In what month did Greece have the most deaths in 2020?
# ### 2. What month had the most cases of COVID-19 in Greece in 2020?
# ### 3. How does the number of cases in Greece relate to the number of deaths in the country?
# # Question 1: In what month did Greece have the most deaths in 2020?
# Total deaths per calendar month, plotted as a line chart.
sum_deaths_per_month = greece_data.groupby('month').sum()['deaths']
sum_deaths_per_month.plot()
# ### This data shows that Greece faced the most deaths during month 11 in 2020
# # Question 2: What month had the most cases of COVID-19 in Greece in 2020?
#
# Total cases per calendar month.
sum_cases_per_month = greece_data.groupby('month').sum()['cases']
sum_cases_per_month.plot()
# ### As shown in the graph, November of 2020 in Greece had the most confirmed COVID-19 cases.
# # Question 3: How does the number of cases in Greece relate to the number of deaths in the country?
# Scatter of daily cases vs daily deaths.
greece_data.plot.scatter(x = 'cases', y = 'deaths')
# ### This scatter plot shows that when there is an increase of cases, there is an increase in deaths. When the number of cases is lower, there are less deaths in Greece.
# # Explanation of calculations:
# ### Q1: greece_data.groupby('month').sum()['deaths'] added all of the deaths per month and it allows me to be able to figure out which month has the most deaths
# ### Q2: greece_data.groupby('month').sum()['cases'] allows me to add all the cases per month and then shows me which month has the most cases
# ### Q3: greece_data.plot.scatter(x = 'cases', y = 'deaths') simply puts the variables into x and y values on a data plotting chart to show how the data is distributed and helps to find a trend
#
# # Conclusion:
# ## To conclude, COVID-19 has impacted many countries around the world. This was an analysis of Greece's 2020 COVID-19 cases and deaths. The project was only limited to 2020 data, therefore no 2021 and 2022 data were included in this data pool. Next time, I would like to compare two countries' COVID-19 data next to one another to compare and contrast how the pandemic impacted them.
#
| final_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_hhwkhsc"
# # Call stacks and recursion
# In this notebook, we'll take a look at *call stacks*, which will provide an opportunity to apply some of the concepts we've learned about both stacks and recursion.
# + [markdown] graffitiCellId="id_wqbk1ib"
# ### What is a *call stack*?
#
# When we use functions in our code, the computer makes use of a data structure called a **call stack**. As the name suggests, a *call stack* is a type of stack—meaning that it is a *Last-In, First-Out* (LIFO) data structure.
#
# So it's a type of stack—but a stack of *what*, exactly?
#
# Essentially, a *call stack* is a stack of *frames* that are used for the *functions* that we are calling. When we call a function, say `print_integers(5)`, a *frame* is created in memory. All the variables local to the function are created in this memory frame. And as soon as this frame is created, it's pushed onto the call stack.
#
# The frame that lies at the top of the call stack is executed first. And as soon as the function finishes executing, this frame is discarded from the *call stack*.
#
# + [markdown] graffitiCellId="id_7h3929v"
# ### An example
#
# Let's consider the following function, which simply takes two integers and returns their sum
# + graffitiCellId="id_njntjot"
def add(num_one, num_two):
    """Return the sum of the two given numbers."""
    return num_one + num_two

# + graffitiCellId="id_9z6g6sa"
result = add(5, 7)
print(result)
# + [markdown] graffitiCellId="id_5zrgcf5"
# Before understanding what happens when a function is executed, it is important to remind ourselves that whenever an expression such as `product = 5 * 7` is evaluated, the right hand side of the `=` sign is evaluted first. When the right-hand side is completely evaluated, the result is stored in the variable name mentioned in the left-hand side.
#
# When Python executes line 1 in the previous cell (`result = add(5, 7)`), the following things happen in memory:
#
#
# * A frame is created for the `add` function. This frame is then pushed onto the *call stack*. We do not have to worry about this because Python takes care of this for us.
#
#
# * Next, the parameters `num_one` and `num_two` get the values `5` and `7`, respectively
#
# If we run this code in Python tutor website [http://pythontutor.com/](http://pythontutor.com/) , we can get a nice visualization of what's happening "behind the scenes" in memory:
#
# <img src='./stack-frame-resources/01.png'>
#
#
# * Python then moves on to the first line of the function. The first line of the function is
#
# output = num_one + num_two
#
# Here an expression is being evaluated and the result is stored in a new variable. The expression here is sum of two numbers the result of which is stored in the variable `output`. We know that whenever an expression is evaluated, the right-hand side of the `= sign` is evaluated first. So, the numbers `5 and 7` will be added first.
#
#
# * Once the right-hand side is completely evaluated, then the assignment operation happens i.e. now the result of `5 + 7` will be stored in the variable `output`.
# <img src='./stack-frame-resources/02.png'>
#
#
# * In the next line, we are returning this value.
#
# return output
#
# Python acknowledged this return statement.
# <img src='./stack-frame-resources/03.png'>
#
#
# * Now the last line of the function has been executed. Therefore, this function can now be discarded from the stack frame. Also, the right-hand side of the expression `result = add(5, 7)` has finished evaluation. Now, the result of this evaluation will be stored in the variable `result`.
#
# <img src='./stack-frame-resources/04.png'>
#
# + [markdown] graffitiCellId="id_1ic9404"
#
# Now the next question is how does this behave like a stack?
# The answer is pretty simple. We know that a stack is a Last-In First-Out (LIFO) structure, meaning the latest element inserted in the stack is the first to be removed.
#
# You can play more with such "behind-the-scenes" of code execution on the Python tutor website: http://pythontutor.com/
#
# ### Another example
#
# Here's another example. Let's say we have a function `add()` which adds two integers and then prints a custom message for us using the `custom_print()` function.
# + graffitiCellId="id_kiid8nc"
def add(num_one, num_two):
    """Add two numbers, announce the result via custom_print, and return it."""
    total = num_one + num_two
    custom_print(total, num_one, num_two)
    return total

def custom_print(output, num_one, num_two):
    """Print a friendly one-line summary of the addition."""
    print("The sum of {} and {} is: {}".format(num_one, num_two, output))

result = add(5, 7)
# + [markdown] graffitiCellId="id_kln1om1"
# What happens "behind-the-scenes" when `add()` is called, as in `result = add(5, 7)`?
#
# Feel free to play with this on the Python tutor website. Here are a few points which might help aid the understanding.
#
# * We know that when add function is called using `result = add(5, 7)`, a frame is created in the memory for the `add()` function. This frame is then pushed onto the call stack.
#
#
# * Next, the two numbers are added and their result is stored in the variable `output`.
#
#
# * On the next line we have a new function call - `custom_print(output, num_one, num_two)`. It's obvious that a new frame should be created for this function call as well. You must have realized that this new frame is now pushed into the call stack.
#
# * We also know that the function which is at the top of the call stack is the one which Python executes. So, our `custom_print(output, num_one, num_two)` will now be executed.
#
#
# * Python executes this function and as soon as it is finished with execution, the frame for `custom_print(output, num_one, num_two)` is discarded. If you recall, this is the LIFO behavior that we have discussed while studying stacks.
#
#
# * Now, again the frame for `add()` function is at the top. Python resumes operation just after the line where it had left and returns the `output`.
# + [markdown] graffitiCellId="id_39ig6qz"
# ### Call Stack and Recursion
# + [markdown] graffitiCellId="id_09tyobw"
# #### Problem Statement
#
# Consider the following problem:
#
# Given a positive integer `n`, write a function, `print_integers`, that uses recursion to print all numbers from `n` to `1`.
#
# For example, if `n` is `4`, the function should print `4 3 2 1`.
#
# If we use iteration, the solution to the problem is simple. We can simply start at `4` and use a loop to print all numbers till `1`. However, instead of using an iterative approach, our goal is to solve this problem using recursion.
# + graffitiCellId="id_wylq37s"
def print_integers(n):
    """Print the integers n, n-1, ..., 1 using recursion (nothing for n <= 0)."""
    if n >= 1:
        print(n)
        print_integers(n - 1)

# + [markdown] graffitiCellId="id_0usbivt"
# <span class="graffiti-highlight graffiti-id_0usbivt-id_8peifb7"><i></i><button>Show Solution</button></span>
# + graffitiCellId="id_r9kqmpj"
print_integers(5)
# + [markdown] graffitiCellId="id_6k9prla"
# Now let's consider what happens in the call stack when `print_integers(5)` is called.
# + [markdown] graffitiCellId="id_1hnh7cy"
# * As expected, a frame will be created for the `print_integers()` function and pushed onto the call stack.
#
#
# * Next, the parameter `n` gets the value `5`.
#
#
# * Following this, the function starts executing. The base condition is checked. For `n = 5`, the base case is `False`, so we move forward and print the value of `n`.
#
#
# * In the next line, `print_integers()` is called again. This time it is called with the argument `n - 1`. The value of `n` in the current frame is `5`. So this new function call takes place with value `4`. Again, a new frame is created. **Note that for every new call a new frame will be created.** This frame is pushed onto the top of the stack.
#
#
# * Python now starts executing this frame. Again the base case is checked. It's `False` for `n = 4`. Following this, the `n` is printed and then `print_integers()` is called with argument `n - 1 = 3`.
#
#
# * The process keeps on like this until we hit the base case. When `n <= 0`, we return from the frame without calling the function `print_integers()` again. Because we have returned from the function call, the frame is discarded from the call stack and the next frame resumes execution right after the line where we left off.
# + graffitiCellId="id_fbvczco"
| data_structure/Call stack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from dotenv import load_dotenv, find_dotenv
from os.path import join, dirname, basename, exists, isdir
### Load environmental variables from the project root directory ###
# find .env automagically by walking up directories until it's found
dotenv_path = find_dotenv()
# load up the entries as environment variables
load_dotenv(dotenv_path)
# now you can get the variables using their names
# Check whether a network drive has been specified
DATABASE = os.environ.get("NETWORK_URL")
if DATABASE == 'None':
    pass
else:
    pass
    #mount network drive here
# NOTE(review): both branches above are placeholders -- network-drive mounting
# is not implemented yet.
# set up directory paths
CURRENT_DIR = os.getcwd()
PROJ = dirname(dotenv_path) # project root directory
DATA = join(PROJ, 'data') #data directory
RAW_EXTERNAL = join(DATA, 'raw_external') # external data raw directory
RAW_INTERNAL = join(DATA, 'raw_internal') # internal data raw directory
INTERMEDIATE = join(DATA, 'intermediate') # intermediate data directory
FINAL = join(DATA, 'final') # final data directory
RESULTS = join(PROJ, 'results') # output directory
FIGURES = join(RESULTS, 'figures') # figure output directory
PICTURES = join(RESULTS, 'pictures') # picture output directory
# make folders specific for certain data
folder_name = ''
if folder_name != '':
    #make folders if they don't exist
    if not exists(join(RAW_EXTERNAL, folder_name)):
        os.makedirs(join(RAW_EXTERNAL, folder_name))
    if not exists(join(INTERMEDIATE, folder_name)):
        os.makedirs(join(INTERMEDIATE, folder_name))
    if not exists(join(FINAL, folder_name)):
        os.makedirs(join(FINAL, folder_name))
print('Standard variables loaded, you are good to go!')
# +
import pandas as pd
import numpy as np
import os
# Combine every per-condition usage CSV in the folder into one wide dataframe
# (one column block per file, aligned on the shared index column).
folder = os.listdir(f"{FINAL}/abs_usages_gecko")
df = pd.DataFrame()
for file in folder:
    f = pd.read_csv(f"{FINAL}/abs_usages_gecko/{file}", index_col = "Unnamed: 0")
    # NOTE(review): repeated concat is quadratic; collecting frames in a list and
    # concatenating once would scale better -- behaviour left unchanged here.
    df = pd.concat([df, f], axis=1)
df.head()
# -
# # 1. Core proteome
#
# - first we are going to try to find out the common core proteome
# - the common core proteome will be the intersect of all non-zero usage proteins
# - OOOOr the union
#
#
# generate nonzeros
# Proteins with non-zero usage in every condition (intersection) vs. in at
# least one condition (union).
intersect_nonzero_proteins = df.loc[(df!=0).all(axis=1)]
union_nonzero_proteins = df.loc[(df!=0).any(axis=1)]
# analyse nonzeros
# # 2. comparison of the differences in GO-terms
# - we are trying to find the set of GO terms in two ways:
# - first we are going to try to use differential expression results
# - secondly we are going to choose a fold-change value to define a threshold
# - changes in the "functional" proteome can be assumed to show us the significant differences between given conditions, if not we can argue about how changes in transcriptomics don't necessarily lead to changes in proteomics, and proteomics maybe not even on the level of fluxomics (to this end also compare "differentially expressed" proteins to differentially used ones
# Persist the combined usage table for downstream analysis.
df.to_csv(f"{FINAL}/usages/usages_combined_gecko.csv")
| data_science/code/modeling/usage_analysis/.ipynb_checkpoints/comparison-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import pprint
import json

# Load the party roster from CSV, keyed by character name.
# Fixed: the original called csv.DictReader(open('party.csv')) and never closed
# the file handle; a context manager closes it deterministically. newline='' is
# the csv-module convention for reader input.
party = {}
with open('party.csv', newline='') as csv_file:
    for character in csv.DictReader(csv_file):
        party[character['name']] = character
# -
# Reshape each character's flat CSV columns into nested structures.
for name in party:
    # Normalise the numeric level.
    party[name]['level'] = int(party[name]['level'])
    # Fold the flat 'equip N ...' columns into a list of equipment dicts.
    party[name]['equipment'] = []
    # BUG FIX: 'weapons' was never initialised, so the append in the weapons
    # loop below raised on the first character.
    party[name]['weapons'] = []
    for i in range(1,4):
        itm = {}
        for field in ('name', 'quantity', 'weight each', 'cost', 'magic',
                      'materials', 'notes'):
            orig_field = 'equip %d %s' % (i, field)
            itm[field] = party[name].pop(orig_field)
        party[name]['equipment'].append(itm)
        # NOTE(review): i + 1 reads 'magic item 2'..'magic item 4' and skips
        # item 1 -- confirm against the CSV columns whether '% i' was intended.
        party[name]['equipment'].append({
            'name': party[name]['magic item %d' % (i + 1)],
            'bonus': party[name]['magic item %d bonus' % (i + 1)],
            'description': party[name]['magic item %d description' % (i + 1)],
        })
    # Fold the two 'weapon N ...' column groups into weapon dicts.
    for i in range(2):
        party[name]['weapons'].append({
            'name': party[name]['weapon %d name' % (i + 1)],
            'damage': party[name]['weapon %d damage' % (i + 1)],
            'materials': party[name]['weapon %d materials' % (i + 1)].split(', '),
        })
    # Coerce remaining numeric strings to int and drop the now-folded columns.
    for field in list(party[name].keys()):
        try:
            party[name][field] = int(party[name][field])
        except (TypeError, ValueError):
            pass
        if field.startswith('weapon ') or field.startswith('magic item '):
            party[name].pop(field)
pprint.pprint(party)
# Convert the dict-of-characters into a list with Mongo-style _id fields.
p2 = []
for name in party:
    party[name]['_id'] = party[name]['name']
    p2.append(party[name])
p2
# Write the list out as JSON for mongoimport.
with open('party.json', 'w') as outfile:
    json.dump(p2, outfile, indent=2)
# cat party.json
# !mongoimport -d party -c characters --file party.json --jsonArray
# cat show_one.js
# !mongo party show_one.js
# ## Who carries wooden weapons?
import json
import psycopg2
# Load the exported party JSON into a Postgres table, one JSONB value per row.
# NOTE(review): path is 'data/party.json' here but the export above wrote
# 'party.json' — confirm the file was moved, or the paths should match.
conn = psycopg2.connect('dbname=dnd user=dungeonmaster password=<PASSWORD>')
curs = conn.cursor()
with open('data/party.json') as infile:
    for character in json.load(infile):
        # Parameterized insert; each character is re-serialized as a JSON string.
        curs.execute("INSERT INTO party (character) VALUES (%s)", (json.dumps(character),))
conn.commit()
conn.close()
| make_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# Load the kyphosis dataset (post-operative spinal deformity outcomes).
df = pd.read_csv('kyphosis.csv')
df.head()
sns.pairplot(df, hue="Kyphosis")

from sklearn.model_selection import train_test_split
# Features are every column except the target label.
X = df.drop('Kyphosis', axis=1)
y = df["Kyphosis"]
# BUG FIX: the original kept a doctest-style "..." continuation marker pasted
# from the sklearn docs inside the call, which is a syntax error.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
from sklearn.tree import DecisionTreeClassifier
# Fit a single decision tree on the training split.
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
predictions = dtree.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
# Per-class precision/recall/F1, then the raw confusion matrix.
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
# +
from IPython.display import Image
# FIX: sklearn.externals.six was removed from scikit-learn (deprecated in
# 0.21, dropped in 0.23); StringIO lives in the standard library's io module.
from io import StringIO
from sklearn.tree import export_graphviz
import pydot

# Feature names (all columns after the Kyphosis target) for the tree plot.
features = list(df.columns[1:])
features
# -
from sklearn.ensemble import RandomForestClassifier
# Compare against a 100-tree random forest on the same split.
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
print(confusion_matrix(y_test, rfc_pred))
print(classification_report(y_test, rfc_pred))
| D-Tree & Random Forest Algo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JSJeong-me/SEMICON-BigData/blob/main/statistics-intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GvteZn5wsGjK"
# Load the MLB player salary data
# + id="8z-i0vHCVAQn"
# !nvcc --version
# + id="OJ-P9zFFVG0G"
# !nvcc --help
# + id="dY1FluEgr9h1"
import pandas as pd
# Salaries table from the Lahman MLB database (1871-2018).
df = pd.read_csv('http://wolfpack.hnu.ac.kr/Big_Data/data/MLB1871_2018/Salaries.csv')
# + id="aEzbI6detECq"
df.shape
# + [markdown] id="1XeuZ2YwKVdn"
# Drop rows with missing values
# + id="wY01PUqK8qWB"
df.dropna(inplace=True)
df.shape
# + [markdown] id="wxDJ3UzoKH3g"
# *Player salary* 2018 subset
# + id="2X1uG8WRu-O2"
# NOTE(review): the variable is named df18 and every plot title says 2018,
# but the filter selects yearID == 2016 — confirm which season was intended.
df18 = df[(df['yearID'] == 2016) & (df['salary'] > 0)]  # keep only players with salary > 0
df18.head(3)
# + id="wN0dCP4BOCpB"
import plotly.express as px
# Interactive scatter of salary per player, colored by league.
fig = px.scatter(df18, x="salary", y="playerID", color="lgID", title="Player Salary with lgID")
fig.show()
# + id="vNw6oseLly6_"
# + id="X8SghvkAvVBZ"
import seaborn as sns
import matplotlib.pyplot as plt
sns.kdeplot(df18.salary).set_title('Kernel of 2018 Salary')
plt.show()
# + id="woxJUd8Fwtek"
# NOTE(review): sns.distplot is removed in seaborn >= 0.14; use histplot/displot.
sns.distplot(df18.salary).set_title('Histogram of 2018 Salary')
plt.show()
# + id="9cxvz7gbxkKY"
sns.boxplot(x="salary", data=df18).set_title('Boxplot of 2018 Salary')
plt.show()
# + [markdown] id="YkusLAYX7Y5y"
# ## Exercise
#
#
# * Draw the box plots of the 2018 and 2011 player salaries and interpret them visually.
#
#
#
#
# + id="F4atePdaxEKE"
# + [markdown] id="PhlzDc_LKeZ3"
# ## Applying the central limit theorem
# + [markdown] id="dejtXRwbLNbB"
# Build a sample of size n=50
# + id="eZopD6W-LVaI"
# NOTE(review): the trailing comment said seed=109 but random_state is 123 —
# the comment, not the code, appears stale.
df_sample = df18['salary'].sample(n=50, random_state=123)  # seed for reproducibility
# + id="Cl43sslHMZC3"
sns.boxplot(df_sample).set_title('Boxplot of 2018 Salary : train=sample')
plt.show()
# + [markdown] id="FSnLUrzLYqq5"
# Draw 100 random samples of size 50 and keep each sample mean
# + id="fVpmwJUcTHSC"
xbar = []
for k in range(0, 100):
    xbar.append(df18['salary'].sample(n=50).mean())
# + id="H7I1uDKkVD5H"
sns.boxplot(xbar).set_title('Boxplot of 2018 Salary : sample-mean')
plt.show()
# + [markdown] id="TXpEqRrTZDEH"
# ## Population inference
# Are 2016 MLB player salaries above $400K on average? (heading originally
# said "2086", an obvious typo)
# + id="ORd4a5SRQxeY"
pd.options.display.float_format = '{:.2f}'.format
print('Population mean %.1f' % df18['salary'].mean())  # population salary mean
# + [markdown] id="SL-soFGxZjpi"
# Estimation
# + id="lQf4hzzsZjFX"
# Sample mean as point estimator of the population mean.
print('Point estimaor %.1f' % df_sample.mean())
# + id="wF7UjbSAa1W2"
import scipy.stats as st
import numpy as np
# 95% t-based confidence interval: mean +/- t(0.975, n-1) * s / sqrt(n).
LB = df_sample.mean() - st.t.ppf(0.975, df_sample.shape[0] - 1) * df_sample.std() / np.sqrt(df_sample.shape[0])
UB = df_sample.mean() + st.t.ppf(0.975, df_sample.shape[0] - 1) * df_sample.std() / np.sqrt(df_sample.shape[0])
print('95%' + ' confidence interval (%.1f , %.1f)' % (LB, UB))
# + [markdown] id="myidGZmLZmZQ"
# Hypothesis test
#
#
# > H0 : mu=40    H1: mu!=40
#
#
# + id="tnlBx61kZXf7"
from scipy import stats
# One-sample t-test of the sample mean against 40.
# NOTE(review): salaries are in dollars, so popmean=40 looks like it should
# be 400000 ($400K) to match the question above — confirm the units.
stats.ttest_1samp(df_sample, 40)
# + id="hQ9vqREullRs"
| statistics-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.4 64-bit
# name: python3
# ---
# # Import Package
import csv
# NOTE(review): MSG_MCAST is never used — this import looks like an IDE
# auto-complete accident and can be removed.
from socket import MSG_MCAST
from urllib.parse import quote, quote_plus
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
# # Date range selection
strDate = '20200901'  # start date (YYYYMMDD)
endDate = '20200930'  # end date (YYYYMMDD)
# # CSV output
filename = f"daum_news_{strDate}-{endDate}.csv"
# NOTE(review): this handle is never closed in the notebook — call f.close()
# (or use a with-block) after scraping so the last rows are flushed.
f = open(filename, "w", encoding="utf-8-sig", newline="")
writer = csv.writer(f)
# # User agent
# > [WhatismyUserAgent(ctrl+click to check)](https://www.whatismybrowser.com/detect/what-is-my-user-agent)
headers = {"User-Agent" : "개인 유저 에이전트"}
# # Dates to iterate over in the scraping loop
dt_index = pd.date_range(start=strDate, end=endDate)
dt_list = dt_index.strftime("%Y%m%d").tolist()
# # Start scraping
for i in dt_list:
    print('날짜', i)
    # BUG FIX: the query strings had been mangled by an HTML-entity round
    # trip ("&reg" -> "®"), so the regDate parameter was silently dropped
    # and every request returned the default (today's) news list.
    page_url = f"https://news.daum.net/breakingnews/?page=10000&regDate={i}"
    page = requests.get(page_url, headers=headers)
    page_soup = BeautifulSoup(page.text, "lxml")
    # Daum clamps an out-of-range page number to the last page, which lets
    # us read the total page count for the day.
    last_page = page_soup.find("em", attrs="num_page").get_text()
    lastPage_num = re.sub(r'[^0-9]', '', last_page)
    # print(lastPage_num)
    for j in range(1, int(lastPage_num) + 1):
        main_url = f"https://news.daum.net/breakingnews/?page={j}&regDate={i}"  # list URL (same entity fix)
        res = requests.get(main_url, headers=headers)
        if res.status_code == 200:
            print(i, int(lastPage_num), '중', j, 'page', round(j / int(lastPage_num) * 100, 2), '%', main_url, 'status:', res.status_code)
            soup = BeautifulSoup(res.text, "lxml")
            main = soup.find("ul", attrs={"class": "list_news2 list_allnews"})
            news = main.find_all("strong", attrs={"class": "tit_thumb"})
            cnt = 0
            for new in news:
                urls = new.select_one("a")["href"]  # article URL from the list page
                # print(urls)
                result = requests.get(urls, headers=headers)  # fetch the individual article
                if result.status_code == 200:
                    news_soup = BeautifulSoup(result.text, "lxml")
                    # Extract title, publish time and body text.
                    title = news_soup.find("h3", attrs={"tit_view"}).get_text().strip()
                    pubdate = news_soup.find("span", attrs={"num_date"}).get_text().strip()
                    text = news_soup.find("div", attrs={"news_view"}).get_text().strip()
                    cnt += 1
                    # print(j,'of',cnt,'번째 기사')
                    # print(i,j,'of',cnt,'번째 기사', urls,'status:', result.status_code)
                    writer.writerow([cnt, title, pubdate, urls, text])
                else:
                    print(i, j, 'of', cnt, '번째 기사', 'error_code :', result.status_code, urls)
                    pass
        else:
            print(i, 'page : ', j, 'error_code :', res.status_code, main_url)
            pass
# FIX: flush and close the CSV opened above so no buffered rows are lost.
f.close()
| scraping/daumNews_scrap_baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Derivatives of the manipulator
# First, we come back to the manipulator robot, with a nice Euclidean configuration space.
# +
from robots import loadTalosArm
from scipy.optimize import fmin_slsqp
import pinocchio
from pinocchio.utils import *
from numpy.linalg import norm, inv, pinv, eig, svd
# Converters between numpy matrices (pinocchio's type) and flat numpy arrays
# (what scipy's optimizers expect).
m2a = lambda m: np.array(m.flat)
a2m = lambda a: np.matrix(a).T
robot = loadTalosArm()
# Display is initialized, the viewer's 'world' node cleared, then
# re-initialized so the model is reloaded into a clean scene.
robot.initDisplay(loadModel=True)
robot.viewer.gui.deleteNode('world', True)
robot.initDisplay(loadModel=True)
# -
# The objective is to set up the derivatives of the problem defined in arm3dconstraint.py.
#
# ## Derivative of the cost
# Here nothing special to do: we have taken the sum of square, then the gradient of the cost is simply the residuals.
#
# ## Checking with finite differencing
# A rule of thumb is to always first implement the finite-diff of your problem, because it should give you a good idea of whether the problem is nicely set up and has a chance to work, but also because you will need your finite diff to check the derivatives.
# +
# # %load dcost.py
refQ = robot.q0

def cost(q):
    """Half sum-of-squares distance between q and the reference posture."""
    residuals = m2a(q - refQ)
    return .5 * sum(residuals ** 2)

def dCost(q):
    """Gradient of cost(): for a sum of squares it is simply the residuals."""
    return m2a(q - refQ)

def numdiffCost(q, h=1e-6):
    """Forward finite-difference gradient of cost() w.r.t. each joint."""
    f0 = cost(q)
    nq, nf = len(q), 1
    dq = zero(nq)
    df_dq = zero([nf, nq])
    for i in range(nq):
        dq[i] = h
        df_dq[:, i] = (cost(q + dq) - f0) / h
        dq[i] = 0
    return df_dq

# Sanity check: analytic gradient matches the finite-difference one.
q = rand(robot.model.nq)
norm(dCost(q) - numdiffCost(q))
# -
# ## Derivative of the log residual
# The residual is a composition of two functions: log and M.
# $residual(q) = log(M(q))$
#
# The derivative of the first function is implemented in pinocchio as pinocchio.Jlog.
#
# The derivative of the second function is the Jacobian of the corresponding frame, computed locally (i.e. the velocity nu resulting from the Jacobian are expressed in the local frame at the center of the local frame). To get the frame jacobian, it is necessary to first precompute the joint jacobians, then update the frame placement, before getting the correct frame jacobian.
#
# +
LOCAL = pinocchio.ReferenceFrame.LOCAL
WORLD = pinocchio.ReferenceFrame.WORLD
# Required order: kinematics -> joint jacobians -> frame placements,
# then the frame jacobian can be queried.
pinocchio.forwardKinematics(robot.model, robot.data, q)
pinocchio.computeJointJacobians(robot.model, robot.data, q)
pinocchio.updateFramePlacements(robot.model, robot.data)
# NOTE(review): frame id 26 is hard-coded here — presumably the gripper
# fingertip frame used below; confirm against robot.model.frames.
pinocchio.getFrameJacobian(robot.model, robot.data, 26, LOCAL)
# -
# ## Derivative in an optimization program
# Here is the final optimization program with derivatives of the cost and contraint.
# +
# # %load arm3dconstraint_diff.py
from robots import loadTalosArm
from scipy.optimize import fmin_slsqp
import pinocchio
from pinocchio.utils import *
from numpy.linalg import norm, inv, pinv, eig, svd
# matrix <-> flat-array converters (scipy optimizers work on flat arrays).
m2a = lambda m: np.array(m.flat)
a2m = lambda a: np.matrix(a).T
LOCAL = pinocchio.ReferenceFrame.LOCAL
WORLD = pinocchio.ReferenceFrame.WORLD
robot = loadTalosArm()
robot.initDisplay(loadModel=True)
class OptimProblem:
    """Inverse-kinematics problem for fmin_slsqp.

    Minimizes the squared distance to a reference posture subject to the
    end effector reaching a fixed 6D target pose (equality constraint on
    the SE3 log of the placement error).
    """
    def __init__(self, rmodel, gview=None):
        self.rmodel = rmodel
        self.rdata = rmodel.createData()
        # 6D target pose for the end effector.
        self.refEff = pinocchio.SE3(rotate('y', np.pi / 4),            # Target orientation
                                    np.matrix([-.3, 0.5, 0.2]).T)      # Target position
        self.idEff = rmodel.getFrameId('gripper_left_fingertip_2_link')
        # Reference posture used by the cost.
        self.refQ = rmodel.neutralConfiguration
        self.initDisplay(gview)

    def cost(self, x):
        """Half sum-of-squares distance of x to the reference posture."""
        q = a2m(x)
        self.residuals = m2a(q - self.refQ)
        return .5 * sum(self.residuals ** 2)

    def dCost_dx(self, x):
        """Gradient of cost(): the residual vector itself."""
        q = a2m(x)
        dq = m2a(q - self.refQ)
        return dq

    def constraint(self, x):
        """6D placement error (SE3 log) between the target and the effector."""
        q = a2m(x)
        pinocchio.forwardKinematics(self.rmodel, self.rdata, q)
        pinocchio.updateFramePlacements(self.rmodel, self.rdata)
        refMeff = self.refEff.inverse() * self.rdata.oMf[self.idEff]
        self.eq = m2a(pinocchio.log(refMeff).vector)
        return self.eq.tolist()

    def dConstraint_dx(self, x):
        """Jacobian of the constraint: Jlog6 composed with the frame Jacobian."""
        q = a2m(x)
        pinocchio.forwardKinematics(self.rmodel, self.rdata, q)
        pinocchio.computeJointJacobians(self.rmodel, self.rdata, q)
        pinocchio.updateFramePlacements(self.rmodel, self.rdata)
        refMeff = self.refEff.inverse() * self.rdata.oMf[self.idEff]
        log_M = pinocchio.Jlog6(refMeff)
        M_q = pinocchio.getFrameJacobian(self.rmodel, self.rdata, self.idEff, LOCAL)
        self.Jeq = log_M * M_q
        return self.Jeq

    @property
    def bounds(self):
        # Loose joint bounds; the commented version would use the model's
        # actual position limits instead.
        # return [ (10*l,u) for l,u in zip(self.rmodel.lowerPositionLimit.flat,
        #                                  self.rmodel.upperPositionLimit.flat) ]
        return [(-10., 10) for i in range(self.rmodel.nq)]

    def initDisplay(self, gview=None):
        """Show the 6D target as a red box in the gepetto viewer (if any)."""
        self.gview = gview
        if gview is None: return
        self.gobj = "world/target6d"
        self.gview.addBox(self.gobj, .1, 0.05, 0.025, [1, 0, 0, 1])
        self.gview.applyConfiguration(self.gobj, se3ToXYZQUAT(self.refEff))
        self.gview.refresh()

    def callback(self, x):
        """Per-iteration solver callback: animate the current candidate."""
        import time
        q = a2m(x)
        robot.display(q)
        time.sleep(1e-2)
# Start from the neutral posture and build the problem with the viewer attached.
robot.q0 = robot.model.neutralConfiguration
pbm = OptimProblem(robot.model, robot.viewer.gui)
# --- NUMDIFF CHECK ------------------------------------
def numdiff(f, x, h=1e-6):
    """Forward finite-difference Jacobian of f at x, built column by column."""
    base = f(x)
    n_in, n_out = len(x), len(base)
    jac = np.zeros([n_out, n_in])
    step = np.zeros(n_in)
    for col in range(n_in):
        step[col] = h
        jac[:, col] = (f(x + step) - base) / h
        step[col] = 0
    return jac
# Random configuration in [-1, 1] to test the derivatives at.
x = np.random.rand(robot.model.nq) * 2 - 1

def costResiduals(x):
    # cost() stores its residual vector on the problem; return it.
    pbm.cost(x)
    return pbm.residuals

# Gradient check: J^T r must match dCost; constraint Jacobian vs finite diff.
assert(norm(pbm.dCost_dx(x) - np.dot(numdiff(costResiduals, x).T, costResiduals(x))) < 1e-6)
assert(norm(pbm.dConstraint_dx(x) - numdiff(lambda x: np.array(pbm.constraint(x)), x)) < 1e-6)
# --- NUMDIFF CHECK ------------------------------------

# Solve the constrained IK with SLSQP, supplying analytic derivatives.
#x0 = np.random.rand(robot.model.nq)
x0 = np.array([.7, .9, .8, .5, .9, .7, .1])
result = fmin_slsqp(x0=x0,
                    func=pbm.cost,
                    fprime=pbm.dCost_dx,
                    f_eqcons=pbm.constraint,
                    fprime_eqcons=pbm.dConstraint_dx,
                    bounds=pbm.bounds,
                    callback=pbm.callback)
qopt = a2m(result)
# -
# # Derivatives in T_q Q
# If you want to go further, you can start investigate the derivatives in a configuration manifold (nonEuclidean) with this example.
# +
# #%load bip6d_diff.py
| 2. Derivative and differential kinematics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 3장 데이터 랭글링
# 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/machine-learning-with-python-cookbook/blob/master/03.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rickiepark/machine-learning-with-python-cookbook/blob/master/03.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# </table>
# ## 3.0 Introduction
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data into a DataFrame.
dataframe = pd.read_csv(url)
# Show the first five rows.
dataframe.head(5)
# -
# ## 3.1 Creating a DataFrame
# +
# Import the library.
import pandas as pd
# Create an empty DataFrame.
dataframe = pd.DataFrame()
# Add columns.
dataframe['Name'] = ['<NAME>', '<NAME>']
dataframe['Age'] = [38, 25]
dataframe['Driver'] = [True, False]
# Inspect the DataFrame.
dataframe
# +
# Create a row (as a Series).
new_person = pd.Series(['<NAME>', 40, True], index=['Name','Age','Driver'])
# Append the row.
# NOTE(review): DataFrame.append was removed in pandas 2.0 — on modern
# pandas use pd.concat instead.
dataframe.append(new_person, ignore_index=True)
# -
# ### Appendix
# +
import numpy as np
data = [ ['<NAME>', 38, True], ['<NAME>', 25, False] ]
# A DataFrame from a numpy matrix (dtype becomes object: mixed values).
matrix = np.array(data)
pd.DataFrame(matrix, columns=['Name', 'Age', 'Driver'])
# -
# A DataFrame directly from the list of lists.
pd.DataFrame(data, columns=['Name', 'Age', 'Driver'])
# A DataFrame from a dict of columns.
data = {'Name': ['<NAME>', '<NAME>'],
        'Age': [38, 25],
        'Driver': [True, False]}
pd.DataFrame(data)
# A DataFrame from a list of row dicts, with explicit index labels.
data = [ {'Name': '<NAME>', 'Age': 38, 'Driver': True},
         {'Name': '<NAME>', 'Age': 25, 'Driver': False} ]
pd.DataFrame(data, index=['row1', 'row2'])
# ## 3.2 Describing the data
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Show two rows.
dataframe.head(2)
# -
# Check the dimensions.
dataframe.shape
# Summary statistics.
dataframe.describe()
# ## 3.3 Navigating DataFrames
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Select the first row.
dataframe.iloc[0]
# -
# Select three rows (positions 1-3).
dataframe.iloc[1:4]
# Select four rows (positions 0-3).
dataframe.iloc[:4]
# +
# Set the Name column as the index.
dataframe = dataframe.set_index(dataframe['Name'])
# Look up a row by label.
dataframe.loc['Allen, <NAME>']
# -
# ### Appendix
# Select only the Age through Sex columns, up to the given label.
dataframe.loc[:'Allison, <NAME>', 'Age':'Sex']
# Equivalent to dataframe[:2] (label-based row slice).
dataframe[:'Allison, <NAME>']
dataframe[['Age', 'Sex']].head(2)
# ## 3.4 Selecting rows by condition
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Show the first two rows where the 'Sex' column is 'female'.
dataframe[dataframe['Sex'] == 'female'].head(2)
# -
# Filter rows with multiple conditions.
dataframe[(dataframe['Sex'] == 'female') & (dataframe['Age'] >= 65)]
# ## 3.5 Replacing values
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Replace a value and show two rows.
dataframe['Sex'].replace("female", "Woman").head(2)
# -
# Replace "female" and "male" with "Woman" and "Man".
dataframe['Sex'].replace(["female", "male"], ["Woman", "Man"]).head(5)
# Replace across the whole DataFrame and show two rows.
dataframe.replace(1, "One").head(2)
# Regex-based replacement across the whole DataFrame.
dataframe.replace(r"1st", "First", regex=True).head(2)
# ### Appendix
# Replace both female and male with person.
dataframe.replace(["female", "male"], "person").head(3)
# Map female to 1 and male to 0 via a dict.
dataframe.replace({"female": 1, "male": 0}).head(3)
# ## 3.6 Renaming columns
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Rename a column and show two rows.
dataframe.rename(columns={'PClass': 'Passenger Class'}).head(2)
# -
# Rename several columns at once.
dataframe.rename(columns={'PClass': 'Passenger Class', 'Sex': 'Gender'}).head(2)
# +
# Import the library.
import collections
# Build a dict whose keys are all column names (values default to '').
column_names = collections.defaultdict(str)
# Create the keys.
for name in dataframe.columns:
    column_names[name]
# Show the dict.
column_names
# -
# ### Appendix
# Rename index 0 to -1.
dataframe.rename(index={0:-1}).head(2)
# Lower-case all column names via a callable.
dataframe.rename(str.lower, axis='columns').head(2)
# ## 3.7 Min, max, sum, mean and counts
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Compute summary statistics on one column.
print('최댓값:', dataframe['Age'].max())
print('최솟값:', dataframe['Age'].min())
print('평균:', dataframe['Age'].mean())
print('합:', dataframe['Age'].sum())
print('카운트:', dataframe['Age'].count())
# -
# Per-column non-null counts.
dataframe.count()
# ### Appendix
# Covariance of the numeric columns.
# NOTE(review): on pandas >= 2.0 cov()/corr() require numeric_only=True
# when non-numeric columns are present.
dataframe.cov()
# Correlation of the numeric columns.
dataframe.corr()
# ## 3.8 Finding unique values
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Distinct values of a column.
dataframe['Sex'].unique()
# -
# Per-value counts.
dataframe['Sex'].value_counts()
# Per-value counts.
dataframe['PClass'].value_counts()
# Number of distinct values.
dataframe['PClass'].nunique()
# ### Appendix
dataframe.nunique()
# ## 3.9 Handling missing values
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
## Select rows with a missing Age and show two of them.
dataframe[dataframe['Age'].isnull()].head(2)
# -
# Attempt to replace values with NaN.
# NOTE: this line raises NameError on purpose — NaN is undefined until
# numpy is imported; the next cell shows the working np.nan version.
dataframe['Sex'] = dataframe['Sex'].replace('male', NaN)
# +
# Import the library.
import numpy as np
# Replace values with NaN (now valid).
dataframe['Sex'] = dataframe['Sex'].replace('male', np.nan)
# -
# Load the data while declaring extra missing-value markers.
dataframe = pd.read_csv(url, na_values=[np.nan, 'NONE', -999])
# ### Appendix
# Treat only 'female' as missing, ignoring the default NA markers.
dataframe = pd.read_csv(url, na_values=['female'],
                        keep_default_na=False)
dataframe[12:14]
# Disable NA detection entirely.
dataframe = pd.read_csv(url, na_filter=False)
dataframe[12:14]
# ## 3.10 Deleting columns
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Drop a column and show two rows.
dataframe.drop('Age', axis=1).head(2)
# -
# Drop several columns.
dataframe.drop(['Age', 'Sex'], axis=1).head(2)
# Drop the PClass column by position.
dataframe.drop(dataframe.columns[1], axis=1).head(2)
dataframe.head(2)
# ## 3.11 Deleting rows
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Drop rows via a boolean filter and show two rows.
dataframe[dataframe['Sex'] != 'male'].head(2)
# -
# Drop a single row matched by name.
dataframe[dataframe['Name'] != 'Allison, <NAME>'].head(2)
# Drop a single row by index.
dataframe[dataframe.index != 0].head(2)
# ## 3.12 Dropping duplicate rows
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Drop exact duplicate rows and show two rows.
dataframe.drop_duplicates().head(2)
# -
# Compare the row counts.
print("원본 데이터프레임 행의 수:", len(dataframe))
print("중복 삭제 후 행의 수:", len(dataframe.drop_duplicates()))
# Keep only the first row per Sex value.
dataframe.drop_duplicates(subset=['Sex'])
# Keep only the last row per Sex value.
dataframe.drop_duplicates(subset=['Sex'], keep='last')
# ## 3.13 Grouping rows by value
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Group rows by 'Sex' and take the mean.
# NOTE(review): on pandas >= 2.0 this needs numeric_only=True because
# non-numeric columns are present.
dataframe.groupby('Sex').mean()
# -
# A bare GroupBy object (nothing computed yet).
dataframe.groupby('Sex')
# Group and count.
dataframe.groupby('Survived')['Name'].count()
# Group by two keys, then average.
dataframe.groupby(['Sex','Survived'])['Age'].mean()
# ## 3.14 Grouping rows by time
# +
# Import the libraries.
import pandas as pd
import numpy as np
# Build a date range: 100,000 timestamps, 30 seconds apart.
time_index = pd.date_range('06/06/2017', periods=100000, freq='30S')
# DataFrame indexed by those timestamps.
dataframe = pd.DataFrame(index=time_index)
# Random sale amounts.
dataframe['Sale_Amount'] = np.random.randint(1, 10, 100000)
# Resample to weekly bins and sum.
dataframe.resample('W').sum()
# -
# Show three rows.
dataframe.head(3)
# Two-week bins, averaged.
dataframe.resample('2W').mean()
# Monthly bins, counted.
dataframe.resample('M').count()
# Monthly bins labeled by the left bin edge.
dataframe.resample('M', label='left').count()
# ### Appendix
# Month-start frequency.
dataframe.resample('MS').count()
# ## 3.15 Iterating over a column
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Print the first two names upper-cased.
for name in dataframe['Name'][0:2]:
    print(name.upper())
# -
# Same with a list comprehension.
[name.upper() for name in dataframe['Name'][0:2]]
# ## 3.16 Applying a function to every element
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Define the function.
def uppercase(x):
    # Upper-case one string element.
    return x.upper()
# Apply it and show two rows.
dataframe['Name'].apply(uppercase)[0:2]
# -
# ### Appendix
# Map Survived's 1 to Live and 0 to Dead.
dataframe['Survived'].map({1:'Live', 0:'Dead'})[:5]
# Extra keyword arguments (age) can be forwarded through apply.
dataframe['Age'].apply(lambda x, age: x < age, age=30)[:5]
# Largest value of each column.
dataframe.apply(lambda x: max(x))
# +
def truncate_string(x):
    # Truncate string values to 20 characters; pass other types through.
    if type(x) == str:
        return x[:20]
    return x

# Cap every string cell at 20 characters.
# NOTE(review): DataFrame.applymap was renamed to DataFrame.map in pandas 2.1.
dataframe.applymap(truncate_string)[:5]
# -
# ## 3.17 Applying a function to groups
# +
# Import the library.
import pandas as pd
# Data URL.
url = 'https://bit.ly/titanic-csv-data'
# Load the data.
dataframe = pd.read_csv(url)
# Group rows, then apply a function to each group.
dataframe.groupby('Sex').apply(lambda x: x.count())
# -
# ## 3.18 Concatenating DataFrames
# +
# Import the library.
import pandas as pd
# First DataFrame.
data_a = {'id': ['1', '2', '3'],
          'first': ['Alex', 'Amy', 'Allen'],
          'last': ['Anderson', 'Ackerman', 'Ali']}
dataframe_a = pd.DataFrame(data_a, columns = ['id', 'first', 'last'])
# Second DataFrame.
data_b = {'id': ['4', '5', '6'],
          'first': ['Billy', 'Brian', 'Bran'],
          'last': ['Bonder', 'Black', 'Balwner']}
dataframe_b = pd.DataFrame(data_b, columns = ['id', 'first', 'last'])
# Concatenate along the row axis.
pd.concat([dataframe_a, dataframe_b], axis=0)
# -
# Concatenate along the column axis.
pd.concat([dataframe_a, dataframe_b], axis=1)
# +
# Build a row.
row = pd.Series([10, 'Chris', 'Chillon'], index=['id', 'first', 'last'])
# Append the row.
# NOTE(review): DataFrame.append was removed in pandas 2.0 — use pd.concat.
dataframe_a.append(row, ignore_index=True)
# -
# ## 3.19 Merging DataFrames
# +
# Import the library.
import pandas as pd
# Employees DataFrame.
employee_data = {'employee_id': ['1', '2', '3', '4'],
                 'name': ['<NAME>', '<NAME>', '<NAME>',
                          '<NAME>']}
dataframe_employees = pd.DataFrame(employee_data, columns = ['employee_id',
                                                             'name'])
# Sales DataFrame.
sales_data = {'employee_id': ['3', '4', '5', '6'],
              'total_sales': [23456, 2512, 2345, 1455]}
dataframe_sales = pd.DataFrame(sales_data, columns = ['employee_id',
                                                      'total_sales'])
# Inner join (default) on employee_id.
pd.merge(dataframe_employees, dataframe_sales, on='employee_id')
# -
# Outer join: keep all keys from both sides.
pd.merge(dataframe_employees, dataframe_sales, on='employee_id', how='outer')
# Left join: keep all employees.
pd.merge(dataframe_employees, dataframe_sales, on='employee_id', how='left')
# Explicit left/right key columns.
pd.merge(dataframe_employees,
         dataframe_sales,
         left_on='employee_id',
         right_on='employee_id')
| 03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="3nXS4RUQQugV"
# # Tutorial Part-of-Speech tagging Con Deep Learning (mb-02)
#
# ### En este tutorial, veremos cómo se puede mejorar el modelo base, usando la tecnica de Dropout en la capa oculta
# + [markdown] colab_type="text" id="WIheRrq2Quga"
# ## PARTE 1 - Pre-Procesamiento
# ### Cargamos los Datos de pre-procesamiento de modelo base mb-00
# + colab={} colab_type="code" id="lw10qukzQuge"
# Ensure reproducibility of the numpy-based sampling below.
import numpy as np
CUSTOM_SEED = 42
np.random.seed(CUSTOM_SEED)
# -
def to_categoricals(sequences, categories):
    """One-hot encode nested label sequences.

    Each integer label in each inner sequence becomes a float vector of
    length `categories` with a 1.0 at the label's position.
    """
    encoded = []
    for seq in sequences:
        one_hot_seq = []
        for label in seq:
            vec = np.zeros(categories)
            vec[label] = 1.0
            one_hot_seq.append(vec)
        encoded.append(one_hot_seq)
    return np.array(encoded)
# +
import pickle

MAX_LENGTH = 149

# Reload the tag list, vocabulary/tag lookups and the padded index sequences
# produced by the base-model (mb-00) preprocessing notebook.
test_tags = []
with open("../vectors/test_tags.txt", "rb") as fp:
    test_tags = pickle.load(fp)
# FIX: NumPy >= 1.16.3 defaults np.load(allow_pickle=False); loading these
# object (dict) arrays then raises ValueError, so pickling must be enabled.
word2index = np.load('../vectors/word2index.npy', allow_pickle=True).item()
tag2index = np.load('../vectors/tag2index.npy', allow_pickle=True).item()
train_sentences_X = np.load('../vectors/train_sentences_X.npy')
eval_sentences_X = np.load('../vectors/eval_sentences_X.npy')
test_sentences_X = np.load('../vectors/test_sentences_X.npy')
train_tags_y = np.load('../vectors/train_tags_y.npy')
eval_tags_y = np.load('../vectors/eval_tags_y.npy')
test_tags_y = np.load('../vectors/test_tags_y.npy')

# One-hot encode the tag sequences for the categorical cross-entropy loss.
cat_train_tags_y = to_categoricals(train_tags_y, len(tag2index))
cat_eval_tags_y = to_categoricals(eval_tags_y, len(tag2index))
cat_test_tags_y = to_categoricals(test_tags_y, len(tag2index))

#print("test_tags: " + str(len(test_tags)))
#print (len(word2index))
#print (len(tag2index))
#print(train_sentences_X[0])
#print(eval_sentences_X[0])
#print(test_sentences_X[0])
#print(cat_train_tags_y[0])
# + [markdown] colab_type="text" id="9-_gAQ7qrWTQ"
# ## PARTE 2 - Entrenamiento
# + colab={} colab_type="code" id="ORyC-422jaD9"
## Force TensorFlow to log device placement (shows whether a GPU is used).
## NOTE(review): tf.Session/ConfigProto are TF1-only APIs — under TF2 use
## tf.compat.v1 or tf.config.
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# + [markdown] colab_type="text" id="odDOhtO4NZDd"
# ### Define the model used for the training phase
# + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="x31rRt8PQuiW" outputId="eb0c9647-849c-4697-b151-802f88de7e2e"
from keras.models import Sequential
from keras.layers import Dense, InputLayer, Embedding, Activation, Dropout
from keras.optimizers import Adam
from keras.utils import plot_model
# Embedding -> ReLU -> Dense(tags) -> Dropout -> softmax, per time step.
model = Sequential()
model.add(InputLayer(input_shape=(MAX_LENGTH, )))
model.add(Embedding(len(word2index), 128))
model.add(Activation('relu'))
model.add(Dense(len(tag2index)))
# NOTE(review): this variant (mb-02) adds Dropout, but it sits AFTER the
# output Dense (on the logits, before softmax) rather than on the hidden
# layer as the notebook text says — confirm the intended placement.
model.add(Dropout(0.5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])
model.summary()
plot_model(model, to_file='../Plot/model/model-mb02.png', show_shapes=True)
# + [markdown] colab_type="text" id="4XghotI4NG9G"
# ### Se dedarrolla el entrenamiento del modelo
# + colab={"base_uri": "https://localhost:8080/", "height": 1516} colab_type="code" id="C0gOhZznbg6V" outputId="cfb9376e-230d-40ea-ffb8-ace34c8234b5"
#sudo pip install h5py
# NOTE(review): `os` is imported but never used in this cell.
import os
# Train for 40 epochs.
# NOTE(review): validation_split is ignored by Keras when validation_data
# is also supplied — one of the two can be dropped.
model_hist = model.fit(train_sentences_X, cat_train_tags_y,
                       validation_data=(eval_sentences_X, cat_eval_tags_y),
                       batch_size=128,
                       epochs=40,
                       validation_split=0.2)
# serialize model to JSON
model_json = model.to_json()
with open("../model/mb-02.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("../model/mb-02.h5")
print("Saved model to disk")
# + [markdown] colab_type="text" id="9hTDgQb2rWTa"
# ## PART 3 - Model evaluation
# + [markdown] colab_type="text" id="LdSkk8mzM1KN"
# ### Evaluate the model and compute its accuracy on the test data
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="cD-YI5Fgb3Kt" outputId="98cb9946-4f38-4176-c5ad-5932d07962b5"
scores = model.evaluate(test_sentences_X, cat_test_tags_y)
print(f"{model.metrics_names[1]}: {scores[1] * 100}")   # acc: 97.66269326210022
# + [markdown] colab_type="text" id="sAhkgtWHQuij"
# ### Definimos la funcion que nos servira para graficar el comportamiento del modelo en cada epoca del entrenamiento
# + colab={} colab_type="code" id="JaBUkInNQuik"
import matplotlib.pyplot as plt

def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):
    """Plot model loss and accuracy through epochs.

    All four arguments are per-epoch value lists (training and validation
    loss/accuracy, as found in a Keras History object). Saves the figure to
    ../Plot/training/training-mb-02.png.
    """
    blue = '#34495E'
    green = '#2ECC71'
    orange = '#E23B13'

    # plot model loss
    fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))
    ax1.plot(range(1, len(train_loss) + 1), train_loss, blue, linewidth=5, label='training')
    ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, green, linewidth=5, label='validation')
    ax1.set_xlabel('# epoch')
    ax1.set_ylabel('loss')
    ax1.tick_params('y')
    ax1.legend(loc='upper right', shadow=False)
    ax1.set_title('Model loss through #epochs', color=orange, fontweight='bold')

    # plot model accuracy
    ax2.plot(range(1, len(train_acc) + 1), train_acc, blue, linewidth=5, label='training')
    ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, green, linewidth=5, label='validation')
    ax2.set_xlabel('# epoch')
    ax2.set_ylabel('accuracy')
    ax2.tick_params('y')
    ax2.legend(loc='lower right', shadow=False)
    ax2.set_title('Model accuracy through #epochs', color=orange, fontweight='bold')
    fig.savefig('../Plot/training/training-mb-02.png', bbox_inches='tight')
# + [markdown] colab_type="text" id="TxQh1AtuQuis"
# ### Procedemos a Graficar el comportamiento del Entrenamiento, tanto del conjunto de entrenamiento como el de validación con respecto a la cantidad de epocas
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" id="Gs5f3U1nQuit" outputId="4c4ad746-3bbc-4325-9356-8045506b33a4"
# NOTE(review): newer Keras stores these under 'accuracy'/'val_accuracy'
# rather than 'acc'/'val_acc'; with the wrong key the .get fallback
# silently plots empty accuracy curves — verify the key names.
plot_model_performance(
    train_loss=model_hist.history.get('loss', []),
    train_acc=model_hist.history.get('acc', []),
    train_val_loss=model_hist.history.get('val_loss', []),
    train_val_acc=model_hist.history.get('val_acc', [])
)
# + [markdown] colab_type="text" id="pqiuy8q4GYjF"
# ### Función que Permite convertir Indices en Tags
# + colab={} colab_type="code" id="YJ6GaLot9yZR"
def logits_to_tokens(sequences, index):
    """Decode per-step probability vectors into tag strings.

    For every sequence, the argmax of each step's distribution is looked up
    in `index` (position -> tag). Returns a list of tag lists.
    """
    decoded = []
    for prob_sequence in sequences:
        decoded.append([index[np.argmax(step)] for step in prob_sequence])
    return decoded
# + [markdown] colab_type="text" id="W-1GH3ZYuLc-"
# ### Hacemos la prediccion sobre el conjunto de pruebas
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="6HgbDqqsR4a7" outputId="63275f95-19ca-41f0-db78-e79ece5e6317"
import pandas as pd
# Predict tag distributions for the test set and decode the first sentence.
prediction = model.predict(test_sentences_X)
log_tokens = logits_to_tokens(prediction, {i: t for t, i in tag2index.items()})
print(log_tokens[0])
# + [markdown] colab_type="text" id="uT6IIQXrQuix"
# ### Hallamos los valores de F1 score, recall, precision
# + colab={"base_uri": "https://localhost:8080/", "height": 4219} colab_type="code" id="GqTuNxppFNu-" outputId="ce0f608a-171e-411d-f5d1-88666b57cd19"
from sklearn.metrics import classification_report, confusion_matrix
## Flatten the per-sentence gold tags and predictions into two parallel
## lists, then build the DataFrame in one shot. Appending row-by-row with
## DataFrame.loc reallocates the frame each time (quadratic in the number
## of tokens); collecting into lists first is O(n).
expected_tags = []
predicted_tags = []
for i, sentence_tags in enumerate(test_tags):
    for j, tag in enumerate(sentence_tags):
        expected_tags.append(tag)
        predicted_tags.append(log_tokens[i][j])
## Index starts at 1 to match the original row numbering.
results = pd.DataFrame(
    {'Expected': expected_tags, 'Predicted': predicted_tags},
    index=range(1, len(expected_tags) + 1),
)
print('\nclassification_report:\n', classification_report(results['Expected'], results['Predicted']))
# + [markdown] colab_type="text" id="nrAAFx0XrWT1"
# ## PARTE 4 - Testing
# + [markdown] colab_type="text" id="uvOz-IShFzRR"
# ### Creamos un pequeño Ejemplo
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="_WT1PtS_Qui0" outputId="2b4924f5-440f-4d52-8378-9c0adc54155a"
## Two hand-crafted Spanish sentences for a quick sanity check; the second
## repeats "bajo" in different grammatical roles (adjective, preposition,
## noun) to probe the tagger's disambiguation.
test_samples = [
    sentence.split()
    for sentence in (
        "Correr es importante para mi .",
        "El hombre bajo corre bajo el puente con bajo índice de adrenalina .",
    )
]
print(test_samples)
# + [markdown] colab_type="text" id="X5E7-zZdGCjY"
# ### Convertimos el texto en Una entrada para el Modelo
# + colab={"base_uri": "https://localhost:8080/", "height": 459} colab_type="code" id="BApB6ScZ9jU8" outputId="cc225d1f-7182-4ab1-d327-b5cb265f4c63"
from keras.preprocessing.sequence import pad_sequences

## Encode each test sentence as a list of word indices, falling back to the
## out-of-vocabulary index for words missing from the vocabulary, then pad
## every sequence to MAX_LENGTH at the end ('post').
test_samples_X = []
for sentence in test_samples:
    encoded = []
    for word in sentence:
        try:
            encoded.append(word2index[word.lower()])
        except KeyError:
            encoded.append(word2index['-OOV-'])
    test_samples_X.append(encoded)
test_samples_X = pad_sequences(test_samples_X, maxlen=MAX_LENGTH, padding='post')
print(test_samples_X)
# + [markdown] colab_type="text" id="trNZCjTWGLp-"
# ### Se Ejecuta la predicion con la Entrada del modelo entrenado
# + colab={"base_uri": "https://localhost:8080/", "height": 476} colab_type="code" id="OX6Bd2Rz9oha" outputId="60f05bcd-7fe3-4fe2-f169-f95d48b386f7"
## Run the trained model on the encoded sample sentences; presumably one
## probability vector per padded timestep — confirm via the printed shape.
predictions = model.predict(test_samples_X)
print(predictions, predictions.shape)
# + [markdown] colab_type="text" id="l-XS5z-NGiM-"
# ### Conversion de la Salida del Modelo a un lista de Indices de Tags
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="IgIutMjq92cp" outputId="be1b6bc1-1783-47fd-e971-4a496ba89247"
#print(len(predictions))
## Convert each per-timestep probability vector to its most likely tag
## string using the inverted tag2index mapping.
log_tokens = logits_to_tokens(predictions, {i: t for t, i in tag2index.items()})
print(log_tokens)
# + [markdown] colab_type="text" id="VmWp09kyGrQC"
# ### Presentación de los Resultados
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="wNMCM8_jSdCL" outputId="127dade3-c5da-4163-fbe6-d68839d1fe02"
# #!pip install tabulate
from tabulate import tabulate
## Print each sentence as a table: the words form the header row and the
## predicted tags (truncated to the sentence length, dropping the padding)
## appear underneath.
first_words = test_samples[0]
first_tags = [log_tokens[0][:len(first_words)]]
second_words = test_samples[1]
second_tags = [log_tokens[1][:len(second_words)]]
print(tabulate(first_tags, headers=first_words))
print("\n")
print(tabulate(second_tags, headers=second_words))
## postagging Freeling 4.1
## El hombre bajo corre bajo el puente con bajo índice de adrenalina .
## DA0MS0 NCMS000 AQ0MS00 VMIP3S0 SP DA0MS0 NCMS000 SP SP NCMS000 SP NCFS000 Fp
## pos tagger Stanford NLP
## El hombre bajo corre bajo el puente con bajo índice de adrenalina .
## da0000 nc0s000 aq0000 vmip000 sp000 da0000 nc0s000 sp000 aq0000 nc0s000 sp000 nc0s000 fp
| mb-02/mb-02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import matplotlib.pyplot as plt
# %matplotlib inline
import math
import numpy as np
import wave

## Basic word/silence detection on a stereo WAV file: the signal is cut
## into fixed-size windows, band-pass filtered in the frequency domain,
## and windows whose average spectral amplitude exceeds a threshold are
## written to the output file.
sampleRate = 44100.0   # Hz; assumed rate of "oui.wav" — confirm with wr.getframerate()
duration = 20000.0     # total span to scan, in ms
window = 20.0          # analysis window length, in ms

wr = wave.open("oui.wav")
par = list(wr.getparams())
ww = wave.open('out.wav', 'w')
ww.setparams(tuple(par))

avg_amplitude = []
amplitude = []
silence = 0            # amplitude threshold below which a window counts as silence
avg_amplitude_without_silence = []

# split by 20ms window
for i in range(int(duration / window)):
    frames = np.frombuffer(wr.readframes(int(sampleRate / 1000 * window)), dtype=np.int16)
    left = frames[0::2]    # interleaved stereo: even samples = left channel
    right = frames[1::2]
    if not len(left):
        continue           # ran past the end of the file
    lf = np.fft.rfft(left)
    rf = np.fft.rfft(right)
    lowpass = 21   # drop FFT bins below this index (removes LOW frequencies)
    highpass = 2000  # drop FFT bins above this index (removes HIGH frequencies)
    lf[:lowpass] = 0
    lf[highpass:] = 0
    rf[:lowpass] = 0
    rf[highpass:] = 0
    avg = max(np.average(abs(lf)), np.average(abs(rf)))
    if avg > silence:
        avg_amplitude_without_silence.append(1000)
        nl = np.fft.irfft(lf)
        nr = np.fft.irfft(rf)
        ns = np.column_stack((nl, nr)).ravel().astype(np.int16)
        # tobytes() replaces tostring(), which was deprecated and then
        # removed from NumPy (2.0).
        ww.writeframes(ns.tobytes())
    else:
        avg_amplitude_without_silence.append(-1000)
    avg_amplitude.append(avg)
    for l in left:
        amplitude.append(l)

wr.close()
ww.close()
plt.plot(avg_amplitude)
plt.plot(avg_amplitude_without_silence)
# -
| docs/jupyter-notebooks/wordDetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Argoverse-forecasting
# This is a simple tutorial that will show you how to interact with the Argoverse-forecasting dataset using our python package. See [github page](https://github.com/argoai/argoverse-api) for instructions on how to install the package.
# Argoverse dataset can be download at [https://www.argoverse.org](https://www.argoverse.org)
# ## Data loading
# First we need to create argoverse loader.
# +
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
##set root_dir to the correct path to your dataset folder
root_dir = '../forecasting_sample_v1.1/forecasting_sample/data/'
## The loader indexes every sequence CSV found under root_dir.
afl = ArgoverseForecastingLoader(root_dir)
print('Total number of sequences:',len(afl))
# -
## Print a summary of the fifth sequence in the loader.
print(afl[4])
# One way to go through each log in our dataset is by iterating through our data loader. For example, we can see statistics for each log with simple iteration and printing.
for argoverse_forecasting_data in (afl):
    print(argoverse_forecasting_data)
# You can also get all the track_ids for a sequence.
## Each sequence exposes the IDs of every tracked object it contains.
argoverse_forecasting_data = afl[0]
print(argoverse_forecasting_data.track_id_list)
# ## Visualizing sequences
from argoverse.visualization.visualize_sequences import viz_sequence

## Render three sample sequences; viz_sequence plots the tracks stored in
## each sequence's dataframe.
for seq_name in ("2645", "3828", "4791"):
    seq_path = f"{root_dir}/{seq_name}.csv"
    viz_sequence(afl.get(seq_path).seq_df, show=True)
# ## Using map_api
# Getting candidate centerlines for the agent's trajectory is a simple function call. Below we use the first 2 secs of the trajectory to compute candidate centerlines for the next 3 secs.
# +
from argoverse.map_representation.map_api import ArgoverseMap
avm = ArgoverseMap()
## Number of observed timesteps (2 s at 10 Hz) used as forecast input.
obs_len = 20
## NOTE(review): the city is taken from afl[1] / afl[4] rather than from
## the sequence being plotted (afl.get(seq_path).city) — verify these
## loader indices actually correspond to 2645.csv / 3828.csv.
seq_path = f"{root_dir}/2645.csv"
agent_obs_traj = afl.get(seq_path).agent_traj[:obs_len]
candidate_centerlines = avm.get_candidate_centerlines_for_traj(agent_obs_traj, afl[1].city, viz=True)
seq_path = f"{root_dir}/3828.csv"
agent_obs_traj = afl.get(seq_path).agent_traj[:obs_len]
candidate_centerlines = avm.get_candidate_centerlines_for_traj(agent_obs_traj, afl[4].city, viz=True)
# -
# So is getting the lane direction of the trajectory's coordinates.
# +
seq_path = f"{root_dir}/2645.csv"
agent_traj = afl.get(seq_path).agent_traj
## Lane direction is queried at the first coordinate of the trajectory.
lane_direction = avm.get_lane_direction(agent_traj[0], afl[1].city, visualize=True)
seq_path = f"{root_dir}/3828.csv"
agent_traj = afl.get(seq_path).agent_traj
lane_direction = avm.get_lane_direction(agent_traj[0], afl[4].city, visualize=True)
# -
| demo_usage/argoverse_forecasting_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
#
# # terrainbento model BasicVs steady-state solution
# This model shows example usage of the BasicVs model from the TerrainBento package.
#
# The BasicVs model modifies Basic to use variable source area runoff via the "effective area" approach:
#
# $\frac{\partial \eta}{\partial t} = - KA_{eff}^{1/2}S + D\nabla^2 \eta$
#
# where
#
# $A_{eff} = R_m A e^{-\alpha S / A}$
#
# and
#
# $\alpha = \frac{K_{sat} H_{init} dx}{R_m}$
#
# where $K$ and $D$ are constants, $S$ is local slope, and $\eta$ is the topography. $A$ is the local upstream drainage area, $R_m$ is the average recharge (or precipitation) rate, $A_{eff}$ is the effective drainage area, $K_{sat}$ is the hydraulic conductivity, $H$ is the soil thickness, and $dx$ is the grid cell width. $\alpha$ is a courtesy parameter called the "saturation area scale" that lumps together many constants.
#
# Refer to [Barnhart et al. (2019)](https://www.geosci-model-dev.net/12/1267/2019/) for further explaination. For detailed information about creating a BasicVs model, see [the detailed documentation](https://terrainbento.readthedocs.io/en/latest/source/terrainbento.derived_models.model_basicVs.html).
#
# This notebook (a) shows the initialization and running of this model, (b) saves a NetCDF file of the topography, which we will use to make an oblique Paraview image of the landscape, and (c) creates a slope-area plot at steady state.
# +
# import required modules
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams["font.size"] = 20
matplotlib.rcParams["pdf.fonttype"] = 42
# %matplotlib inline
from landlab import imshow_grid
from landlab.io.netcdf import write_netcdf
from terrainbento import BasicVs
## Fix the RNG seed so the random initial topography (and hence all
## figures) is reproducible run-to-run.
np.random.seed(4897)
# +
# create the parameter dictionary needed to instantiate the model
## Units are presumably metres and years (terrainbento defaults) — confirm
## against the companion notebooks before reusing these numbers.
params = {
    # create the Clock.
    "clock": {
        "start": 0,
        "step": 10,
        "stop": 1e7
    },
    # Create the Grid.
    "grid": {
        "RasterModelGrid": [(25, 40), {
            "xy_spacing": 40
        }, {
            "fields": {
                "node": {
                    "topographic__elevation": {
                        "random": [{
                            "where": "CORE_NODE"
                        }]
                    },
                    "soil__depth": {
                        "constant": [{
                            "value": 1.0
                        }]
                    }
                }
            }
        }]
    },
    # Set up Boundary Handlers
    ## A negative lowering_rate raises the core nodes relative to the
    ## fixed boundary, driving relief growth.
    "boundary_handlers": {
        "NotCoreNodeBaselevelHandler": {
            "modify_core_nodes": True,
            "lowering_rate": -0.001
        }
    },
    # Set up Precipitator
    "precipitator": {
        "UniformPrecipitator": {
            "rainfall_flux": 0.01
        }
    },
    # Parameters that control output.
    "output_interval": 1e4,
    "save_first_timestep": True,
    "output_prefix": "basicVs.",
    "fields": ["topographic__elevation"],
    # Parameters that control process and rates.
    "water_erodibility": 0.001,
    "m_sp": 0.5,
    "n_sp": 1.0,
    "regolith_transport_parameter": 0.1,
    "hydraulic_conductivity": 10.
}
# -
# the tolerance here is high, so that this can run on binder and for tests. (recommended value = 0.001 or lower).
tolerance = 20.0
# we can use an output writer to run until the model reaches steady state.
class run_to_steady(object):
    """Output writer that stops the model once topography stops changing.

    At every output interval it compares the current core-node elevations
    against the snapshot taken at the previous interval. If the largest
    change is within ``tolerance`` it truncates the model clock so the run
    ends; otherwise it keeps extending the clock by one more interval.
    (Class name kept lowercase for compatibility with the existing caller.)
    """

    def __init__(self, model):
        self.model = model
        self.last_z = self.model.z.copy()
        self.tolerance = tolerance

    def run_one_step(self):
        # Go consistently through self.model; the original body mixed in
        # the global name `model`, which only worked because both names
        # referred to the same object.
        model = self.model
        if model.model_time > 0:
            diff = (model.z[model.grid.core_nodes] -
                    self.last_z[model.grid.core_nodes])
            if max(abs(diff)) <= self.tolerance:
                # Converged: truncate the clock so the run stops here.
                model.clock.stop = model._model_time
                print("Model reached steady state in " +
                      str(model._model_time) + " time units\n")
            else:
                self.last_z = model.z.copy()
        # Keep extending the clock one output interval at a time until the
        # convergence branch above truncates it.
        if model._model_time <= model.clock.stop - model.output_interval:
            model.clock.stop += model.output_interval
# +
# initialize the model using the Model.from_dict() constructor.
# We also pass the output writer here.
model = BasicVs.from_dict(params, output_writers={"class": [run_to_steady]})
# to run the model as specified, we execute the following line:
## run() advances the clock; the run_to_steady writer extends or truncates
## clock.stop at each output interval until steady state is reached.
model.run()
# +
# MAKE SLOPE-AREA PLOT
# plot nodes that are not on the boundary or adjacent to it; use boolean
# negation instead of comparing against False (PEP 8 E712).
core_not_boundary = ~np.asarray(
    model.grid.node_has_boundary_neighbor(model.grid.core_nodes))
plotting_nodes = model.grid.core_nodes[core_not_boundary]
# assign area_array and slope_array
area_array = model.grid.at_node["drainage_area"][plotting_nodes]
slope_array = model.grid.at_node["topographic__steepest_slope"][plotting_nodes]
# instantiate figure and plot
fig = plt.figure(figsize=(6, 3.75))
slope_area = plt.subplot()
# plot the data
slope_area.scatter(area_array,
                   slope_array,
                   marker="o",
                   c="k",
                   label="Model BasicVs")
# make axes log and set limits
slope_area.set_xscale("log")
slope_area.set_yscale("log")
slope_area.set_xlim(9 * 10**1, 1 * 10**6)
slope_area.set_ylim(1e-4, 1e4)
# set x and y labels
slope_area.set_xlabel(r"Drainage area [m$^2$]")
slope_area.set_ylabel("Channel slope [-]")
slope_area.legend(scatterpoints=1, prop={"size": 12})
slope_area.tick_params(axis="x", which="major", pad=7)
plt.show()
# -
# +
# Save stack of all netcdfs for Paraview to use.
# model.save_to_xarray_dataset(filename="basicVs.nc",
#                              time_unit="years",
#                              reference_time="model start",
#                              space_unit="meters")
# remove temporary netcdfs
## Clean up the intermediate NetCDF files written during the run.
model.remove_output_netcdfs()
# -
# make a plot of the final steady state topography
imshow_grid(model.grid, "topographic__elevation")
# ## Next Steps
#
# - We recommend you review the [terrainbento manuscript](https://www.geosci-model-dev.net/12/1267/2019/).
#
# - There are three additional introductory tutorials:
#
# 1) [Introduction terrainbento](../example_usage/Introduction_to_terrainbento.ipynb)
#
# 2) [Introduction to boundary conditions in terrainbento](../example_usage/introduction_to_boundary_conditions.ipynb)
#
# 3) [Introduction to output writers in terrainbento](../example_usage/introduction_to_output_writers.ipynb).
#
#
# - Five examples of steady state behavior in coupled process models can be found in the following notebooks:
#
# 1) [Basic](model_basic_steady_solution.ipynb) the simplest landscape evolution model in the terrainbento package.
#
# 2) [BasicVm](model_basic_var_m_steady_solution.ipynb) which permits the drainage area exponent to change
#
# 3) [BasicCh](model_basicCh_steady_solution.ipynb) which uses a non-linear hillslope erosion and transport law
#
# 4) **This Notebook**: [BasicVs](model_basicVs_steady_solution.ipynb) which uses variable source area hydrology
#
# 5) [BasisRt](model_basicRt_steady_solution.ipynb) which allows for two lithologies with different K values
| lessons/landlab/landlab-terrainbento/coupled_process_elements/model_basicVs_steady_solution_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time series analysis
# Struture of the report:
# - Time series problem (Introduction and Project outline and software used)
# - Background Theory
# - Explain dataset and vaiables
# - Model 1: ARIMA model (Implementation)
# - Model 2: SARIMA model (Implementation)
# - Comparing the two models (Testing and evaluation)
# - Conclusion
# - References:
# - Appendences: Attach Jupyter notebook
# Whenever any data or observations are recorded at regular time intervals, they are referred to as time series data. Time series analysis attempts to study data over a time period to forecast or predict what will happen in the future. Forecasts are often based on patterns or recurring trends from previous time periods: history often repeats itself, so events that happened in the past are likely to happen again in the future.
# {}
# By fitting a model to time series data, we can create forecasts of future outcomes.
#
#
# The [dataset](https://www.marketwatch.com/investing/index/spx/download-data?startDate=3/7/2018&endDate=11/12/2021) for this project was sourced from 'marketwatch.com' which is a website that provides the latest stock prices updated on a daily basis. The website allows you to download all the S&P 500 Stock Price information at a date range and a frequency of your choosing as a 'csv' file. We used this publically available data source to build our dataset.
#
# For our purpose, we chose to collect the S&P 500 stock prices on a monthly basis, from '03/2018' to '11/2021', so the dataset contains 45 datapoints in total spanning over 3 years. We renamed the dataset to 'SaP_500_3yrs_Monthly.csv' to improve the readability of the project.
#
# This dataset contains the Opening price of the S&P 500, Closing price of the S&P 500, highest price and lowest price of the S&P 500 on that day.
#
# The S&P 500 (Standard and Poor's 500) is a stock market index tracking the performance of 500 leading U.S publicly traded companies, with a primary emphasis on market capitalisation. A stock index can be simply put as a collection of stocks that are tracked together to give people an overall idea of how the stock market is doing. It's a diversified and a relatively low-risk way to invest in stocks.
#
# The S&P is a float-weighted index, meaning the market capitalisations of the companies in the index are adjusted by the number of shares available for public trading.
# It is the most popular index because it represents the largest publicly traded corporations in the U.S.
#
# These indices give a general indication of how the overall market — comprising thousands of stocks — is doing.
#
# The dataset obtained from the S&P 500 data contains the variables:
# - 'Date' - Date of the S&P 500 prices
# - 'Open'- Opening Price of the S&P 500 on the respective date
# - 'High' - Highest Price of the S&P 500 on the respective date
# - 'Low' - Lowest Price of the S&P 500 on the respective date
# - 'Close' - Price the S&P 500 finished at on the respective date (in U.S. Dollars)
#
# We will analysing two different models for tackling the problem and comparing the result from each.
# ## Loading the dataset
# Loads the 'SaP_500_3yrs_Monthly.csv' file as a DataFrame ('SP_500'), which contains our entire dataset.
#
# Then we will print the entire dataset.
# +
# Importing the pandas library
import pandas as pd
# Loads the 'SaP_500_3yrs_Monthly.csv' which contains our dataset
SP_500 = pd.read_csv('SaP_500_3yrs_Monthly.csv')
# Prints the entire dataset
## head(50) covers all 45 rows, so this displays the full dataset.
SP_500.head(50)
# -
# As we can see the dataset begins at the most current value of the S&P price which is not ideal for the ARIMA model. Therefore we will have to reverse the order of the data to make it usable.
# Below, we will remove all the columns in the 'SP_500' DataFrame except the 'Close' column which is necessary for implementing the ARIMA model.
## Remove all the columns in 'SP_500' DataFrame except
## the 'Close' column which is useful for plotting the data
## (dropna also discards any rows with a missing closing price).
SP_500_Close = SP_500[['Close']].dropna()
# ## Visualising the dataset
# We will plot the dataset so we can plan our approach for the Time series problem.
#
# In order to plot out S&P 500 dataset we need to:
# - Create a new DataFrame contining only the 'Date' and 'Close' Variables.
# - Create a DataFrame containing only the 'Close' variable and reverse the order of the variable.
# - Create a seperate DataFrame containing only the 'Date' variable and reverse the order of the variable.
# - Convert the 'Date' DataFrame to datetime
# - Plot the final updated variables using matplotlib
#
# Creating a new DataFrame for plotting the S&P 500 dataset purposes only containing the 'Date' and 'Close' Variables as it won't affect the original dataset.
#
# We will separate the 'Close' variable into 'Plotting_SP_500_Profit', containing all the closing day prices in our dataset. The order of the 'Close' prices is reversed since our dataset is stored newest-first, the opposite of what our plot needs.
#
# We will then separate the 'Date' variable into its own variable to convert it to datetime, which is needed for the 'autofmt_xdate()' function used to make the plot more readable. The order of the 'Date' values will also need to be reversed to match the prices from above.
#
# Finally we can plot the complete dataset with the updated variables to observe the monthly price of the S&P 500 from '03/2018' to '11/2021'.
# +
from matplotlib import pyplot as plt

## Build a plot-only view of the dataset: closing prices against their
## dates, reversed into chronological order (the CSV stores newest-first).
Plotting_SP_500 = pd.DataFrame(SP_500, columns = ['Date', 'Close'])

## Closing prices, oldest first (iloc[::-1] folds the reversal into the
## selection instead of reversing afterwards).
Plotting_SP_500_Profit = SP_500.iloc[::-1, -1].values

## Matching dates, oldest first, parsed to datetime so matplotlib's
## autofmt_xdate() can format the x axis.
Plotting_SP_500_Date = pd.to_datetime(SP_500.iloc[::-1, 0].values)

## DataFrame holding exactly the series to plot: prices indexed by date.
Plot_for_SP_500 = pd.DataFrame(Plotting_SP_500_Profit, Plotting_SP_500_Date)

## Plot the monthly S&P 500 closing-price history.
plt.title('S&P 500 Price History on a monthly basis')
plt.xlabel('Date')
plt.ylabel('Stock Price in U.S. Dollers')
plt.grid(True)
plt.plot(Plot_for_SP_500, color = 'red')
plt.gcf().autofmt_xdate()
plt.show()
# -
# ## Model 1: ARIMA model
# The first model we are going to test for this problem is an ARIMA (AutoRegressive Integrated Moving Average) model, which is a forecasting algorithm based on the idea that the information in the past values of the time series can alone be used to predict the future values.
#
# Notation for the ARIMA model: $\text{ARIMA}(p,d,q)$
# The equation for the ARIMA model:
#
#
# $$Y_t = c + \beta_1{Y_{t-1}} + \beta_2{Y_{t-2}} + ... + \beta_p{Y_{t-p}} + \theta_1{\epsilon_{t-1}} + ... + \theta_q{\epsilon_{t-q}} + {\epsilon_t}$$
#
# $$\text{where}$$
#
# $$c \text{ is the intercept}$$
#
# $$\text{and}$$
#
# $$\beta_1{Y_{t-1}} + \beta_2{Y_{t-2}} + ... + \beta_p{Y_{t-p}} \text{ are the lags (AR)}$$
#
# $$\text{and}$$
#
# $$\theta_1{\epsilon_{t-1}} + ... + \theta_q{\epsilon_{t-q}} + {\epsilon_t} \text{ are the errors (MA)}$$
# In basic terms we can look at the formula as:
#
# $$ \text{Predicted } {Y_t} = Constant + \text{Linear combination Lags of Y upto p lags} + \text{Linear Combination of Lagged forecast error upto q lags} $$
# An ARIMA model is characterised by 3 terms(<i>p</i>, <i>d</i>, <i>q</i>):
# - <i>p</i> is the order of the AR term. It refers to the number of lags
# - <i>d</i> is the number of differencing required to make the time series stationary refering to the I term.
# - <i>q</i> is the order of the MA term. It refers to the numbrt of lagged forecast errors that should go into the ARIMA model.
#
# It is necessary for the time series to be stationary in order to fit the ARIMA model.
#
# If a time series, has seasonal patterns you can add seasonalterm to create a SARIMA model.
#
# In order to fit the ARIMA model, the time searies need to be stationary of as close to staionarity as possible.
#
# Price series are typically non-stationary because the time in which the data is collected has a tendency to affect how the prices can change over a period of time, such as the season of the year where the data has been collected.
#
# Finding the order of differencing (d) in ARIMA model
#
# Finding the order of the AR term (p)
#
# Finding the order of the MA term (q)
#
# Accuracy metrics for Time Series Forecast
#
#
# ### SARIMA model
# SARIMA(p,d,q)(P,D,Q)s where P, D and Q are the SAR term, the order of seasonal differencing and the SMA term respectively, and 's' is the seasonal frequency of the time series.
#
# Seasonal Autoregressive Integrated Moving Average (SARIMA) is an extension of ARIMA that explicitly supports univariate time series data with a seasonal component.
#
# <b>Seasonal Elements</b>
# There are four seasonal elements that are not part of ARIMA that must be configured
#
# - <b>P:</b> Seasonal autoregressive order
# - <b>D:</b> Seasonal differencing order
# - <b>Q:</b> Seasonal moving average order
# - <b>s:</b> The number of time steps for a single seasonal period
# <b>Steps required to implement the ARIMA model:</b>
# 1. Loads the dataset
# 2. Check if the time series is stationary
# 3. Determine the <i>d</i> value
# 4. Create ACF and PACF plots for the time series
# 5. Determine the <i>p</i> and <i>q</i> values
# 6. Fit the ARIMA model to the time series with the parameters calculated previously
# 7. Predicting the future values on a testing sets
# 8. Calculate RMSE to check the performance of the model
# 9. Comparing the forecasted values vs the Actual Values
# We will begin building the ARIMA model by finding the <i>d</i> term.
# ---------------------------------------------------------------------
# ### Determine <i>d</i> - the number of differencing required to make the time series stationary
# It's firstly important to check if the time series is stationary.
#
# This can be done using the '<i>adfuller</i>' function from the '<i>statsmodels</i>' library, which performs the Augmented Dickey-Fuller (ADF) test on the S&P 500 closing prices to check for a unit root in our time series. With price data, this test generally shows that the time series is non-stationary.
# As in our case, if the p-value > 0.05 we fail to reject the null hypothesis, meaning the time series is non-stationary.
#
# we'll need to find the order of differencing.
#
# First we need to create a new variable 'SP_500_Close_Values' which will contain the entire dataset. Then we will issolate and invert the 'Close' price which is all that is needed for the ARIMA model.
#
# Preparing the data for the ARIMA model will require:
# - Create a new variable 'SP_500_Close_Values' which contains all the prices of S&P 500.
# -
# +
## Importing the 'adfuller' function from the 'statsmodels' library
## to perform the Augmented Dickey-Fuller (ADF) test to check if
## the time series is stationary
# Importing the necessarey libriaries
from statsmodels.tsa.stattools import adfuller
## Creates a Variable 'SP_500_Close_Values' which contains all
## the content from the 'SP_500_Close' Variable.
## NOTE(review): this assignment aliases SP_500_Close (no copy is made).
## It is also necessary to reverse the order of the
## 'SP_500_Close_Values' variables content because our data set
## starts with the newest content from the dataset we need to
## have the newest content at the end of the Dtaframe.
## We also need to reset the 'index' column or else our plots
## will be in reverse
## NOTE(review): reset_index(inplace=True) on the [::-1] slice may emit a
## SettingWithCopyWarning — verify it mutates the intended frame.
SP_500_Close_Values = SP_500_Close
Reverse_SP_500_Close_Values = SP_500_Close_Values[::-1]
Reverse_SP_500_Close_Values.reset_index(inplace=True, drop=True)
## Uses the 'adfuller' function to check if the time series
## is stationary or not
result = adfuller(Reverse_SP_500_Close_Values.dropna())
print('ADF Statistic: ', result[0])
print('p-value: ', result[1])
# -
# Our p-value is 0.98, so it is clear that this price series is non-stationary. This isn't a problem, since we could have probably guessed that the time at which the prices were collected will influence future results.
#
# In order to make the ARIMA model work for this time series we need to difference them to remove the trends in the time series. It might take multple differences to make the time series stationary. The amount of differences required is known as the order of differencing (d).
#
#
#
#
# We can use the ACF plot from stats.models and the acf plot tells us how many terms are required to remove any autocorrelation in the series
# #### Autocorrelation Function (ACF)
# Autocorrelation refers to the correlation between a Time series and a previous version of the time series.
#
# For this dataset, we shifted the time series back by one month. This shift is known as the lag.
#
# The 'statesmodel' libriary has a function 'plot_act' to plot the autocorrelation for our time series data. This function plots the lags on the horizonal axis and the correlations on the vertical axis.
# +
from statsmodels.graphics.tsaplots import plot_acf
## Plots the autocorrelation for the closing prices of the S&P 500
## in our dataset (a slowly decaying ACF is another sign the series
## is non-stationary).
plt.plot(Reverse_SP_500_Close_Values)
plt.title('Original data')
plot_acf(Reverse_SP_500_Close_Values)
plt.show()
# -
# $$ \text{First-order differencing: } x'_t = x_t - x_{t-1} $$
# Next, we will plot the difference of the closing price data along with the autocorrelation of the difference for the closing prices of the S&P 500 in our dataset
# +
## Calculates the difference of the closing price data
## (first-order differencing, to remove the trend).
diff = Reverse_SP_500_Close_Values.diff().dropna()
## Plots the difference of the closing price data along with
## the autocorrelation of the difference for the closing prices
## of the S&P 500 in our dataset
plt.plot(diff)
plt.title('Difference one')
plot_acf(diff)
plt.show()
# -
# $$ \text{Second-order differencing: } x_t* = x'_t - x'_{t -1} $$
#
# $$ = (x_t - x_{t-1}) - (x_{t-1} - x_{t-2}) $$
#
# $$ = x_t - 2x_{t-1} + x_{t-2} $$
# +
## Calculates the difference of the difference
## the closing price data (second-order differencing)
diff = Reverse_SP_500_Close_Values.diff().diff().dropna()
## Plots the second difference of the closing price data along with
## the autocorrelation of the difference for the closing prices
## of the S&P 500 in our dataset
plt.plot(diff)
plt.title("Difference twice")
## Add ; to the end of the plot function so that the plot is not
## duplicated
plot_acf(diff);
plt.show()
# -
# Lag terms in graph skew to lag axis when the series is differenced twice indicating that the series is over differenced. So, we will set the <i>d</i> term as 1.
#
# Our order of differencing term = 1
#
# The <i>d</i> term will also be confirmed below with the function '<i>pmdarima.arima.ndiffs</i>':
# +
## Estimates the ARIMA differencing term d required to make the time
## series stationary (pmdarima's ndiffs applies successive ADF tests
## to choose d).
from pmdarima.arima.utils import ndiffs
print("Estimate ARIMA diferencing term, d, required to convert the time series to stationary below:")
ndiffs(Reverse_SP_500_Close_Values, test="adf")
# -
# Since we came to the same conclusion, we can be reasonably confident in our result.
# ### Determining <i>p</i> - the order of the AR term
#
# p is the order of the Auto Regressive (AR) term. It refers to the number of lags to be used as predictors.
#
# We can find out the required number of AR terms by inspecting the Partial Autocorrelation (PACF) plot.
#
# The partial autocorrelation represents the correlation between the series and its lags.
from statsmodels.graphics.tsaplots import plot_pacf
# +
## First-difference the series, then inspect the partial autocorrelation:
## the last lag above the significance band suggests the AR order p.
diff = Reverse_SP_500_Close_Values.diff().dropna()
plt.plot(diff)
plt.title('Difference once')
plot_pacf(diff)
plt.show()
# -
# The partial autocorrelation lag number 7 is above the significance line, hence we will set <i>p</i> term as 7.
# ### Determining <i>q</i> - the order of the MA term
#
# The <i>q</i> term is the order of the Moving Average(MA), which refers to the number of lagged forecast errors that should go in the ARIMA model.
#
# We can look at the ACF plot for the number of MA terms.
# +
## First-difference the series and inspect the ACF: the significant lags
## suggest the MA order q.
diff = Reverse_SP_500_Close_Values.diff().dropna()
plt.plot(diff)
plt.title("Difference once")
plot_acf(diff);
plt.show()
# -
# We choose a forcast of 0
# ### Fitting the ARIMA model
#
# ARIMA(p,d,q)
#
# p = 7
# d = 1
# q = 0
#
# The model is prepared on the training data by calling the fit() function.
# +
from statsmodels.tsa.arima_model import ARIMA
## NOTE(review): statsmodels.tsa.arima_model.ARIMA is deprecated and was
## removed in statsmodels 0.13; the replacement
## statsmodels.tsa.arima.model.ARIMA has a different fit/forecast API
## (the `fc, se, conf = result.forecast(...)` unpacking below relies on
## the old one), so migrating requires updating the later cells too.
import warnings
warnings.filterwarnings('ignore') # Ignore unnecessary warnings
# ARIMA Model
#model = ARIMA(Reverse_SP_500_Close_Values, order=(6, 1, 1))
model = ARIMA(Reverse_SP_500_Close_Values, order=(7, 1, 0))
result = model.fit()
result.summary()
# -
# Plot the residual errors to ensure there are no patterns (that is, looking for constant mean and variance).
# +
# Plot residual errors
residuals = pd.DataFrame(result.resid)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
residuals.plot(title = 'Residuals', ax=ax1)
residuals.plot(kind='kde', title = 'Density', ax=ax2)
plt.show()
# +
# Actual vs Fitted
result.plot_predict(
start=1, end=44, dynamic=False,
)
plt.grid(True)
plt.show()
# -
# Train test split
# Creating a training and testing set
## 'n' is an integer that is equivalent to 87.5% of the
## amount of closing prices from our dataset
n = int(len(Reverse_SP_500_Close_Values) * 0.875)
## Our training set contains the first 87.5% of closing prices
## from our dataset
train = Reverse_SP_500_Close_Values[:n]
## Our testing set will contain the remainder of closing prices
## from our dataset
test = Reverse_SP_500_Close_Values[n:]
print('Number of closing prices in our training set: ', len(train))
print('Number of closing prices in our testing set: ', len(test))
# +
## Plots the training vs testing data from the S&P 500 dataset
plt.title('S&P 500 Price Train vs test')
plt.xlabel('Position in the dataset')
plt.ylabel('Stock Price in U.S. Dollars')  # fixed typo "Dollers"
plt.grid(True)
plt.plot(train, color = 'red')   # first 87.5% of closing prices
plt.plot(test, color = 'blue')   # remaining 12.5% of closing prices
plt.show()
# -
model = ARIMA(train, order=(7, 1, 0))
result = model.fit()
# +
step = 6
# Forecast
fc, se, conf = result.forecast(step, alpha=0.05) # 95% confidence interval
# +
## Make as pandas series
fc = pd.Series(fc, index=test[:step].index)
lower = pd.Series(conf[:, 0], index=test[:step].index)
upper = pd.Series(conf[:, 1], index=test[:step].index)
# -
plt.plot(test[:step], label="actual")
plt.plot(fc, label="forecast")
plt.fill_between(lower.index, lower, upper, color="k", alpha=0.1)
plt.title("Forecast vs Actual")
plt.legend(loc="upper left")
plt.grid(True)
plt.show()
answer = pd.DataFrame({'Actual Price' :test['Close'], 'Forecasted Price' :fc, 'Difference' :test['Close']-fc})
answer
# +
# Calculating the Root Mean Squared Error
from math import sqrt
from sklearn.metrics import mean_squared_error
# Compare only the forecast horizon: `fc` holds `step` forecasted values, so
# align the actual prices to the same window.  Using the full `test` series
# raises a length-mismatch error whenever len(test) != step.
rmse = sqrt(mean_squared_error(test[:step], fc))
print('Test RMSE: %.3f' % rmse)
# -
plt.plot(train[step:], label='training')
plt.plot(test[:step], label='actual')
plt.plot(fc, label='forecast')
plt.fill_between(lower.index, lower, upper, color='k', alpha=0.1)
plt.title('Complete Picture')
plt.legend(loc='upper left')
plt.grid(True)
plt.show()
# ### Still need to evaluate the model !!!!
# ## Model 2: SARIMA
# The SARIMA model builds upon the ARIMA model. it also includes the p, q, and d parameters, but also an extra set of parameters to account for time series seasonality.
#
# - P: The order of the seasonal autoregressive model.
# - Q: The order of the seasonal moving average model.
# - D: The number of seasonal differences applied to the time series.
#
# The SARIMA model is denoted as SARIMA(p,d,q)(P,D,Q)[s] where the 's' is the frequency of the time series.
# +
## Loads the 'SaP_500_3yrs_Monthly.csv' file as a new DataFrame ('data')
## to remove any influence from our ARIMA model and uses the 'Date'
## column as the index for the DataFrame
data = pd.read_csv('SaP_500_3yrs_Monthly.csv', parse_dates=['Date'], index_col='Date')
## Removes all the columns in the 'data' DataFrame except the
## 'Close' column which is linked to the correct dates since the
## 'Date' is the index for the DataFrame
data = data[['Close']].dropna()
data = data[::-1]
# +
plt.plot(data, label='Original Series')
plt.plot(data.diff(1), label='Usual Differencing')
plt.title('Usual Differencing')
plt.legend(loc='upper left', fontsize=10)
plt.grid(True)
plt.gcf().autofmt_xdate()
plt.show()
# +
plt.plot(data, label='Original Series')
plt.plot(data.diff(6), label='Seasonal Differencing', color = 'green')
plt.title('Seasonal Differencing')
plt.legend(loc='upper left', fontsize=10)
plt.grid(True)
plt.gcf().autofmt_xdate()
plt.show()
# +
import pmdarima as pm
# Seasonal - fit stepwise auto-ARIMA
smodel = pm.auto_arima(data['Close'], start_p=1, start_q=1,
test='adf',
max_p=6, max_q=6, m=6,
start_P=0, seasonal=True,
d=None, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
smodel.summary()
# +
# Forecast
n_periods = 6
fitted, confint = smodel.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.date_range(data.index[-1], periods = n_periods, freq='MS')
# make series for plotting purpose
fitted_series = pd.Series(fitted, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)
# Plot
plt.plot(data)
plt.plot(fitted_series, color='darkgreen')
plt.fill_between(lower_series.index,
lower_series,
upper_series,
color='k', alpha=.15, label='conf int')
plt.title("SARIMA - Final Forecast of S&P 500 Prices")
plt.gcf().autofmt_xdate()
plt.grid(True)
plt.show()
# +
from statsmodels.tsa.arima.model import ARIMAResults
smodel.plot_diagnostics(figsize=(8,8))
plt.show()
# -
forecasted_Price = pd.DataFrame({'Forecasted Price' :fitted_series})
forecasted_Price
# ## Comparing the two models
# ## Conclusion
| Time Series Analysis (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ARIMA
# ARIMA adalah metode statistikal yang cukup populer digunakan untuk time series forecasting dan data analysis. ARIMA adalah singkatan dari AutoRegressive Integrated Moving Average. Metode ini menangkap struktur dari sebuah time series data untuk mengetahui pola dari data tersebut.
#
#
#
# ARIMA mempunyai 2 jenis, yaitu:
# 1. Non-seasonal ARIMA
# 2. Seasonal ARIMA
#
# ARIMA model biasanya digunakan dalam kasus dimana data terplot secara non-stationary, dimana akan diberlakukan langkah differencing pada data yang diobservasi sehingga data terplot menjadi stationary.
#
# Non-seasonal ARIMA secara umum mempunyai 3 parameter, yaitu p, d, dan q. parameter-parameter ini bernilai non-negative.
# 1. AR(p):AutoRegression. Regresi yang memanfaatkan fitur-fitur antara parameter yang diamati terhadap parameter sebelumnya.
# 2. I(d):Integrated. Dilakukan Operasi selisih antara data point yang diamati terhadap data point pada waktu sebelumnya (t-1). Hal ini dilakukan untuk membuat time series menjadi stationary.
# 3. MA(q):Moving Average. Nilai yang mempunyai ketergantungan terhadap parameter yang diamati dan error pada model Moving Average.
#
# Untuk memaksimalkan model ARIMA, kita harus membuat data terplot secara stationary. Data time-series stationary adalah data yang mempunyai nilai rata-rata dan varian yang konstan terhadap waktu. Stationary data dibutuhkan agar model bisa membuat prediksi dengan baik.
#
#
#
#
#
# Berikut adalah contoh data stationary dan non-stationary.
# 
# Jika data yang kita peroleh bersifat non-stationary, maka kita harus mengubah data tersebut menjadi stationary agar kita bisa mengevaluasi dan menentukan parameter-parameter pada ARIMA. Caranya adalah melakukan differencing.
# 
# Cara kerjanya relatif mudah dimengerti. jadi saat data bersifat non-stationary saat kita amati, maka kita bisa melakukan differencing tahap pertama. Jika saat differencing pertama telah dilakukan namun data masih bersifat non-stationary saat kita amati, maka kita bisa melakukan differencing tahap kedua. Differencing bisa terus dilakukan sampai data yang kita peroleh bersifat stationary. Yang harus kita perhatikan diatas adalah setiap melakukan differencing, maka kita akan mengorbankan 1 baris data.
# Kita bisa melakukan differencing berdasarkan season/musim. Contoh, jika kita mempunyai data bulanan, kita bisa melakukan differencing dengan nilai 12, jika kita ingin mendefinisikan 1 musim = 1 tahun.
# Saat data kita menjadi stationary, maka kita mampu melanjutkan ke tahap selanjutnya, yaitu menentukan p, d, dan q. Sebelum menentukan nilai p, d, dan q, kita harus mengetahui AutoCorrelation Plots
# AutoCorrelation plot menunjukan korelasi antara time series data terhadap data sebelumnya yang dimundurkan(lagged) sebesar x. Jadi y axis adalah nilai korelasi, x adalah jumlah unit waktu yang dimundurkan.
#
# Contoh jika kita mempunyai time series sebesar T, kita copy menjadi 2. Kita hapus data pertama dari copy pertama, kemudian kita hapus data terakhir dari copy kedua. Dengan ini kita mempunyai data untuk membuat AutoCorrelation plot dengan lag unit sebesar 1.
# Gradual Decline
# 
# Sharp Drop-off
# 
# Bagaimana cara kita menentukan model mana yang harus kita pilih? apakah AR atau MA? Berapa nilai lag yang harus kita pilih?
#
# Saat AutoCorrelation Plot bernilai positif saat lag pertama(x=1), maka kita disarankan untuk menggunakan AR model. Sebaliknya, Saat AutoCorrelation Plot bernilai negatif saat lag pertama, maka kita disarankan untuk menggunakan MA model. Cara ini cukup baik untuk menentukan nilai p, d, dan q pada model ARIMA.<br>
# p: jumlah lag<br>
# d: jumlah tahap differencing<br>
# q: Nilai ukuran terhadap Moving Average.
#
# Untuk Seasonal ARIMA, akan ada parameter tambahan yang akan kita gunakan.
# # CODING SECTION
# Proses ARIMA pada umumnya seperti berikut.
# 1. Melakukan visualisasi time series data
# 2. membuat data time series menjadi stationary.
# 3. Plot AutoCorrelation
# 4. Menentukan parameter model ARIMA.
# 5. menggunakan model untuk membuat prediksi.
#
# +
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# -
df = pd.read_csv('monthly-milk-production-pounds-p.csv')
df.head()
df.tail()
df.columns = ['Month','Milk in pounds per cow']
df.head()
# +
df.drop(168,axis=0,inplace=True)
# -
df['Month'] = pd.to_datetime(df['Month'])
df.head()
df.set_index('Month',inplace=True)
df.head()
df.describe()
# # Visualisasi
df.plot()
timeseries = df['Milk in pounds per cow']
timeseries.rolling(12).mean().plot(label='12 Month Rolling Mean')
timeseries.rolling(12).std().plot(label='12 Month Rolling Std')
timeseries.plot()
plt.legend()
timeseries.rolling(12).mean().plot(label='12 Month Rolling Mean')
timeseries.plot()
plt.legend()
# ## Decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
# Decompose the monthly series into trend, seasonal, and residual components
# with a 12-month seasonal cycle.
# NOTE(review): the `freq` keyword was renamed to `period` in newer
# statsmodels releases -- confirm the installed version still accepts `freq`.
decomposition = seasonal_decompose(df['Milk in pounds per cow'], freq=12)
fig = plt.figure()
fig = decomposition.plot()
fig.set_size_inches(15, 8)
# # Stationary Test
#
df.head()
from statsmodels.tsa.stattools import adfuller
result = adfuller(df['Milk in pounds per cow'])
# +
print('Augmented Dickey-Fuller Test:')
labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations Used']
for value,label in zip(result,labels):
print(label+' : '+str(value) )
if result[1] <= 0.05:
print("strong evidence against the null hypothesis, reject the null hypothesis. Data has no unit root and is stationary")
else:
print("weak evidence against null hypothesis, time series has a unit root, indicating it is non-stationary ")
# -
# Store in a function for later use!
def adf_check(time_series):
    """
    Pass in a time series, returns ADF report
    """
    result = adfuller(time_series)
    print('Augmented Dickey-Fuller Test:')
    labels = ['ADF Test Statistic', 'p-value', '#Lags Used',
              'Number of Observations Used']
    # Pair each label with its statistic; zip() stops at the shorter
    # sequence, so any extra entries returned by adfuller are ignored.
    for label, value in zip(labels, result):
        print(label + ' : ' + str(value))

    # result[1] is the p-value of the test.
    p_value = result[1]
    if p_value <= 0.05:
        print("strong evidence against the null hypothesis, reject the null hypothesis. Data has no unit root and is stationary")
    else:
        print("weak evidence against null hypothesis, time series has a unit root, indicating it is non-stationary ")
# ## Differencing
df['Milk First Difference'] = df['Milk in pounds per cow'] - df['Milk in pounds per cow'].shift(1)
adf_check(df['Milk First Difference'].dropna())
df['Milk First Difference'].plot()
# ** Second Difference **
# Sometimes it would be necessary to do a second difference
# This is just for show, we didn't need to do a second difference in our case
df['Milk Second Difference'] = df['Milk First Difference'] - df['Milk First Difference'].shift(1)
adf_check(df['Milk Second Difference'].dropna())
df['Milk Second Difference'].plot()
# ** Seasonal Difference **
df['Seasonal Difference'] = df['Milk in pounds per cow'] - df['Milk in pounds per cow'].shift(12)
df['Seasonal Difference'].plot()
# Seasonal Difference by itself was not enough!
adf_check(df['Seasonal Difference'].dropna())
# ** Seasonal First Difference **
# You can also do seasonal first difference
df['Seasonal First Difference'] = df['Milk First Difference'] - df['Milk First Difference'].shift(12)
df['Seasonal First Difference'].plot()
adf_check(df['Seasonal First Difference'].dropna())
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(df['Seasonal First Difference'].iloc[13:], lags=40, ax=ax1)
# # Modelling with ARIMA
# For non-seasonal data
# NOTE(review): statsmodels.tsa.arima_model.ARIMA is deprecated and removed in
# statsmodels >= 0.13; the replacement is statsmodels.tsa.arima.model.ARIMA --
# confirm the installed version before running this import.
from statsmodels.tsa.arima_model import ARIMA
# +
# I recommend you glance over this!
#
help(ARIMA)
# -
# p: jumlah lag<br>
# d: jumlah tahap differencing<br>
# q: Nilai ukuran terhadap Moving Average.
#
# We have seasonal data!
model = sm.tsa.statespace.SARIMAX(df['Milk in pounds per cow'],order=(0,1,0), seasonal_order=(1,1,1,12))
results = model.fit()
results.resid.plot()
results.resid.plot(kind='kde')
# ## Prediction of Future Values
#
#
df['forecast'] = results.predict(start = 150, end= 168, dynamic= True)
df[['Milk in pounds per cow','forecast']].plot(figsize=(12,8))
# ### Forecasting
# This requires more time periods, so let's create them with pandas onto our original dataframe!
df.tail()
from pandas.tseries.offsets import DateOffset
future_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,24) ]
future_dates
future_dates_df = pd.DataFrame(index=future_dates[1:],columns=df.columns)
future_df = pd.concat([df,future_dates_df])
future_df.head()
future_df.tail()
future_df['forecast'] = results.predict(start = 168, end = 188, dynamic= True)
future_df[['Milk in pounds per cow', 'forecast']].plot(figsize=(12, 8))
# # KESIMPULAN
# ARIMA sangat cocok untuk digunakan pada dataset diatas. Namun, banyak dataset yang sifatnya tidak bisa dikenali oleh ARIMA sehingga harus dimodelkan dengan metode yang lain, contohnya dataset yang bersifat finansial.
| 2 Regression/2.4 ARIMA/ARIMA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/likhith-nagaraju-gowda/PYTHON_WORKSHOP/blob/main/my_code_world.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EnYellex3_Cr"
# hello boss program
# + id="38KYhVjo0uIV" colab={"base_uri": "https://localhost:8080/"} outputId="3ccc42c7-5c3a-4855-ce90-3456bafbbcc5"
print("hello boss")
# + colab={"base_uri": "https://localhost:8080/"} id="QgLycCtF6K74" outputId="816cd503-dc8b-4da9-aa43-dd02081c68f5"
g = "gouri"
print(type(g))          # <class 'str'>
x=int(.999)             # int() truncates toward zero, so x == 0
print(type(x))          # moved after the assignment (was a NameError: x was printed before being defined)
print(x)
x , y , z = "orange",7,int(7.333)   # tuple unpacking into three variables
print(x,y,z)
print(y)
print(z)
x = y = z = "sharu cherry"          # chained assignment: all three names share one value
print(x)
print(y)
print(z)
# + [markdown] id="SlnHEc7nA34J"
# exercise 1 and 2
# + colab={"base_uri": "https://localhost:8080/"} id="NwmTVPbSA7qM" outputId="32e367bf-75ff-4c1c-ecbc-ba966875856e"
carname="<NAME>"
x=50
print(carname,x)
print(type(carname),type(x))
# + [markdown] id="Ihg88vc-CVVu"
# exercise 3
# + colab={"base_uri": "https://localhost:8080/"} id="Bq8l4ohoCYdT" outputId="81735ff9-d9c9-4721-efed-b28aaeadcd41"
x=5
y=10
print(x+y)
# + [markdown] id="9rp55AvHC22i"
# exercise 4
# + colab={"base_uri": "https://localhost:8080/"} id="mPx6VCzlC4oH" outputId="fb66681a-1592-4a0d-fec2-760cbdde6ad8"
x , y = 11,17
z=x+y
print(z)
# + [markdown] id="zLgTfXKvPFXI"
# **DATATYPES** **EXAMPLE**
# + id="HkaCFHPrPF39"
x1 = "Hello World"
x2 = 10
x3=10.1
x4=["apple","banana","cherry"]
x5=("apple","banana","cherry")
x6=range(6)
x7={"Name" : "Likhith", "Age" : 36}
x8={"Apple", "Banana", "cherry"}
x9=True
# + colab={"base_uri": "https://localhost:8080/"} id="vwHaghJNRWQk" outputId="e7c73640-1b65-4792-ce2a-206aaa9b7231"
print(type(x1),type(x2),type(x3),type(x4),type(x5),type(x6),type(x7),type(x8),type(x9))
print(x1,x2,x3,x4,x5,x6,x7,x8,x9)
x=3+5j
print(x,type(x))
print(len(x1))
# + colab={"base_uri": "https://localhost:8080/"} id="Ri2AMpi9WrJW" outputId="fc5fcd9c-108f-4325-efcf-e8f8cf10d462"
x={1,2,3,2}
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="jIsF1xISYHvb" outputId="ffa82457-6019-46c7-f89b-081176f45e68"
x=1
y=2.8
z=1j
a=float(x)
b=int(y)
c=complex(y,x)
print(a,b,c)
# + [markdown] id="khBmubW6cQqq"
# **assignment** **1**
# + colab={"base_uri": "https://localhost:8080/"} id="TDzCWTwhaaKF" outputId="3665f3f0-78a8-4fb5-a5af-576f1292908f"
x,y = 2,3
a=str(x)
b=str(y)
c=a+b
print("x + y =",c)
# + [markdown] id="-QqOnsgtdUT5"
# **STRINGS**
# + colab={"base_uri": "https://localhost:8080/"} id="V2NKjPgjdYF3" outputId="c19f6a27-b798-4436-d29c-1ca23ed00dfe"
alvas="life in alvas is hell"
print("heaven" in alvas)
# + [markdown] id="Q8Qr7oVvgCyn"
# #**slicing**
# + colab={"base_uri": "https://localhost:8080/"} id="_0se8GadgIcC" outputId="2f50baee-d81e-4d11-b104-8119193c166b"
txt="life is costlier than u think"
print(txt[0:4])
print(txt[ :8])
print(txt[8:])
print(txt[-5:])
print(txt.upper())
# + [markdown] id="LW-Cz5gQi7Yg"
# #**format in python**
# + colab={"base_uri": "https://localhost:8080/"} id="lZj9b-Qli_i8" outputId="03a8c06e-ced8-4090-ed1f-496bbef2c7e5"
age = 20000
txt="my name is <NAME>, and i am {}"
print(txt.format(age))
# + [markdown] id="UFC15wTwkf3Z"
# inbuilt string functions
# + colab={"base_uri": "https://localhost:8080/"} id="aIhajbNukq1l" outputId="acb373d4-4f6d-40e7-a303-9e8a720a187a"
txt="LIKHITH"
print(txt.capitalize())
print(txt.upper())
print(txt.isupper())
print(txt.isalpha())
print(txt.isdigit())
# + [markdown] id="fAkNQ1cC-D-B"
# #**ASSIGNMENT**#
# + colab={"base_uri": "https://localhost:8080/"} id="FzLHx4Bl-LEf" outputId="749626c7-5d73-44ef-f15c-33a1d747edb2"
x=int(input("INPUT THE VALUE FOR X"))
y=int(input("INPUT THE VALUE FOR Y"))
print(x+y)
print(x-y)
print(x*y)
print(x/y)
print(x%y)
# + [markdown] id="daIEySlpET2o"
# #**PYTHON LIST**
# + colab={"base_uri": "https://localhost:8080/"} id="_mGZjAkEEZL5" outputId="0d623a55-73d9-431c-daa9-568031e1b915"
thislist= list(("abhishek", "vishak", "jayraj"))
print(thislist)
print(len(thislist))
| my_code_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from urllib.request import urlopen
html = urlopen('http://pythonscraping.com/pages/page1.html')
print(html.read())
# +
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen('http://www.pythonscraping.com/pages/page1.html')
bs = BeautifulSoup(html.read(), 'html.parser')
print(bs.h1)
# -
| basic_spider/simple_scrapy.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # UAF2 (usage example)
# ------------------------------
# ## Importing data and preprocessing
# Set the variable `ENV["PHAGEDATAPATH"]` to the directory where you want the downloaded data to be placed
ENV["PHAGEDATAPATH"]="/home/matteo/poliTo/Tesi/Data"
using PhageData, PhageFields
using Pkg
Pkg.activate("/home/matteo/.julia/dev/UAF2")
using UAF2
# Import one of the possible datasets (Boyer, Fowler, Olson, Wu, Araya) with an easy syntax (`:boyer, :fowler, :olson, :wu, :araya`)
rawdata = import_dataset(:boyer);
# filter out sequences that disappear and then reappear, since they cannot be described by the model
data = subdata(rawdata, not_disappearing(rawdata));
# add pseudocounts using `add_pseudocounts`
data = add_pseudocounts(data, 0.5);
# filter out sequences for which counts go below `count_thr` during rounds in `rounds`
count_thr = 3
rounds = [1, 2] #filter out low counts sequences only in the first two rounds
filter_counts(data, rounds, count_thr);
# ## Defining the model
# store within the struct `Model` the model parameters using the model defined by `PhageFields` and the amplification factors $\lambda(t)$ by indicating the number of rounds of the experiment
epistasis = EpistasisMu{data.A, data.L}()
model = Model(epistasis, data.T)
# ## Learning with single train-test split
# create train and test sets using `randtrain` from `PhageData`
train_percentage = 0.8
data_train, data_test = randtrain(data, round(Int, train_percentage*data.S));
# define some function that act as getters during the training for some quantity defined on model parameters
get_lambda(m ,p ,d) = m.λ #gets λ(t) during the training
get_mu(m, p, d) = [m.x[end]]; #gets the chemical potential μ during the training
# set some parameters for the learning ($\Delta \lambda$ is the minimum distance that $\lambda(t)$ must keep from the border during the optimization)
prior = set_prior(100.0, model) #sets the gaussian prior to 0 mean and 100.0 as variance
starting_point!(model, data_train) #sets a valid starting point in the parameters space
opt = set_optimizer(:LD_MMA, model, data_train; ftol_rel = 1e-7, Δλ = 1e-10) #sets the optimizer
# perform the learning specifying in `monitor` a vector of getter methods of the quantities to monitor during the learning (note that the log-likelihood is always returned `logP`)
model, ret, logP, properties = learn!(model, prior, opt, data_train; monitor = [get_lambda, get_mu]);
properties
# ## Plot some results
using PyPlot
# loglikelihood
plot(logP);
# $\lambda(1)$
λ1 = map(x -> x[1], properties[1]);
plot(λ1);
# $\mu$
μ = map(x->x[1], properties[2])
plot(μ);
# probability distribution:
# - use the learnt model to infer the energies and probabilities of the sequences using the function `inference`
# - plot the result
etr, ptr = inference(model, data_train); #infer energies and binding probabilities on training set
plot_distribution(etr, ptr); #plot the probability distribution
# ## Crossvalidation
# crossvalidation can be performed in a single step by specifying all the needed parameters. Create some empty arrays to store all the information produced during the various learnings.
# - `training_set`: actual training sets and indices of the samples from the whole dataset
# - `test_set`: same as above but for the test sets
# - `model_parameters`: model inferred at each time a different fold is left out
# - `L2reg`: priors used
# - `logP`: behaviour of the loglikelihood at each learning
# - `obs`: array containing all the quantities that are monitored during the various learnings. For instance `learn!` is run with `monitor=[quantity1, quantity2, ...]` and `obs[r][k]` is the value of quantity `k` when fold `r` is left out from the training set
training_set = []
test_set = []
model_parameters = []
outcomes = []
L2reg = []
logP = []
obs = []
prior_variance = 100.0
nfolds = 5
crossvalidation!(data, model, prior_variance, nfolds, training_set, test_set, model_parameters, outcomes, L2reg,
logP, obs; monitor=[get_lambda, get_mu]);
# ## Plot some results
# Infer energies/probabilities on the train and test sets with the crossvalidated models
# (fixed garbled function name and `data_tain` typo).
(etr, ptr), (ete, pte) = traintest_inference(model_parameters, data_train, data_test)
| Examples/UAF2_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div>
# <a href="https://www.audiolabs-erlangen.de/fau/professor/mueller"><img src="data_layout/PCP_Teaser.png" width=100% style="float: right;" alt="PCP Teaser"></a>
# </div>
# # Unit 2: Python Basics
#
# <ul>
# <li><a href='#learn'>Overview and Learning Objectives</a></li>
# <li><a href='#basics'>Basic Facts</a></li>
# <li><a href='#variables'>Variables and Basic Operators</a></li>
# <li><a href='#lists'>Lists and Tuples</a></li>
# <li><a href='#bool'>Boolean Values</a></li>
# <li><a href='#sets'> Sets</a></li>
# <li><a href='#dict'>Dictionaries</a></li>
# <li><a href='#type'>Python Type Conversion</a></li>
# <li><a href='#exercise_list'>Exercise 1: Basic List Manipulations</a></li>
# <li><a href='#exercise_dict'>Exercise 2: Basic Dictionary Manipulations</a></li>
# </ul>
# <a id='learn'></a>
# <div class="alert alert-block alert-warning">
# <h2>Overview and Learning Objectives</h2>
#
# This unit introduces basic concepts used in Python programming while we assume that you are familiar with general programming and have some experience with other programming languages such as MATLAB, C/C++, or Java. We start with basic Python variables and operators and then introduce compound data types such as lists, tuples, sets, and dictionaries. It is important that you understand the conceptual difference between these constructs and the Python syntax to encode them. Using variables and data types in Python can differ significantly from the other programming languages, often resulting in seemingly surprising programming errors. Therefore, we will briefly discuss how to convert data types in Python. Furthermore, we point out the difference between deep and shallow copies, a delicate and important topic that is a common cause of errors, especially when passing variables to functions. As said, we only touch on these topics by giving concrete examples. To get familiar with the Python syntax, we recommend that you do the small exercises on list (<a href='#exercise_list'>Exercise 1</a>) and dictionary manipulations (<a href='#exercise_dict'>Exercise 2</a>). For more comprehensive tutorials, we refer to the following sources:
# <ul>
# <li> <a href="https://docs.python.org/3/tutorial/index.html">The Python Tutorial</a> introduces basic concepts and features of the Python language and system.
# </li>
# <li> The <a href="https://scipy-lectures.org/">Scipy Lecture Notes</a> introduce the scientific Python ecosystem including libraries such as NumPy, Scipy, and Matplotlib.
# </li>
# <li> The <a href="https://www.audiolabs-erlangen.de/FMP">FMP Notebooks</a> are a collection of Python notebooks on <a href="http://www.music-processing.de/">Fundamentals of Music Processing</a> (FMP).
# </li>
# </ul>
#
# </div>
# <a id='basics'></a>
# ## Basic Facts
#
# * Python is an interpreted (not compiled), open-source, multi-platform programming language.
# * There exist several modules for scientific computing (e.g. `numpy`, `scipy`, `matplotlib`, `librosa`) in Python.
# * Python uses indentation (and not brackets or `end`-commands) to separate blocks of code.
# * Comment lines start with the character `#`.
# * Useful functions for help:
# * Invoke the built-in help system: `help()`.
# * List objects in namespace: `dir()`.
# * Show global and local variables: `globals()`, `locals()`.
# <a id='variables'></a>
# ## Variables and Basic Operators
#
# Let us start with some basic facts on Python variables:
# * Variables do not need to be declared; neither their type.
# * Variables are created automatically when they are first assigned.
# * A variable name may contain letters (`a`, `b`, ..., `Y`, `Z`) and the underscore (`_`).
# * Variable names are **case sensitive**.
# * All but the first character can also be positive integer numbers.
# * Usually, one uses lower case letters and underscores to separate words.
#
# A string is given in single ticks (`'`) or double ticks (`"`). If there is no other reason, we recommend single ticks. The following code assigns a string to a variable and prints it using the `print`-command. Furthermore, some basic string formatting is applied.
string_variable = 'Welcome to the Python Tutorial'
print(string_variable)
print('This is an integer: %d.' % 17)
print('This is string 1: %s and this is string 2: %6s.' % ('ABCD', '1234'))
print('This is a floating point number: %06.3f.' % 3.14159265359)
# Some basic math:
n = 3
print('n + 1 =', n + 1)
print('n - 1 =', n - 1)
print('n * 2 =', n * 2)
print('n / 2 =', n / 2)
print('n ^ 2 =', n ** 2)
# Division always results in a floating-point number, even if the number is divisible without remainder. (Note that there are differences between **Python 2** and **Python 3** in using `/`). If the result should be an integer (e.g., when using it as an index), one may use the `//` operator. The `%` yields the remainder.
n = 8
print('Normal division:', n / 2)
print('Integer division:', n // 2)
print('Normal division:', n / 5)
print('Integer division:', n // 5)
print('Remainder of integer division:', n % 5)
# For re-assigning a variable, one may use the following conventions:
# +
n = 7
n += 11
print('Addition:', n)
n *= 2
print('Multiplication:', n)
n /= 18
print('Division:', n)
n **= 0.5
print('Exponentiation:', n)
# -
# <a id='lists'></a>
# ## Lists and Tuples
#
# The basic compound data types in Python are **lists** and **tuples**. A list is enclosed in square brackets and a tuple is enclosed in round brackets. Both are indexed with square brackets (with indexing starting with $0$). The `len` function gives the length of a tuple or a list.
# +
var_lis = ['I', 'am', 'a', 'list']
var_tup = ('I', 'am', 'a', 'tuple')
print(var_lis)
print(var_tup)
print(var_lis[0], var_tup[1], 'generated from',
var_tup[2], var_tup[3], 'and', var_lis[2], var_lis[3])
print(len(var_tup))
print(len(var_lis))
print(type(var_lis))
print(type(var_tup))
# -
# What is the difference between a list and a tuple? Tuples are **immutable** objects (i.e., their state cannot be modified after they are created) and a bit more efficient. Lists are more flexible. Here are some examples for list operations:
# +
var_list = [1, 2, 3]
print('Print list:', var_list)
var_list[0] = -1
print('Alternate item:', var_list)
var_list.append(10)
print('Append item:', var_list)
var_list = var_list + ['a', '12', [13, 14]]
print('Concatenate two lists:', var_list)
print('Last element of list: ', var_list[-1])
print('Remove an item by index and get its value:', var_list.pop(2))
var_list.remove([13, 14])
print('Remove an item by value:', var_list)
del(var_list[2:4])
print('Remove items by slice of indices:', var_list)
# -
# One can index a list with start, stop, and step values (`[start:end:step]`). Note that, in Python, the last index value is `end-1`. Negative indices are possible with `-1` referring to the last index. When not specified, `start` refers to the first item, `end` to the last item, and `step` is set to $1$.
var_list = [11, 12, 13, 14, 15]
print('var_list =', var_list)
print('var_list[0:3] =', var_list[0:3])
print('var_list[1:3] =', var_list[1:3])
print('var_list[-1] =', var_list[-1])
print('var_list[0:4:2] =', var_list[0:4:2])
print('var_list[0::2] =', var_list[0::2])
print('var_list[::-1] =', var_list[::-1])
# The following examples shows how the elements of a list or tuple can be assigned to variables (called **unpacking**):
# +
var_list = [1, 2]
[a, b] = var_list
print(a, b)
var_tup = (3, 4)
[c, d] = var_tup
print(c, d)
# -
# Leaving out brackets, tuples are generated.
t = 1, 2
a, b = t
print(t)
print(a, b)
# The `range`-function can be used to specify a tuple or list of integers (without actually generating these numbers). A range can then be converted into a tuple or list.
print(range(9))
print(range(1, 9, 2))
print(list(range(9)))
print(tuple(range(1, 9, 2)))
print(list(range(9, 1, -1)))
# <a id='bool'></a>
# ## Boolean Values
#
# Boolean values in Python are `True` and `False`. Here are some examples for basic comparisons:
a = 1
b = 2
print(a < b)
print(a <= b)
print(a == b)
print(a != b)
# The `bool` function converts an arbitrary value into a boolean value. Here, are some examples:
print(bool('a'))
print(bool(''))
print(bool(1))
print(bool(0))
print(bool(0.0))
print(bool([]))
print(bool([4, 'hello', 1]))
# <a id='sets'></a>
# ## Sets
#
# There are also other data types in Python, which we want to mention here. In the following, we introduce **sets**, which are unordered collections of unique elements. Furthermore, we apply some basic set operations.
s = {4, 2, 1, 2, 5, 2}
print('Print the set s:', s)
print('Union of sets:', {1, 2, 3} | {2, 3, 4})
print('Intersection of sets:', {1, 2, 3} & {2, 3, 4})
s.add(7)
print('Adding an element:', s)
s.remove(2)
print('Removing an element:', s)
# <a id='dict'></a>
# ## Dictionaries
#
# Another convenient data type are **dictionaries**, which are indexed by **keys** (rather than by a range of numbers as is the case for lists or arrays). The following code cell gives an example and introduces some basic operations.
dic = {'a': 1 , 'b': 2, 3: 'hello'}
print('Print the dictionary dic:', dic)
print('Print the keys of dic:', list(dic.keys()))
print('Access the dictionary via a key:', dic['b'])
print('Print the values of the dictionary:', list(dic.values()))
# <a id='type'></a>
# ## Python Type Conversion
#
# Python offers many different numerical types and methods for type conversion. The standard numeric types in Python are `int`, `float`, and `complex`. The function `type()`can be used to identify the type of a variable. One can use the methods `int()`, `float()`, and `complex()` to convert from one type to another. This is demonstrated in the following code cell.
#
#
# <div class="alert alert-block alert-warning">
# <strong>Note:</strong><br>
# <ul>
# <li>Type conversions from <code>float</code> to <code>int</code> may result in some rounding.</li>
# <li>One cannot directly convert from the type <code>complex</code> to <code>int</code> or <code>float</code>.</li>
# </ul>
# </div>
a = 1
print('a =', a, type(a))
b = float(a)
print('b =', b, type(b))
c = 2.2
print('c =' , c, type(c))
d = int(c)
print('d =', d, type(d))
e = complex(d)
print('e =', e, type(e))
f = complex(a, c)
print('f =', f, type(f))
# ## Shallow and Deep Copy Operations
#
# Dealing with objects such as lists, tuples, dictionaries, or sets can be trickier as one may think. In particular, using the assignment operator `=` may only create a **pointer** from variable to an object. As a result, two variables may point to the same object, which may lead to unexpected modifications. This effect is illustrated by the following example.
a = [1, 2, 3]
print('a = ', a, ', id(a) = ', id(a))
b = a
b[0] = 0
b.append(4)
print('b = ', b, ', id(b) = ', id(b))
print('a = ', a, ', id(a) = ', id(a))
# This example shows that the assignment `b = a` does not create a copy of the list. The variables `a` and `b` point to the same object, which identifier (unique integer) is revealed by the function `id()`. To create copies of an object, one can use the python module <a href="https://docs.python.org/3.1/library/copy.html">`copy`</a>, which is imported by the statement `import copy`. There are different types of copies called **shallow copy** and **deep copy**, which become important when dealing with **compound objects** (objects which elements are again objects such as lists of lists).
#
# * A **shallow copy** creates a new compound object. However, if the entries are again compound objects, only links are created.
# * A **deep copy** creates a new compound object, where new objects are created for all entries (and recursively for their entries).
#
# The two cases are illustrated by the subsequent example.
# +
import copy

# Shallow copy: a new outer list is created, but the inner lists are shared.
a = [[1, 2, 3], [4, 5, 6]]
b = copy.copy(a)
b[0] = 0          # rebinding an element of b leaves a untouched
print('a = ', a)
print('b = ', b)
b[1][0] = 0       # mutating a *shared* inner list changes a as well
print('a = ', a)
print('b = ', b)

# Deep copy: the inner lists are duplicated too, so a stays intact.
a = [[1, 2, 3], [4, 5, 6]]
c = copy.deepcopy(a)
c[1][0] = 0
print('a = ', a)
print('c = ', c)
# -
# ## Exercises and Results
# Course helper library used below to display exercise solutions/results.
import libpcp.python
# Toggle whether the sample results of the exercises are shown.
show_result = True
# <a id='exercise_list'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 1: Basic List Manipulations</strong><br>
# <ul>
# <li>In the following, we assume that a student is specified by a list containing a student ID (integer), last name (string), and first name (string). Create a list of students, which contains the following entries: <code>[123, 'Meier', 'Sebastian']</code>, <code>[456, 'Smith', 'Walter']</code>. (Note that this becomes a list of lists.)</li>
# <li>Add to this list the student <code>[789, 'Wang', 'Ming']</code>. </li>
# <li>Print out the student list in reversed order (descending student IDs).</li>
# <li>Print out only the first name of the second student in this list.</li>
# <li>Print the length of the list (using the <code>len</code> function).</li>
# <li>Make a deep copy of the list and then remove the first and second student from the copied list (using the <code>del</code> statement). Furthermore, change the student ID of the remaining student from <code>789</code> to <code>777</code>. Check if the original list has been modified.</li>
# </ul>
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.python.exercise_list(show_result=show_result)
# <a id='exercise_dict'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 2: Basic Dictionary Manipulations</strong><br>
# <ul>
# <li>Again, we assume that a student is specified by student ID (integer), last name (string), and first name (string). Create a dictionary, where a key corresponds to the student ID and a value to a list of the last name and first name. Start with a dictionary with key <code>123</code> and value <code>['Meier', 'Sebastian']</code> as well as key <code>456</code> and value <code>['Smith', 'Walter']</code>.</li>
# <li>Add the student with key <code>789</code> and value <code>['Wang', 'Ming']</code>.</li>
# <li>Print out a list of all keys of the dictionary.</li>
# <li>Print out a list of all values of the dictionary.</li>
# <li>Print out the last name of the student with key <code>456</code>.</li>
# <li>Remove the student with key <code>456</code> (using the <code>del</code> statement).</li>
# <li>Print the length of the dictionary (using the <code>len</code> function).</li>
# </ul>
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.python.exercise_dict(show_result=show_result)
# <div>
# <a href="https://opensource.org/licenses/MIT"><img src="data_layout/PCP_License.png" width=100% style="float: right;" alt="PCP License"></a>
# </div>
| PCP_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="igkJBjlUfQq8"
import numpy as np
from abc import ABC, abstractmethod
# + id="cBHzfBrecR4h"
def validate_node(func):
    """Decorator for impurity functions: return 0 for an empty node.

    ``node`` is expected to be a numpy array (anything exposing ``size``);
    an object without ``size`` raises AttributeError, as before.
    """
    def wrapper(node):
        # the original wrapped this in `try/except AttributeError: raise`,
        # which is a no-op and has been removed
        if node.size:
            return func(node)
        return 0
    return wrapper


class Criterion:
    """Impurity / cost functions used to score candidate node splits.

    Each function takes a 1-D numpy array of target values and returns a
    non-negative scalar; an empty node scores 0 (via ``validate_node``).
    """

    @staticmethod
    @validate_node
    def gini(node: np.ndarray) -> float:
        """Gini impurity: 1 - sum_k p_k**2 over the class proportions p_k."""
        _, counts = np.unique(node, return_counts=True)
        # BUG FIX: the square must apply to the class proportions, not to the
        # total count. The original computed 1 - sum(counts / total**2),
        # i.e. 1 - 1/total, which is independent of the class distribution.
        proportions = counts / counts.sum()
        return 1.0 - np.sum(proportions ** 2)

    @staticmethod
    @validate_node
    def entropy(node: np.ndarray) -> float:
        """Shannon entropy: -sum_k p_k * log2(p_k)."""
        _, counts = np.unique(node, return_counts=True)
        n_instances = np.sum(counts)
        log = np.log2(counts / n_instances)
        # counts from np.unique are always >= 1, so log is finite;
        # nan_to_num is kept as a defensive guard against log2(0) == -inf
        return -np.sum(counts / n_instances * np.nan_to_num(log, neginf=0))

    @staticmethod
    @validate_node
    def RMSE(node: np.ndarray) -> float:
        """Square root of the node's sum of squared deviations (see MSE)."""
        return np.sqrt(Criterion.MSE(node))

    @staticmethod
    @validate_node
    def MSE(node: np.ndarray) -> float:
        """Sum of squared deviations from the node mean.

        NOTE(review): despite the name this is the *sum*, not the mean, of
        squared errors; NodeBase.build() weights it by the child-node size
        fraction, so the ranking of splits is unaffected.
        """
        y_mean = np.mean(node)
        return np.sum((node - y_mean) ** 2)
# + id="gc9If-VUZlQQ"
import numpy as np
# Registries mapping user-facing criterion names to Criterion functions,
# looked up by DecisionTreeBase.set_criterion() according to the task type.
CRITERIA_CLASSIFIER={'gini':Criterion.gini,'entropy':Criterion.entropy}
CRITERIA_REGRESSOR={'RMSE':Criterion.RMSE,'MSE':Criterion.MSE}
class DecisionTreeBase:
    """Shared plumbing for the decision-tree classifier and regressor.

    Handles hyper-parameter validation, criterion selection, input
    normalisation and delegation to the Node* classes that actually build
    the tree. Not meant to be instantiated directly: ``task_type`` is set
    by the derived classes (DecisionTreeClassifier / DecisionTreeRegressor).
    """

    def __init__(self, max_depth: int = np.iinfo(np.int32).max) -> None:
        # only max_depth is validated for now; min_samples_split /
        # min_samples_leaf were sketched in the original and may be added later
        params = {'max_depth': max_depth}
        final_params = self.validate_params(params)
        self.max_depth = final_params['max_depth']
        self.task_type = None    # set by the derived class
        self.criterion = None    # set via set_criterion()
        self.is_fitted = False

    def set_criterion(self, criterion) -> None:
        """Resolve a criterion name to a Criterion function for this task.

        Falls back to MSE (regression) or gini (classification) when the
        name is unknown; raises ValueError when task_type is not set.
        """
        if self.task_type == 'regression':
            criterion = CRITERIA_REGRESSOR.get(criterion, None)
            if criterion:
                self.criterion = criterion
            else:
                print('Invalid criterion chosen for regression, criterion set to default MSE')
                self.criterion = Criterion.MSE
        elif self.task_type == 'classification':
            criterion = CRITERIA_CLASSIFIER.get(criterion, None)
            if criterion:
                self.criterion = criterion
            else:
                print('Invalid criterion chosen for classification, criterion set to default gini')
                self.criterion = Criterion.gini
        else:
            raise ValueError('Invalid task type chosen, make sure to use derived class')

    def check_param_value(self, param) -> bool:
        """Return True when param is a non-negative (numpy) integer."""
        if isinstance(param, (int, np.integer)):
            if param >= 0:
                return True
            else:
                print('Invalid parameter value, check if it is >= 0, parameter set to maximum value ')
        else:
            print('Invalid parameter type, check if it is int or numpy.integer, parameter set to maximum value ')
        return False

    def validate_params(self, params: dict) -> dict:
        """Replace every invalid hyper-parameter value with the int32 maximum."""
        final_params = {}
        for param, value in params.items():
            if self.check_param_value(value):
                final_params[param] = value
            else:
                final_params[param] = np.iinfo(np.int32).max
        return final_params

    def fit(self, X: np.ndarray, y: np.ndarray, check_input=True) -> None:
        """Build the tree for (X, y); X and y may be lists or numpy arrays."""
        if check_input:
            try:
                X = self.check_input(X, 'X')
                y = self.check_input(y, 'y')
            except ValueError:
                raise
        if self.task_type == 'regression':
            # the root node's constructor recursively builds the whole tree
            self.node = NodeRegressor(X, y, self.max_depth, 0, self.criterion)
            self.is_fitted = True
        elif self.task_type == 'classification':
            possible_classes = np.unique(y)
            self.node = NodeClassifier(X, y, possible_classes, self.max_depth, 0, self.criterion)
            self.is_fitted = True
        else:
            raise ValueError('Invalid task type chosen, make sure to use derived class')

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Return an (n_samples, 1) array of per-row predictions."""
        X = self.check_input(X, 'X to predict')
        return np.array([self.node.predict(x) for x in X]).reshape(-1, 1)

    def printTree(self) -> None:
        """Dump the whole fitted tree to stdout (pre-order)."""
        self.node.print()

    def check_input(self, X, input_name):
        """Coerce X to a 2-D numpy array of shape (n_samples, n_features).

        Lists and 1-D arrays become single-feature columns; anything else
        raises ValueError. NOTE(review): a nested list is flattened into a
        single column by the reshape — confirm multi-feature list inputs
        are not expected here.
        """
        if isinstance(X, (list,)):
            return np.array(X).reshape(-1, 1)
        elif isinstance(X, (np.ndarray,)):
            shape = X.shape
            try:
                n_instances, n_features = shape
            except ValueError:
                n_instances, = shape
                n_features = None
            if n_features is None:
                X = X.reshape((-1, 1))
            return X
        else:
            raise ValueError(f'Invalid {input_name}, please check if it is a list or a numpy.ndarray')

    def is_classifier(self) -> bool:
        # BUG FIX: the original compared against the misspelled string
        # 'classication', so this method always returned False.
        return getattr(self, 'task_type', None) == 'classification'

    def is_regressor(self) -> bool:
        return getattr(self, 'task_type', None) == 'regression'
# + id="lBy8EFoNamFK"
class DecisionTreeClassifier(DecisionTreeBase):
    """Decision tree for classification tasks (default criterion: gini)."""

    def __init__(self, criterion='gini', max_depth: int = np.iinfo(np.int32).max) -> None:
        super().__init__(max_depth=max_depth)
        # task_type must be set before the criterion name can be resolved
        self.task_type = 'classification'
        self.set_criterion(criterion=criterion)
class DecisionTreeRegressor(DecisionTreeBase):
    """Decision tree for regression tasks (default criterion: MSE)."""

    def __init__(self, criterion='MSE', max_depth: int = np.iinfo(np.int32).max) -> None:
        super().__init__(max_depth=max_depth)
        # task_type must be set before the criterion name can be resolved
        self.task_type = 'regression'
        self.set_criterion(criterion=criterion)
# + id="rvKwUItsy-VA"
class NodeBase:
    """Base class for one node of the decision tree.

    Holds the node's data slice (X, y) and its cost under the chosen
    criterion. After build() the node is either a leaf (final_value set by
    the derived class) or an internal node with a (left, right) children
    tuple plus the chosen split rule (min_error_feature, min_error_value).
    """
    def __init__(self,X,y,criterion,max_depth=np.inf,depth=0):
        self.X=X
        self.y=y
        self.children=None       # (left, right) node tuple once split
        self.final_value=None    # leaf prediction, set by the derived class
        self.is_leaf=False
        self.max_depth=max_depth
        self.depth=depth
        self.criterion=criterion
        # impurity/cost of this node's targets under the chosen criterion
        self.cost=self.criterion(y)
    def print(self) -> None:
        '''Implemented in derived classes'''
        pass
    def build(self):
        """Exhaustively search (feature, threshold) splits and recurse.

        Every unique value of every feature is tried as a threshold; rows
        whose feature value is strictly below the threshold go left. A split
        is accepted only if the size-weighted child cost is lower than this
        node's own cost; otherwise the node becomes a leaf.
        """
        shape=self.X.shape
        left_indices=None
        min_error_feature=None
        min_error_feature_value=None
        min_cost=self.cost
        try:
            n_instances, n_features= shape
        except ValueError:
            # 1-D input: treat it as a single feature
            n_instances, =shape
            n_features=1
        for k in range(n_features): # check in every feature
            X_k=self.X[:,k] # feature values
            X_k_unique=np.unique(X_k) # unique values
            for instance in X_k_unique:
                # left child: rows strictly less than the candidate threshold
                # (note: strict <, not <=)
                left_indices_current=np.where(X_k < instance )
                mask = np.zeros(len(self.y),dtype=bool)
                mask[left_indices_current] = True
                # split y into possible children
                left=self.y[mask]
                right=self.y[~mask]
                # size-weighted sum of the children's costs
                curr_cost=len(left)/len(self.y)*self.criterion(left ) + len(right)/len(self.y)*self.criterion(right)
                if curr_cost < min_cost: # smaller impurity
                    min_cost=curr_cost
                    left_indices=left_indices_current
                    min_error_feature=k
                    min_error_feature_value=instance
        #after all features and their values have been checked
        if min_cost<self.cost: # if split reduces cost
            self.min_error_feature=min_error_feature
            self.min_error_value=min_error_feature_value
            mask = np.zeros(len(self.y),dtype=bool)
            mask[left_indices] = True
            self.is_leaf=False
            # children are created with depth+1; their constructors recurse
            # until max_depth is reached or no split reduces the cost
            if self.task_type=='regression':
                self.children=( NodeRegressor(self.X[mask,:] ,self.y[mask],self.max_depth,self.depth+1,self.criterion ), NodeRegressor(self.X[~mask,:] ,self.y[~mask],self.max_depth,self.depth+1,self.criterion ) )
            else:
                self.children=( NodeClassifier(self.X[mask,:] ,self.y[mask],self.possible_classes,self.max_depth,self.depth+1,self.criterion ), NodeClassifier(self.X[~mask,:] ,self.y[~mask],self.possible_classes,self.max_depth,self.depth+1,self.criterion ) )
        else: # no split, impurity wont be reduced
            self.is_leaf=True
    def predict(self,X):
        """Route a single sample down the tree and return the leaf's value."""
        if self.is_leaf:
            return self.final_value
        else:
            # if self.validate_input(X):
            if X[self.min_error_feature] < self.min_error_value:
                return self.children[0].predict(X)
            else:
                return self.children[1].predict(X)
class NodeClassifier(NodeBase):
    """Tree node for classification: predicts the node's majority class."""
    def __init__(self,X,y,possible_classes,max_depth,depth,criterion):
        super().__init__(X,y,criterion,max_depth,depth)
        self.possible_classes=possible_classes
        self.task_type='classification' # needed for parent class
        classes, counts = np.unique(y, return_counts=True) # classes in node and their counts
        # majority class among this node's samples
        self.final_value=classes[np.argmax(counts)]
        # per-class sample counts aligned with possible_classes.
        # NOTE(review): `value[classes]=counts` assumes the class labels are
        # integers 0..len(possible_classes)-1 — confirm behaviour for
        # non-contiguous or non-integer labels.
        value=np.zeros(len(possible_classes) )
        value[classes]=counts
        self.value=value
        # recurse until the depth limit; otherwise this node stays a leaf
        if self.depth < max_depth:
            self.build()
        else:
            self.is_leaf=True
    def print(self) -> None:
        """Pre-order dump of this subtree to stdout."""
        print(f'Possible classes:{self.possible_classes}')
        print(f'Value:{self.value}')
        print(f'Samples: {len(self.X)}')
        print(f'Impurity:{self.cost}')
        print(f'Depth: {self.depth}')
        print(f'Class:{self.final_value}')
        if self.is_leaf:
            print("is Leaf")
        print('\n\n')
        if self.children is not None:
            self.children[0].print()
            self.children[1].print()
        return
class NodeRegressor(NodeBase):
    """Tree node for regression: predicts the mean target of its samples."""

    def __init__(self, X, y, max_depth, depth, criterion):
        super().__init__(X, y, criterion, max_depth, depth)
        self.task_type = 'regression'   # needed by the parent class
        self.final_value = np.mean(y)
        # stop recursing once the depth limit is reached
        if self.depth >= max_depth:
            self.is_leaf = True
        else:
            self.build()

    def print(self) -> None:
        """Pre-order dump of this subtree to stdout."""
        for line in (f'Value:{self.final_value}',
                     f'Samples: {len(self.X)}',
                     f'Cost:{self.cost}',
                     f'Depth: {self.depth}'):
            print(line)
        if self.is_leaf:
            print("is Leaf")
        print('\n\n')
        if self.children is not None:
            for child in self.children:
                child.print()
        return
# + [markdown] id="CQFzkukz90ag"
# ### Classification example
# + id="vdfEy-fc9jmz"
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Classification demo on the iris data set.
iris=datasets.load_iris()
# keep only the last two feature columns of the iris data
X = iris["data"][:, 2:]
y=iris.target
# column vector, matching the shape fit() expects
y=y.reshape((-1,1))
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
# + id="XGfHLZnWA-wn"
# Fit a depth-limited classification tree on the training split.
tree_clf=DecisionTreeClassifier(max_depth=15)
tree_clf.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 466, "status": "ok", "timestamp": 1628179388894, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="yKqoa6aBG-PV" outputId="f0e0aee2-4513-4b40-f508-e0329d8d4cd1"
tree_clf.printTree()
# + id="oIPr7Vh111KH"
y_pred=tree_clf.predict(X_val)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 22, "status": "ok", "timestamp": 1628179388896, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="enZ1wo-A21Dc" outputId="b2b3e03f-6750-46e8-a7fe-4799aa5a5fdb"
accuracy_score(y_pred,y_val)
# + [markdown] id="Ff2JgGijpw-M"
# ### Regression example
# + id="g9lFKzYVpzsD"
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Regression demo: noisy quadratic y = 4*(x - 0.5)^2 + noise.
np.random.seed(42)
m = 200
X = np.random.rand(m, 1)
y = 4 * (X - 0.5) ** 2
# additive Gaussian noise with standard deviation 0.1
y = y + np.random.randn(m, 1) / 10
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
# + id="eoWZhvI7scPb"
# Two regression trees of different depth for comparison.
tree_reg1=DecisionTreeRegressor(max_depth=2)
tree_reg1.fit(X_train,y_train)
tree_reg2=DecisionTreeRegressor(max_depth=3)
tree_reg2.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1628179460548, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="90xr0cPSwEq-" outputId="4aff581b-97c8-402c-9575-d7b1948783d4"
# Hold-out MSE of the depth-2 tree.
y_pred1=tree_reg1.predict(X_val)
mean_squared_error(y_val,y_pred1)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1628179460549, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="XL3_o98W-pp7" outputId="306b8a4b-dd33-4528-811d-c7973202bef0"
# Hold-out MSE of the depth-3 tree.
y_pred2=tree_reg2.predict(X_val)
mean_squared_error(y_val,y_pred2)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1628179460551, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="uT-FdZ3ewNHt" outputId="ffeab8ee-f50e-4928-84c9-fb8abb4f26d1"
y.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 471} executionInfo={"elapsed": 547, "status": "ok", "timestamp": 1628179461089, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="wuIWj6KrrojE" outputId="d791621e-b88f-4ef4-8d2f-5be44ca2dd07"
import matplotlib.pyplot as plt
def plot_regression(regressor, X,y,max_depth=None,y_label=None):
    """Scatter the data and overlay the regressor's prediction curve.

    `max_depth` is used only for the plot title and `y_label` only for the
    y-axis label; both are optional (falsy values suppress them).
    """
    # dense grid across the observed x-range for a smooth prediction line
    X_temp=np.linspace(X.min(), X.max(),num=len(X)*2)
    y_temp=regressor.predict(X_temp)
    plt.plot(X,y,'b.')
    plt.plot(X_temp,y_temp,'r', linewidth=3)
    plt.xlabel("X",fontsize=20)
    if y_label:
        plt.ylabel(y_label,fontsize=20,rotation=0)
    if max_depth:
        plt.title(f'Max depth: {max_depth}',fontsize=20)
# Side-by-side comparison of the depth-2 and depth-3 trees.
fig,axes=plt.subplots(ncols=2,sharey=True, figsize=(16,7) )
plt.sca(axes[0])
plot_regression(tree_reg1,X,y,2,'y')
plt.sca(axes[1])
plot_regression(tree_reg2,X,y,3)
# + colab={"base_uri": "https://localhost:8080/", "height": 417} executionInfo={"elapsed": 808, "status": "ok", "timestamp": 1628179464321, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08286890153310590408"}, "user_tz": -120} id="3C4aqhLk7U7E" outputId="2cbc1d17-33f9-4483-bcf2-6111463490ce"
# Tree with no depth limit: grows until no split reduces the cost.
tree_reg3=DecisionTreeRegressor()
tree_reg3.fit(X_train,y_train)
plt.figure(figsize=(8,6))
# NOTE(review): the *string* 'None' (not the value None) is passed as
# max_depth, so the plot is titled "Max depth: None" — confirm intended.
plot_regression(tree_reg3,X,y,'None','y')
| decision_tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # TODO
# - Check if the series needs / benefits from a BoxCox transform
library(forecast)
# Read every monthly-complaints CSV in `dataFolder` and return a named list
# of ts objects, keyed by complaint type (the file name without its ".csv").
loadData <- function(dataFolder) {
    result <- list()
    for (fname in list.files(dataFolder)) {
        frame <- read.csv(paste0(dataFolder, "/", fname), stringsAsFactors=F)
        key <- substr(fname, 1, nchar(fname) - 4)
        # monthly series starting in January of the earliest year present
        result[[key]] <- ts(frame$Complaints,
                            start=c(min(frame$Year), 1),
                            frequency = 12)
    }
    result
}
# Load all complaint series and pick the street-lights series for analysis.
data <- loadData("../../data/topNComplaints")
series <- data[["Non Burning of Street Lights"]]
series
tsdisplay(series)
# data before 2012 are too few to consider
train_start <- c(2012,4)
# restrict the series to April 2012 .. June 2016
series <- window(series, start=train_start, end=c(2016, 6))
tsdisplay(series)
# ## Cleaning up data
#
# Although this data looks like it doesn't have any outliers, let's take a look at where the potential extreme values are
plot(series, col="red", lty=2)
lines(tsclean(series), lty=1)
legend("topright", col=c("red", "black"), lty=c(2,1), legend=c("Original", "Cleaned"))
series.cleaned <- tsclean(series)
# Taking a call here that the data doesn't contain any outliers, so we're leaving the data as it is
#
# ## Decomposition
# +
# first try a static seasonal component
plot(stl(series, s.window="periodic"))
#Comparing the plot with the decomposition of training data alone
plot(stl(window(series,end = c(2015,6)) , s.window="periodic"))
#From the plot, it seems that there are some minor and intricate seasonality differences between the
#training and overall data set
# -
# The trend component is the most significant here, so the series probably needs some differencing. Strangely, there is also a seasonal component. Let's take varying s.window to see if changes over time.
old.par <- par(mfrow=c(2, 2), mar=c(3,3,3,3))
plot(stl(series, s.window=3)$time.series[, 1], main="Seasonal Component with s.window = 3")
plot(stl(series, s.window=6)$time.series[, 1], main="Seasonal Component with s.window = 6")
plot(stl(series, s.window=10)$time.series[, 1], main="Seasonal Component with s.window = 10")
plot(stl(series, s.window=12)$time.series[, 1], main="Seasonal Component with s.window = 12")
par(old.par)
# Looks like the seasonal component is there, but $s.window=3$ suggests that it is not as significant
seasonal <- stl(series, s.window="periodic")$time.series[, 1] # change s.window
plot(seasonal, col="grey")
month <- 11 # change this to month you want
for(i in 2012:2016) {
abline(v=(month-1)/12 + i, lty=2)
}
# **Looks like it peaks in November. **
#
# Let us then do a seasonal adjustment of the data. All further analysis should be done on this data
stl.fit <- stl(series, s.window="periodic")
series.adj <- seasadj(stl.fit)
tsdisplay(series.adj)
stl.cleaned.fit <- stl(series.cleaned, s.window=6)
series.cleaned.adj <- seasadj(stl.cleaned.fit)
tsdisplay(series.cleaned.adj)
# ## Forecasting
# ### ARIMA models - estimating p, d, q
#
# First, let us estimate $d$. This is done by looking at the ACF of the data.
Acf(series.adj)
# the above series is a classic example of a series that requires a diff of order 1,
# so let's try that out and take a look at the Acf to see if it is overdifferenced
series.diff <- diff(series.adj, lag=1, differences = 1)
tsdisplay(series.diff)
# the series looks good!
# let's take a look at the standard deviation as well
sd(series.adj)
sd(series.diff)
# looks good - it has decreased. Since stationary series return to the mean, let's take a look at that as well
plot(series.diff, col="grey")
# a 2x4 MA
lines(ma(ma(series.diff, order=2), order=4))
abline(mean(series.diff), 0, col="blue", lty=2)
# let's verify once wheather d=1
ndiffs(series.adj)
# Next, we need to estimate p and q. To do this, we take a look at the PACF of the data. Note that this analysis is done on the differenced data. If we decide to fit a model with d=0, then we need to perform this analysis for the un-differenced data as well
# for d=0
Pacf(series.adj)
# looks like a AR(1) and a MA(5) process
# take a look at the d=1
Pacf(series.diff)
# +
# this looks like a MA(11) and a AR(4) process
# -
# #### Building candidate models
# Fit an ARIMA(p, d, q) model, plot an h-step forecast and run residual
# diagnostics.
#
# Args:
#   series:   ts object to fit
#   order:    c(p, d, q), passed straight to Arima()
#   h:        forecast horizon
#   testData: optional hold-out ts; when given it is overlaid on the plot
#             and the forecast accuracy against it is printed
modelArima <- function(series, order, h, testData = NULL) {
    fit <- Arima(series, order=order)
    print(summary(fit))
    predictions <- forecast(fit, h)
    # y-limits must cover both the training series and the hold-out data;
    # guard against min(NULL)/max(NULL) (Inf/-Inf with warnings) when
    # testData is absent
    if(!is.null(testData)) {
        min.yvalue <- min(min(series), min(testData))
        max.yvalue <- max(max(series), max(testData))
    } else {
        min.yvalue <- min(series)
        max.yvalue <- max(series)
    }
    plot(predictions, ylim=c(min.yvalue, max.yvalue))
    if(!is.null(testData)) {
        lines(testData, col="red", lty=2)
        print(accuracy(predictions, testData))
    }
    # check if the residuals look like white noise
    Acf(residuals(fit), main="Residuals")
    # Ljung-Box portmanteau test; fitdf must equal the number of estimated
    # ARMA coefficients (p + q) rather than the hard-coded 4 used before
    print(Box.test(residuals(fit), lag=24, fitdf=order[1] + order[3], type="Ljung"))
}
# split the series into a test and a train set
series.train <- window(series.adj, end=c(2015, 6))
series.test <- window(series.adj, start=c(2015, 7))
# with d=0, order=(1, 0, 5)
modelArima(series.train, c(1, 0, 5), length(series.test), series.test)
# with d=1, order=(4, 1, 11)
modelArima(series.train, c(4, 1, 11), length(series.test), series.test)
# fiddle with p and q, with d=1
modelArima(series.train, c(5, 1, 11), length(series.test), series.test)
modelArima(series.train, c(4, 1, 12), length(series.test), series.test)
modelArima(series.train, c(4, 1, 10), length(series.test), series.test)
modelArima(series.train, c(3, 1, 11), length(series.test), series.test)
# ## Exponential Smoothing
# +
# series = original data
# series.cleaned = outliers removed
# series.adj = original data, seasonally adjusted
# series.cleaned.adj = cleaned data, seasonally adjusted
# series.train = original seasonally adjusted data's train split
# series.test = original seasonally adjusted data's test split
# series.cleaned.train = cleaned seasonally adjusted data's train split
# series.cleaned.test = cleaned seasonally adjusted data's test split
# stl.fit = original data's stl
# stl.cleaned.fit = cleaned data's stl
# tsdisplay(series.adj)
train_start = c(2012,4)
train_end = c(2015,6)
test_start = c(2015, 7)
test_end = c(2016, 6)
seasonal = stl.fit[[1]][,1]
seasonal_cleaned = stl.cleaned.fit[[1]][,1]
# +
## Function for finding the average of seasonal components
## Per-period (e.g. per-month) statistics of a ts object.
## type = 1: per-period sums; type = 2: per-period means (sum / #observations).
## The per-period statistic is tiled `years` times and returned as a new ts
## starting at `start_value` with the input's frequency.
period_stat <- function(ts_data_in, type = 1, start_value, years){
    #type 1: sum
    #type 2: mean
    freq <- frequency(ts_data_in)
    len <- length(ts_data_in)
    freq_vector <- numeric(0)
    freq_sum <- numeric(0)
    vec <- numeric(0)
    sum_vec <- numeric(0)
    start_val <- start(ts_data_in)
    # pad with NAs so the data is aligned to period position 1 of its cycle
    ts_data_in <- c(rep(NA,start_val[2] - 1),ts_data_in)
    # NOTE(review): max_limit is based on the *unpadded* length, so the last
    # cycle of the padded vector may be indexed past its end; in R that
    # yields NAs, which are zeroed below and excluded from the counts.
    max_limit <- ceiling(len/freq)
    for(i in 1:max_limit){
        # slice out cycle i (one full period of length `freq`)
        vec <- ts_data_in[(((i-1)*freq)+1):(((i-1)*freq)+freq)]
        # 1 where an observation exists at that period position, else 0
        freq_vector <- as.numeric(!is.na(vec))
        vec[is.na(vec)] <- 0
        if(i == 1){
            sum_vec <- vec
            freq_sum <- freq_vector
        }else{
            sum_vec <- sum_vec + vec
            freq_sum <- freq_sum + freq_vector
        }
    }
    final_ts <- numeric(0)
    if(type == 1)
    {
        final_ts <- sum_vec
    }else if(type == 2) {
        # mean per period position = sum / number of observed values
        final_ts <- (sum_vec/freq_sum)
    } else {
        stop("Invalid type")
    }
    return(ts(rep(final_ts,years),frequency = freq, start = start_value ))
}
# +
#Adjust the negative values in the ts data
min_ts_value <- min(series.adj)
min_ts_cleaned_value <- min(series.cleaned.adj)
bias_value <- (-1*min_ts_value) + 1
bias_value_cleaned <- (-1*min_ts_cleaned_value) + 1
#min(series)
#min(series.cleaned)
#min(series.adj)
#min(series.cleaned.adj)
ES_series <- series.adj + bias_value
ES_series_cleaned <- series.cleaned.adj + bias_value_cleaned
#plot(ES_series)
train_data_adj <- window(ES_series,start = train_start, end=train_end)
test_data_adj <- window(ES_series, start= test_start, end = test_end)
train_data_adj_cleaned <- window(ES_series_cleaned,start = train_start, end = train_end)
test_data_adj_cleaned <- window(ES_series_cleaned, start = test_start, end = test_end)
train_data <- window(series, start = train_start, end = train_end)
test_data <- window(series, start = test_start, end = test_end)
train_data_cleaned <- window(series.cleaned, start = train_start, end = train_end)
test_data_cleaned <- window(series.cleaned, start = test_start, end = test_end)
# +
#Getting the mean value from the seasonal components for the data set and not for the training set alone.
#Need to adjust based on the input from Suchana.
seasonal_mean <- period_stat(seasonal,2,c(2012,1),years = 7)
seasonal_cleaned_mean <- period_stat(seasonal_cleaned,2,c(2012,1),years = 7)
plot(seasonal_mean)
plot(seasonal_cleaned_mean)
# -
#Preprocessing data. Removing 0 from the data
train_data_adj[train_data_adj==0]=0.01
train_data_adj_cleaned[train_data_adj_cleaned==0]=0.01
# ## Finding the best fit for exponential smoothing
all_types = c("ANN","AAN","AAA","ANA","MNN","MAN","MNA","MAA","MMN","MNM","MMM","MAM")
forecast_values = 12
# For eg: AAA -> additive level, additive trend and additive seasonality
# ANN -> No trend or seasonality
# #### Function: For trying out various possible models in Exponential smoothing, and picking the best with MAPE values
## Fit all candidate ETS models (damped and undamped) on train_data and
## score each against test_data.
## Returns list(all_fit, test_models) where all_fit[[name]] holds
## c(MAPE, 100 * Ljung-Box p-value) and test_models[[name]] the fitted model.
fit_function <- function(train_data, test_data)
{
    all_fit <- list()
    test_models <- list()
    print("Fitting various models: ")
    for (bool in c(TRUE,FALSE)){
        for (model_type in all_types){
            # a damped trend requires a trend component: skip "xNx" models
            if(bool & substr(model_type,2,2)=="N"){
                next
            }
            test_model = ets(train_data, model = model_type,damped = bool)
            # [1] = MAPE of the h-step forecast against the test data
            all_fit[[paste0("ETS Model: ",model_type,", Damped: ",bool)]][1] <-
                accuracy(f = forecast.ets(test_model,h=forecast_values)$mean,x = test_data)[5]
            # [2] = Ljung-Box p-value (in percent) of the residuals
            all_fit[[paste0("ETS Model: ",model_type,", Damped: ",bool)]][2] <-
                100*(Box.test(test_model$residuals, lag = 20, type = "Ljung-Box")$p.value)
            test_models[[paste0("ETS Model: ",model_type,", Damped: ",bool)]] <- test_model
            print(test_model$method)
            print(accuracy(f = forecast.ets(test_model,h=forecast_values)$mean, x = test_data)[5])
            print("")
            # NOTE(review): the original comment claimed that models with
            # autocorrelated residuals are excluded here, but no exclusion is
            # performed — the p-value is only stored for later inspection.
        }
    }
    return(list(all_fit,test_models))
}
# +
# Fitting the models for all types of data - Original, cleaned, seasonally adjusted, cleaned - seasonally adjusted
models_adj <- fit_function(train_data_adj,test_data_adj) #Seasonally adjusted data
models_adj_cleaned <- fit_function(train_data_adj_cleaned,test_data_adj_cleaned) #Seasonally adjusted, cleaned(with outliers being removed) data
models <- fit_function(train_data,test_data) #Original data
models_cleaned <- fit_function(train_data_cleaned, test_data_cleaned) #Original, cleaned data
# +
all_fit_adj <- models_adj[[1]]
test_models_adj <- models_adj[[2]]
all_fit_adj_cleaned<- models_adj_cleaned[[1]]
test_models_adj_cleaned <- models_adj_cleaned[[2]]
all_fit <- models[[1]]
test_models <- models[[2]]
all_fit_cleaned <- models_cleaned[[1]]
test_models_cleaned <- models_cleaned[[2]]
# -
# #### Case 1: Identifying the best fit for seasonally adjusted data
# +
#Finding the best fit
proper_models <- all_fit_adj
if(length(proper_models)==0){
print("None of the model satisfies - Ljung-Box test; Model with least 3 p values taken")
p_values <- sapply(all_fit_adj, function(x)x[2])
proper_models <- all_fit_adj[order(p_values)][1:3]
}
best_mape <- min(sapply(proper_models,function(x)x[1]))
best_model <- names(which.min(sapply(proper_models,function(x)x[1])))
print(paste0("Best Model:",best_model))
print(paste0("Best Mape: ",best_mape))
#Finding top n fits
#top_models <- c()
Top_n <- 3
if(length(proper_models)<3){Top_n <- length(proper_models)}
top_mape_val <- proper_models[order(sapply(proper_models, function(x)x[1]))][1:Top_n]
top_models_adj <- names(top_mape_val)
top_mape_val
seasonal_mean
# -
# #### Case 2: Identifying the best fit for cleaned, seasonally adjusted data
# +
#Finding the best fit
proper_models <- all_fit_adj_cleaned
if(length(proper_models)==0){
print("None of the model satisfies - Ljung-Box test; Model with least 3 p values taken")
p_values <- sapply(all_fit_adj_cleaned, function(x)x[2])
proper_models <- all_fit_adj_cleaned[order(p_values)][1:3]
}
best_mape <- min(sapply(proper_models,function(x)x[1]))
best_model <- names(which.min(sapply(proper_models,function(x)x[1])))
print(paste0("Best Model:",best_model))
print(paste0("Best Mape: ",best_mape))
#Finding top n fits
#top_models <- c()
Top_n <- 3
if(length(proper_models)<3){Top_n <- length(proper_models)}
top_mape_val <- proper_models[order(sapply(proper_models, function(x)x[1]))][1:Top_n]
top_models_adj_cleaned <- names(top_mape_val)
top_mape_val
seasonal_cleaned_mean
# -
# #### Case 3: Identifying the best fit for original data
# +
#Finding the best fit
proper_models <- all_fit
if(length(proper_models)==0){
print("None of the model satisfies - Ljung-Box test; Model with least 3 p values taken")
p_values <- sapply(all_fit, function(x)x[2])
proper_models <- all_fit[order(p_values)][1:3]
}
best_mape <- min(sapply(proper_models,function(x)x[1]))
best_model <- names(which.min(sapply(proper_models,function(x)x[1])))
print(paste0("Best Model:",best_model))
print(paste0("Best Mape: ",best_mape))
#Finding top n fits
#top_models <- c()
Top_n <- 3
if(length(proper_models)<3){Top_n <- length(proper_models)}
top_mape_val <- proper_models[order(sapply(proper_models, function(x)x[1]))][1:Top_n]
top_models<- names(top_mape_val)
top_mape_val
seasonal_cleaned_mean
# -
# #### Case 4: Identifying the best fit for cleaned original data
# +
# Case 4: pick the best exponential-smoothing fit for the cleaned original
# data, ranked by MAPE; fall back to the three lowest Ljung-Box p-values
# when no model qualifies.
proper_models <- all_fit_cleaned
if(length(proper_models)==0){
    print("None of the model satisfies - Ljung-Box test; Model with least 3 p values taken")
    # BUG FIX: this fallback branch previously read from all_fit (Case 3's
    # models for the original data) instead of all_fit_cleaned
    p_values <- sapply(all_fit_cleaned, function(x)x[2])
    proper_models <- all_fit_cleaned[order(p_values)][1:3]
}
best_mape <- min(sapply(proper_models,function(x)x[1]))
best_model <- names(which.min(sapply(proper_models,function(x)x[1])))
print(paste0("Best Model:",best_model))
print(paste0("Best Mape: ",best_mape))
# keep the (at most 3) lowest-MAPE models for the plots below
Top_n <- 3
if(length(proper_models)<3){Top_n <- length(proper_models)}
top_mape_val <- proper_models[order(sapply(proper_models, function(x)x[1]))][1:Top_n]
top_models_cleaned <- names(top_mape_val)
top_mape_val
seasonal_cleaned_mean
# -
# ### Plot analysis
# #### Plot 1: Seasonally adjusted data
# +
plot(ES_series,col = "black")
lines(test_data_adj, col = "blue")
lines(forecast.ets(test_models_adj[[top_models_adj[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models_adj[[top_models_adj[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models_adj[[top_models_adj[3]]],h=12)$mean, col = "yellow") #Top third model
legend("topleft", lty=1,col = c("blue","red","green","yellow"),
c("Test data", "Best model", "Second best", "Third best"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# -
# #### Plot 2: Seasonally adjusted & cleaned data
# +
plot(ES_series_cleaned,col = "black")
lines(test_data_adj_cleaned, col = "blue")
lines(forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[3]]],h=12)$mean, col = "yellow") #Top third model
legend("topleft", lty=1,col = c("blue","red","green","yellow"),
c("Test data", "Best model", "Second best", "Third best"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# -
# #### Plot 3: Original data
# +
#all_fit
#test_models[[all_fit[1]]]
plot(series,col = "black")
lines(test_data, col = "blue")
accuracy(test_models[[top_models[1]]])
accuracy(test_models[[top_models[2]]])
accuracy(test_models[[top_models[3]]])
lines(forecast.ets(test_models[[top_models[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models[[top_models[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models[[top_models[3]]],h=12)$mean, col = "yellow") #Top third model
legend("topleft", lty=1,col = c("blue","red","green","yellow"),
c("Test data", "Best model", "Second best", "Third best"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# -
# #### Plot 4: Cleaned original data
# +
# Plot 4: models fitted on the cleaned original series.
# In-sample accuracy of the three best models
accuracy(test_models_cleaned[[top_models_cleaned[1]]])
accuracy(test_models_cleaned[[top_models_cleaned[2]]])
accuracy(test_models_cleaned[[top_models_cleaned[3]]])
plot(series.cleaned,col = "black", ylim = c(200,2200))
lines(test_data_cleaned, col = "blue")
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[3]]],h=12)$mean, col = "yellow") #Top third model
# Fix: the colour vector had a stray 5th entry ("brown") while only four
# labels/line types were given, which misaligned the legend entries;
# keep exactly the four plotted series.
legend("topleft",lty=c(1,1,1,1),col = c("blue","red","green","yellow"),
c("Test data(cleaned)", "Best model", "Second best", "Third best"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# -
# ## Getting back the original data
# #### Case 1: Seasonally adjusted data (To bring back the original data, seasonal component and the Bias value is added back)
# +
print("Case 1: Seasonally adjusted data")
#Adding the bias value which was added to overcome the negative values
# (a constant bias_value was added earlier to keep the adjusted series positive;
# it is subtracted here to undo that shift)
ES_series_bias <- ES_series - bias_value
test_series_bias <- test_data_adj - bias_value
forecast1_bias <- forecast.ets(test_models_adj[[top_models_adj[1]]],h=12)$mean - bias_value
forecast2_bias <- forecast.ets(test_models_adj[[top_models_adj[2]]],h=12)$mean - bias_value
forecast3_bias <- forecast.ets(test_models_adj[[top_models_adj[3]]],h=12)$mean - bias_value
#Adding back the seasonal value from stl decomposition
ES_value_adj <- ES_series_bias + seasonal
test_series_adj <- test_series_bias + seasonal
#Adding back the mean seasonal component to the forecasted data
# (future seasonal values are unknown, so the mean seasonal profile is used)
forecast1_adj <- forecast1_bias + seasonal_mean
forecast2_adj <- forecast2_bias + seasonal_mean
forecast3_adj <- forecast3_bias + seasonal_mean
#Calculating the accuracy of the training data
accuracy(test_models_adj[[top_models_adj[1]]])
accuracy(test_models_adj[[top_models_adj[2]]])
accuracy(test_models_adj[[top_models_adj[3]]])
# +
#Checking the MAPE values with original data
# Out-of-sample accuracy: back-transformed forecasts vs back-transformed test series
print(paste0("Top model: ", top_models_adj[1]))
accuracy(forecast1_adj,test_series_adj)
print(paste0("Top model: ", top_models_adj[2]))
accuracy(forecast2_adj,test_series_adj)
print(paste0("Top model: ", top_models_adj[3]))
accuracy(forecast3_adj,test_series_adj)
#accuracy(test_data, forecast.ets(test_models[[top_models[3]]],h=12)$mean )
# -
# #### Case 2: Seasonally adjusted & cleaned data (To bring back the original data, seasonal component and the Bias value is added back)
# +
print("Case 2: Seasonally adjusted & cleaned data")
#Adding the bias value which was added to overcome the negative values
# (same back-transform as Case 1, applied to the cleaned series)
ES_series_bias_cleaned <- ES_series_cleaned - bias_value_cleaned
test_series_bias_cleaned <- test_data_adj_cleaned - bias_value_cleaned
forecast1_bias <- forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[1]]],h=12)$mean - bias_value_cleaned
forecast2_bias <- forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[2]]],h=12)$mean - bias_value_cleaned
forecast3_bias <- forecast.ets(test_models_adj_cleaned[[top_models_adj_cleaned[3]]],h=12)$mean - bias_value_cleaned
#Adding back the seasonal value from stl decomposition
ES_value_adj_cleaned <- ES_series_bias_cleaned + seasonal_cleaned
test_series_adj_cleaned <- test_series_bias_cleaned + seasonal_cleaned
#Adding back the mean seasonal component to the forecasted data
# (future seasonal values are unknown, so the mean seasonal profile is used)
forecast1_adj_cleaned <- forecast1_bias + seasonal_cleaned_mean
forecast2_adj_cleaned <- forecast2_bias + seasonal_cleaned_mean
forecast3_adj_cleaned <- forecast3_bias + seasonal_cleaned_mean
#Calculating the accuracy of the training data
accuracy(test_models_adj_cleaned[[top_models_adj_cleaned[1]]])
accuracy(test_models_adj_cleaned[[top_models_adj_cleaned[2]]])
accuracy(test_models_adj_cleaned[[top_models_adj_cleaned[3]]])
# +
#Checking the MAPE values with original data
# Out-of-sample accuracy for the cleaned, back-transformed forecasts
print(paste0("Top model: ", top_models_adj_cleaned[1]))
accuracy(forecast1_adj_cleaned,test_series_adj_cleaned)
print(paste0("Top model: ", top_models_adj_cleaned[2]))
accuracy(forecast2_adj_cleaned,test_series_adj_cleaned)
print(paste0("Top model: ", top_models_adj_cleaned[3]))
accuracy(forecast3_adj_cleaned,test_series_adj_cleaned)
# Echo the names of the best models on the original data (for reference)
top_models
#accuracy(forecast.ets(test_models[[top_models[1]]],h=12)$mean, test_data)
#accuracy(test_data, forecast.ets(test_models[[top_models[3]]],h=12)$mean)
# -
# #### Residual Analysis
# +
#Ljung Box test - One of the checks to perform stationarity of TS data
# Residual diagnostics for one fitted ets model: prints the Ljung-Box test
# (H0: residuals are independently distributed, i.e. no autocorrelation up to
# the given lag) and draws the residual ACF.
# `models` defaults to the global `test_models` list for backward compatibility
# with the existing calls below; pass e.g. models = test_models_adj to inspect
# the fits from another case instead of relying on the hidden global.
# (The name keeps the original spelling "analyis" because later cells call it.)
residual_analyis <- function(model_name, models = test_models){
  print(model_name)
  print(Box.test(models[[model_name]]$residuals, lag = 20, type = "Ljung-Box"))
  Acf(models[[model_name]]$residuals, main = model_name)
}
# -
#Case 1: Seasonally adjusted models
#Residual Analysis for top three models
# NOTE(review): residual_analyis always indexes the global `test_models` list;
# for Cases 1, 2 and 4 only the model *names* come from the other rankings, so
# the fits actually inspected are the original-data ones -- confirm intended.
residual_analyis(top_models_adj[1]) #Top model
residual_analyis(top_models_adj[2]) #Second best model
residual_analyis(top_models_adj[3]) #Third best model
#Case 2 - Seasonally adjusted cleaned models
#Residual Analysis for top three models
residual_analyis(top_models_adj_cleaned[1]) #Top model
residual_analyis(top_models_adj_cleaned[2]) #Second best model
residual_analyis(top_models_adj_cleaned[3]) #Third best model
#Case 3 - Models on original data
#Residual Analysis for top three models
residual_analyis(top_models[1]) #Top model
residual_analyis(top_models[2]) #Second best model
residual_analyis(top_models[3]) #Third best model
#Case 4 - Models on cleaned original data
#Residual Analysis for top three models
residual_analyis(top_models_cleaned[1]) #Top model
residual_analyis(top_models_cleaned[2]) #Second best model
residual_analyis(top_models_cleaned[3]) #Third best model
# #### Residual output: The top two models across all four cases seem to be slightly autocorrelated
# ### Final Output:
# #### Analysing each case and figuring out the most suitable model
# ### Case 1: Model for seasonally adjusted data
# +
# Final view, Case 1: back-transformed fitted values, test data and forecasts
plot(ES_value_adj,col = "black", ylab = "No of complaints",
main = "Model with seasonal adjustment")
lines(test_series_adj, col = "blue") #Original test data
# Out-of-sample accuracy of the three back-transformed forecasts
accuracy(forecast1_adj,test_series_adj)
accuracy(forecast2_adj,test_series_adj)
accuracy(forecast3_adj,test_series_adj)
lines(test_series_bias + seasonal_mean, col = "brown", lty =2) #Deseasonalised data with average seasonal component applied
lines(forecast1_adj, col = "red") #Top model
lines(forecast2_adj, col = "green") #Top second model
lines(forecast3_adj, col = "yellow") #Top third model
legend("topleft",lty=c(1,1,1,1,2),col = c("blue","red","green","yellow","brown"),
c("Test data", "Best model", "Second best", "Third best","test data with seasonal mean"))
# -
# #### Note: A great fit whose forecasts capture seasonality very well. MAPE values are also significantly less
# ### Case 2: Model for seasonally adjusted and cleaned data
# +
# Final view, Case 2: cleaned + seasonally adjusted, back-transformed
plot(ES_value_adj_cleaned,col = "black", ylab = "No of complaints",
main = "Model with seasonal adjustment and cleaning")
lines(test_series_adj_cleaned, col = "blue") #Original test data
# Out-of-sample accuracy of the three back-transformed forecasts
accuracy(forecast1_adj_cleaned,test_series_adj_cleaned)
accuracy(forecast2_adj_cleaned,test_series_adj_cleaned)
accuracy(forecast3_adj_cleaned,test_series_adj_cleaned)
lines(test_series_bias_cleaned + seasonal_cleaned_mean, col = "brown", lty =2) #Deseasonalised data with average seasonal component applied
lines(forecast1_adj_cleaned, col = "red") #Top model
lines(forecast2_adj_cleaned, col = "green") #Top second model
lines(forecast3_adj_cleaned, col = "yellow") #Top third model
legend("topleft",lty=c(1,1,1,1,2),col = c("blue","red","green","yellow","brown"),
c("Test data", "Best model", "Second best", "Third best","test data with seasonal mean"))
# -
# #### Note: With cleaned data, the model is better with lesser MAPE values. Except the small peak at the end of 2015 other forecasts are almost spot on.
# ### Case 3: Model for the original data as is
# +
# Final view, Case 3: models fitted to the original data, 12-month forecasts
plot(series,col = "black", ylab = "No of complaints",
main = "Model with original data")
lines(test_data, col = "blue") #Original test data
# Out-of-sample accuracy of the three best forecasts vs the test data
accuracy(forecast.ets(test_models[[top_models[1]]],h=12)$mean,test_data)
accuracy(forecast.ets(test_models[[top_models[2]]],h=12)$mean,test_data)
accuracy(forecast.ets(test_models[[top_models[3]]],h=12)$mean,test_data)
lines(forecast.ets(test_models[[top_models[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models[[top_models[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models[[top_models[3]]],h=12)$mean, col = "yellow") #Top third model
legend("topleft", lty=1,col = c("blue","red","green","yellow"),
c("Test data", "Best model", "Second best", "Third best"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# Fix: removed a stray second legend() call (copy-paste leftover) that overdrew
# the legend above with five labels, including a "brown" series never plotted here.
# -
# #### Note: Even though MAPE values are less for some models, they hardly capture the desired seasonality
# ### Case 4: Model for original data which is cleaned
# +
#plot(forecast.ets(test_models_cleaned[[top_models_cleaned[1]]],h=12))
# Final view, Case 4: cleaned original data, test data and forecasts
plot(series.cleaned,col = "black", ylim = c(300,2300),main = "Model with cleaned data")
lines(test_data_cleaned, col = "blue")
#lines(test_data, col = "brown", lty = 2)
# Out-of-sample accuracy of the three best forecasts vs the cleaned test data
accuracy(forecast.ets(test_models_cleaned[[top_models_cleaned[1]]],h=12)$mean,test_data_cleaned)
accuracy(forecast.ets(test_models_cleaned[[top_models_cleaned[2]]],h=12)$mean,test_data_cleaned)
accuracy(forecast.ets(test_models_cleaned[[top_models_cleaned[3]]],h=12)$mean,test_data_cleaned)
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[1]]],h=12)$mean, col = "red") #Top model
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[2]]],h=12)$mean, col = "green") #Top second model
lines(forecast.ets(test_models_cleaned[[top_models_cleaned[3]]],h=12)$mean, col = "yellow") #Top third model
# NOTE(review): the legend's 5th entry ("Actual test data", brown, lty=2) refers
# to the lines() call commented out above -- either re-enable that line or drop
# the entry so the legend matches what is actually plotted.
legend("topleft",lty=c(1,1,1,1,2),col = c("blue","red","green","yellow","brown"),
c("Test data(cleaned)", "Best model", "Second best", "Third best","Actual test data"))
#Observation: Unusual peak at December'15. To check if it is an anomaly
# -
# #### Note: Only the best model captures the seasonality but also suffers a constant bias. Interestingly, even though other two models have more or less the same MAPE they don't have any trend or seasonality.
# ### Observation: From the MAPE values and the plot observations, the forecasting model works best for seasonally adjusted data. More specifically, the model created for seasonally adjusted cleaned data seems to give best results.
| jupyter-notebooks/time-series/topNComplaints/complaintTypes/Non burning of street lights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Required imports
import numpy as np
import cv2 as cv
# +
## Start webcam
# NOTE: won't work in server hosted notebooks

# Get the webcam (device index 0 = default camera)
cap = cv.VideoCapture(0)

# Looping over all captured frames
while True:
    # Capture frame
    # ret   - True or False depending on the presence of frame
    # frame - the next frame
    ret, frame = cap.read()

    # Bug fix: cap.read() returns (False, None) when no frame is available
    # (camera missing/unplugged, read error); cvtColor would crash on None.
    if not ret:
        break

    # Convert the frame to grayscale
    frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv.imshow('Image', frame)

    # Close the window if 'q' is pressed
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# When the process is complete, release the capture
cap.release()
cv.destroyAllWindows()
# +
## Playing video from file

# Get the video source
cap = cv.VideoCapture("/home/arunava/Downloads/tyr/Spiderwick Chronicles_Evolutionnet.info.avi")

# Looping over all the frames
while cap.isOpened():
    # Capture frame
    ret, frame = cap.read()

    # Bug fix: at end of file cap.read() returns (False, None); the original
    # passed None to cv.imshow and crashed. Stop cleanly instead.
    if not ret:
        break

    # Display the resulting frame (no per-frame processing for plain playback;
    # the original's `frame = frame` no-op was removed)
    cv.imshow('Image', frame)

    # Close the window if 'q' is pressed
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# When the process is complete, release the capture
cap.release()
cv.destroyAllWindows()
# +
## Writing a video

# Get the video source
cap = cv.VideoCapture('/home/arunava/Downloads/tyr/Spiderwick Chronicles_Evolutionnet.info.avi')

# Define the codec and create VideoWriter object
# fourcc is a 4-byte code to specify the video codec
# Fedora - DIVX, XVID, MJPG, X264, WMV1, WMV2
# Windows - DVIX
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

# Looping over all the frames
while cap.isOpened():
    # Getting the frame
    ret, frame = cap.read()

    # Bug fix: the original kept looping after the stream ended and handed
    # frame=None to cv.imshow, which crashes. Exit when no frame is returned.
    if not ret:
        break

    # Operations on the frame
    # NOTE(review): VideoWriter treats frames as BGR; writing an HSV-converted
    # frame stores HSV channels as if they were BGR -- confirm this is intended.
    frame = cv.cvtColor(frame, cv.COLOR_BGR2HSV)

    # Write the modified frame
    out.write(frame)

    # Show the frame
    cv.imshow('frame', frame)

    # Stop if certain key is pressed
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources
cap.release()
out.release()
cv.destroyAllWindows()
# -
| 002_videos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Topic-of-interest" data-toc-modified-id="Topic-of-interest-1"><span class="toc-item-num">1 </span>Topic of interest</a></span></li><li><span><a href="#Problems-within-topic" data-toc-modified-id="Problems-within-topic-2"><span class="toc-item-num">2 </span>Problems within topic</a></span><ul class="toc-item"><li><span><a href="#Definition-of-"conservative"" data-toc-modified-id="Definition-of-"conservative"-2.1"><span class="toc-item-num">2.1 </span>Definition of "conservative"</a></span></li><li><span><a href="#Find-dataset-with-geographic-information" data-toc-modified-id="Find-dataset-with-geographic-information-2.2"><span class="toc-item-num">2.2 </span>Find dataset with geographic information</a></span></li><li><span><a href="#Interested-people-don't-even-watch-the-movie/no-review" data-toc-modified-id="Interested-people-don't-even-watch-the-movie/no-review-2.3"><span class="toc-item-num">2.3 </span>Interested people don't even watch the movie/no review</a></span></li><li><span><a href="#Does-negative-review-only-because-of-LGBT/feminism?" data-toc-modified-id="Does-negative-review-only-because-of-LGBT/feminism?-2.4"><span class="toc-item-num">2.4 </span>Does negative review only because of LGBT/feminism?</a></span></li></ul></li></ul></div>
# -
# # Topic of interest
# Are people from conservative states or people who claim to be conservatives usually give negative reviews for LGBT/Feminism movies?
#
# I'm interested in the relationship between people's political views and their cultural preferences. I would like to know if "conservative" people usually give negative reviews for LGBT/feminism movies.
#
# The definition of conservative is important in my project. I'm thinking about defining political conservatives as either people from conservative states based on presidential election result or people who in some way self-claimed to be political conservatives. There will be two ways of getting data then, and the results are very likely different. I will most likely go with the first way of definition but the second one might also be interesting to think about.
# # Problems within topic
# ## Definition of "conservative"
# - use different measurements to see different results
# - presidential election result changes every four years
#
# ## Find dataset with geographic information
# - reviewer's location
#
# ## Interested people don't even watch the movie/no review
# - people who are not interested in such movies may not watch it in the first place and won't write a review
#
# ## Does negative review only because of LGBT/feminism?
# - some people who give negative reviews may do so only because it is a bad movie, not because the content is "LGBT/feminism" related.
| Preparation/Open Data Mashups project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + jupyter={"outputs_hidden": false}
# Course styling helpers (project-local module)
import cs109style
cs109style.customize_mpl()
cs109style.customize_css()
# special IPython command to prepare the notebook for matplotlib
# %matplotlib inline
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
import requests
# pattern.web: lightweight HTML DOM parser used below (Python 2 library)
from pattern import web
# -
# ## Fetching population data from Wikipedia
#
# In this example we will fetch data about countries and their population from Wikipedia.
#
# http://en.wikipedia.org/wiki/List_of_countries_by_past_and_future_population has several tables for individual countries, subcontinents as well as different years. We will combine the data for all countries and all years in a single panda dataframe and visualize the change in population for different countries.
#
# ###We will go through the following steps:
# * fetching html with embedded data
# * parsing html to extract the data
# * collecting the data in a panda dataframe
# * displaying the data
#
# To give you some starting points for your homework, we will also show the different sub-steps that can be taken to reach the presented solution.
# ## Fetching the Wikipedia site
# + jupyter={"outputs_hidden": false}
# Fetch the raw HTML of the Wikipedia population page (.text gives the decoded body)
url = 'http://en.wikipedia.org/wiki/List_of_countries_by_past_and_future_population'
website_html = requests.get(url).text
#print website_html
# -
# ## Parsing html data
# + jupyter={"outputs_hidden": false}
def get_population_html_tables(html):
    """Parse html and return html tables of wikipedia population data."""
    dom = web.Element(html)
    ### 0. step: look at html source!
    #### 1. step: get all tables
    #### 2. step: get all tables we care about
    # NOTE(review): live-lab skeleton -- `tbls` is not defined yet; the steps
    # above must be filled in before this function can run (NameError as-is).
    return tbls
tables = get_population_html_tables(website_html)
print "table length: %d" %len(tables)
for t in tables:
    print t.attributes
# + jupyter={"outputs_hidden": false}
def table_type(tbl):
    ### Extract the table type
    # NOTE(review): live-lab skeleton -- no body/return yet; should derive and
    # return a grouping key for the table (used as the dict key below).
# group the tables by type
tables_by_type = defaultdict(list) # defaultdicts have a default value that is inserted when a new key is accessed
for tbl in tables:
    tables_by_type[table_type(tbl)].append(tbl)
print tables_by_type
# -
# ## Extracting data and filling it into a dictionary
# + jupyter={"outputs_hidden": false}
def get_countries_population(tables):
    """Extract population data for countries from all tables and store it in
    dictionary (country name -> {year: population})."""
    result = defaultdict(dict)
    # 1. step: try to extract data for a single table
    # 2. step: iterate over all tables, extract headings and actual data and combine data into single dict
    # NOTE(review): live-lab skeleton -- returns an empty defaultdict until the
    # extraction steps above are implemented.
    return result
result = get_countries_population(tables_by_type['Country or territory'])
print result
# -
# ## Creating a dataframe from a dictionary
# + jupyter={"outputs_hidden": false}
# create dataframe: one row per country, one column per year
df = pd.DataFrame.from_dict(result, orient='index')
# sort based on year
# NOTE(review): DataFrame.sort() is the pre-0.17 pandas API (Python 2 era
# notebook); modern pandas uses sort_index(axis=1) for this.
df.sort(axis=1,inplace=True)
print df
# -
# ## Some data accessing functions for a panda dataframe
# + jupyter={"outputs_hidden": false}
# Demonstration of common pandas selection idioms on the population dataframe.
# NOTE(review): .ix is the pre-0.20 pandas indexer; modern pandas splits it
# into .loc (label-based) and .iloc (position-based).
subtable = df.iloc[0:2, 0:2]  # positional slice: first 2 rows / first 2 columns
print "subtable"
print subtable
print ""
column = df[1955]  # single column, keyed by year
print "column"
print column
print ""
row = df.ix[0] #row 0
print "row"
print row
print ""
rows = df.ix[:2] #rows 0,1
print "rows"
print rows
print ""
element = df.ix[0,1955] #element
print "element"
print element
print ""
# max along column
print "max"
print df[1950].max()
print ""
# axes
print "axes"
print df.axes
print ""
row = df.ix[0]
print "row info"
print row.name   # row label (country name)
print row.index  # the years
print ""
countries = df.index
print "countries"
print countries
print ""
print "Austria"
print df.ix['Austria']  # label-based row lookup
# -
# ## Plotting population of 4 countries
# + jupyter={"outputs_hidden": false}
# Plot population over time for a fixed set of countries
plotCountries = ['Austria', 'Germany', 'United States', 'France']
for country in plotCountries:
    row = df.ix[country]  # one row per country; its index holds the years
    plt.plot(row.index, row, label=row.name )
plt.ylim(ymin=0) # start y axis at 0
plt.xticks(rotation=70)
plt.legend(loc='best')
plt.xlabel("Year")
plt.ylabel("# people (million)")
plt.title("Population of countries")
# -
# ## Plot 5 most populous countries from 2010 and 2060
# + jupyter={"outputs_hidden": false}
def plot_populous(df, year):
    """Plot the population history of the 5 countries ranked highest in *year*."""
    # sort table depending on data value in year column
    # NOTE(review): pre-0.17 pandas API (sort, .ix); .ix[i] with an integer
    # presumably falls back to positional indexing here since the index holds
    # country names -- verify on the pandas version in use.
    df_by_year = df.sort(year, ascending=False)
    plt.figure()
    for i in range(5):
        row = df_by_year.ix[i]
        plt.plot(row.index, row, label=row.name )
    plt.ylim(ymin=0)
    plt.xticks(rotation=70)
    plt.legend(loc='best')
    plt.xlabel("Year")
    plt.ylabel("# people (million)")
    plt.title("Most populous countries in %d" % year)
plot_populous(df, 2010)
plot_populous(df, 2050)
# + jupyter={"outputs_hidden": false}
| labs/lab2/Lab_2_A_Live.ipynb |
/ -*- coding: utf-8 -*-
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: PostgreSQL
/ language: sql
/ name: postgres
/ ---
/ # 自定义类型
/
/ pg支持使用[CREATE TYPE](http://www.postgres.cn/docs/10/sql-createtype.html)语句自定义类型,可以定义的类型有两种
/
/ + 枚举型类型
/
/ + 复合类型
-- connection: postgres://postgres:postgres@localhost:5432/test
-- autocommit: true
/ ## 枚举型类型
/
/ 枚举(enum)类型是由一个静态,值为有序集合的数据类型.它们等效于很多编程语言所支持的enum类型.枚举类型的一个例子可以是一周中的日期,或者一个数据的状态值集合.
-- Enum values form a static, ordered set; inserts outside the set are rejected.
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')
CREATE TABLE IF NOT EXISTS enum_test(
    a serial4 PRIMARY KEY,
    b mood
)
INSERT INTO enum_test (b) VALUES ( 'happy')
select * from enum_test
-- Expected to fail: 'happyly' is not a member of the mood enum.
INSERT INTO enum_test (b) VALUES ( 'happyly')
/ ## 复合类型
/
/ 一个复合类型表示一行或一个记录的结构,它本质上就是一个域名和它们数据类型的列表,一个典型的应用就是复数.
/
/ 复合类型本质上就是一个表,要插入一个复合类型可以使用`ROW()`函数
-- A composite type bundles named fields (like a row); values are built with ROW(...).
CREATE TYPE complex AS (
    r float8,
    i float8
)
CREATE TABLE IF NOT EXISTS complex_test(
    a serial4 PRIMARY KEY,
    b complex
)
INSERT INTO complex_test (b) VALUES (ROW(1.8,3.3))
select * from complex_test
| src/数据存储篇/工具介绍/postgresql/功能与特性/自定义扩展/.ipynb_checkpoints/自定义数据类型-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analytical Dataset (ADS)
# The core concept of ADS is that we merge different data sources to have all possible information about objects of interest (the most often, these are our clients) in one place. We will then create a sliding window with all possible metrics.
# 1. The core part of ADS is a sliding window for each time period, for example, 1 week.
# 2. ADS contains one line for each observation every week
#
# <img src='images/ADS_timeline.png'>
#
# As indicated in the picture above we use the same history to create each slice, for example 12 months.
#
# We can summarize the main advantages of ADS as
# * Combines all data sources into one table
# * In the future, ML models can be based on one table
# * By using time slices (weekly, monthly) we take care of fluctuations in the data
# * It suits as aggregation layer for the reporting
# * Batch scoring (weekly, monthly) is easy to implement
# * New data sources can be simply added in the future using joins
#
# <img src='images/ADS.png'>
#
# # Tutorial
# [**Northwind Database**](https://drive.google.com/file/d/1HCfNF5BsYUrQhhr-vnO_LFgs5THJUAh_/view)
#
# ### Best if worked in VSCode following the instructions descrived here:
#
# We are going to build ADS about our customers. We will store information about orders from the last month. We want, in the ADS, to have only ONE line per every customer and every month.
#
# Let's see how many orders we have:
# ```sql
# select count(*) from orders;
# select min(orderdate) from orders;
# select max(orderdate) from orders;
# ```
# We can see 830 orders ranging from `1996-07-04` to `1998-05-06`. We are going to build ADS with values aggregated by month. We could use the same logic for a week or days but for training purposes, monthly aggregations are sufficient.
#
# In practice, it depends on the industry that required granularity. For traditional banking, 1 month may be enough. For telecommunications, 1 week can be appropriate, but there are also industries like e-commerce where they need to aggregate per day.
#
# For our tutorial, we will aggregate orders from each month and label it with column called `end_obs_date` (end observation date). `end_obs_date` is the column that labels monthly slice we are aggregating to. If anything happens, for example in August 1996, we will assign `end_obs_date` 1st of September, 1996.
#
# For example:
# * order date is 1996-12-12 --> endobsdate will be 1997-01-01
# * order date is 1997-01-31 --> endobsdate will be 1997-02-01
#
# Therefore, in our ADS, we will have following end observation dates: from `1996-08-01` to `1998-06-01`.
#
# We start with the create of table endobsdates. We will store all possible dates there.
# ```sql
# DROP TABLE if exists end_obs_dates;
# CREATE TABLE end_obs_dates
# AS
#
# -- Recursive query example (something like FOR loop for SQL)
# WITH RECURSIVE
# cnt(x) AS (
# SELECT 0
# UNION ALL
# SELECT x+1 FROM cnt
# LIMIT (SELECT ROUND(((julianday('1998-06-01') - julianday('1996-08-01'))/30) + 1)) -- this counts number of months between these two days
# )
# SELECT date('1996-08-01', '+' || x || ' month') as end_obs_date FROM cnt;
# ```
# It's good practice to put drop table if exists before each table creation. This will eliminate errors when rerunning the script and doesn't do anything if the table doesn't exist yet.
#
# Now, we create ADS POPULATION HIST(each customer id for each month). We will use table customers for this.
# ```sql
# DROP TABLE if exists ads_population_hist;
#
# CREATE TABLE ads_population_hist
# AS
# SELECT A.*,
# B.*
# FROM end_obs_dates AS A
# CROSS JOIN (SELECT DISTINCT customerid FROM customers) AS B
# ;
# ```
# Let's see how our new table looks like:
# ```sql
# select * from ads_population_hist limit 10;
# ```
# Now, we have the main table where every customer has exactly one row for every month, even when they didn't buy anything.
#
# Our main goal is to create a table where we keep all the important information about our clients. In this tutorial, we will work with clients' orders and create following variables:
# * noofitems
# * noofdistinct_orders
# * total_price
#
# All of these information will be aggregated on monthly level. We can take this information from the table `Order Details`.
#
# As the first step, we will compute additional attribute `totalprice_for_product` as `unitprice * quantity` using the following select statement.
# ```sql
# SELECT *,
# unitprice*quantity AS totalprice_for_product
# FROM "Order Details" LIMIT 20
# ```
# It is important that in our summary, every order_id appears only once.
#
# ```sql
# SELECT A.orderid,
# COUNT(DISTINCT A.productid) AS no_of_distinct_products,
# SUM(A.quantity) AS no_of_items,
# SUM(A.totalprice_for_product) AS total_price
# FROM (SELECT *,
# unitprice*quantity AS totalprice_for_product
# FROM "Order Details") AS A
# GROUP BY 1;
# ```
# We have aggregated information for every `orderid`. Now, we need to assign it to the correct customer and correct month. we will use table `orders` for that. We can see the sample of that table below.
# ```sql
# SELECT
# orderid,
# customerid,
# orderdate
# FROM orders
# LIMIT 100;
# ```
# We need to assign `end_obs_date` to each order.
# ```sql
# SELECT orderid,
# customerid,
# orderdate,
# date(orderdate,'start of month','+1 month') as end_obs_date
# FROM orders LIMIT 100;
# ```
# Now, we will combine previous steps into 1 table called `ads_orders_hist`.
# ```sql
# DROP TABLE if exists ads_orders_hist;
#
# CREATE TABLE ads_orders_hist
# AS
# SELECT A.orderid,
# A.customerid,
# A.end_obs_date,
# B.no_of_distinct_products,
# B.no_of_items,
# B.total_price
# FROM (
# SELECT orderid,
# customerid,
# orderdate,
# date(orderdate,'start of month','+1 month') as end_obs_date
# FROM orders)
# AS A
# LEFT OUTER JOIN (
# SELECT A.orderid,
# COUNT(DISTINCT A.productid) AS no_of_distinct_products,
# SUM(A.quantity) AS no_of_items,
# SUM(A.totalprice_for_product) AS total_price
# FROM (
# SELECT *,
# unitprice*quantity AS totalprice_for_product
# FROM "Order Details")
# AS A
# GROUP BY 1)
# AS B
# ON A.orderid = B.orderid;
#
# ;
# ```
# It is important to test that every `orderid` is only **once** in the table.
# ```sql
# select orderid
# ,count(*)
# from ads_orders_hist
# group by 1
# order by 2 desc
# limit 5
# ;
# ```
# If the first line is 1 then we are good to go to the next step.
#
# The last step is to join `ads_orders_hist` with `ads_pop_hist`. We will join everything on columns `customerid` and `end_obs_date`. We will use left join because we want to keep also track of the clients who didn't buy anything in the specific month.
# ```sql
# drop table if exists ads_observation_hist;
# create table ads_observation_hist as
# select
# A.*
# -- we can replace missings with 0 because it means there were no orders for this client during specific month.
# ,coalesce(B.no_of_distinct_orders_1M, 0) as no_of_distinct_orders_1M
# ,coalesce(B.no_of_items_1M, 0) as no_of_items_1M
# ,coalesce(B.total_price_1M, 0) as total_price_1M
# from ads_population_hist as A
# left outer join (
# -- we need to group by our orders to customer level
# select customerid
# ,end_obs_date
# ,count(distinct orderid) as no_of_distinct_orders_1M
# ,sum(no_of_items) as no_of_items_1M
# ,sum(total_price) as total_price_1M
# from ads_orders_hist
# group by 1,2
# ) as B
# on A.customerid = B.customerid
# and A.end_obs_date = B.end_obs_date
# ;
# ```
# It is important to test again if every `customerid` is in the table only once for every month.
# ```sql
# select customerid
# ,end_obs_date
# ,count(*)
# from ads_observation_hist
# group by 1,2
# order by 3 desc
# limit 5
# ;
# ```
# Let's see our final table:
# ```sql
# select * from ads_observation_hist limit 10;
# ```
# We can create many more variables that could be useful for modeling and further analysis, for example, `max_price`, `min_price`, `avg_price` etc.
#
# Once new data from the next month come in, we can simply add them to these tables using where clause for `orderdate` and `INSERT INTO` statements.
#
# ## Conclusion
# We have finished our first ADS with a couple of columns. We have named these columns with `_1M` at the end to indicate that these are monthly aggregations.
# # Challenge
# We'll use the one we have created during the tutorial and we will add additional attributes aggregated for 1 month. Plus, we will use window functions to aggregate orders from 3 and 6 months.
#
# We are going to continue with the creation of the ADS from the tutorial today. So far we have 3 attributes:
# * `no_of_distinct_orders_1M`
# * `no_of_items_1M`
# * `total_price_1M`
#
# We want to create additional attributes for every client and every month.
#
# Create new attributes in table ads_observation_hist using VSCode and database Northwind.
# * `no_of_items_3M` (use window functions)
# * `total_price_3M` (use window functions)
# * `max_monthly_total_price_3M` (use window functions)
# * `min_monthly_total_price_3M` (use window functions)
# * `avg_no_of_items_3M` (use window functions)
#
# All attributes should be in the table that is monthly aggregated and every client is there for every month.
#
# You can continue directly after the ADS tutorial and build on top of the table `ads_observation_hist`.
| SQL + Relational Databases/ADS - Analytical Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# _Lambda School Data Science — Big Data_
#
# # AWS SageMaker
#
# ### Links
#
# #### AWS
# - The Open Guide to Amazon Web Services: EC2 Basics _(just this one short section!)_ https://github.com/open-guides/og-aws#ec2-basics
# - AWS in Plain English https://www.expeditedssl.com/aws-in-plain-english
# - Amazon SageMaker » Create an Amazon SageMaker Notebook Instance https://docs.aws.amazon.com/sagemaker/latest/dg/gs-setup-working-env.html
# - Amazon SageMaker » Install External Libraries https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-add-external.html
#
# #### Dask
# - Why Dask? https://docs.dask.org/en/latest/why.html
# - Use Cases https://docs.dask.org/en/latest/use-cases.html
# - User Interfaces https://docs.dask.org/en/latest/user-interfaces.html
#
# #### Numba
# - A ~5 minute guide http://numba.pydata.org/numba-doc/latest/user/5minguide.html
# ## 1. Estimate pi
# https://en.wikipedia.org/wiki/Approximations_of_π#Summing_a_circle's_area
# ### With plain Python
# +
import random
def monte_carlo_pi(nsamples):
    """Estimate pi by Monte Carlo sampling.

    Draw `nsamples` random points in the unit square and count the
    fraction falling inside the quarter unit circle; that fraction
    approaches pi/4 as the sample count grows.
    """
    hits = 0
    for _ in range(int(nsamples)):
        # Draw x first, then y, so the random stream is consumed in order.
        px, py = random.random(), random.random()
        hits += (px * px + py * py) < 1.0
    # Quarter-circle area / square area = pi/4.
    return 4.0 * hits / nsamples
# -
# %%time
monte_carlo_pi(1e7)
# ### With Numba
# http://numba.pydata.org/
from numba import njit
# Identical implementation to the plain-Python version above; the @njit
# decorator JIT-compiles it to machine code (nopython mode), so the loop
# no longer runs through the interpreter.
@njit
def monte_carlo_pi(nsamples):
    """Estimate pi by Monte Carlo sampling (numba-compiled version)."""
    acc = 0
    for _ in range(int(nsamples)):
        x = random.random()
        y = random.random()
        # Point falls inside the quarter unit circle.
        if (x**2 + y**2) < 1.0:
            acc += 1
    # Quarter-circle area / square area = pi/4.
    return 4.0 * acc / nsamples
# %%time
# NOTE: the first call includes JIT compilation time; later calls are faster.
monte_carlo_pi(1e7)
# ## 2. Loop a slow function
# ### With plain Python
# +
from time import sleep
def slow_square(x):
    """Return x squared, after a one-second pause that simulates slow work."""
    result = x ** 2
    sleep(1)
    return result
# -
# %%time
[slow_square(n) for n in range(16)]
# ### With Dask
# - https://examples.dask.org/delayed.html
# - http://docs.dask.org/en/latest/setup/single-distributed.html
from dask import compute, delayed
# %%time
compute(delayed(slow_square)(n) for n in range(32))
# ## 3. Analyze millions of Instacart orders
# ### Download data
# https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# %cd instacart_2017_05_01
# !ls -lh *.csv
# ### With Pandas
# #### Load & merge data
import pandas as pd
# +
# %%time
order_products = pd.concat([
pd.read_csv('order_products__prior.csv'),
pd.read_csv('order_products__train.csv')])
order_products.info()
# -
order_products.head()
products = pd.read_csv('products.csv')
products.info()
products.head()
# %%time
order_products = pd.merge(order_products, products[['product_id', 'product_name']])
order_products.head()
# #### Most popular products?
# %%time
order_products['product_name'].value_counts()
# #### Organic?
# %%time
order_products['organic'] = order_products['product_name'].str.contains('Organic')
# %%time
order_products['organic'].value_counts()
# ### With Dask
# https://examples.dask.org/dataframe.html
# +
import dask.dataframe as dd
from dask.distributed import Client
client = Client(n_workers = 16)
client
# -
# #### Load & merge data
# https://examples.dask.org/dataframes/01-data-access.html#Read-CSV-files
# %%time
order_products = dd.read_csv('order_products*.csv')
# %%time
order_products = dd.merge(order_products,products[['product_id','product_name']])
# http://docs.dask.org/en/latest/dataframe-performance.html#persist-intelligently
type(order_products)
# #### Most popular products?
# %%time
order_products['product_name'].value_counts().compute()
# #### Organic?
# %%time
order_products['organic'] = order_products['product_name'].str.contains('Organic')
# %%time
order_products['organic'].value_counts().compute() / len(order_products)
# ## 4. Fit a machine learning model
# ### Load data
# %cd ../ds1-predictive-modeling-challenge
# +
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
train_features = pd.read_csv('train_features.csv')
train_labels = pd.read_csv('train_labels.csv')
X_train_numeric = train_features.select_dtypes(np.number)
y_train = train_labels['status_group']
# -
# ### With 2 cores (like Google Colab)
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
# %%time
model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=2, random_state=42, verbose=1)
model.fit(X_train_numeric, y_train)
print('Out-of-bag score:', model.oob_score_)
# ### With 16 cores (on AWS m4.4xlarge)
# %%time
model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=16, random_state=42, verbose=1)
model.fit(X_train_numeric, y_train)
print('Out-of-bag score:', model.oob_score_)
# ## ASSIGNMENT
#
# Revisit a previous assignment or project that had slow speeds or big data.
#
# Make it better with what you've learned today!
#
# You can use `wget` or Kaggle API to get data. Some possibilities include:
#
# - https://www.kaggle.com/c/ds1-predictive-modeling-challenge
# - https://www.kaggle.com/ntnu-testimon/paysim1
# - https://github.com/mdeff/fma
# - https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2
#
#
#
# Also, you can play with [Datashader](http://datashader.org/) and its [example datasets](https://github.com/pyviz/datashader/blob/master/examples/datasets.yml)!
| module1-aws-sagemaker/LS_DS_331_AWS_SageMaker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ml
# language: python
# name: ml
# ---
import os
os.chdir(os.path.join(os.getcwd(),".."))
# %load_ext autoreload
# %autoreload 2
import warnings
warnings.filterwarnings('ignore')
# +
import numpy as np
import os
import pandas as pd
import tensorflow as tf
from tensorflow import keras
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
from sklearn.metrics import confusion_matrix, classification_report
from tqdm.notebook import tqdm
import yaml
# -
from tasks.nn.classifiers import build_model, keras_classifier
# # Load Configuration
config_nn_path = os.path.join(os.getcwd(), "tasks", "project_configuration", "nn_tuning.yaml")
with open(config_nn_path, 'r') as stream:
try:
config_nn = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# # Load Dataset
digits_mnist = keras.datasets.mnist
(train_X, train_y), (test_X, test_y) = digits_mnist.load_data()
test_y[:3]
# summarize loaded dataset
print('Train: X=%s, y=%s' % (train_X.shape, train_y.shape))
print('Test: X=%s, y=%s' % (test_X.shape, test_y.shape))
test_y[:3]
# # Load the number of classes parameter to the configuration
config_nn["clf_params"]["n_classes"] = len(list(np.unique(train_y)))
config_nn["clf_params"]["n_classes"]
# # Explore the Dataset
# plot first few images
for i in range(9):
# define subplot
plt.subplot(330 + 1 + i)
# plot raw pixel data
plt.imshow(train_X[i], cmap=plt.get_cmap('gray'))
# show the figure
plt.show()
# reshape dataset to have a single channel
train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))
test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))
# scale pixels
def prep_pixels(train, test):
    """Cast image arrays to float32 and scale pixel values into [0, 1].

    Returns a new (train, test) pair; the inputs are left unmodified.
    """
    scaled = [images.astype('float32') / 255.0 for images in (train, test)]
    return scaled[0], scaled[1]
# prepare pixel data
train_X, test_X = prep_pixels(train_X, test_X)
train_X.shape
test_X.shape
# # Create Classifier
model = build_model(n_classes=config_nn["clf_params"]["n_classes"])
model.summary()
keras.backend.clear_session()
# grid mode: "randomized", "gridsearch"
clf = keras_classifier(config=config_nn, clf_params=config_nn["clf_params"],
param_grid=config_nn["param_distribs"],
grid_mode="randomized")
# # Train the Classifier
from tensorflow.keras.callbacks import EarlyStopping, History
callback_early_stopping = EarlyStopping(patience=20)
clf.fit(train_X, train_y, callbacks=[callback_early_stopping])
clf.best_params_
clf.best_score_
clf.best_estimator_
clf.score(test_X, test_y)
model = clf.best_estimator_.model
model
# evaluate model on test dataset
loss, acc = model.evaluate(test_X, test_y, verbose=1)
print('> %.3f' % (acc * 100.0))
clf.cv_results_
# # Train and Evaluate the best Classifier
params = clf.best_params_
params
keras.backend.clear_session()
model = build_model(n_classes=config_nn["clf_params"]["n_classes"])
history = model.fit(train_X,
train_y,
validation_split=config_nn["clf_params"]["validation_split"],
use_multiprocessing=config_nn["clf_params"]["use_multiprocessing"],
shuffle=config_nn["clf_params"]["shuffle"],
epochs=config_nn["clf_params"]["epochs"],
verbose = config_nn["clf_params"]["verbose"],
batch_size=params['batch_size'])
history.params
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
# evaluate model on test dataset
loss, acc = model.evaluate(test_X, test_y, verbose=1)
print('> %.3f' % (acc * 100.0))
predictions = np.argmax(model.predict(test_X, verbose=1), axis=-1)
predictions.shape
predictions[:3]
print(confusion_matrix(test_y, predictions))
print(classification_report(test_y, predictions))
#
| NBs/NN08 - CNNs with Keras Classifier and Trust Score - MNIST digits (Classification).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic regression model analysis: training time & AUC score
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# %matplotlib inline
# ## Read results of multiple logistic regression models training
results = './logreg.csv'
df = pd.read_csv(results)
df
# ## Fit a simple linear regression model for training time = f(number of training samples)
linreg = LinearRegression()
x = df.Nb_training_samples.values.reshape(-1, 1)
y = df.Training_time
linreg.fit(x, y)
time_linreg = linreg.predict(x)
# ## Plot the training time, a linear regression of training time, and AUC score
fig, ax1 = plt.subplots(figsize=(12,5))
# Axis 1 = training time in seconds
ax1.plot(df.Nb_training_samples, df.Training_time, 'bo')
ax1.plot(df.Nb_training_samples, time_linreg, 'b-')
ax1.legend(['Training time (s)'])
# Axis 2 = cross validation AUC score
ax2 = ax1.twinx()
ax2.plot(df.Nb_training_samples, df.AUC_cross_validation, 'rx', label='AUC score')
ax2.set_ylim(0,1)
ax2.legend(['AUC score'], loc=4)
# * We can see that the training time of the logistic regression model (using sag solver) is linear in number of training samples. It should therefore be easy to predict how long the model training would take.
#
# * However, calibrating on the full training set might not be useful, since we can see from the AUC scores results that increasing the training set size does not result in significant gains in the AUC score.
# ## How much time would it take to train the logistic model on the full dataset (168m examples)?
# sklearn estimators require X to be a 2-D array of shape
# (n_samples, n_features); passing a bare scalar raises a ValueError in
# scikit-learn >= 0.20, so wrap the single value as [[168e6]].
time_168m = linreg.predict(np.array([[168e6]]))[0]
print("Training time for 168m examples: {:.0f}min".format(time_168m / 60))
| talkingdata2/results/logreg_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# # Load dataset using keras
# +
# mnist = tf.keras.datasets.mnist
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# -
# # Load dataset manually
# - Download the data manually from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
# - Create a folder in a location that you have saved your jupyter notebook and call it dataset
# - Move the downloaded file from 'Downloads' to 'dataset'
# +
# Load dataset manually
mnist = np.load('dataset/mnist.npz')
print(mnist.files)
x_train = mnist['x_train']
y_train = mnist['y_train']
x_test = mnist['x_test']
y_test = mnist['y_test']
# -
# ## Dataset Exploration
# In this workshop, we will use the 'mnist dataset' to classify the hand written digits images. In this dataset, there are 70000 images. As digits are from 0 to 9, therefore there are 10 unique digits in this data.
print("x_train shape: " , x_train.shape)
print("y_train shape: " , y_train.shape)
# - The shape of the x_train is (60000, 28, 28) <br>
# > 60000 means that we have 60000 images<br>
# > 28 means that our image size is 28x28 (28x28 pixels)<br>
# - The shape of the Y is (60000,)<br>
# > 60000 means that we have 60000 labels (10 unique labels from 0 to 9) <br>
# ## Let's visualize what is in our dataset
# +
img_size = 28
plt.subplot(2, 2, 1)
plt.imshow(x_train[100].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(x_train[900].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(x_train[670].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(x_train[1999].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
# -
# # Normalize the data (scale it between 0 and 1)
# Let's first take a look at one sample in our dataset
x_train[0]
# As you can see in the printed array, currently our pixels (features) are between 0 and 255. When we normalize our data, pixels will be between 0 and 1. Therefore, our network will learn more easily!
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Now, let's see how our data looks like:
x_train[0]
# ## Let's take a look at our data again
# +
img_size = 28
plt.subplot(2, 2, 1)
plt.imshow(x_train[100].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(x_train[900].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(x_train[670].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(x_train[1999].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
# -
# ## Dataset Exploration
# In this workshop, we will use the 'mnist dataset' to classify the hand written digits images. In this dataset, there are 70000 images. As digits are from 0 to 9, therefore there are 10 unique digits in this data.
print("x_train shape: " , x_train.shape)
print("y_train shape: " , y_train.shape)
# - The shape of the x_train is (60000, 28, 28) <br>
# > 60000 means that we have 60000 images<br>
# > 28 means that our image size is 28x28 (28x28 pixels)<br>
# - The shape of the Y is (60000,)<br>
# > 60000 means that we have 60000 labels (10 unique labels from 0 to 9) <br>
# ## Now, let's build our feed forward model
# +
# a basic feed-forward model
model = tf.keras.models.Sequential()
# Input layer: Our input array (X) is a 3 dimensional array.
#In order to be able to use it in our first deep learning model,
#we need to make it flatten (2D).So, we need to take this 28x28 image, and make it a flat 1x784
model.add(tf.keras.layers.Flatten())
# a simple fully-connected layer, 128 units, relu activation
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# a simple fully-connected layer, 128 units, relu activation
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# our output layer. 10 units for 10 classes. Softmax for probability distribution
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
# -
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# train the model
model.fit(x_train, y_train, epochs=3)
# Test if the model generalize or overfit
val_loss, val_acc = model.evaluate(x_test, y_test)
print(val_loss)
print(val_acc)
model.save('digit.model')
digit_model = tf.keras.models.load_model('digit.model')
predictions = digit_model.predict(x_test)
print(predictions)
print(np.argmax(predictions[10]))
img_size = 28
plt.subplot(2, 2, 1)
plt.imshow(x_test[10].reshape(img_size, img_size), cmap = plt.cm.binary)
plt.axis('off')
| digits-reader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_input=15
num_hidden=9
num_output=12
learning_rate = 0.01
# +
### step1: load data
x_train =[
[0.2452, 0.1466, 0.1314, 0.2243, 0.5523, 0.6642, 0.7015, 0.6981, 0.6821, 0.6945, 0.7549, 0.8215, 0.2394, 0.3214, 1],
[0.2217, 0.1581, 0.1408, 0.2304, 0.5134, 0.5312, 0.6819, 0.7125, 0.7265, 0.6847, 0.7826, 0.8325, 0.2415, 0.3027, 0],
[0.2525, 0.1627, 0.1507, 0.2406, 0.5502, 0.5636, 0.7051, 0.7352, 0.7459, 0.7015, 0.8064, 0.8156, 0.2385, 0.3125, 0],
[0.2016, 0.1105, 0.1243, 0.1978, 0.5021, 0.5232, 0.6819, 0.6952, 0.7015, 0.6825, 0.7825, 0.7895, 0.2216, 0.2701, 1],
[0.2115, 0.1201, 0.1312, 0.2019, 0.5532, 0.5736, 0.7029, 0.7032, 0.7189, 0.7019, 0.7965, 0.8025, 0.2352, 0.2506, 0.5],
[0.2335, 0.1322, 0.1534, 0.2214, 0.5662, 0.5827, 0.7198, 0.7276, 0.7359, 0.7506, 0.8092, 0.8221, 0.2542, 0.3125, 0],
[0.2368, 0.1432, 0.1653, 0.2205, 0.5823, 0.5971, 0.7136, 0.7129, 0.7263, 0.7153, 0.8091, 0.8217, 0.2601, 0.3198, 0],
[0.2342, 0.1368, 0.1602, 0.2131, 0.5726, 0.5822, 0.7101, 0.7098, 0.7127, 0.7121, 0.7995, 0.8126, 0.2579, 0.3099, 0],
[0.2113, 0.1212, 0.1305, 0.1819, 0.4952, 0.5312, 0.6886, 0.6898, 0.6999, 0.7323, 0.7721, 0.7956, 0.2301, 0.2867, 0.5],
[0.2005, 0.1121, 0.1207, 0.1605, 0.4556, 0.5022, 0.6553, 0.6673, 0.6798, 0.7023, 0.7521, 0.7756, 0.2234, 0.2799, 1]
]
#print (x_train[0])
#print(np.reshape(x_train[0],(1,15)))
y_train = [
[0.2217, 0.1581, 0.1408, 0.2304, 0.5134, 0.5312, 0.6819, 0.7125, 0.7265, 0.6847, 0.7826, 0.8325],
[0.2525, 0.1627, 0.1507, 0.2406, 0.5502, 0.5636, 0.7051, 0.7352, 0.7459, 0.7015, 0.8064, 0.8156],
[0.2016, 0.1105, 0.1243, 0.1978, 0.5021, 0.5232, 0.6819, 0.6952, 0.7015, 0.6825, 0.7825, 0.7895],
[0.2115, 0.1201, 0.1312, 0.2019, 0.5532, 0.5736, 0.7029, 0.7032, 0.7189, 0.7019, 0.7965, 0.8025],
[0.2335, 0.1322, 0.1534, 0.2214, 0.5662, 0.5827, 0.7198, 0.7276, 0.7359, 0.7506, 0.8092, 0.8221],
[0.2368, 0.1432, 0.1653, 0.2205, 0.5823, 0.5971, 0.7136, 0.7129, 0.7263, 0.7153, 0.8091, 0.8217],
[0.2342, 0.1368, 0.1602, 0.2131, 0.5726, 0.5822, 0.7101, 0.7098, 0.7127, 0.7121, 0.7995, 0.8126],
[0.2113, 0.1212, 0.1305, 0.1819, 0.4952, 0.5312, 0.6886, 0.6898, 0.6999, 0.7323, 0.7721, 0.7956],
[0.2005, 0.1121, 0.1207, 0.1605, 0.4556, 0.5022, 0.6553, 0.6673, 0.6798, 0.7023, 0.7521, 0.7756],
[0.2123, 0.1257, 0.1343, 0.2079, 0.5579, 0.5716, 0.7059, 0.7145, 0.7205, 0.7401, 0.8019, 0.8316]
]
x_test =[0.2123, 0.1257, 0.1343, 0.2079, 0.5579, 0.5716, 0.7059, 0.7145, 0.7205, 0.7401, 0.8019, 0.8316, 0.2314, 0.2977, 0]
y_test =[0.2119, 0.1215, 0.1621, 0.2161, 0.6171, 0.6159, 0.7115, 0.7201, 0.7243, 0.7298, 0.8179, 0.8229]
# +
### step2: build Elman network
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, initialized
    from a truncated normal distribution with stddev 0.1."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name='weight')
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to
    the small constant 0.01."""
    initial = tf.constant(0.01, shape=shape)
    return tf.Variable(initial, name='bias')
## Build a fresh TF1 graph for the Elman (simple recurrent) network:
## input -> hidden (with recurrent diagonal weight on the previous hidden
## state) -> output, all operating on a single sample at a time.
with tf.Graph().as_default():
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, shape=[1,num_input],name = 'input')
    with tf.name_scope('hidden'):
        # Previous hidden state is fed back in manually on every step.
        h_old = tf.placeholder(tf.float32, shape=[1,num_hidden], name = 'hidden_old')
        w = weight_variable([num_input, num_hidden])
        u_pre = weight_variable([num_hidden]) # 1-dimensional tensor
        print (u_pre)
        u = tf.diag(u_pre, name='u_weight') # expand to a diagonal matrix: each hidden unit only feeds back to itself
        print (u)
        b = bias_variable([1,num_hidden])
        # Elman update: h_new = sigmoid(x*W + h_old*U + b)
        h_new = tf.sigmoid(tf.matmul(x, w) + tf.matmul(h_old,u) + b, name='activation')
        #h_new = tf.nn.relu(tf.matmul(x, w) + tf.matmul(h_old,u) + b)
        print (h_new)
    with tf.name_scope('output'):
        y_golden = tf.placeholder(tf.float32, shape=[1,num_output], name='output_golden')
        # NOTE: w and b deliberately rebind the hidden-layer names; the
        # hidden-layer Variables already exist in the graph and are unaffected.
        w = weight_variable([num_hidden, num_output])
        print (w)
        b = bias_variable([1,num_output])
        y = tf.sigmoid(tf.matmul(h_new, w) + b, name='activation')
        #y = tf.nn.relu(tf.matmul(h_new, w) + b)
        #y = tf.matmul(h_new, w) + b
        #print (y)
    # Sum-of-squares loss against the golden (next-step) output.
    loss = tf.reduce_sum(tf.square(y_golden - y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    #optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    # Relative prediction error against the fixed test target.
    test_accuracy = (y-y_test)/y_test
    test_accuracy_mean = tf.reduce_sum(tf.abs(test_accuracy))/num_output
    # merge summary
    summary_op = tf.summary.merge_all()
    # NOTE(review): tf.initialize_all_variables() is deprecated in TF 1.x;
    # tf.global_variables_initializer() is the replacement.
    init = tf.initialize_all_variables()
    ### step3: training
    with tf.Session() as session: # open the session inside this graph's context
        ## Variables must be initialized before any other op runs.
        session.run(init)
        ## Write the summary so the graph can be inspected in TensorBoard.
        train_writer = tf.summary.FileWriter("./power/logs/elman/", session.graph)
        h_old_initial = [[0.01]*num_hidden]
        #print (h_old_initial)
        steps = 20000
        #cur_pattern = np.random.randint(0, 5)
        cur_pattern = 0
        ## Initial feed: supply every placeholder; the recurrent hidden
        ## state starts from the small constant h_old_initial.
        feed = {
            x : np.reshape(x_train[cur_pattern],(1,num_input)),
            y_golden : np.reshape(y_train[cur_pattern],(1,num_output)),
            h_old: h_old_initial
        }
        for i in range(steps):
            ## Randomly pick the input pattern (used to build NEXT step's feed).
            cur_pattern = np.random.randint(0, 10)
            #cur_pattern = i%5
            ## Run one optimizer step on the current feed.
            session.run(optimizer,feed_dict=feed)
            ## Fetch the hidden-layer activations to recycle as h_old.
            h_new_fetched,u_fetched = session.run([h_new,u], feed_dict=feed)
            #print (h_new_fetched)
            #print (np.reshape(h_new_fetched,(1,num_hidden)))
            ## Feed the hidden-layer result back in for the next step.
            feed = {
                x: np.reshape(x_train[cur_pattern],(1,num_input)),
                y_golden: np.reshape(y_train[cur_pattern],(1,num_output)),
                h_old: np.reshape(h_new_fetched,(1,num_hidden))
            }
            if i%1000 == 0:
                print('loss ', session.run(loss, feed_dict=feed))
                #print('u', u_fetched)
        # Evaluate the trained network on the held-out test pattern,
        # carrying over the last training hidden state.
        feed = {
            x: np.reshape(x_test,(1,num_input)),
            y_golden: np.reshape(y_test,(1,num_output)),
            h_old: np.reshape(h_new_fetched,(1,num_hidden))
        }
        test_accuracy_fetched, test_accuracy_mean_fetched=session.run([test_accuracy,test_accuracy_mean],feed_dict=feed)
        print ('pridiction err ',test_accuracy_fetched)
        print ('mean pridiction err ',test_accuracy_mean_fetched)
# +
# -
'''
### step1: load data
x_train =[
[0.2113, 0.1212, 0.1305, 0.1819, 0.4952, 0.5312, 0.6886, 0.6898, 0.6999, 0.7323, 0.7721, 0.7956, 0.2234, 0.2799, 1],
[0.2005, 0.1121, 0.1207, 0.1605, 0.4556, 0.5002, 0.6553, 0.6673, 0.6798, 0.7023, 0.7521, 0.7756, 0.2314, 0.2977, 0],
[0.2342, 0.1368, 0.1602, 0.2131, 0.5726, 0.5822, 0.7101, 0.7098, 0.7127, 0.7121, 0.7995, 0.8126, 0.2301, 0.2867, 0.5],
[0.2368, 0.1432, 0.1653, 0.2205, 0.5823, 0.5971, 0.7136, 0.7129, 0.7263, 0.7153, 0.8091, 0.8217, 0.2579, 0.3099, 0],
[0.2335, 0.1322, 0.1534, 0.2214, 0.5623, 0.5727, 0.7198, 0.7276, 0.7359, 0.7506, 0.8092, 0.8221, 0.2601, 0.3198, 0]
]
#print (x_train[0])
#print(np.reshape(x_train[0],(1,15)))
y_train = [
[0.2005, 0.1121, 0.1207, 0.1605, 0.4556, 0.5002, 0.6553, 0.6673, 0.6798, 0.7023, 0.7521, 0.7756],
[0.2342, 0.1368, 0.1602, 0.2131, 0.5726, 0.5822, 0.7101, 0.7098, 0.7127, 0.7121, 0.7995, 0.8126],
[0.2368, 0.1432, 0.1653, 0.2205, 0.5823, 0.5971, 0.7136, 0.7129, 0.7263, 0.7153, 0.8091, 0.8217],
[0.2335, 0.1322, 0.1534, 0.2214, 0.5623, 0.5727, 0.7198, 0.7276, 0.7359, 0.7506, 0.8092, 0.8221],
[0.2115, 0.1201, 0.1312, 0.2019, 0.5532, 0.5736, 0.7029, 0.7032, 0.7189, 0.7019, 0.7965, 0.8025]
]
x_test =[0.2115, 0.1201, 0.1312, 0.2019, 0.5532, 0.5736, 0.7029, 0.7032, 0.7189, 0.7019, 0.7965, 0.8025, 0.2542, 0.3125, 0]
y_test =[0.2016, 0.1105, 0.1243, 0.1978, 0.5021, 0.5232, 0.6819, 0.6952, 0.7015, 0.6825, 0.7825, 0.7895]
'''
| elman_power_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from IPython.display import display, Markdown, Latex
import math
# Euler's identity: $ e^{i \pi} + 1 = 0 $
# $\frac{a}{b}$
display(Latex('$ e^{%s \pi} + 1 = 0 $' % 4 ))
display(Latex(r'E = $z_{c}\frac{\sigma}{\sqrt{n}}$'))
display(Latex(r'$ n= (\frac{z_{c}\sigma}{E}) $'))
# +
def sample_size_for_estimating_the_mean(level_of_confidence, E, sigma):
    """Display (as LaTeX) the minimum sample size n needed to estimate a
    population mean within margin of error E at the given confidence
    level, when the population standard deviation sigma is known:
    n = (z_c * sigma / E)^2, rounded UP to the next whole number.

    NOTE(review): depends on get_critical_value, which is defined in a
    later cell — run that cell first when executing top to bottom.
    """
    critical_value = get_critical_value(level_of_confidence)
    display(Latex(r'$ n= (\frac{z_{c}\sigma}{E})^2 $'))
    # Exact (unrounded) value, shown for reference.
    n=((critical_value * sigma)/E)**2
    # Sample sizes are whole numbers; always round up so the margin-of-error
    # requirement is still met.
    n_rounded=math.ceil(((critical_value * sigma)/E)**2)
    display(Latex(r'$ n= (\frac{(%s)(%s)}{%s})^2 = %s $'%(critical_value,sigma,E, n)))
    display(Latex('Rounded up to next whole number : %s' % n_rounded))
sample_size_for_estimating_the_mean(level_of_confidence=.99, E=.2,sigma=2.15 )
# -
def get_critical_value(level_of_confidence):
    """Two-tailed critical z value z_c for the given confidence level,
    rounded to 2 decimal places (e.g. 0.95 -> 1.96)."""
    # Split the remaining probability evenly between the two tails.
    tail_area = (1 - level_of_confidence) / 2
    return round(norm.ppf(1 - tail_area), 2)
from scipy.stats import t
def get_students_critical_value(level_of_confidence,n):
    """Two-tailed Student-t critical value t_c for sample size n at the
    given confidence level, rounded to 3 decimal places."""
    degrees_of_freedom = n - 1
    if degrees_of_freedom > 30:
        # Printed textbook tables bucket degrees of freedom above 30.
        print("CHECK TABLE, NEED TO ADJUST DF FOR HOMEWORK")
    # Upper-tail cumulative probability for a two-sided interval.
    upper_tail_prob = level_of_confidence + (1 - level_of_confidence) / 2
    return round(t.ppf(upper_tail_prob, degrees_of_freedom), 3)
| Estimation Chapter 7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from transformers import AutoTokenizer
NTDBGPT2_tokenizer = AutoTokenizer.from_pretrained('dracoglacius/NTDB-GPT2')
from utils import *
from mdf import *
from outputs import *
# + [markdown] tags=[]
# ## Overview
#
# We have the intuition that given a distribution with center of the hyper-ellipsoid $c$ and the shape of the ellipsoid defined by $\Sigma$, $c$ and $\Sigma$ should not deviate from the empirical mean ($\hat{c}$) and the covariance estimate ($\hat{\Sigma}$) taken from the training data.
#
# To obtain these estimates we need to:
#
# 1. Feed the NTDB model with the training data (length $n$) and from the last token get the features of each layer ($n$ x 13 x 768)
# 1. Calculate the sample mean ($\hat{c}$) and the covariance estimate ($\hat{\Sigma}$), while also getting the estimated pseudo-inverse (called `.precision_` in sklearn)
#
# To obtain the OOD estimation we need to:
#
# 1. Calculate the Mahalanobis Distance Feature (MDF) using the generated data (should be a vector equal to the number of layers)
# 1. Calculate the Anomaly Score
# 1. This is a one-class SVM with a linear kernel with MDF as features
# -
# # Configurations
PRINT_SEQUENCES = 20
TRN_FRAC = 0.9
# ## Load Data
# +
trn_seq = np.load("../data/25k_train_seqs_3_22_E8859_E8120_E9660_E9654_E9240.npy")
gen_seq = np.load("../data/25k_gen_seqs_3_22_E8859_E8120_E9660_E9654_E9240.npy")
e8120_trn_seq = filter_token_length([x for x in trn_seq if 'E812.0' in x], NTDBGPT2_tokenizer) # 5000 items
e8859_trn_seq = filter_token_length([x for x in trn_seq if 'E885.9' in x], NTDBGPT2_tokenizer) # 5000 items
e9660_trn_seq = filter_token_length([x for x in trn_seq if 'E966.0' in x], NTDBGPT2_tokenizer) # 5000 items
e9654_trn_seq = filter_token_length([x for x in trn_seq if 'E965.4' in x], NTDBGPT2_tokenizer) # 5000 items
e9240_trn_seq = filter_token_length([x for x in trn_seq if 'E924.0' in x], NTDBGPT2_tokenizer) # 5000 items
e8120_gen_seq = filter_token_length([x for x in gen_seq if 'E812.0' in x], NTDBGPT2_tokenizer) # 5000 items
e8859_gen_seq = filter_token_length([x for x in gen_seq if 'E885.9' in x], NTDBGPT2_tokenizer) # 5000 items
e9660_gen_seq = filter_token_length([x for x in gen_seq if 'E966.0' in x], NTDBGPT2_tokenizer) # 5000 items
e9654_gen_seq = filter_token_length([x for x in gen_seq if 'E965.4' in x], NTDBGPT2_tokenizer) # 5000 items
e9240_gen_seq = filter_token_length([x for x in gen_seq if 'E924.0' in x], NTDBGPT2_tokenizer) # 5000 items
# -
# # E812.0
# + tags=[]
e8120_report, e8120_border, e8120_model, e8120_trn, e8120_tst, e8120_ood = oc_svm_detector('e8120', e8120_trn_seq, e8120_gen_seq, TRN_FRAC, PRINT_SEQUENCES)
# -
# Keep only the generated E812.0 sequences the one-class SVM labels as
# inliers (predict == 1), i.e. "realistic" relative to the training data.
e8120_gen_realistic = []
for i, p in enumerate(e8120_model.predict(e8120_ood)):
    if p == 1:
        e8120_gen_realistic.append({
            'label': 'GEN',   # generated (vs 'NTDB' for real records)
            'ecode': 'E8120',
            'seq': string_seq_dsc(e8120_gen_seq[i])
        })
# + tags=[]
#for s in e8120_trn_seq[:5]:
# print_seq_dsc(s)
# print()
# + tags=[]
#n = e8120_report[e8120_report['score']<e8120_border]
#
#for _,r in n[:5].iterrows():
# print(r['output'])
# + tags=[]
#w = e8120_report[e8120_report['score']>e8120_border]
#
#for _,r in w[:5].iterrows():
# print(r['output'])
# + [markdown] tags=[]
# # E885.9
# -
e8859_report, e8859_border, e8859_model, e8859_trn, e8859_tst, e8859_ood = oc_svm_detector('e8859', e8859_trn_seq, e8859_gen_seq, TRN_FRAC, PRINT_SEQUENCES)
e8859_gen_realistic = []
for i, p in enumerate(e8859_model.predict(e8859_ood)):
if p == 1:
e8859_gen_realistic.append({
'label': 'GEN',
'ecode': 'E8859',
'seq': string_seq_dsc(e8859_gen_seq[i])
})
# +
#for s in e8859_trn_seq[:5]:
# print_seq_dsc(s)
# print()
# +
#n = e8859_report[e8859_report['score']<e8859_border]
#
#for _,r in n[:5].iterrows():
# print(r['output'])
# +
#w = e8859_report[e8859_report['score']>e8859_border]
#
#for _,r in w[:5].iterrows():
# print(r['output'])
# -
# # E966.0
e9660_report, e9660_border, e9660_model, e9660_trn, e9660_tst, e9660_ood = oc_svm_detector('e9660', e9660_trn_seq, e9660_gen_seq, TRN_FRAC, PRINT_SEQUENCES)
e9660_gen_realistic = []
for i, p in enumerate(e9660_model.predict(e9660_ood)):
if p == 1:
e9660_gen_realistic.append({
'label': 'GEN',
'ecode': 'E9660',
'seq': string_seq_dsc(e9660_gen_seq[i])
})
# +
#for s in e9660_trn_seq[:5]:
# print_seq_dsc(s)
# print()
# +
#n = e9660_report[e9660_report['score']<e9660_border]
#
#for _,r in n[:5].iterrows():
# print(r['output'])
# +
#w = e9660_report[e9660_report['score']>e9660_border]
#
#for _,r in w[:5].iterrows():
# print(r['output'])
# -
# # E965.4
e9654_report, e9654_border, e9654_model, e9654_trn, e9654_tst, e9654_ood = oc_svm_detector('e9654', e9654_trn_seq, e9654_gen_seq, TRN_FRAC, PRINT_SEQUENCES)
e9654_gen_realistic = []
for i, p in enumerate(e9654_model.predict(e9654_ood)):
if p == 1:
e9654_gen_realistic.append({
'label': 'GEN',
'ecode': 'E9654',
'seq': string_seq_dsc(e9654_gen_seq[i])
})
# +
#for s in e9654_trn_seq[:5]:
# print_seq_dsc(s)
# print()
# +
#n = e9654_report[e9654_report['score']<e9654_border]
#
#for _,r in n[:5].iterrows():
# print(r['output'])
# +
#w = e9654_report[e9654_report['score']>e9654_border]
#
#for _,r in w[:5].iterrows():
# print(r['output'])
# -
# # E924.0
e9240_report, e9240_border, e9240_model, e9240_trn, e9240_tst, e9240_ood = oc_svm_detector('e9240', e9240_trn_seq, e9240_gen_seq, TRN_FRAC, PRINT_SEQUENCES)
e9240_gen_realistic = []
for i, p in enumerate(e9240_model.predict(e9240_ood)):
if p == 1:
e9240_gen_realistic.append({
'label': 'GEN',
'ecode': 'E9240',
'seq': string_seq_dsc(e9240_gen_seq[i])
})
# +
#for s in e9240_trn_seq[:5]:
# print_seq_dsc(s)
# print()
# +
#n = e9240_report[e9240_report['score']<e9240_border]
#
#for _,r in n[:5].iterrows():
# print(r['output'])
# +
#w = e9240_report[e9240_report['score']>e9240_border]
#
#for _,r in w[:5].iterrows():
# print(r['output'])
# -
e8120_trn_seq[0]

def _ntdb_records(ecode, seqs):
    # Wrap raw NTDB sequences in the same record schema as the generated ones.
    return [{'label': 'NTDB', 'ecode': ecode, 'seq': string_seq_dsc(x)} for x in seqs]

def _sample10(records):
    # Reproducible 10-row sample of record dicts, as a DataFrame.
    return pd.DataFrame(records).sample(n=10, random_state=42).reset_index(drop=True)

e8120_trn_seq_sample = _sample10(_ntdb_records('E8120', e8120_trn_seq))
e8859_trn_seq_sample = _sample10(_ntdb_records('E8859', e8859_trn_seq))
e9660_trn_seq_sample = _sample10(_ntdb_records('E9660', e9660_trn_seq))
e9654_trn_seq_sample = _sample10(_ntdb_records('E9654', e9654_trn_seq))
e9240_trn_seq_sample = _sample10(_ntdb_records('E9240', e9240_trn_seq))
e8120_gen_seq_sample = _sample10(e8120_gen_realistic)
e8859_gen_seq_sample = _sample10(e8859_gen_realistic)
e9660_gen_seq_sample = _sample10(e9660_gen_realistic)
e9654_gen_seq_sample = _sample10(e9654_gen_realistic)
e9240_gen_seq_sample = _sample10(e9240_gen_realistic)

# Write all NTDB samples followed by all generated samples, one JSON
# record per line, for the clinical evaluation.
pd.concat([
    e8120_trn_seq_sample,
    e8859_trn_seq_sample,
    e9660_trn_seq_sample,
    e9654_trn_seq_sample,
    e9240_trn_seq_sample,
    e8120_gen_seq_sample,
    e8859_gen_seq_sample,
    e9660_gen_seq_sample,
    e9654_gen_seq_sample,
    e9240_gen_seq_sample,
]).to_json("../outputs/clinical_eval.json", orient='records', lines=True)
| sequence_ood_score_generated_end.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Your optional code here
#You can import some modules or create additional functions
def checkio(data: list) -> list:
    """Return the elements of *data* that appear more than once, preserving order.

    Improvements over the original: a single Counter pass makes this O(n)
    instead of O(n^2) (list.count per distinct element), and the caller's
    list is no longer mutated in place.
    """
    from collections import Counter
    occurrences = Counter(data)
    return [x for x in data if occurrences[x] > 1]
#Some hints
#You can use list.count(element) method for counting.
#Create new list with non-unique elements
#Loop over original list
if __name__ == "__main__":
    # Self-check examples for the CheckiO "Non-unique Elements" task;
    # the platform's auto-tester does not run these asserts.
    assert list(checkio([1, 2, 3, 1, 3])) == [1, 3, 1, 3], "1st example"
    assert list(checkio([1, 2, 3, 4, 5])) == [], "2nd example"
    assert list(checkio([5, 5, 5, 5, 5])) == [5, 5, 5, 5, 5], "3rd example"
    assert list(checkio([10, 9, 10, 10, 9, 8])) == [10, 9, 10, 10, 9], "4th example"
    print("It is all good. Let's check it now")
| code/Home/Checkio_deluniq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DAVIDnHANG/DS-Unit-2-Regression-Classification/blob/master/module3/A3_regression_classification_TurnIN_W3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="BSA-IKWP0n4D" colab_type="code" outputId="f63c0da5-683a-4dbb-9c72-89007166333e" colab={"base_uri": "https://localhost:8080/", "height": 310}
pip install category_encoders
# + id="W_XjiI1PMN00" colab_type="code" colab={}
#imports here
import pandas as pd
import pandas_profiling
import datetime as dt
import numpy as np
# + id="0rOsZEci8W8u" colab_type="code" colab={}
# Read New York City property sales data
df = pd.read_csv('https://raw.githubusercontent.com/DAVIDnHANG/DS-Unit-2-Regression-Classification/master/data/condos/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings: strip the currency symbol, dashes and
# thousands separators, then convert to integer.
# BUG FIX: regex=False is required here -- with the regex default, '$' is
# an end-of-string anchor, so literal dollar signs were never removed.
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$', '', regex=False)
    .str.replace('-', '', regex=False)
    .str.replace(',', '', regex=False)
    .astype(int))
# + id="6ZwHgmzC-KW7" colab_type="code" colab={}
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)

# Reduce cardinality for NEIGHBORHOOD: keep the 10 most frequent
# neighborhoods and lump everything else into 'OTHER'.
top10 = df['NEIGHBORHOOD'].value_counts().head(10).index
outside_top10 = ~df['NEIGHBORHOOD'].isin(top10)
df.loc[outside_top10, 'NEIGHBORHOOD'] = 'OTHER'
# + id="aJd1H8cmel9O" colab_type="code" outputId="17b1c5c5-3737-4395-aeac-7b4293d5c119" colab={"base_uri": "https://localhost:8080/", "height": 279}
# Restrict the data set to SALE_PRICE between 100k and 2 million.
# (A discarded `df['SALE_PRICE'].astype(str).astype(int)` expression was
# removed here: its result was never assigned, so it had no effect.)
df['SALE_PRICE'] = df['SALE_PRICE'].replace(np.nan, 0)
df = df.loc[(df['SALE_PRICE'] >= 100000) & (df['SALE_PRICE'] <= 2000000)]
df.head(3)

# + id="ziHOKz2ulrLT" colab_type="code" outputId="baf43f1c-719c-4793-cf63-165d1d2fcbfc" colab={"base_uri": "https://localhost:8080/", "height": 245}
# Restrict the data set further to single-family dwellings only.
df = df.loc[df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS']
df.head(3)
# + id="JcWjj7cb_IPz" colab_type="code" outputId="d61df4e3-8ed6-4f7d-e44f-cd893acb9996" colab={"base_uri": "https://localhost:8080/", "height": 417}
df.isnull().sum()
# + id="UP4AHsTOAqY9" colab_type="code" outputId="bf52f03f-bc78-4b19-8caf-80d49496bae0" colab={"base_uri": "https://localhost:8080/", "height": 417}
df.dtypes
# + [markdown] id="tAF5f1aJCMb0" colab_type="text"
# # ```Date
#
# let split the Data into two parts Jan 2019 - March 2019 and April 2019.
# Use the three step
#
# * Convert to numpy, pandas format
# * Mask
# * then .loc all dates; in other words, filter.
# Since we are training, testing, create test, train df
#
#
#
#
# + id="kLiwkoitAyRj" colab_type="code" outputId="385c6be5-5d03-40ea-ee21-61214bb6c21d" colab={"base_uri": "https://localhost:8080/", "height": 54}
# SALE_DATE is in MM/DD/YYYY format (the '03/30/2019' end date below is
# only valid as month/day; the original comment claiming DD/MM/YYYY was
# wrong). Convert once -- the second pd.to_datetime call was redundant.
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'])

# Train window: Jan 1 (exclusive) through Mar 30, 2019 (inclusive).
start_date = '01/01/2019'
end_date = '03/30/2019'
mask = (df['SALE_DATE'] > start_date) & (df['SALE_DATE'] <= end_date)
train_Date_March_April = df.loc[mask]
print(train_Date_March_April.shape)

# Test window: April 2019.
start_date_test = '04/01/2019'
end_date_test = '04/30/2019'
mask_test = (df['SALE_DATE'] > start_date_test) & (df['SALE_DATE'] <= end_date_test)
test_April = df.loc[mask_test]
print(test_April.shape)
# + [markdown] id="i7is5tQZTeJ9" colab_type="text"
# # 1-hot encoding of categorical features
# why do this?
#
# * Let say we have a catagory column which has index can be represented T and F.
# * So we can make those index on that column be turned into a new column which has 0 = F and F=1.
# Now we can represents the categorical data.
#
#
# + id="w6tWm9tOLaxl" colab_type="code" outputId="4dced8f3-17a6-4f80-a1bd-21e94655ee50" colab={"base_uri": "https://localhost:8080/", "height": 92}
#Before encoding let see how many columns a particular column would make.
#InThisArea=train_Date_March_April.groupby('BUILDING_CLASS_CATEGORY').sum()
#print(InThisArea)
# Group by EASE-MENT to gauge its cardinality before one-hot encoding;
# each distinct group would become one encoded column.
TAX_CLASS=train_Date_March_April.groupby('EASE-MENT').sum()
print(TAX_CLASS)
#BLOCK=train_Date_March_April.groupby('BLOCK').sum()
#print(BLOCK)
#RESIDENTIAL_UNITS=train_Date_March_April.groupby('RESIDENTIAL_UNITS').sum()
#print(RESIDENTIAL_UNITS)
#BUILDING_CLASS_AT_TIME_OF_SALE = train_Date_March_April.groupby('BUILDING_CLASS_AT_TIME_OF_SALE').sum()
#print(BUILDING_CLASS_AT_TIME_OF_SALE)
# + [markdown] id="Qberzm4PlNud" colab_type="text"
# # why show top?
#
#
# 1. To articulate how many columns we need to build.
# 2. If I "1-hot encode" BLOCK, there will be 2065 new columns.
# 3. 1-hot encode building class will give me a dozen categoricals.
# 4. so let encode tax_class at present because we will only be making less than 6 categoricals.
#
#
# + [markdown] id="LPIyH1I453qz" colab_type="text"
# Before I can 1-hot encode there are some columns that I want to be left alone, but are object-string. need to convert them to floats.
# + id="rAUm3-vS-ki_" colab_type="code" outputId="5542a72d-7754-4d5c-8097-39ad7c3e5e68" colab={"base_uri": "https://localhost:8080/", "height": 255}
#converting col Land SQ Feet into float64
# Strip thousands separators before the cast (',' is a plain character
# here, so the regex default is harmless).
# NOTE(review): these assign into slices of df and may emit pandas
# SettingWithCopyWarning -- confirm the frames are intended copies.
train_Date_March_April['LAND_SQUARE_FEET'] = train_Date_March_April['LAND_SQUARE_FEET'].str.replace(',','').astype(np.float64)
test_April['LAND_SQUARE_FEET'] = test_April['LAND_SQUARE_FEET'].str.replace(',','').astype(np.float64)
# + id="d3IT6a3na3xq" colab_type="code" colab={}
# Exclude the target plus identifier / high-cardinality columns from the
# feature set. (The original list repeated LOT, ADDRESS, APARTMENT_NUMBER
# and ZIP_CODE and misspelled the variable name; cleaned up here --
# Index.drop tolerates duplicates, so behavior is unchanged.)
target = 'SALE_PRICE'
high_cardinality = ['BOROUGH', 'NEIGHBORHOOD', 'BUILDING_CLASS_CATEGORY',
                    'BLOCK', 'LOT', 'EASE-MENT', 'BUILDING_CLASS_AT_PRESENT',
                    'ADDRESS', 'APARTMENT_NUMBER', 'ZIP_CODE',
                    'BUILDING_CLASS_AT_TIME_OF_SALE', 'GROSS_SQUARE_FEET']
features = train_Date_March_April.columns.drop(high_cardinality + [target])

# Train split
X_train = train_Date_March_April[features]
y_train = train_Date_March_April[target]

# Test split
X_test = test_April[features]
y_test = test_April[target]
# + id="bVslfTKUtbgc" colab_type="code" colab={}
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names=True)
##train
X_train = encoder.fit_transform(X_train)
###Test
X_test = encoder.transform(X_test)
# + id="8ptS81HA42RE" colab_type="code" outputId="2d533463-67ab-4f7f-c1ce-d4f5ce8490ee" colab={"base_uri": "https://localhost:8080/", "height": 108}
X_test.columns
# + [markdown] id="oJYYG6UL_7kc" colab_type="text"
# # Do feature selection with SelectKBest.
#
# #we get to make engineer -features before we use SelectKBest,
# #@which randomly magically picked K features for us.
# + id="QeX7-Q8ORtRU" colab_type="code" colab={}
train_Date_March_April['GROSS_SQUARE_FEET']
# + [markdown] id="W65V7D-IVV-i" colab_type="text"
# Now what is important? in other words what is not important?
#
# * Borough Bronx, brooklyn, manhattan, queens
# * NEIGHBORHOOD - Lower manhattan, midtown manhattan, soho
#
# * BUILDING_CLASS_CATEGORY
#
# * the whole dataframe can't be read because it either a string,or .dot
#
# + id="CZ1o9oqqdN2y" colab_type="code" colab={}
train_Date_March_April.columns
# + id="Sbfm_3p1OH0U" colab_type="code" colab={}
#train_Date_March_April
#test_April
def engineer_features(X):
    """Return a copy of *X* with an added 'perk_count' column.

    'perk_count' is the row-wise sum of the unit-count, square-footage and
    tax/building-class columns listed below. The input frame is not mutated.
    """
    summed_columns = [
        'RESIDENTIAL_UNITS',
        'COMMERCIAL_UNITS',
        'TOTAL_UNITS',
        'LAND_SQUARE_FEET',
        'GROSS_SQUARE_FEET',
        'TAX_CLASS_AT_TIME_OF_SALE',
        'BUILDING_CLASS_AT_TIME_OF_SALE',
    ]
    engineered = X.copy()
    engineered['perk_count'] = engineered[summed_columns].sum(axis=1)
    return engineered
XUni_train = engineer_features(train_Date_March_April)
XUni_test = engineer_features(test_April)

# + id="sldkHQH9ex2h" colab_type="code" colab={}
# Drop identifier / high-cardinality / already-aggregated columns.
# BUG FIX: the original assigned `XUni_train.columns.drop(drop)` -- an
# Index of column NAMES -- back to XUni_train, replacing the DataFrame
# with a plain Index and breaking everything downstream. Drop the columns
# from the frame itself instead.
drop = ['LOT', 'ADDRESS', 'APARTMENT_NUMBER', 'ZIP_CODE', 'YEAR_BUILT', 'BUILDING_CLASS_CATEGORY', 'BLOCK', 'BOROUGH', 'NEIGHBORHOOD', 'TAX_CLASS_AT_PRESENT', 'EASE-MENT', 'BUILDING_CLASS_AT_PRESENT', 'RESIDENTIAL_UNITS', 'COMMERCIAL_UNITS', 'TOTAL_UNITS', 'LAND_SQUARE_FEET', 'GROSS_SQUARE_FEET', 'TAX_CLASS_AT_TIME_OF_SALE', 'BUILDING_CLASS_AT_TIME_OF_SALE']
XUni_train = XUni_train.drop(columns=drop)
XUni_test = XUni_test.drop(columns=drop)
# + id="qu_ZOtBignZK" colab_type="code" outputId="45c248f8-f849-443d-c4ac-17bdac1f25ff" colab={"base_uri": "https://localhost:8080/", "height": 34}
XUni_train

# + id="apH0RZo-I58G" colab_type="code" colab={}
# Univariate feature selection: keep the 8 features whose f_regression
# score against the target is highest.
from sklearn.feature_selection import f_regression, SelectKBest

selector = SelectKBest(score_func=f_regression, k=8)

# Fit on the train set, then apply the SAME fitted selector to test.
# BUG FIX: the original called `selector.fit_transform(X_test)` -- wrong
# variable, no y (f_regression requires one), and re-fitting on test
# both crashes and would leak test information.
X_train_selected = selector.fit_transform(XUni_train, y_train)
X_test_selected = selector.transform(XUni_test)
X_train_selected.shape, X_test_selected.shape
# + id="yFBP0ARQd-jR" colab_type="code" colab={}
XUni_train.dtypes
# + id="t-1DiBmnOq8W" colab_type="code" colab={}
| module3/A3_regression_classification_TurnIN_W3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: r
# ---
# ### Plot t-distribution
options(repr.plot.width=5, repr.plot.height=5)

x <- seq(-5, 5, length=100)

# t with 4 degrees of freedom (sample size 5)
dfree <- 5-1
hx <- dt(x, df=dfree)
title <- sprintf("t Distribution t(%d)", dfree)
plot(x, hx, type="l", lty=1, xlab="x value",
     ylab="Density", main=title)

# t with 9 degrees of freedom (sample size 10)
dfree <- 10-1
# BUG FIX: the original forgot to recompute the density here, so the
# plot titled t(9) actually drew the t(4) curve.
hx <- dt(x, df=dfree)
title <- sprintf("t Distribution t(%d)", dfree)
plot(x, hx, type="l", lty=1, xlab="x value",
     ylab="Density", main=title)

# t with 29 degrees of freedom (sample size 30)
dfree <- 30-1
hx <- dt(x, df=dfree)
title <- sprintf("t Distribution t(%d)", dfree)
plot(x, hx, type="l", lty=1, xlab="x value",
     ylab="Density", main=title)
| t_distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="DcngT5T1reef" colab_type="code" colab={}
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers import Conv1D, MaxPooling1D, Input, Flatten
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
# + id="KCbpCRKcreek" colab_type="code" colab={}
import pandas as pd
benign_domain = pd.read_csv('benign_domians.csv')
dga_domain = pd.read_csv('360_dga_domains.csv')
bambenek_dga_domain = pd.read_csv('bambenek_dga_domains.csv')
# + id="OWW-ZnCzsNHO" colab_type="code" colab={}
benign_domains = benign_domain['Domain'].tolist()
dga_domains = dga_domain['Domain'].tolist() + bambenek_dga_domain['Domain'].tolist()
# + [markdown] id="28CmJQC-reem" colab_type="text"
# ### Build CNN model
# + [markdown] id="VrBscbbNreen" colab_type="text"
# One CNN module, use adam as optimizer,add regularizer 0.01(larger than ex5)
# + id="wjqRZP5Xwmk1" colab_type="code" colab={}
from keras import regularizers
rglrzr = regularizers.l2(0.01)
# + id="05hYlhnYreeo" colab_type="code" colab={}
def build_model(max_features_num, maxlen):
    """Build a 1-D CNN for binary domain classification.

    One Conv1D(64, kernel 3, relu, L2-regularized via the module-level
    `rglrzr`) block followed by max-pooling and dropout, flattened into a
    single sigmoid output. Compiled with adam and binary cross-entropy.

    `max_features_num` is accepted for interface compatibility but unused:
    the network consumes raw (maxlen, 1) integer sequences directly.
    """
    model = Sequential()
    # Keras defaults (strides=1, 'valid' padding, channels_last,
    # dilation_rate=1, bias on, glorot_uniform / zeros initializers, no
    # extra regularizers or constraints) are left implicit.
    model.add(Conv1D(64, 3,
                     activation='relu',
                     kernel_regularizer=rglrzr,
                     input_shape=(maxlen, 1)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Dropout(0.5))
    # A second Conv1D(32, 3) + pooling + Dropout(0.3) stage existed in an
    # earlier experiment but is disabled in this run.
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_crossentropy','acc'])
    return model
# + [markdown] id="wAycToKOreeq" colab_type="text"
# ### Load data
# + id="9TEPfchvrees" colab_type="code" colab={}
X = benign_domains + dga_domains

# + id="7TSRb_8oreeu" colab_type="code" colab={}
# Map each character seen in the corpus to an integer index, reserving 0
# for padding. The character set is sorted so the encoding is
# deterministic across runs -- plain `set` iteration order depends on the
# process hash seed, which made the original mapping irreproducible.
chars_dict = {ch: idx + 1 for idx, ch in enumerate(sorted(set(''.join(X))))}

# + id="fPSiQNORreex" colab_type="code" colab={}
# Index 0 (padding/unknown) also counts as a feature.
max_features_num = len(chars_dict) + 1
# Pad/truncate to the longest domain in the dataset.
maxlen = np.max([len(x) for x in X])
# + id="QM00tUL-ree0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="4b282f81-ce43-488a-bfeb-6cb763bad9e2"
maxlen
# + id="udj4eS_xree3" colab_type="code" colab={}
# Encode every domain as a list of per-character integer indices.
X_in_int = [[chars_dict[ch] for ch in domain] for domain in X]

# + id="gioiR5jRree6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="cb444885-1474-41d8-8c62-b1b3b8a9206f"
X_in_int[1]

# + id="OTMwxxR6refA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="75bc9341-74ad-4657-8133-3b82467b750d"
X[1]

# + id="pEakL4JdrefD" colab_type="code" colab={}
# From here on X holds the integer-encoded domains.
X = X_in_int

# + id="yGlfRhF8refG" colab_type="code" colab={}
# Pad every sequence to the common maximum length.
X = sequence.pad_sequences(X, maxlen=maxlen)

# + id="g0xSsqEsrefJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="7d3d80f9-1759-434e-e36f-18d66703e0c1"
X.shape

# + id="fB8hMNRVrefN" colab_type="code" colab={}
# Labels aligned with X: 0 for benign, 1 for DGA.
Y = np.hstack([np.zeros(len(benign_domains)), np.ones(len(dga_domains))])
# + [markdown] id="5ffqcgZArefQ" colab_type="text"
# ### Train the model
# Because of the huge size of the data, we decided to resample a smaller set for training.
# + [markdown] id="h5mVdgacrefR" colab_type="text"
# Because there are 1M benign domains, the cut point will be the index of the last benign domain, which is:
# + id="dGoyOfkHrefR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="09150a00-9ae9-4aa9-b396-ab8e31f4ac6a"
# Benign domains occupy the first len(benign_domains) rows of X, so the
# benign/DGA boundary is simply that length.
pos_neg_cutpoint = len(benign_domains)
print("The cut point will be "+ str(pos_neg_cutpoint))

# + [markdown] id="scOALQG0refU" colab_type="text"
# Now, randomly select the same amount of domains from both benigns and dgas.

# + id="AdaiHDsSrefV" colab_type="code" colab={}
import random

# + id="8nWbbulxrefX" colab_type="code" colab={}
# Draw 150K benign and 150K DGA indices (the original comment said 50K,
# contradicting the value below).
sampling_size = 150000
pos_indices = random.sample(range(pos_neg_cutpoint), sampling_size)
neg_indices = random.sample(range(pos_neg_cutpoint, len(X)), sampling_size)

# + id="hN2l2GPTrefZ" colab_type="code" colab={}
new_X = X[pos_indices + neg_indices]
new_Y = Y[pos_indices + neg_indices]

# + id="hx6CRDBFrefb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="6cf25f3c-ccf0-448f-d07d-302ab7b97601"
new_X.shape

# + id="Otwx6GAoreff" colab_type="code" colab={}
# Add a trailing channel axis for Conv1D.
# BUG FIX: derive the shape from the data instead of hard-coding
# (300000, 73, 1), which silently broke whenever sampling_size or the
# maximum domain length changed.
newnew_x = np.reshape(new_X, (new_X.shape[0], new_X.shape[1], 1))

# + id="88aIouMsrefl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="c6199dfc-0557-4a1f-ffab-6723b19e2408"
newnew_x.shape

# + id="winEou6Trefn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="46178ddf-ec8b-4d84-c3ec-d9f7ed64a8b8"
len(new_X)
# + [markdown] id="tYBLazl1refq" colab_type="text"
# #### Fit the model
# + id="b9AdMqAwrefq" colab_type="code" colab={}
#training parameters
max_epoch=25
nfolds=10
batch_size=128

# + id="v55Na3o-reet" colab_type="code" colab={}
#call backs
# Stop training once validation loss has not improved for 5 epochs.
from keras.callbacks import EarlyStopping
cb = []
cb.append(EarlyStopping(monitor='val_loss',
                        min_delta=0, #an absolute change of less than min_delta, will count as no improvement
                        patience=5, #number of epochs with no improvement after which training will be stopped
                        verbose=0,
                        mode='auto',
                        baseline=None,
                        restore_best_weights=False))
# + id="xjQVayyirefw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 301} outputId="5c4b4c7a-0f1f-4bfe-a031-91b697c7881f"
model = build_model(max_features_num, maxlen)
model.summary()

# + id="T90Jkt_uref0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 555} outputId="f8cca9fb-412c-49e2-889a-97e3ae16b719"
# Train on the 300K resampled set; the last 20% of the (shuffled) data is
# held out as the validation split monitored by EarlyStopping.
history = model.fit(x=newnew_x, y=new_Y,
                    batch_size=batch_size,
                    epochs=max_epoch,
                    verbose=1,
                    callbacks=cb,
                    validation_split=0.2, #
                    validation_data=None,
                    shuffle=True,
                    class_weight=None,
                    sample_weight=None,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None)

# + id="n3wSCPXBref2" colab_type="code" colab={}
# Persist the trained model to disk.
model.save('CNN_on_300K')
# + [markdown] id="rU6LGfPiref3" colab_type="text"
# #### Plot training & validation accuracy values
# + id="rPVEhFG6ref4" colab_type="code" colab={}
import matplotlib.pyplot as plt

# + id="G5QefLEQref5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 376} outputId="cda57501-c1e6-40dc-d461-055163a00971"
# Plot training & validation accuracy values
# NOTE: the 'Test' legend entry is actually the 20% validation split.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# + id="mMFmt9EFref8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 376} outputId="73b30d05-9be3-4a4a-d80a-be9560210889"
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# + id="XxHee_6yref-" colab_type="code" colab={}
# + id="0Ken0x2HregA" colab_type="code" colab={}
# + id="UY1NjmgtregE" colab_type="code" colab={}
| experiments/CNN_EX6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
import sklearn as sk
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
filename = "task1.train.txt"
# filename = "task1.train.txt"

# Parse the tab-separated training file into (text, article_id, propaganda)
# rows. A dead `df = pd.DataFrame(columns=...)` initialization that was
# immediately overwritten below has been removed.
# +
with open(filename, "r") as f:
    x = [line.split("\t") for line in f]
# -

df = pd.DataFrame(x, columns=["text", "article_id", "propaganda"])
df

# Map the label strings (which still carry their trailing newline) to 1/0.
df['propaganda'] = df['propaganda'].map({'propaganda\n': 1, 'non-propaganda\n': 0})
df.describe(include='all')
# Class distribution of the binary label.
counts = df.propaganda.value_counts()
counts
len(df)
# +
# This is a highly skewed dataset. There are only 4021
print("Number of Propaganda items", counts[1])
print("Number of Not Propaganda items", counts[0])
print("Total number of records", len(df))
percentage = (counts[1]*100)/len(df)
print("percentage of propaganda items {0:.2f}%".format(percentage))
# -
# NOTE: `counts` is rebound here from a Series to a plain list.
values = df['propaganda'].value_counts().keys().tolist()
counts = df['propaganda'].value_counts().tolist()
for i in values:
    print(i)
# +
# printing some random posts
sent_0 = df['text'].values[0]
print(sent_0)
print("="*50)
sent_1000 = df['text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = df['text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = df['text'].values[4900]
print(sent_4900)
print("="*50)
# +
# Doing some basic preprocessing
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
    """Expand common English contractions in *phrase*.

    Specific forms (won't / can't) are rewritten before the generic
    suffix rules, so rule order matters and must not be changed.
    """
    rules = (
        (r"won't", "will not"),
        (r"can\'t", "can not"),
        (r"n\'t", " not"),
        (r"\'re", " are"),
        (r"\'s", " is"),
        (r"\'d", " would"),
        (r"\'ll", " will"),
        (r"\'t", " not"),
        (r"\'ve", " have"),
        (r"\'m", " am"),
    )
    for pattern, replacement in rules:
        phrase = re.sub(pattern, replacement, phrase)
    return phrase
# +
#NLTK STOPWORDS
from nltk.corpus import stopwords
stopWords = set(stopwords.words('english'))
# NOTE(review): `stopWords` (the NLTK set) is never used below; the custom
# set on the next line REBINDS the name `stopwords`, shadowing the
# imported nltk module. Negation words (no/nor/not) are deliberately kept
# out of removal only if absent -- here they ARE listed, so they will be
# stripped during preprocessing.
# MY STOPWORDS
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
"you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
'won', "won't", 'wouldn', "wouldn't", "no", "nor","not"])
# -
# Combining all of the above cleaning steps into one preprocessing pass.
from tqdm import tqdm
preprocessed_text = []
# tqdm is for printing the status bar
for sentance in tqdm(df['text'].values):
    # Strip URLs, HTML markup, contractions, digit-bearing tokens and
    # non-alphabetic characters; then lowercase and drop stopwords.
    sentance = re.sub(r"http\S+", "", sentance)
    sentance = BeautifulSoup(sentance, 'lxml').get_text()
    sentance = decontracted(sentance)
    sentance = re.sub("\S*\d\S*", "", sentance).strip()
    sentance = re.sub('[^A-Za-z]+', ' ', sentance)
    # https://gist.github.com/sebleier/554280
    sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
    preprocessed_text.append(sentance.strip())
preprocessed_text[0]
# Writing the preprocessed text back into the Dataframe Final
df["Cleaned_Text"] = preprocessed_text
final = df
final.head()
# # Splitting into Train and Test
# Keep a pristine copy so the split can be re-run from here.
restart_here = final.copy()
# +
# Ordering the dataset
final = restart_here
# Splitting the dataset into Train, Test.
# Let's go for an 80 - 20 split.
labels = final["propaganda"]
finals = final.drop(columns=["propaganda"], axis=1, inplace=False)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(finals, labels, test_size=0.2, random_state=42)
print(len(X_train))
print(len(X_test))
# -
finals.head()
# # BAG OF WORDS
# +
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
# -
# +
#BoW
# Exploratory bag-of-words over the WHOLE corpus.
# NOTE(review): fitting on all of preprocessed_text (train + test) leaks
# test vocabulary; the train/test pipeline below refits on train only.
count_vect = CountVectorizer() #in scikit-learn
count_vect.fit(preprocessed_text)
print("some feature names ", count_vect.get_feature_names()[:10])
print('='*50)
final_counts = count_vect.transform(preprocessed_text)
print("the type of count vectorizer ",type(final_counts))
print("the shape of out text BOW vectorizer ",final_counts.get_shape())
print("the number of unique words ", final_counts.get_shape()[1])
# -
from sklearn.neighbors import KNeighborsClassifier
# +
# Please write all the code with proper documentation
def bag_of_words(train_data, test_data, validation_data = None):
    """Fit a CountVectorizer on *train_data* and vectorize both splits.

    Returns (train_vectors, test_vectors) as sparse matrices. If
    *validation_data* is provided it is vectorized too, but -- matching the
    original interface -- it is not returned to callers.
    """
    count_vect = CountVectorizer() #in scikit-learn
    count_vect.fit(train_data)
    print("some feature names ", count_vect.get_feature_names()[:10])
    print('='*50)
    train_data_vectorized = count_vect.transform(train_data)
    test_data_vectorized = count_vect.transform(test_data)
    # BUG FIXES: `if validation_data:` raises ValueError for a DataFrame
    # (ambiguous truth value) -- compare against None instead; and the
    # original called the misspelled `count_vect.tranform`, an
    # AttributeError whenever validation data was actually passed.
    if validation_data is not None:
        validation_data_vectorized = count_vect.transform(validation_data)
    else:
        validation_data_vectorized = None
    print("the type of count vectorizer ",type(train_data_vectorized))
    print("the shape of out text BOW vectorizer ",train_data_vectorized.get_shape())
    print("the number of unique words ", train_data_vectorized.get_shape()[1])
    return train_data_vectorized, test_data_vectorized
# -
train_data_vectorized , test_data_vectorized = bag_of_words(X_train["text"], X_test["text"])

# +
# Grid-search KNN over odd neighbor counts 1..9 with 3-fold CV.
from sklearn.model_selection import GridSearchCV
knn = KNeighborsClassifier()
n_list = range(1,10,2)
parameters = {"n_neighbors":n_list}
clf = GridSearchCV(knn, parameters, cv=3, verbose=2, n_jobs=2)
clf.fit(train_data_vectorized,y_train)
# -

clf.best_params_

# Refit a single KNN with n_neighbors=7.
# NOTE(review): 7 is hard-coded -- presumably copied from clf.best_params_;
# confirm it still matches after any data change.
clf2 = KNeighborsClassifier(n_neighbors=7)
clf2.fit(train_data_vectorized,y_train)
clf2.score(test_data_vectorized,y_test)
y_pred = clf2.predict(test_data_vectorized)
y_true = y_test
# +
# KNN evaluation: F1, recall and precision on the held-out test split.
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
print(f1_score(y_true, y_pred))
print(recall_score(y_true, y_pred))
print(precision_score(y_true, y_pred))
# >>> y_true = [0, 1, 2, 0, 1, 2]
# >>> y_pred = [0, 2, 1, 0, 0, 1]
# >>> f1_score(y_true, y_pred, average='macro')
# -
from sklearn.naive_bayes import GaussianNB

# GaussianNB requires dense input; CountVectorizer emits sparse matrices.
gnb = GaussianNB()
train_data_vectorized_dense = train_data_vectorized.toarray()
test_data_vectorized_dense = test_data_vectorized.toarray()
gnb.fit(train_data_vectorized_dense, y_train)
# BUG FIX: predict() takes only the feature matrix -- the original passed
# y_test as a second positional argument, which raises a TypeError.
gnb_pred = gnb.predict(test_data_vectorized_dense)
# +
# GaussianNB evaluation on the same held-out split.
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
print(f1_score(y_true, gnb_pred))
print(recall_score(y_true, gnb_pred))
print(precision_score(y_true, gnb_pred))
# >>> y_true = [0, 1, 2, 0, 1, 2]
# >>> y_pred = [0, 2, 1, 0, 0, 1]
# >>> f1_score(y_true, y_pred, average='macro')
# -
# -
# Example adapted from the scikit-learn GridSearchCV documentation.
# BUG FIX: the original was pasted verbatim as an interactive doctest
# session ('>>>' prompts plus expected-output echo lines), which is a
# SyntaxError in a script; rewritten as executable code with the same
# effect. Note this rebinds `parameters` and `clf` from the KNN cells.
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV

iris = datasets.load_iris()
parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
svc = svm.SVC(gamma="scale")
clf = GridSearchCV(svc, parameters, cv=5)
clf.fit(iris.data, iris.target)
sorted(clf.cv_results_.keys())
| .ipynb_checkpoints/Datathon-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.053004, "end_time": "2021-12-28T08:38:39.779816", "exception": false, "start_time": "2021-12-28T08:38:39.726812", "status": "completed"} tags=[]
# # Commodity skewness
#
# This notebook analyses commodity cross-sectional skewness strategy. The strategy takes long positions on contracts with most negative historical skewness and short positions on ones with most positive skewness.
# + papermill={"duration": 2.846246, "end_time": "2021-12-28T08:38:42.676055", "exception": false, "start_time": "2021-12-28T08:38:39.829809", "status": "completed"} tags=[]
# %matplotlib inline
from datetime import datetime
import logging
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
plt.style.use('bmh')
from vivace.backtest import signal
from vivace.backtest import processing
from vivace.backtest.contract import all_futures_hollstein2020
from vivace.backtest.engine import BacktestEngine
from vivace.backtest.enums import Strategy
from vivace.backtest.stats import Performance
# + [markdown] papermill={"duration": 0.054632, "end_time": "2021-12-28T08:38:42.784517", "exception": false, "start_time": "2021-12-28T08:38:42.729885", "status": "completed"} tags=[]
# # Data
#
# 26 commodity futures are used as per Hollstein 2020.
# + papermill={"duration": 0.068195, "end_time": "2021-12-28T08:38:42.902854", "exception": false, "start_time": "2021-12-28T08:38:42.834659", "status": "completed"} tags=[]
all_futures_hollstein2020
# + papermill={"duration": 0.068825, "end_time": "2021-12-28T08:38:43.030631", "exception": false, "start_time": "2021-12-28T08:38:42.961806", "status": "completed"} tags=[]
all_futures_hollstein2020.shape
# + [markdown] papermill={"duration": 0.052731, "end_time": "2021-12-28T08:38:43.141587", "exception": false, "start_time": "2021-12-28T08:38:43.088856", "status": "completed"} tags=[]
# # Performance
# + [markdown] papermill={"duration": 0.052555, "end_time": "2021-12-28T08:38:43.246699", "exception": false, "start_time": "2021-12-28T08:38:43.194144", "status": "completed"} tags=[]
# ## Run backtest
#
# A simple portfolio is constructed by using trailing 1-year returns of each commodity futures. Unlike studies in equities, the recent 1-month is included in the formation period. Positions are rebalanced on a monthly basis.
#
# Similar to other popular academic strategies, this commodity momentum signal has been stalling since around 2015.
# + papermill={"duration": 272.141536, "end_time": "2021-12-28T08:43:15.441711", "exception": false, "start_time": "2021-12-28T08:38:43.300175", "status": "completed"} tags=[]
# Configure the backtest: delta-one exposure per contract; the signal is
# trailing 252-day cross-sectional skewness, negated (so the most negatively
# skewed contracts go long, per the strategy description above) and
# forward-filled to month-end frequency for monthly rebalancing.
engine = BacktestEngine(
    strategy=Strategy.DELTA_ONE.value,
    instrument=all_futures_hollstein2020.index,  # 26-contract universe
    signal=signal.XSSkewness(lookback=252,
                             post_process=processing.Pipeline([
                                 processing.Negate(),
                                 processing.AsFreq(freq='m', method='pad')
                             ])),
    log_level=logging.WARN,  # silence per-instrument INFO output on this long run
)
engine.run()
# + papermill={"duration": 100.136211, "end_time": "2021-12-28T08:44:55.631461", "exception": false, "start_time": "2021-12-28T08:43:15.495250", "status": "completed"} tags=[]
# Equity curve of the strategy; calculate_net=False presumably excludes
# transaction costs (gross returns) -- confirm against BacktestEngine docs.
portfolio_return = (engine.calculate_equity_curve(calculate_net=False)
                    .rename('Commodity skewness portfolio'))
# + papermill={"duration": 0.607431, "end_time": "2021-12-28T08:44:56.312478", "exception": false, "start_time": "2021-12-28T08:44:55.705047", "status": "completed"} tags=[]
# Full-sample cumulative performance; log scale so steady compounding
# plots as a straight line.
fig, ax = plt.subplots(figsize=(8, 4.5))
portfolio_return.plot(ax=ax, logy=True);
ax.set_title('Commodity skewness portfolio')
ax.set_ylabel('Cumulative returns');
# + papermill={"duration": 0.138579, "end_time": "2021-12-28T08:44:56.509926", "exception": false, "start_time": "2021-12-28T08:44:56.371347", "status": "completed"} tags=[]
portfolio_return.pipe(Performance).summary()
# + [markdown] papermill={"duration": 0.058445, "end_time": "2021-12-28T08:44:56.627446", "exception": false, "start_time": "2021-12-28T08:44:56.569001", "status": "completed"} tags=[]
# ## Post publication
# + papermill={"duration": 0.84118, "end_time": "2021-12-28T08:44:57.527453", "exception": false, "start_time": "2021-12-28T08:44:56.686273", "status": "completed"} tags=[]
# Mark the paper's publication date on the full history and show the
# post-publication sub-sample in a second panel.
pub_date = datetime(2018, 1, 11)
fig, axes = plt.subplots(1, 2, figsize=(14, 4))
portfolio_return.plot(ax=axes[0], logy=True);
axes[0].set_title('Commodity skewness portfolio')
axes[0].set_ylabel('Cumulative returns');
axes[0].axvline(pub_date, lw=1, ls='--', color='black')
axes[0].text(pub_date, 100, 'Publication date ', ha='right')
portfolio_return.loc[pub_date:].plot(ax=axes[1], logy=True);
axes[1].set_title('Commodity skewness portfolio (post publication)');
# + [markdown] papermill={"duration": 0.069077, "end_time": "2021-12-28T08:44:57.665317", "exception": false, "start_time": "2021-12-28T08:44:57.596240", "status": "completed"} tags=[]
# Fernandez-Perez et al (2018) only uses data between 1987 and 2014. And in fact the chart below looks better than the full sample period. This result suggests that the skewness signal is weaker than others (e.g., trend, carry) even if it's a priced risk.
# + papermill={"duration": 0.46302, "end_time": "2021-12-28T08:44:58.196029", "exception": false, "start_time": "2021-12-28T08:44:57.733009", "status": "completed"} tags=[]
# Restrict to the 1987-2014 window used by Fernandez-Perez et al (2018).
fig, ax = plt.subplots(figsize=(8, 4.5))
portfolio_return['1987':'2014'].plot(ax=ax, logy=True);
ax.set_title('Commodity skewness portfolio')
ax.set_ylabel('Cumulative returns');
# + [markdown] papermill={"duration": 0.063686, "end_time": "2021-12-28T08:44:58.323699", "exception": false, "start_time": "2021-12-28T08:44:58.260013", "status": "completed"} tags=[]
# ## Recent performance
# + papermill={"duration": 0.495575, "end_time": "2021-12-28T08:44:58.884521", "exception": false, "start_time": "2021-12-28T08:44:58.388946", "status": "completed"} tags=[]
# Zoom in on the most recent two years (252 trading days per year).
fig, ax = plt.subplots(figsize=(8, 4.5))
portfolio_return.tail(252 * 2).plot(ax=ax, logy=True);
ax.set_title('Commodity skewness portfolio')
ax.set_ylabel('Cumulative returns');
# + [markdown] papermill={"duration": 0.066481, "end_time": "2021-12-28T08:44:59.017808", "exception": false, "start_time": "2021-12-28T08:44:58.951327", "status": "completed"} tags=[]
# # Reference
# - <NAME>. and <NAME>., 2019. Cross-Asset Skew. Available at SSRN.
# - <NAME>., <NAME>., <NAME>. and <NAME>., 2018. The skewness of commodity futures returns. Journal of Banking & Finance, 86, pp.143-158.
# - <NAME>., <NAME>. and <NAME>., 2020. Anomalies in commodity futures markets: Risk or mispricing?. Available at SSRN.
# + papermill={"duration": 0.072497, "end_time": "2021-12-28T08:44:59.157508", "exception": false, "start_time": "2021-12-28T08:44:59.085011", "status": "completed"} tags=[]
print(f'Updated: {datetime.utcnow().strftime("%d-%b-%Y %H:%M")}')
| commodity_skewness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# <h2>Exploratory Data Analysis Notebook</h2>
# <br>
# Let's begin by importing the necessary libraries and modules.
# + id="AmIFEboc622k"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
import os
# -
# We set a few figure parameters for the <i>matplotlib</i> plots.
# + id="Upr02oLypUkT"
# Set the default figure size and font size in a single rcParams update.
plt.rcParams.update({'figure.figsize': (10.0, 30.0), 'font.size': 14})
# -
# We download the necessary files from <i>Google Drive.</i>
# <br>
# <br>
# The local directory is not utilised since this notebook was created on <i>Google Colaboratory</i> and hence, having the data on the clould and downloading them whenever required was the better option.
# + colab={"base_uri": "https://localhost:8080/"} id="IaKUXf6C9HOd" outputId="123d8047-b736-4e4c-8b9c-c111fb07c4fe"
# !gdown https://drive.google.com/uc?id=1oCIVZbIWmg2RvcCHcxko5alyZh19nGoF
# !gdown https://drive.google.com/uc?id=1vCS6gDbsS3mVE5WLHM0nu-ww0GcI1ybN
# !gdown https://drive.google.com/uc?id=10_pO2fGiAKSdNjO-p3d1dFo3VuY1juL5
# !gdown https://drive.google.com/uc?id=1kSH1dteCOdFT5eFFM-1DCdbl7TZ4gXzS
# !gdown https://drive.google.com/uc?id=1nRKTC6Pez1fRAMYkFIHQNLfB0Hhj5fok
# !gdown https://drive.google.com/uc?id=1lA19-bvYJ_btc1WSXpKqU9L6nD3dpUjR
# !gdown https://drive.google.com/uc?id=1Av_X9N5BB38edPLK0vtYIBQ9toUdDB1e
# !gdown https://drive.google.com/uc?id=11u-MTRj8qiqC0rsB23F37lc4MjNh3Fhk
# -
# We now have all the datasets <i>(Monday to Friday)</i> in our pressent working directory.
# <br>
# <br>
# Since this is the initial stage of the EDA process, let us examine a random dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 275} id="Axl6yCeXdNwQ" outputId="46bb8b9d-daba-4c77-98b3-c9e38ea4badb"
# Thursday's infiltration capture serves as a representative sample for the
# first-pass EDA; the remaining daily files are merged later.
df = pd.read_csv("Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df.head()
# -
# Let's get into the <i>schema</i> of the data.
# + colab={"base_uri": "https://localhost:8080/"} id="6TbT2thGfgY-" outputId="45896498-1f28-4019-a11c-26a04c2e75c5"
df.info()
# -
# We notice that for every few columns, the column names have an <i>extra " "</i> in the beginning.<br>
# Thus, let's add it to our checklist that we should rectify this.<br>
# <br>
# Let's now move on with the statistics of the data.
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="8z1bawixfk9W" outputId="7b10f164-cf4f-42f5-a9fe-b309955ff95a"
df.describe()
# -
# We notice that two columns <i>(Flow Bytes/s and Flow Packets/s)</i> have <i>NULL</i> and <i>INF</i> values in them.<br>
# Let's add this to our checklist as well.<br>
# <br>
# Hence, our checklist contains of two tasks:
# - Rectify column names
# - Remove invalid data
# <br>
# <br>
# Let's start with them
# + id="W9B4E9jifj7j"
# Strip the stray leading space carried by some raw column names so columns
# can be addressed consistently (e.g. df['Label'] rather than df[' Label']).
# Building the mapping once and renaming in a single pass avoids creating a
# whole new DataFrame per renamed column, as the naive per-column loop did;
# startswith() also cannot raise IndexError on an empty column name.
df = df.rename(columns={col: col[1:] for col in df.columns
                        if col.startswith(' ')})
cols = list(df.columns)
# -
# Since the number of NULL and INF records are very few, we can go ahead and delete the records since they may not amount to much of a difference.
# + id="nbYzBUWpfIiB"
# The NULL/INF rows are few, so drop them outright and renumber the index.
df = (df.replace([np.inf, -np.inf], np.nan)
        .dropna()
        .reset_index(drop=True))
# -
# Now that we have a fairly clean dataframe, we can go ahead with further analytics.<br>
# <br>
# Let's start with understanding by what magnitude each feature is relevant to the infiltration attacks.<br>
# There are several ways to do this:
# - Correlation Matrix
# - Gini Importance
# - Permutation based Feature Importance
# - SHAP (SHapley Additive exPlanations)
# <br>
# <br>
# Let's go ahead with the classic Gini Importance using Random Forest Regression.
# + id="18S6JdlZeSbs"
# Features are every column except the trailing 'Label' target.
X = df[cols[:-1]]
y = df['Label']
# -
# We use <i>Label Encoder</i> to convert our String labels to integers to enable training the Regressor. (Machines only understand numbers)
# + id="CjjRBaK7f2W_"
# Encode the string attack labels as integers for the regressor.
le = LabelEncoder()
y = le.fit_transform(y)
# + colab={"base_uri": "https://localhost:8080/"} id="VqwDZYOPe2mP" outputId="824e6195-643d-4304-a83a-95a1e0c2c338"
# 75/25 split with a fixed seed, then fit a 100-tree random forest whose
# impurity-based (Gini) feature importances are inspected below.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=12)
rf = RandomForestRegressor(n_estimators=100)
rf.fit(X_train, y_train)
# -
# Now that our Regressor is ready, let's plot the importance bar graph in descending order of importance.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="OZSV78AFgdSY" outputId="72d4d2d3-f9ba-42bf-ddcb-a223a9d004a8"
# Sort features by impurity-based importance and draw a horizontal bar
# chart (most important feature ends up at the top).
feature_names = cols[: -1]
sorted_idx = rf.feature_importances_.argsort()
plt.barh(np.array(feature_names)[sorted_idx], rf.feature_importances_[sorted_idx])
plt.xlabel("Random Forest Feature Importance")
plt.ylabel("Features")
plt.title("Feature Importances plot")
# -
# With this plot, we obtain insights about how each column affects the malware detection process.<br>
# The few most important features are:
# - Destination Port
# - Subflow Fwd Bytes
# - Total Length of Fwd Packets
# These results are extremely similar to the ones produced by the researchers in the research article.<br>
# <br>
# Similarly, this process can be utilized to obtain the important features for individual malware categories <i>(the results of which are stated in the article)</i>.<br>
# <br>
# Let's move ahead by simply merging all the datasets to create one master dataframe.
# + colab={"base_uri": "https://localhost:8080/"} id="MWYKM4ko9Hy9" outputId="65aa8df8-b8e0-4ebc-d86e-aa9210a96590"
# Collect the daily capture files, excluding Colab's default directories.
datasets = os.listdir()
datasets.remove('.config')
datasets.remove('sample_data')
datasets
# + id="mv-sEvOI9MU1"
# Read every capture and stack them in a single concat call: repeatedly
# concatenating inside a loop re-copies the growing frame on each iteration
# (quadratic in total size), which the pandas docs explicitly warn against.
df = pd.concat([pd.read_csv(name) for name in datasets], axis=0)
cols = list(df.columns)
# Shuffle rows so the per-day file ordering cannot leak into later splits.
df = df.sample(frac = 1)
# -
# Let's perform similar analysis as done earlier.
# + colab={"base_uri": "https://localhost:8080/", "height": 275} id="n7JWuWwb9PnD" outputId="9696242e-828e-4fc4-db59-b8b004cf631f"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="EB78XsW_9RJr" outputId="b0400c4d-bcb2-4716-d0d2-25167e696f09"
df.info()
# + id="Eqk0jZbEpGZM"
# Strip the stray leading space from affected column names in one rename
# pass instead of one rename (and full-frame copy) per column; startswith()
# also cannot raise IndexError on an empty column name.
df = df.rename(columns={col: col[1:] for col in df.columns
                        if col.startswith(' ')})
cols = list(df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="fiXJdxP79S3D" outputId="41e2ee19-5169-46d6-ff42-95a8ce49dda4"
df.describe()
# -
# Let's now look at the number of unique values in each column.
# + colab={"base_uri": "https://localhost:8080/"} id="aTRWLn729W_E" outputId="21152e82-8ff6-4434-cb5c-005fb53abbf0"
# Cardinality scan: print the unique-value count per column, then list the
# distinct values of any low-cardinality (< 20) column -- this surfaces the
# label classes examined below.
print(df.nunique())
print()
for col in df:
    if(df[col].nunique()<20):
        print(col, ": ",df[col].unique())
# -
# We notice that the <i>Label</i> column various <i>Web</i> attacks stated.<br>
# But, they work in similar ways (according to the dataset) and hence, grouping them as 'Web' would be a good idea.
# + colab={"base_uri": "https://localhost:8080/"} id="WFviNG4_9ZLc" outputId="44b3481a-65c2-46e3-d68e-03671838fe01"
# Collapse every label whose first three characters are 'Web' or 'Sql' into
# a single 'Web' class, since these attacks behave similarly in this data;
# then recount the class distribution.
df.iloc[:,-1] = df.iloc[:, -1].apply(lambda x : 'Web' if x[:3] == 'Web' or x[:3] == 'Sql' else x)
df.iloc[:, -1].value_counts()
# -
# We have an idea of how our data looks like. Hence, the EDA process is complete.<br>
# Let's move ahead with the ETL process now.
| EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp losses
# -
# # Losses
#
# > Custom loss functions.
#hide
from nbdev.showdoc import *
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
# export
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from htools import add_docstring, valuecheck, identity
from incendio.layers import SmoothLogSoftmax
# +
# Used for testing only.
from fastai2.metrics import LabelSmoothingCrossEntropy
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
from htools import assert_raises, InvalidArgumentError
# -
# export
def smooth_soft_labels(labels, alpha=.1):
    """Spread a small amount of probability mass onto zero-probability
    classes.

    Works on one-hot hard labels as well as soft distributions: every class
    that currently has zero probability ends up with alpha/k (k = number of
    classes), and the mass paying for it is taken evenly from the nonzero
    classes. Note that some implementations use alpha/(k-1) instead, which
    would produce slightly different output.

    Parameters
    ----------
    labels: torch.tensor
        Rows are samples, columns are classes; either one hot encodings or
        soft probabilities. Shape (batch_size, num_classes).
    alpha: float
        Non-negative smoothing strength. Larger values push the targets
        further toward uniform (stronger regularization, noisier-label
        tolerance, less confident predictions).

    Returns
    -------
    torch.tensor: Same shape as `labels`; returned unchanged when alpha=0.

    Raises
    ------
    InvalidArgumentError: If alpha is negative.
    """
    if alpha < 0:
        raise InvalidArgumentError('Alpha must be non-negative.')
    # Skip the arithmetic entirely when no smoothing is requested.
    if alpha == 0:
        return labels
    n_classes = labels.shape[-1]
    # Per-row count of nonzero classes, kept as a column so the subtraction
    # broadcasts across the class dimension.
    support = (labels > 0).float().sum(dim=-1, keepdim=True)
    shaved = torch.clamp_min(labels - alpha / support, 0)
    return shaved + alpha / n_classes
# export
def soft_label_cross_entropy_with_logits(y_pred, y_true, alpha=0.0,
                                         reduction='mean'):
    """Cross entropy between logits and soft target distributions.

    PyTorch's built-in multiclass cross entropy wants integer class
    indices, which rules out distribution-valued (soft) targets, and
    fastai's label smoothing loss can only spread uncertainty uniformly.
    This version accepts arbitrary soft targets and can optionally smooth
    them further via `smooth_soft_labels`.

    Parameters
    ----------
    y_pred: torch.FloatTensor
        Logits output by the model. Shape (bs, num_classes).
    y_true: torch.FloatTensor
        Soft labels with values between 0 and 1. Shape (bs, num_classes).
    alpha: float
        Label smoothing strength forwarded to `smooth_soft_labels`;
        zero leaves the targets untouched.
    reduction: str
        One of ('mean', 'sum', 'none'), with the usual PyTorch meaning.

    Returns
    -------
    torch.FloatTensor: Shape (bs,) when reduction is 'none'; otherwise a
    single-value tensor.
    """
    log_probs = F.log_softmax(y_pred, dim=-1)
    targets = smooth_soft_labels(y_true, alpha)
    rowwise = (-targets * log_probs).sum(-1)
    if reduction == 'none':
        return rowwise
    return getattr(rowwise, reduction)(0)
# export
@add_docstring(soft_label_cross_entropy_with_logits)
def soft_label_cross_entropy(y_pred, y_true, alpha=0.0, reduction='mean'):
    """Variant of `soft_label_cross_entropy_with_logits` that takes softmax
    probabilities instead of logits; the logits version is recommended for
    numerical stability. Its docstring follows below -- the only difference
    here is that y_pred holds probabilities rather than logits.
    """
    per_class = -smooth_soft_labels(y_true, alpha) * torch.log(y_pred)
    # log(0) yields -inf and 0 * inf yields nan; zero those entries out so
    # they do not poison the row sums.
    invalid = torch.isnan(per_class) | torch.isinf(per_class)
    per_class = torch.where(invalid, torch.zeros_like(per_class), per_class)
    rowwise = per_class.sum(-1)
    if reduction == 'none':
        return rowwise
    return getattr(rowwise, reduction)(0)
# For demonstration, each row of our soft labels tries to capture a different case:
# - Row 0: High confidence label for one class, model has high confidence in the correct class.
# - Row 1: Moderate confidence in 2 different classes, model has high confidence in the best class.
# - Row 2: Confidence is split between a few classes, model predictions assign most probability to the two nonzero but non-ideal classes.
#
# Row 2 should benefit slightly from label smoothing since it predicts answers that are not ideal but not entirely wrong.
# 3 row mini batch of hand-built fixtures (the markdown above explains the
# scenario each row represents).
y_label = torch.tensor([0, 1, 3])  # integer class indices (for fastai's loss)
y_ohe = torch.tensor([[1, 0, 0, 0, 0],
                      [0, 1, 0, 0, 0],
                      [0, 0, 0, 1, 0]], dtype=torch.float)  # hard one-hot targets
y_smooth = torch.tensor([[.9, .1, 0, 0, 0],
                         [.35, .65, 0, 0, 0],
                         [0, .1, .2, .7, 0]])  # soft targets
logits = torch.tensor([[6, 1, 3, 1, 2],
                       [5, 8, 1, 1, 2],
                       [1, 3, 4, 2, 0]], dtype=torch.float)  # raw model outputs
y_hat = F.softmax(logits, dim=-1)  # probability version of the logits
y_hat
# +
# Test: smooth_soft_labels
# Expected values worked out by hand: each row gives up alpha/#nonzero from
# its nonzero classes and every class gains alpha/5 back.
assert torch.allclose(
    smooth_soft_labels(y_smooth, .1),
    torch.tensor([[0.8700, 0.0700, 0.0200, 0.0200, 0.0200],
                  [0.3200, 0.6200, 0.0200, 0.0200, 0.0200],
                  [0.0200, 0.0867, 0.1867, 0.6867, 0.0200]]),
    atol=1e-3), 'Case 1: alpha=0.1'
# alpha=0 must be a no-op.
assert torch.allclose(smooth_soft_labels(y_smooth, 0.0), y_smooth, atol=1e-3),\
    'Case 2: alpha=0.0'
# Negative alpha must be rejected.
with assert_raises(InvalidArgumentError):
    smooth_soft_labels(y_smooth, -.1)
# -
# Exercise each reduction mode against the hard one-hot targets.
modes = ('none', 'sum', 'mean')
for mode in modes:
    j = soft_label_cross_entropy_with_logits(logits, y_ohe, reduction=mode)
    print(f'reduction={mode}:', j)
# +
# The logits and probability versions should agree on the soft targets.
print('soft_label_cross_entropy_with_logits:\n' + '-'*37)
for mode in modes:
    j = soft_label_cross_entropy_with_logits(logits, y_smooth, reduction=mode)
    print(f'reduction={mode}:', j)
print('\nsoft_label_cross_entropy:\n' + '-'*25)
for mode in modes:
    j = soft_label_cross_entropy(y_hat, y_smooth, reduction=mode)
    print(f'reduction={mode}:', j)
# -
# Same check with extra smoothing applied on top of the soft targets.
for mode in modes:
    j = soft_label_cross_entropy_with_logits(logits, y_smooth, alpha=.2,
                                             reduction=mode)
    print(f'reduction={mode}:', j)
def plot_loss_by_alpha(y_pred, y_true, loss_func=None, loss_class=None):
    """Plot per-sample loss as a function of the smoothing parameter alpha.

    Exactly one of `loss_func` / `loss_class` should be supplied; the
    original silently fell through to a confusing TypeError when both were
    omitted, so that case now raises explicitly.

    Parameters
    ----------
    y_pred: torch.Tensor
        Model outputs (logits or probabilities, whatever the loss expects).
        Shape (bs, num_classes).
    y_true: torch.Tensor
        Targets in whatever form the loss expects (indices, one-hot, soft).
    loss_func: callable or None
        Functional loss with signature (y_pred, y_true, alpha, reduction).
    loss_class: type or None
        Loss class constructed as loss_class(alpha, reduction); takes
        precedence over `loss_func` when both are given.

    Raises
    ------
    ValueError: If neither `loss_func` nor `loss_class` is provided.
    """
    if loss_func is None and loss_class is None:
        raise ValueError('Pass either loss_func or loss_class.')
    alphas = np.arange(0, 1, .05)
    # Un-reduced losses so we can track each sample's curve separately.
    if loss_class:
        js = {a: loss_class(a, 'none')(y_pred, y_true) for a in alphas}
    else:
        js = {a: loss_func(y_pred, y_true, a, 'none') for a in alphas}
    fig, ax = plt.subplots()
    # One curve per sample in the batch (generalized from the hard-coded 3).
    for i in range(len(y_pred)):
        ax.plot(alphas, [x[i] for x in js.values()], label=i)
    plt.xlabel('Alpha (Smoothing Parameter)')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
# Notice the gap between row 2 begins to narrow, as predicted.
plot_loss_by_alpha(y_hat, y_smooth, soft_label_cross_entropy)
# Logits variant on the same soft targets, then on hard one-hot targets.
plot_loss_by_alpha(logits, y_smooth, soft_label_cross_entropy_with_logits)
plot_loss_by_alpha(logits, y_ohe, soft_label_cross_entropy_with_logits)
# For comparison, here is the fastai label smoothing loss. It should return the same results in this case (just adding uniform noise). The advantage of ours is the ability to specify a prior non-uniform distribution.
plot_loss_by_alpha(logits, y_label, loss_class=LabelSmoothingCrossEntropy)
def classification_mae_with_logits(y_pred, y_true, reduction='mean'):
    """Mean absolute error between softmax(logits) and soft/one-hot labels.

    Useful as a noise-tolerant alternative to cross entropy. Accepting
    logits (rather than probabilities) lets this drop in for losses that
    compute log softmax internally, without touching the model or the
    training loop.

    Parameters
    ----------
    y_pred: torch.FloatTensor
        Logits. Shape (bs, num_classes).
    y_true: torch.FloatTensor
        Labels, either one hot encoded or soft. Shape (bs, num_classes).
    reduction: str
        One of ('mean', 'sum', 'none'): how to aggregate over the batch.
    """
    # The built-in reduction would aggregate over every element of the
    # (bs, num_classes) error tensor; we want one error per sample first
    # (row-wise sum), and only then an optional batch-level aggregate.
    probs = F.softmax(y_pred, dim=-1)
    rowwise = (probs - y_true).abs().sum(-1)
    if reduction == 'none':
        return rowwise
    return getattr(rowwise, reduction)(dim=-1)
# Eyeball the inputs: raw logits, their softmax, and the hard targets.
logits
F.softmax(logits, dim=-1)
y_ohe
# Per-sample MAE under each reduction mode.
for mode in modes:
    j = classification_mae_with_logits(logits, y_ohe, mode)
    print(mode, j)
# export
class PairwiseLossReduction(nn.Module):
    """Basically lets us use L2 or L1 distance as a loss function with the
    standard reductions. If we don't want to reduce, we could use the built-in
    torch function, but that will usually output a tensor rather than a
    scalar.
    """

    @valuecheck
    def __init__(self, reduce:('sum', 'mean', 'none')='mean', **kwargs):
        # `valuecheck` (htools) validates `reduce` against the annotated
        # choices. **kwargs are forwarded to nn.PairwiseDistance (e.g. p,
        # eps, keepdim).
        super().__init__()
        self.distance = nn.PairwiseDistance(**kwargs)
        # 'none' keeps per-pair distances (identity); otherwise resolve to
        # torch.sum / torch.mean.
        self.reduce = identity if reduce == 'none' else getattr(torch, reduce)

    def forward(self, y_proba, y_true):
        # Distance per row pair, then the chosen reduction.
        return self.reduce(self.distance(y_proba, y_true))
# export
def reduce(x, reduction='mean'):
    """Apply a standard loss reduction to a tensor.

    This is the final step of most custom loss functions; naming it reads
    more clearly than the getattr trick it wraps.

    Parameters
    ----------
    x: torch.Tensor
        The object to reduce.
    reduction: str
        Usually one of ('mean', 'sum', 'none'), though technically any
        torch function of a single tensor (e.g. 'std') also works.

    Returns
    -------
    torch.Tensor: Scalar for 'mean'/'sum'; `x` unchanged for 'none'.

    Examples
    --------
    def squared_error(x, reduction='mean'):
        return reduce(x.pow(2), reduction)
    """
    if reduction == 'none':
        return x
    return getattr(torch, reduction)(x)
# export
def contrastive_loss(x1, x2, y, m=1.25, p=2, reduction='mean'):
    """Contrastive loss of Hadsell, Chopra, and LeCun,
    "Dimensionality Reduction by Learning an Invariant Mapping":
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    Similar pairs always contribute to the loss; dissimilar pairs only
    contribute while they are closer than the margin. Two caveats: here
    y=1 marks a SIMILAR pair (the paper uses the opposite convention), and
    the default margin may benefit from tuning.

    Parameters
    ----------
    x1: torch.Tensor
        Shape (bs, n_features).
    x2: torch.Tensor
        Shape (bs, n_features).
    y: torch.Tensor
        Pair labels; 1 means similar. This matches our existing datasets
        and feels more intuitive than the paper's convention.
    m: float
        Margin below which dissimilar pairs are penalized. A reasonable
        range likely depends on the feature dimension; the default is based
        on a figure in the paper linked above.
    p: int
        Order of the p-norm for the initial distance between x1 and x2
        (the default 2 gives euclidean distance).
    reduction: str
        One of ('sum', 'mean', 'none'). Keep in mind 'none' returns a
        rank 2 tensor, which will probably not allow backpropagation.

    Returns
    -------
    torch.Tensor: Scalar contrastive loss, or a shape (bs,) tensor when no
    reduction is applied.
    """
    dist = F.pairwise_distance(x1, x2, p, keepdim=True)
    # Similar pairs are penalized by distance; dissimilar pairs only within
    # the margin m.
    similar_term = y * dist.pow(p).div(2)
    dissimilar_term = (1 - y) * torch.clamp_min(m - dist, 0).pow(p).div(2)
    return reduce(similar_term + dissimilar_term, reduction)
# export
class ContrastiveLoss1d(nn.Module):

    @add_docstring(contrastive_loss)
    def __init__(self, m=1.25, p=2, reduction='mean'):
        """OOP version of contrastive loss. The docs for the functional
        version are below:
        """
        super().__init__()
        # Hyperparameters are stored for introspection; the values actually
        # used at forward time are the ones baked into the partial below.
        self.m = m
        self.p = p
        self.reduction = reduction
        self.loss = partial(contrastive_loss, m=m, p=p, reduction=reduction)

    def forward(self, x1, x2, y_true):
        """
        Parameters
        ----------
        x1: torch.Tensor
            Shape (bs, feature_dim).
        x2: torch.Tensor
            Shape (bs, feature_dim).
        y_true: torch.Tensor
            Shape (bs, 1). 1's indicate the inputs are "similar", 0's indicate
            they are dissimilar.

        Returns
        -------
        torch.Tensor: scalar if `reduction` is not 'none', otherwise tensor
        has same shape as `y_true`.
        """
        # A rank-1 y_true would broadcast against the (bs, 1) distance
        # column inside contrastive_loss and silently produce a (bs, bs)
        # result, hence the guard.
        assert y_true.ndim == 2, "y_true must be rank 2."
        return self.loss(x1, x2, y_true)
# +
# Fixture: two random pairs, then engineer pair 0 to be close (same large
# trend added to both sides) and pair 1 to be far apart.
bs = 2
x1 = torch.randn(bs, 5)
x2 = torch.randn(bs, 5)
y = torch.tensor([1, 0]).unsqueeze(-1)  # (bs, 1): similar, dissimilar
# Make pair 1 similar, pair 2 dissimilar.
x1[0] += torch.arange(0, 100, 20)
x1[1] -= 50
x2[0] += torch.arange(0, 100, 20)
x2[1] += 25
print(x1)
print(x2)
# -
# Each reduction mode on the same batch.
loss = ContrastiveLoss1d(reduction='mean')
loss(x1, x2, y)
loss = ContrastiveLoss1d(reduction='sum')
loss(x1, x2, y)
loss = ContrastiveLoss1d(reduction='none')
loss(x1, x2, y)
# export
class ContrastiveLoss2d(nn.Module):

    @add_docstring(contrastive_loss)
    def __init__(self, m=1.25, p=2, reduction='mean'):
        """OOP version of contrastive loss. We're using the name "2d" somewhat
        differently here: we use this module to compare 1 image to n different
        variants. Picture, for instance, a task where a single example
        contains n+1 images and we want to find which of the final n images
        are similar to the first image. Concretely, this would be a
        multi-label (not multi-class) classification problem with OHE labels.
        The docs for the functional
        version are below:
        """
        super().__init__()
        self.m = m
        self.p = p
        # The functional loss always runs un-reduced here; the chosen
        # reduction is applied in forward() after reshaping back to (bs, n),
        # which is what makes the extra 'row' mode possible.
        self.loss = partial(contrastive_loss, m=m, p=p, reduction='none')
        if reduction == 'none':
            self.reduction = identity
        elif reduction == 'row':
            # One loss value per sample: sum over its n comparisons.
            self.reduction = partial(torch.sum, dim=-1)
        else:
            self.reduction = getattr(torch, reduction)

    def forward(self, x1, x2, y_true):
        """
        Parameters
        ----------
        x1: torch.Tensor
            Shape (bs, feats).
        x2: torch.Tensor
            Shape (bs, n_item, n_feats).
        y_true:
            Shape (bs, n_item).

        Returns
        -------
        torch.Tensor: scalar if reduction is 'mean' or 'sum'; same shape as y
        if reduction is 'none'; shape (bs,) if reduction is 'row'.
        """
        # Flatten to (bs*n, dim) pairs: each anchor row in x1 is repeated n
        # times so it lines up with each of its n candidate variants.
        bs, n, dim = x2.shape
        res = self.loss(x1.repeat_interleave(n, dim=0),
                        x2.view(-1, dim),
                        y_true.view(-1, 1))
        return self.reduction(res.view(bs, -1))
x2
# +
# x3 provides 3 examples for each source image in x2. Some are different
# and some are similar (notice where noise is amplified vs. dampened).
noise = torch.rand(bs, 3, 5) * 10
noise[0, [0, -1]] /= 100  # variants 0 and 2 of sample 0: tiny noise
noise[0, 1] *= 5          # variant 1 of sample 0: amplified noise
noise[-1, -1] += 500      # last variant of sample 1: shifted far away
x3 = x2[:, None, ...] + noise
print(x3.shape)
print(x3)
# -
# Multi-label targets: which of the 3 variants match each anchor.
y2d = torch.tensor([[1, 0, 1],
                    [1, 1, 0]])
y2d
# Each reduction mode, including the 2d-specific 'row' per-sample sum.
loss = ContrastiveLoss2d()
loss(x2, x3, y2d)
loss = ContrastiveLoss2d(reduction='sum')
loss(x2, x3, y2d)
loss = ContrastiveLoss2d(reduction='row')
loss(x2, x3, y2d)
loss = ContrastiveLoss2d(reduction='none')
loss(x2, x3, y2d)
| notebooks/08_losses.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sklearn
import numpy as np
from random import randrange
from keras import layers, models, optimizers, backend, metrics, callbacks
import codecs
from keras.preprocessing import text, sequence
from nltk.tokenize import word_tokenize
import random
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# %matplotlib inline
plt.rcParams['figure.figsize'] = (15, 12) # set default size of plots
# -
import csv
# Решим задачу классификации текстовых данных. В качестве датасета возьмём базу рецензий сайта IMDB, рецензии размечены на два класса: позитивные и негативные. Такая задача называется sentiment analysis
# 1. Считаем данные из CSV файла
# Read the labeled IMDB reviews from the tab-separated file; column 2 holds
# the review text and column 1 the sentiment label.
texts = []
labels = []
with open('labeledTrainData.tsv', 'r') as csvfile:
    reader = csv.reader(csvfile, delimiter='\t')
    _ = next(reader)  # skip the header row
    for l in reader:
        texts.append(l[2])
        labels.append(l[1])
# Make sure the data was read in correctly:
texts[0]
# В данных сохранилась HTML-разметка. Вероятно, её стоит убрать.
# Теперь необходимо перевести текст в такое представление, с которым нам будет удобно работать. Существует модель [мешка слов](https://en.wikipedia.org/wiki/Bag-of-words_model), которая долгое время использовалась в классических методах. К сожалению, эта модель не учитывает семантическую информацию и векторы, присваиваемые словам, имеют большую размерность, что делает её не лучшим выбором для тренировки нейронной сети.
#
# Мы будем пользоваться [word embeddings](https://en.wikipedia.org/wiki/Word_embedding), специальными векторами рассчитанными таким образом, чтобы учитывать сематническую информацию и при этом иметь небольшой размер. Подробно про рассчёт embeddings на примере Word2Vec можно прочесть в [википедии](https://en.wikipedia.org/wiki/Word2vec), [оригинальносй статье](https://arxiv.org/abs/1301.3781) или слайдах курса.
#
# Существует несколько видов embedding'ов. Сначала воспользуйтесь [Glove](https://nlp.stanford.edu/pubs/glove.pdf).
#
# Скачанный файл состоит из двух столбцов: слово и вектор, который ему соответсвует.
#
# Слова упорядочены по частоте встречаемости в языке. Вам необходимо загрузить эти векторы в словарь.
# Vocab это вспомогательный класс, помогающий работать с вокабуляром. Внутри находятся два словаря, в одном хранится соответсвие между словами и индексами в glove (порядковый номер слова), а в другом -- между индексами и словами.
class Vocab(object):
    """Bidirectional word <-> index mapping for the embedding vocabulary."""
    w2i = None # Word to index
    i2w = None # Index to word
# Следующая функция загружает эмбеддинги с диска. Обратите внимание, что добавляются два специальных ветора: один (PAD) отвечает за отступ (состоит из нулей и имеет нулевой индекс), а другой (UNK) -- за неизвестное слово (заполните его средним значением). Дополните её!
np.mean([[1,2], [2,3], [3,1], [3,1]], axis=0)
def load_embeddings(filename, embedding_size=300):
    """Load GloVe-style word vectors from *filename*.

    Each line of the file is ``<token> v1 v2 ... vN``.  Two special rows are
    added: ``<PAD>`` (all zeros, index 0) and ``<UNK>`` (the mean of all
    loaded vectors).

    Returns (embeddings, vocab): an (N, embedding_size) numpy array and a
    Vocab mapping words <-> row indices.
    """
    # list of vectors; row i corresponds to vocabulary index i
    embeddings_list = []
    # word -> index mapping; index 0 is reserved for the padding token
    vocabulary_mapping = {'<PAD>': 0}
    pad = np.zeros(embedding_size)  # the padding vector is all zeros
    embeddings_list.append(pad)
    with codecs.open(filename, 'rb', 'utf-8') as glove_file:
        for line in glove_file:
            # The original ``token, vector = line.strip().split()`` fails for
            # multi-dimensional vectors: split once and keep the tail.
            parts = line.strip().split()
            if len(parts) != embedding_size + 1:
                continue  # skip malformed lines
            token = parts[0]
            vector = np.asarray(parts[1:], dtype=np.float64)
            # the stored index must match the vector's position in the list
            vocabulary_mapping[token] = len(embeddings_list)
            embeddings_list.append(vector)
    vocabulary_mapping['<UNK>'] = len(embeddings_list)
    # <UNK> is the mean of all real word vectors (excluding <PAD>)
    unk = np.mean(embeddings_list[1:], axis=0)
    embeddings_list.append(unk)
    embeddings = np.array(embeddings_list)
    # build the vocabulary object with both directions of the mapping
    vocab = Vocab()
    vocab.i2w = {v: k for k, v in vocabulary_mapping.items()}
    vocab.w2i = vocabulary_mapping
    print('loaded!')
    return embeddings, vocab
# Загрузим векторы с диска
embeddings, vocab = load_embeddings('путь до файла', 100)
# Теперь напишем функцию, которая будет разбивать предложения на токены (слова), а затем каждому токену ставить в соответствие индекс вектора. Ограничим максимальный размер текста 128 словами (из соображений скорости вычислений). Данное ограничение сильно влияет на качество, если Вы хотите достичь лучших результатов, то необходимо использовать весь текст (и изменить архитектуру сети).
def sent_to_id_vec(sent, vocab, max_len=128, mode='tokenize'):
    """Convert a sentence into a list of vocabulary indices.

    Parameters
    ----------
    sent : str
        Raw sentence; it is lower-cased before tokenisation.
    vocab : Vocab
        Vocabulary with a ``w2i`` word -> index mapping.
    max_len : int or None
        Maximum number of tokens to keep (None keeps all of them).
    mode : str
        'tokenize' uses nltk's word_tokenize, 'split' uses str.split.

    Returns
    -------
    list of int -- indices; out-of-vocabulary words map to ``<UNK>``.

    Raises
    ------
    ValueError -- for an unknown ``mode``.
    """
    sent = sent.lower()
    # two tokenisation modes
    if mode == 'tokenize':
        tokens = word_tokenize(sent)
    elif mode == 'split':
        tokens = sent.split()
    else:
        # Fix: the original raised the undefined name ``Error`` (NameError);
        # ValueError is the appropriate built-in for a bad argument value.
        raise ValueError(f'Unknown mode: {mode}')
    if max_len is not None and len(tokens) > max_len:
        tokens = tokens[:max_len]
    # map every token, falling back to <UNK> for unknown words
    return [vocab.w2i.get(token, vocab.w2i['<UNK>']) for token in tokens]
# Векторизуем наш датасет:
# Vectorise the dataset: every text becomes a list of vocabulary indices.
sequences = [sent_to_id_vec(text, vocab) for text in texts]
# Each dataset element now holds identifiers instead of raw text.
sequences[0]
# Round-trip check: decode the first sequence back into words.
' '.join(vocab.i2w[i] for i in sequences[0])
# Предложения совпадают, значит мы сделали верные преобразования
# Рекуррентные сети способны улавливать зависимости между словами и чувствительны к порядку. Однако, они склонны "забывать" хвосты длинных последовательностей.
#
# Однако, наивная реализация RNN-ячейки не способна показать сколько-нибудь значимые результаты. Воспользуемся ячейкой специального вида, называющейся LSTM. Про LSTM можно прочесть [в блоге Криса Ола](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) и слайдах лекций.
#
#
embeddings.shape
def build_LSTM_classifier():
    """Build and compile a single-layer LSTM binary classifier."""
    # Graph entry point: a batch of token-id sequences of variable length,
    # hence None for the time dimension.
    text_input = layers.Input(shape=(None,), dtype='int32')
    # The embedding layer replaces every identifier with its GloVe vector.
    # trainable=False keeps the pretrained vectors frozen during training;
    # mask_zero=True makes downstream layers ignore <PAD> positions.
    embedding_layer = layers.Embedding(input_dim=embeddings.shape[0],
                                       output_dim=embeddings.shape[1],
                                       weights=[embeddings],
                                       mask_zero=True,
                                       trainable=False)
    hidden = embedding_layer(text_input)
    # Recurrent cell: 256 is the size of the internal state (cell memory).
    # By default only the final state is returned; pass
    # return_sequences=True to get the state at every step instead.
    hidden = layers.LSTM(256, recurrent_dropout=0.25)(hidden)
    # A single sigmoid unit performs the binary classification.
    output = layers.Dense(1, activation='sigmoid')(hidden)
    model = models.Model(inputs=[text_input], outputs=[output],
                         name='LSTM_classifier')
    # Optimise with Adam; the model must be compiled before use.
    adam = optimizers.Adam(lr=0.0001)
    model.compile(adam, 'binary_crossentropy', metrics=['acc'])
    return model
# Start from a clean Keras session so repeated runs don't accumulate state
backend.clear_session()
model = build_LSTM_classifier()
# Print a per-layer summary of the model
model.summary()
# Разобьём датасет на три части
def split_train_val(train_size = 0.6, val_size = 0.1, test_size = 0.3):
    """Slice the module-level ``sequences``/``labels`` into three contiguous
    (x, y) parts; ``test_size`` is implied by the other two fractions."""
    total = len(sequences)
    train_end = int(total * train_size)
    val_end = int(total * (train_size + val_size))
    train_set = (sequences[:train_end], labels[:train_end])
    val_set = (sequences[train_end:val_end], labels[train_end:val_end])
    test_set = (sequences[val_end:], labels[val_end:])
    return train_set, val_set, test_set
(x_train, y_train), (x_val, y_val), (x_test, y_tes) = split_train_val()
# Почти всё готово, чтобы начать обучение. Но так как все предложения разной длины, мы не можем конвертировать x в тензор, нам необходимо выровнять длину. Для этого мы воспользуемся специальной функцией pad_sequences(), доступной в keras. Недостающие элементы будут заполнены специальным символом PAD
#
# Но нам не хотелось бы увеличивать все элементы в датасете до размера максимального. Поэтому будем генерировать батчи сами и применять pad_sequences к ним независимо
def generate_batches(x, y, batch_size=64):
    """Endlessly yield (padded_x, batch_y) mini-batches from x/y.

    Each batch is padded independently with keras' ``pad_sequences`` so the
    whole dataset never has to be padded to the global maximum length.
    """
    i = 0
    while True:
        i = i % len(x)  # wrap around so the generator never stops
        yield sequence.pad_sequences(x[i:i+batch_size]), y[i:i+batch_size]
        i += batch_size
train_generator = generate_batches(x_train, y_train)
# Bug fix: the validation generator previously drew from the *training*
# data (x_train, y_train), so validation metrics were computed on examples
# the model had already seen. It now uses the validation split.
val_generator = generate_batches(x_val, y_val)
# Теперь обучим нашу модель
# +
# Training callbacks: checkpoint the best model by validation loss, log to
# TensorBoard, and reduce the learning rate when val_loss plateaus.
cbs = [
callbacks.ModelCheckpoint('models/LSTM/{epoch:02d}-{val_loss:.4f}.h5', save_best_only=True),
callbacks.TensorBoard(log_dir='models/LSTM/'),
callbacks.ReduceLROnPlateau(factor=0.1, patience=3, verbose=1, min_lr=0.00001, epsilon=0.01)]
# Train from the custom batch generators defined above.
model.fit_generator(generator= train_generator,
validation_data = val_generator,
validation_steps= 40,
steps_per_epoch=256,
epochs=20,
callbacks=cbs)
# -
# Final metrics on the held-out test split (padded globally here).
model.evaluate(sequence.pad_sequences(x_test), y=y_tes, batch_size=64)
# 77% Неплохой результат, но можно лучше.
#
# Проблема в том, что RNN забывают начало последовательности, а в нашем датасете все рецензии достаточно велики. Есть способ уменьшить влияние этого эффекта -- обучить два LSTM, один идёт от начала к концу предложения, а другой -- от конца к началу. Такой подход называется bidirectional
# ** Обучите Bidirectional LSTM **
#
# ** Поэксперементируйте с эмбеддингами **
#
# ** Поэксперементируйте с видом ячеек, попробуйте сделать стэк **
#
# ** Уберите ограничение на 128 слов, адаптируйте модель к возросшей длине **
# ** Попробуйте реализовать attention (или успешно применить чей-то) **
# ** Вы вольны делать всё что угодно, чтобы достичь наилучших результатов **
| lesson_work/HW text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
# # %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('train.csv')
# -
# Per-column count of missing values, then the number of present rows.
# (The original's special case ``i + len(df) if i == 0`` evaluates to the
# same value as ``len(df) - i`` when i == 0, so one expression suffices.)
aaa = pd.DataFrame(df.isnull().sum())
aaa['test'] = [len(df) - i for i in aaa[0]]
# +
sns.set_style(style='white')
# Bar chart of the per-column non-missing counts computed above
g = sns.barplot(aaa.index, aaa['test'], color=(0.133, 0.529, 0.643))
def show_values_on_bars(axs):
    """Annotate every bar in *axs* with its height.

    *axs* may be a single Axes object or a numpy array of Axes (as returned
    by plt.subplots); the label is centred just above each bar.
    """
    def _annotate(ax):
        for patch in ax.patches:
            x_pos = patch.get_x() + patch.get_width() / 2
            y_pos = patch.get_y() + patch.get_height() + 2
            label = '{:.0f}'.format(patch.get_height())
            ax.text(x_pos, y_pos, label, ha="center")
    if isinstance(axs, np.ndarray):
        for _, ax in np.ndenumerate(axs):
            _annotate(ax)
    else:
        _annotate(axs)
sns.set(rc={'figure.figsize':(12,8)})
# Annotate each bar of the missing-values chart with its height
show_values_on_bars(g)
# -
# Display the annotated bar chart built above
g
# Categorical columns to summarise
dfff = df[['Survived', 'Pclass', 'Sex', 'Embarked']]
xxx = {}  # column name -> Axes of its bar chart
yyy = {}  # column name -> value counts as a dict
for i in dfff.columns:
    ff = pd.DataFrame(dfff[i].value_counts())
    sns.set_style(style='white')
    h = sns.barplot(ff.index, ff[i], color=(0.133, 0.529, 0.643))
    sns.set(rc={'figure.figsize':(12,8)})
    show_values_on_bars(h)
    xxx[str(i)] = h
    yyy[str(i)] = ff.to_dict()
    plt.show()
xxx
xxx['Sex'].get_figure()
yyy
# +
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Self-contained example: annotate each bar with a value taken from a
# different column (total_bill) than the bar height (tip).
df = sns.load_dataset("tips")
groupedvalues=df.groupby('day').sum().reset_index()
# Shade each bar according to the rank of its total_bill
pal = sns.color_palette("Greens_d", len(groupedvalues))
rank = groupedvalues["total_bill"].argsort().argsort()
fig = plt.figure(figsize=(5,5))
g=sns.barplot(x='day',y='tip',data=groupedvalues, palette=np.array(pal[::-1])[rank])
for index, row in groupedvalues.iterrows():
    g.text(row.name,row.tip, round(row.total_bill), color='black', ha="center")
g.get_figure().savefig('correlation_histogram.png')
# -
groupedvalues
| missing_new_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.12 ('vtag')
# language: python
# name: python3
# ---
# %matplotlib inline
# # Essential Python Libraries
# ## NumPy
# Numpy is one of the most used Python libraries in data science. It provides a versatile interface for users to manipulate multi-dimensional arrays, which are commonly used in representing data, particularly for storing images.
# In this chapter we will cover the basic usages of NumPy, including indexing, reshaping, and basic visualization.
# ### Load NumPy
# Practically, we use `np` as an aliase to call the library. But the aliase can be arbitrary.
# load NumPy
import numpy as np
# validate the library version
np.__version__
# ### Basic usages
# #### Create an array
# * With assigned values. Use `[]` to represent a list of numbers
a = np.array([1, 2, 3, 4])
print(a)
# * An empty array containing all zeros
b = np.zeros(4)
print(b)
# * An empty array containing all ones
c = np.ones(4)
print(c)
# #### Create a sequence of numbers
# * By interval: `np.arange(st, ed, interval)`
d = np.arange(1, 10, 2.5)
print(d)
# * By size: `np.linspace(st, ed, num)` (note: linspace, not arange)
e = np.linspace(1, 10, 4)
print(e)
# Display results with `print()` in a reader-friendly way. This is very helpful when one needs to debug code without using any debugger.
print("A list of numbers: ", a)
print("A list of zeros: ", b)
print("A list of ones: ", c)
print("A sequence assigned by intervals: ", d)
print("A sequence assigned by size: ", e)
# ### Multi-dimensional matrix
# NumPy is powerful in manipulating N-dimensional data structure. Here we will introduce several ways to create a two-dimensional matrix.
# Explicitly assign values to create a matrix
A = np.array([[1, 2, 3],
[4, 5, 6]])
print(A)
# All-zero or all-one matrices by passing the shape (rows, columns):
B = np.zeros((2, 3))
C = np.ones((2, 3))
print("B\n", B)
print("C\n", C)
# `reshape()` turns a one-dimensional array into a two-dimensional matrix.
print("'a'\n", a)
a_reshape = a.reshape((2, 2))
print("Reshaped 'a' (2x2)\n", a_reshape)
# Examine the matrix dimensions via the `shape` attribute
a_reshape.shape
# "Flatten" the matrix back into a one-dimensional array
a_reshape.flatten()
# ### Indexing
# Indexing in `Numpy` is a flexible way to subset the matrix by certain conditions (e.g., positions, values). First, we will create a two-dimension matrix. Note we can connect multiple functions in one line using dot (`.`) to enhance the code readability.
# 5x6 matrix of evenly spaced values used in the indexing examples below
data = np.linspace(5, 150, 30).reshape((5, 6))
print("data: ", data.shape)
print(data)
# A `NumPy` 2D-matrix is stored in the order [rows, columns] and uses `0-based` indexing. For example, to select the first (index = 0) column:
data[:, 0]
# Use `:` to return all elements in that dimension -- here, all numbers in the `first` column. We can also query a single element, e.g. the number in the `third` (index = 2) row and the `fourth` (index = 3) column.
data[2, 3]
# Negative indices count from the end; this selects `the second-to-last row`:
data[-2, ]
# Ranges work too. Below we take elements from the `second` to the `fourth` row, and the `third` to the `fifth` column.
data[1:4, 2:5]
# Or extract every second column (i.e., 1st, 3rd, and 5th) from the matrix.
data[:, ::2]
# ### Basic Statistics
# Several basic summary statistics, including `mean` and `standard deviation`, can be derived in a `NumPy` matrix.
# Fixed: this line previously called ``data.sum()`` while the message said
# "mean" -- the section is about mean/std, so compute the mean.
print("The data mean is", data.mean())  # np.mean(data)
print("The data standard deviation is", data.std())  # np.std(data)
print("The data median is", np.median(data))
print("The 25th quantile of the data is", np.quantile(data, 0.25))
# These statistics can also be computed per dimension. Use the `axis` parameter to specify which dimension is summarized -- handy when multi-channel images are handled.
print(data)
print("The mean of axis 0 (the first dimension): ", data.mean(axis=0))
# ## Matplotlib
# `Matplotlib` is a basic visualization library in Python. The functionalities are similar to `Plot()` in R. In this section, we will use a portrait from Dr. <NAME> as an example to practice the usages of `NumPy` and `Matplotlib`.
# ### Import Matplotlib and the example image
# rename pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# open a file stream to load the image file
with cbook.get_sample_data('grace_hopper.jpg') as image_file:
    image = plt.imread(image_file)
# Visualize the image with `plt.imshow()`
plt.imshow(image)
print("The image dimension (h, w, c) =", image.shape)
# ### Matrix manipulation
# An image is just a matrix: down-sample by keeping one pixel of every ten
plt.imshow(image[::10, ::10])
# ### Crop the image by slicing rows and columns
plt.imshow(image[10:400, 120:400])
#
# Split the image into its red, green, and blue channels, shown side by side
fig, axes = plt.subplots(ncols=3, figsize=(10, 5))
axes[0].imshow(image[:, :, 0], cmap="Reds")
axes[1].imshow(image[:, :, 1], cmap="Greens")
axes[2].imshow(image[:, :, 2], cmap="Blues")
# Zoom into a small crop and compare the channels, stacked vertically
fig, axes = plt.subplots(nrows=4, figsize=(10, 5))
axes[0].imshow(image[480:530, 300:450, :])
axes[1].imshow(image[480:530, 300:450, 0], cmap="Reds")
axes[2].imshow(image[480:530, 300:450, 1], cmap="Greens")
axes[3].imshow(image[480:530, 300:450, 2], cmap="Blues")
# Same comparison for a larger crop, laid out in a row
fig, axes = plt.subplots(ncols=4, figsize=(10, 5))
axes[0].imshow(image[:350, :150, :])
axes[1].imshow(image[:350, :150, 0], cmap="Reds")
axes[2].imshow(image[:350, :150, 1], cmap="Greens")
axes[3].imshow(image[:350, :150, 2], cmap="Blues")
| Phenomics/note1_python_libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine learning - Features extraction
#
# Runs binary and multi-class classifiers on a given dataset.
# Dataset are read as Parquet file. The dataset must contain a feature vector named "features" and a classification column.
#
# ## Imports
from mmtfPyspark.ml import SparkMultiClassClassifier, datasetBalancer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, MultilayerPerceptronClassifier, RandomForestClassifier
# ## Configure Spark Session
spark = SparkSession.builder.appName("datasetClassifier").getOrCreate()
# ## Read in data from parquet file
# +
# Input directory of Parquet files produced by the feature-extraction step
parquetFile = './input_features/'
# cache() keeps the dataset in memory across the multiple actions below
data = spark.read.parquet(parquetFile).cache()
print(f"Total number of data: {data.count()}")
data.toPandas().head()
# -
# ## Select only alpha and beta foldType
# +
# Keep only the two classes used for this (binary) classification task
data = data.where((data.foldType == 'alpha') | (data.foldType == 'beta')) #| (data.foldType == 'other'))
print(f"Total number of data: {data.count()}")
data.toPandas().head()
# -
# ## Basic dataset information and setting
# +
# Basic dataset parameters: label column, held-out fraction, RNG seed
label = 'foldType'
testFraction = 0.1
seed = 123
# Inspect the feature-vector length from the first row
vector = data.first()["features"]
featureCount = len(vector)
print(f"Feature count : {featureCount}")
classCount = int(data.select(label).distinct().count())
print(f"Class count : {classCount}")
print(f"Dataset size (unbalanced) : {data.count()}")
data.groupby(label).count().show()
# Downsample so every class is equally represented (ratio 1)
data = datasetBalancer.downsample(data, label, 1)
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show()
# -
# ## Random Forest Classifier
# Train and evaluate a random-forest model on the balanced dataset.
rfc = RandomForestClassifier()
mcc = SparkMultiClassClassifier(rfc, label, testFraction, seed)
metrics = mcc.fit(data)
for metric_name, metric_value in metrics.items():
    print(f"{metric_name}\t{metric_value}")
# ## Logistic Regression Classifier
# Train and evaluate a logistic-regression model on the same data.
lr = LogisticRegression()
mcc = SparkMultiClassClassifier(lr, label, testFraction, seed)
metrics = mcc.fit(data)
for metric_name, metric_value in metrics.items():
    print(f"{metric_name}\t{metric_value}")
# ## Simple Multilayer Perception Classifier
# Multilayer perceptron: input layer sized to the feature vector, two
# hidden layers of 32 units, output layer sized to the number of classes.
layers = [featureCount, 32, 32, classCount]
mpc = (MultilayerPerceptronClassifier()
       .setLayers(layers)
       .setBlockSize(128)
       .setSeed(1234)
       .setMaxIter(100))
mcc = SparkMultiClassClassifier(mpc, label, testFraction, seed)
metrics = mcc.fit(data)
for metric_name, metric_value in metrics.items():
    print(f"{metric_name}\t{metric_value}")
# ## Terminate Spark
spark.stop()
| 6-machine-learning/2-Classification-PySpark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # * 리스트, 튜블, 딕셔너리, 셋 학습
# ### 1. 리스트 클래스
# >list1 = list() # 공백 리스트 생성<br>
# list2 = list("Hello") # 문자 H, e, l, l, o를 요소로 가지는 리스트 생성<br>
# list3 = list(range(0, 5)) # 0, 1, 2, 3, 4를 요소가 가지는 리스트 생성<br>
# list4 = [ ] # 공백 리스트 생성<br>
# list5 = [ "H", "e", "l", "l", "o" ] # 문자 H, e, l, l, o를 요소로 가지는 리스트<br>
# list6 = [ 0, 1, 2, 3, 4 ]# 0, 1, 2, 3, 4를 요소가 가지는 리스트 생성<br>
#
# ### 2. 복잡한 list
# >list1 = [12, "dog", 180.14] # 혼합 자료형 <br>
# list2 = [["Seoul", 10], ["Paris", 12], ["London", 50]] # 내장 리스트<br>
# list3 = ["aaa", ["bbb", ["ccc", ["ddd", "eee", 45]]]] # 내장 리스트 <br>
#
# ### 3. 튜플
# > 변경 불가능한 리스트, 연산 사용 가능<br>
# > tuple1 = ()<br>
# > tuple2 = tuple()<br>
# > t1 = (1, 2, 3, 4, 5)<br>
# > t2 = ("red", "green", "blue")<br>
# > t = t1 + t2 <br>
# > t3 = 'test', 'test1', 'test2' #괄호없이 나영될 객체들도 튜플로 간주
# ### 4. 셋
# > 중복되지 않는 순서 없는 항목들 <br>
# > set1 = {2, 1, 3} <br>
# > set2 = set()
# #### 부분 집합 연산 & 합집합 연산
# > issubset() - 부분 집합 여부 검사<br>
# > issuperset() - 상위집합 여부 검사<br>
#
# > union() - 합집합 <br>
# > intersection() - 교집합, & 연산자 사용 가능<br>
# > difference() - 차집합, - 연산자 사용 가능<br>
# Set equality
# In Python the boolean values are spelled True/False
# (Java and JavaScript use lowercase true/false)
A = {1, 2, 3}
B = {1, 2, 3}
A == B
# Proper subset / superset via comparison operators
A = {1, 2, 3, 4, 5}
B = {1, 2, 3}
B < A # True
B > A # False
# Subset test via methods
A = {1, 2, 3, 4, 5}
B = {1, 2, 3}
B.issubset(A) # True - B is contained in A
A.issubset(B) # False
# +
# Set algebra
A = {1, 2, 3}
B = {3, 4, 5}
A | B # or (union)
# -
A & B # and (intersection)
A - B # difference
# ### Lab1 ?- 파티 동시 참석자 알아내기
# > [문제] <br>
# > 파티에 참석한 사람들의 명단이 세트 A와 B에 각각 저장되어 있다.<br>
# > 2개 파티에 모두 참석한 사람들의 명단을 출력하려면 어떻게 해야 할까? <br>
#
# > [출력 결과] <br>
# > 2개의 파티에 모두 참석한 사람은 다음과 같습니다. <br>
# > {'Park'}
# +
# A = {"김현성","황승호"}
# B = {"김용진","박성민","김현성"}
A = set(["김현성","황승호"])
B = set(["김용진","박성민","김현성"])
print("2개의 파티에 모두 참석한 사람은 다음과 같습니다.")
print(A & B)
# -
# ### 5. 딕셔너리
#
# > 키(key)와 값(value)의 쌍으로 관리되는 객체
# #### 딕셔너리를 활용가능하기 위한 반복문과 API 활용
#
# > items() - 반복문에서 사용 빈도가 높고, key와 value를 반환<br>
# > keys() - key들만 리스트로 반환<br>
# > values() - value값만 리스트로 반환<br>
# +
#사전의 모든 값을 순차적으로 참조하는 방법
data = {"a":1, "b":2, "c":3}
print("방법1")
for key in data:
print(key, data[key])
print("방법2")
for (key,value) in data.items():
print(key, value)
print("---------")
print(list(data.keys()))
print("---------")
print(list(data.values()))
# -
# Phone book: name -> number
contacts = {'Kim':'01012345678', 'Park':'01012345679', 'Lee':'01012345680' }
contacts['Kim']
# get() returns None instead of raising KeyError for a missing name
contacts.get('Kim')
if "Kim" in contacts:
    print("키가 딕셔너리에 있음")
# +
# Iterating over the (key, value) items
scores = { 'Korean': 80, 'Math': 90, 'English': 80}
for item in scores.items():
    print(item)
# -
for (k,v) in scores.items():
    print(k, " ", v)
# ### Lab2 ? - 각각의 단어가 몇 번이나 나오는지를 계산하는 프로그램을 작성하기
#
# > 문제<br>
# > - 사용자가 지정하는 파일을 읽어서 파일에 저장된 각각의 단어가 몇 번이나 나오는지를 계산하는 프로그램을 작성하여 보자.<br>
#
# > 힌트<br>
# - key는 단어로, 나온 횟수는 value로 처리하기
# - 필요한 타입 : 딕셔너리
#
# > 질문<br>
# - 데이터 분해 -> 각각 분해를 할 때 사용하고자 하는 자료 구조 타입?
# - 데이터는 문자열
# - 단어별로 구분해야 하는 상황이라면 단어가 구분 데이터 : key
# +
data = '''내 속엔 내가 너무도 많아 당신의 쉴 곳 없네
내 속엔 헛된바램들로 당신의 편할 곳 없네
내 속엔 내가 어쩔 수 없는 어둠 당신의 쉴 자리를 뺏고
내 속엔 내가 이길 수 없는 슬픔 무성한 가시나무숲같네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네'''
table = dict()
# NOTE: ``data`` is a single string, so ``for line in data`` iterates over
# *characters*, not lines -- this first attempt therefore counts characters.
for line in data:
    #words = line.split(" ")
    for word in line:
        if word in table:
            table[word] += 1
        else:
            table[word] = 1
print(table)
# +
data = '''내 속엔 내가 너무도 많아 당신의 쉴 곳 없네
내 속엔 헛된바램들로 당신의 편할 곳 없네
내 속엔 내가 어쩔 수 없는 어둠 당신의 쉴 자리를 뺏고
내 속엔 내가 이길 수 없는 슬픔 무성한 가시나무숲같네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네'''
table = dict()
# Second attempt: still iterating characters (see the previous cell), but
# skipping empty strings, newlines, and spaces before counting.
for line in data:
    #words = line.split(" ")
    for word in line:
        # drop "", "\n" and " " tokens
        if (word != "" and word !="\n" and word !=" "):
            if word in table:
                table[word] += 1
            else:
                table[word] = 1
# del(table[""])
# del(table["\n"])
print(table)
# +
data = '''내 속엔 내가 너무도 많아 당신의 쉴 곳 없네
내 속엔 헛된바램들로 당신의 편할 곳 없네
내 속엔 내가 어쩔 수 없는 어둠 당신의 쉴 자리를 뺏고
내 속엔 내가 이길 수 없는 슬픔 무성한 가시나무숲같네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네
바람만 불면 그 메마른가지 서로 부대끼며 울어대고
쉴곳을 찾아 지쳐 날아온 어린새들도 가시에 찔려 날아가고
바람만 불면 외롭고 또 괴로워 슬픈 노래를 부르던 날이 많았는데
내 속엔 내가 너무도 많아서 당신의 쉴 곳 없네'''
table = dict()
import re
#words = data.split(" ", "\n") # str.split does not accept multiple separators
words = re.split(' |\n',data) # a regular expression splits on several separators at once
for word in words:
    # drop "", "\n" and " " tokens
    if (word != "" and word !="\n" and word !=" "):
        if word in table:
            table[word] += 1
        else:
            table[word] = 1
# # del(table[""])
# # del(table["\n"])
print(table)
# -
# ### Lab3 ? - 함축된 표현식으로 변경해 보기
# > 1. 소스 분석하기<br>
# > 2. 반복문을 함축 표현으로 변경해 보기
# > 3. 알고리즘 문제로 많이 나오는 로직
# +
# Collect the upper-cased strings longer than two characters -- first with
# an explicit loop, then with the equivalent list comprehension.
v = []
values = ['v1 test', 'v2 test', 'v3 test', 'v4', 'v5']
for x in values:
    if len(x) > 2:
        v.append(x.upper())  # stray trailing semicolon removed
# The same logic expressed as a single list comprehension
v = [x.upper() for x in values if len(x) > 2]
v
# -
| Python/basic/step04Type3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ray Crash Course - Ray Clusters and the Ray CLI
#
# © 2019-2021, Anyscale. All Rights Reserved
#
# 
#
# In the previous lessons, we let `ray.init()` start a mini-cluster on your laptop or connect to the running Ray cluster in the Anyscale hosted platform. This lesson discusses using the Ray CLI command `ray` to create and manage Ray clusters. We won't cover all the subcommands `ray` supports. Try `ray --help` and see the [Ray CLI documentation](https://docs.ray.io/en/latest/package-ref.html#the-ray-command-line-api) for more details.
# > **Tip:** If any of the CLI commands used here print a lot of output, right click on the output and select _Enable Scrolling for Outputs_.
#
# > **Notes:**
# >
# > 1. The Anyscale hosted platform has its own CLI command, `anyscale`, which integrates the `ray` CLI and provides other capabilities for managing and running Ray projects and sessions, including automated cluster integration, synchronization of code to your local development environment, etc. Further information on this service will be available soon. [Contact us](mailto:<EMAIL>) for details.
# > 2. Ray can now be used with [Docker](https://www.docker.com/). You can find the published Docker images [here](https://hub.docker.com/search?q=Rayproject&type=image). For more details, see the documentation [here](https://docs.ray.io/en/latest/installation.html?highlight=docker#launch-ray-in-docker) and [here](https://docs.ray.io/en/latest/cluster/cloud.html?highlight=docker#common-cluster-configurations).
# ## ray --help
#
# The typical `help` information is available with `--help` or with no arguments:
# !ray --help
# Some of these commands are aliases, e.g., `down` and `teardown`, `get-head-ip` and `get_head_ip`, etc. `kill-random-node` looks strange, but it is useful for [Chaos Engineering](https://en.wikipedia.org/wiki/Chaos_engineering) purposes.
#
# For more details on a particular command, use `ray <command> --help`:
# !ray start --help
# ## ray --version
#
# Show the version of Ray you are using.
# ## ray stat
#
# Your first question might be, is Ray already running on this node as part of a cluster? The command `ray stat` can be used to determine this.
# !ray stat
# If Ray is running on this node, the output can be very long. It shows the status of the nodes, running worker processes and various other Python processes being executed, and [Redis](https://redis.io/) processes, which are used as part of the distributed object store for Ray. We discuss these services in greater detail in the [Advance Ray tutorial](../advanced-ray/00-Advanced-Ray-Overview.ipynb).
# If there are multiple Ray instances running on this node, you'll have to specify the correct address. Run `ray stat` to see a list of those addresses, then pick the correct one:
#
# ```shell
# ray stat --address IP:PORT
# ```
# `ray stat` returns the exit code `0` if Ray is running locally or a nonzero value if it isn't. The following command exploits this feature and starts a _head_ node for Ray:
#
# ```shell
# ray stat > /dev/null 2>&1 || ray start --head
# ```
#
# All output of `ray stat` is sent to `/dev/null` (which throws it away) and if the status code is nonzero, then the command after the `||` is executed, `ray start --head`.
# You can also get cluster information inside your application using API methods.
#
# See [Inspect the Cluster State](
# https://docs.ray.io/en/latest/package-ref.html#inspect-the-cluster-state) for details.
# ## ray start and ray stop
#
# As shown in the previous cell, `ray start` is used to start the Ray processes on a node. When the `--head` flag is used, it means this is the master node that will be used to bootstrap the cluster.
#
# When you want to stop Ray running on a particular node, use `ray stop`.
#
# > **WARNING:** Running `ray stop` will impact any Ray applications currently running on this node, including all other lesson notebooks currently running Ray, so if you intend to stop Ray, first save your work, close those notebooks, and stop their processes using the _Running_ tab on the left of the Jupyter Lab UI. The tab might be labelled with a white square surrounded by a dark circle instead of _Running_.
#
# We won't actually run `ray start` or `ray stop` in what follows, to avoid causing problems for other lessons. We'll just describe what they do and the output they print.
# When you run `ray start --head` you see output like the following (unless an error occurs):
#
# ```shell
# $ ray start --head
# 2020-05-23 07:47:47,469 INFO scripts.py:357 -- Using IP address 192.168.1.149 for this node.
# 2020-05-23 07:47:47,489 INFO resource_spec.py:212 -- Starting Ray with 4.3 GiB memory available for workers and up to 2.17 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
# 2020-05-23 07:47:47,865 INFO services.py:1170 -- View the Ray dashboard at localhost:8265
# 2020-05-23 07:47:47,912 INFO scripts.py:387 --
# Started Ray on this node. You can add additional nodes to the cluster by calling
#
# ray start --address='192.168.1.149:10552' --redis-password='<PASSWORD>'
#
# from the node you wish to add. You can connect a driver to the cluster from Python by running
#
# import ray
# ray.init(address='auto', redis_password='<PASSWORD>')
#
# If you have trouble connecting from a different machine, check that your firewall is configured properly. If you wish to terminate the processes that have been started, run
#
# ray stop
# ```
#
# (You'll see a different IP address.)
# The output includes a line like this:
#
# ```shell
# ray start --address='192.168.1.149:10552' --redis-password='<PASSWORD>'
# ```
#
# This is the `ray start` command you would use on the other machines where you want to start Ray and have them join the same cluster.
#
# Note also the instructions for code to add to your application.
#
# ```python
# import ray
# ray.init(address='auto', ignore_reinit_errors=True, redis_password='<PASSWORD>')
# ```
#
# The `redis_password` shown is the default value. We didn't specify this argument when we called `ray.init()` in other notebooks.
# You can actually call `ray start --head` multiple times on the same node to create separate clusters. They may appear at first to be a bug, but it is actually useful for testing purposes.
# The `ray stop` command usually prints no output. Add the `--verbose` flag for details.
#
# > **Warning:** `ray stop` stops all running Ray processes on this node. There is no command line option to specify which one to stop.
# ## ray memory
#
# A new feature of the Ray CLI is the `memory` command which prints a snapshot of the current state of actors and tasks in memory in the cluster. It is useful debugging issues and understanding how Ray has distributed work around your cluster.
#
# Here is an example captured on a laptop while the first two lessons in this tutorial were evaluating their cells:
#
# ```
# $ ray memory
# 2020-06-26 06:08:55,158 INFO scripts.py:1042 -- Connecting to Ray instance at 192.168.1.149:6379.
# WARNING: Logging before InitGoogleLogging() is written to STDERR
# I0626 06:08:55.163417 90759 489258432 global_state_accessor.cc:25] Redis server address = 192.168.1.149:6379, is test flag = 0
# I0626 06:08:55.164857 90759 489258432 redis_client.cc:141] RedisClient connected.
# I0626 06:08:55.167277 90759 489258432 redis_gcs_client.cc:88] RedisGcsClient Connected.
# I0626 06:08:55.168231 90759 489258432 service_based_gcs_client.cc:75] ServiceBasedGcsClient Connected.
# -----------------------------------------------------------------------------------------------------
# Object ID Reference Type Object Size Reference Creation Site
# =====================================================================================================
# ; driver pid=89861
# ffffffffffffffff6ec7e2960c0000c001000000 LOCAL_REFERENCE ? (actor call) <ipython-input-7-a62036e0309c>:<module>:7
# 55be66b7df500ad56ec7e2960c0000c003000000 LOCAL_REFERENCE 23 (actor call) <ipython-input-7-a62036e0309c>:<module>:8
# 55be66b7df500ad56ec7e2960c0000c002000000 LOCAL_REFERENCE 15 (actor call) <ipython-input-7-a62036e0309c>:<module>:8
# ffffffffffffffffffffffff0c00008001000000 LOCAL_REFERENCE 27 (put object) <ipython-input-9-57253d54e26a>:<module>:1
# 0f8aa561996c6719ffffffff0c0000c001000000 LOCAL_REFERENCE 88 (task call) <ipython-input-6-9667649da5b7>:<module>:13
# 55be66b7df500ad56ec7e2960c0000c001000000 LOCAL_REFERENCE 16 (actor call) <ipython-input-7-a62036e0309c>:<module>:8
# ; driver pid=90154
# aa0e49cf6481351dffffffff100000c001000000 LOCAL_REFERENCE 23 (task call) <ipython-input-17-f5cad4404199>:<module>:1
# 082755fdfe469abcffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# 57c6dbda70012254ffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# fab196f393a5de36ffffffff100000c001000000 LOCAL_REFERENCE 88 (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# 10473efa8f620095ffffffff100000c001000000 LOCAL_REFERENCE 88 (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# dc7dc79e27e8e5b7ffffffff100000c001000000 LOCAL_REFERENCE 23 (task call) <ipython-input-19-e197d2c09385>:<listcomp>:1
# 16053fa58b987ab5ffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# 852d61559823797effffffff100000c001000000 LOCAL_REFERENCE 23 (task call) <ipython-input-19-e197d2c09385>:<listcomp>:1
# 2e1f2a844f6b2fd4ffffffff100000c001000000 LOCAL_REFERENCE 23 (task call) <ipython-input-19-e197d2c09385>:<listcomp>:1
# a52080f6c7937c01ffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# a1e6529f26e2773cffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# 9991ac8b6172b3f2ffffffff100000c001000000 LOCAL_REFERENCE 23 (task call) <ipython-input-18-a0b7fb747444>:<module>:1
# 3cdffb6f345ef8f3ffffffff100000c001000000 LOCAL_REFERENCE 88 (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# 0a51ce9438517c13ffffffff100000c001000000 LOCAL_REFERENCE ? (task call) <ipython-input-31-dd50cc550d0b>:<listcomp>:3
# -----------------------------------------------------------------------------------------------------
# ```
#
# All references are local because this is the output for a single machine. There are tasks and actors running in the workers, all of which are associated with _driver_ processes that originate with `ipython` processes used by the notebooks.
# ## ray status
#
# A new feature of the Ray CLI is the `status` command for printing various status information about the cluster.
#
| ray-crash-course/07-Running-Ray-Clusters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import transformers
# GPT-2 byte-pair tokenizer for the "distilgpt2" checkpoint (fetched from
# the Hugging Face hub on first use).
tokenizer = transformers.GPT2Tokenizer.from_pretrained("distilgpt2")
# +
# Sample text to tokenize; includes an emoji to see how it is encoded.
txt = '''Vivek is 😅'''
# -
# Token ids produced for the sample text.
a = tokenizer(txt)['input_ids']
a
# Decode each token id on its own line to inspect the segmentation.
for t in a:
    print(tokenizer.decode(t))
from transformers import pipeline
# Text-generation pipeline backed by a fine-tuned model saved in ../output.
ft_generator = pipeline('text-generation', model='../output')
# Generate 3 continuations (up to 50 tokens) of the prompt.
ft_generator("Vivek: Yo ", max_length=50, num_return_sequences=3)
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Spyder)
# language: python3
# name: python3
# ---
# # Module 3.2 Boolean Operators, Summarized
#
# Created By: <NAME> from http://learningdata.io
#
# Each code block is designed to be an independent program for ease of use!
#
# ---
#
# ***Disclaimer***
#
# > Copyright (c) 2020 <NAME>
#
# > Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# > The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ## Part 1: Boolean Operators Overview
#
# In Python, boolean operations either evaluate to `True` or `False`.
#
# ### Standard Boolean Operators
#
# We have the standard ones that you'd expect from math:
#
# | Operator | Description |
# | --- | ------ |
# | `==` | Equal to |
# | `!=` | Not equal to |
# | `>` | Greater than |
# | `>=` | Greater than or equal to |
# | `<` | Less than |
# | `<=` | Less than or equal to |
#
# ### Complex Boolean Operations
#
# And there's operators for stringing multiple conditional statements together:
#
# | Operator | Description |
# | --- | --- |
# | `and` | Evaluates true if and only if both statements evaluate true |
# | `or` | Evaluates true if at least one statement evaluates true |
#
# Conditional table for `and` / `or`. `0` is `False` and `1` is `True`
#
# | a | b | `and` | `or` |
# | --- | --- | --- | --- |
# | 0 | 0 | 0 | 0 |
# | 1 | 0 | 0 | 1 |
# | 0 | 1 | 0 | 1 |
# | 1 | 1 | 1 | 1 |
#
# ### Belonging Operators
#
# In Python, we can easily check if an item belongs in a list or another structure with the `in` operator:
#
# | Operator | Description |
# | --- | --- |
# | `in` | Evaluates true if the item is in the data structure |
# | `not in` | Reverse of the above |
# ## Part 2: Boolean Operator Examples
#
# ### Standard Operators
# +
x = 5
y = 7
print(x > y)   # False: 5 is not greater than 7
print(x < y)   # True
print(x == y)  # False
print(x != y)  # True
# -
# ### Complex Operations
# +
x = 4
y = 9
z = 7
eval_1 = (x > y) and (z == x)      # False and False -> False
eval_2 = (x == z) or (y > z)       # False or True  -> True
eval_3 = (x*3 > y-3) and (x == 4)  # (12 > 6) and True -> True
print(eval_1, eval_2, eval_3)
# -
# ### Belonging Operators
# +
a = 3
my_list = [1,2,3,4,5]
print(a in my_list)      # True: 3 is an element of the list
print(a not in my_list)  # False: the reverse of the above
# -
# ### Reversal Operator `not`
#
# We can also use the keyword `not` in front of a boolean expression to reverse the result easily.
#
# Try to not get confused!
# +
d = 5
e = 8
print(not d == e)  # d == e is False, so `not` flips it to True
# -
# ## There is a lot more to boolean operators!
#
# We can combine these with functions in the next module, which starts control flow!
| modules/module-03/module3-boolean-operators.ipynb |
"""
4. How to combine many series to form a dataframe?
"""
"""
Difficulty Level: L1
"""
"""
Combine ser1 and ser2 to form a dataframe.
"""
"""
Input
"""
"""
import numpy as np
ser1 = pd.Series(list('abcedfghijklmnopqrstuvwxyz'))
ser2 = pd.Series(np.arange(26))
"""
| pset_pandas_ext/101problems/nb/p4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (gpu2)
# language: python
# name: myenv
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Scraping IMDb
#
# The quotes website was very scraping friendly
#
# - Very structured
# - Not much "noise" to mentally sort through
# - Consistent in css classes
#
# Let's do something a bit more ambitious...
# + [markdown] slideshow={"slide_type": "subslide"}
# In an upcoming seminar we will learn how to use NLP to predict a user's overall movie rating based on the text in their review of the movie
#
# Today, we will start building that dataset by scraping movie and reviews from [IMDb.com](IMDb.com)
# -
# ## Getting started
#
# Let's start by scraping user reviews for the top rated movies
#
# First let's go here: [https://www.imdb.com/chart/top/?ref_=nv_mv_250](https://www.imdb.com/chart/top/?ref_=nv_mv_250)
#
# **Quiz**:
#
# - How do we view source of website?
# - What structure is there for the list of movies?
# - What tags do they have in common?
# - What about classes?
# ### Get list of movies
#
# **WANT**: For each of the top 250 movies collect
#
# - rank
# - name
# - year
# - IMDb rating
# - a link to the movie's detail page
#
# I've written the structure of our spider, but we'll fill it in together based on what we learned by looking at the source...
#
#
# Let's open up [03_top_movies.py](03_top_movies.py) and get to work
# ### Reviews
#
# Let's take a look at a page of reviews and see what we are up against
#
# Here [https://www.imdb.com/title/tt0111161/reviews](https://www.imdb.com/title/tt0111161/reviews?ref_=tt_ql_3)
#
# Quiz:
#
# - What structure is there about each review?
# - What classes are applied to each review?
# - How are reviews laid out relative to each other and other content?
# ### Scraping reviews
#
# **WANT**: For the first page of reviews for each of the top 250 movies, obtain the following:
#
# - Rating
# - Title of review
# - User
# - Date
# - Review text
#
#
# Again, I've written some boilerplate to get us started, let's work together to fill it in
#
# Go to [04_review_page.py](04_review_page.py)
| Year19-20/2020-02-07_web_scraping/04_imdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CH.11_Time_Series
# ## 0. Import Packages
# +
from IPython.core.display import display, HTML
display(HTML("<style> .container{width:100% !important;}</style>"))
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
from pandas.tseries.offsets import Day, MonthEnd, Hour, Minute
import pytz
# -
# ## 1. Date and Time Data Types and Tools
# Current local date and time as a datetime object.
now = datetime.now()
now
now.year, now.month, now.day
# Subtracting two datetimes yields a timedelta (here: 2 days and 1 hour).
delta = datetime(2019, 2, 12, 12) - datetime(2019, 2, 10, 11)
print(delta)
print(delta.days)     # whole days: 2
print(delta.seconds)  # remaining seconds within the last day: 3600
start = datetime(2019,2,12)
print(start + timedelta(2))
print(start - 2*timedelta(2))
stamp = datetime(2019,2,12)
print(stamp)
print(str(stamp))
print(stamp.strftime('%Y-%m-%d'))
value = '2019-02-12'
datetime.strptime(value, '%Y-%m-%d')
print(parse('2019-02-12'))
print(parse('Jan 31, 1997, 10:45 PM'))
print(parse('12/2/2019', dayfirst = True))
idx = pd.to_datetime(['2/12/2019', '1/12/2019', '', None])
print(idx)
print(idx[2])
# ## 2. Time Series Basics
# +
dates = [datetime(2019,2,12), datetime(2019,2,15), datetime(2019,2,1),
datetime(2019,2,2), datetime(2019,2,19), datetime(2019,2,20)]
ts = Series(np.random.permutation(6), index = dates)
print(ts)
print(type(ts))
print(ts.index)
# -
ts+ts
stamp = ts.index[2]
print(stamp)
print(ts[stamp])
print(ts['2/01/2019'])
print(ts['20190201'])
# +
longer_ts = Series(np.random.permutation(1000),
index = pd.date_range('2/1/2019', periods = 1000))
longer_ts.head()
# -
longer_ts['2020']
ts[datetime(2019,2,1):]
# ## 3. Date Ranges, Frequencies, and Shifting
dates = [datetime(2019,2,12), datetime(2019,2,15), datetime(2019,2,1),
datetime(2019,2,2), datetime(2019,2,19), datetime(2019,2,20)]
ts = Series(np.random.permutation(6), index = dates)
ts.resample('D').mean()
# date range
index = pd.date_range('2/1/2019', '2/28/2019')
index
pd.date_range(start = '2/1/2019', periods = 20)
pd.date_range(end = '2/20/2012', periods = 20)
pd.date_range('1/1/2019', '3/31/2019', freq = 'BM')
pd.date_range('2/1/2019 12:59:59', periods = 5, normalize = True)
# frequency
hour = Hour()
hour
four_hours = Hour(4)
four_hours
pd.date_range('2/1/2019', '2/3/2019 23:59', freq = '4h')
pd.date_range('2/1/2019', '2/3/2019 23:59', freq = four_hours)
rng = pd.date_range('1/1/2019', '12/31/2019', freq = 'WOM-3FRI')
list(rng)
# +
# shifting
ts = Series(np.random.permutation(4),
index = pd.date_range('2/1/2019', periods = 4, freq = 'D'))
ts
# -
print("[1]", ts.shift(1))
print("[2]", ts.shift(-2))
print("[3]", ts.shift(2, freq = 'D'))
now = datetime(2019,2,12)
print(now + 3 * Day())
print(now + MonthEnd())
# +
offset = MonthEnd()
print(offset)
print(offset.rollforward(now))
print(offset.rollback(now))
# +
ts = Series(np.random.permutation(20),
index = pd.date_range('2/12/2019', periods = 20, freq = '4d'))
ts
# -
# ## 4. Time Zone Handling
pytz.common_timezones
# +
rng = pd.date_range('2/13/2019 10:30', periods = 6, freq = 'D')
ts = Series(np.random.permutation(len(rng)), index = rng)
ts
# -
pd.date_range('2/13/2019 10:30', periods = 6, freq = 'D', tz = 'UTC')
ts_utc = ts.tz_localize('UTC') # Localize
ts_utc
ts_utc.tz_convert('US/Eastern')
stamp = pd.Timestamp('2019-02-15 11:12')
stamp_utc = stamp.tz_localize('utc')
stamp_utc.tz_convert('US/Eastern')
stamp_moscow = pd.Timestamp('2019-02-13', tz = 'Europe/Moscow')
stamp_moscow
stamp = pd.Timestamp('2018-03-11 01:30', tz = 'US/Eastern')
stamp + Hour()
stamp = pd.Timestamp('2018-11-04 00:30', tz = 'US/Eastern')
print(stamp + Hour())
print(stamp + 2*Hour())
# +
rng = pd.date_range('2/13/2019 9:30', periods = 10, freq = 'B')
ts = Series(np.random.randn(len(rng)), index = rng)
ts
# +
ts1 = ts[:7].tz_localize('Europe/London')
ts2 = ts1[2:].tz_convert('Europe/Moscow')
print(ts1)
print(ts2)
# -
# ## 5. Periods and period arithmetic
p = pd.Period(2007, freq = 'A-DEC')
print(p)
print(p+5)
print(p-2)
rng = pd.period_range('2001-01-01', '2000-06-30', freq = 'M')
rng
print(p.asfreq('M', how = 'start'))
print(p.asfreq('M', how = 'end'))
# +
rng = pd.period_range('2006', '2009', freq = 'A-DEC')
ts = pd.Series(np.random.randn(len(rng)), index = rng)
print(ts)
print(ts.asfreq('M', how = 'start'))
print(ts.asfreq('B', how = 'end'))
# -
rng = pd.date_range('2000-01-01', periods = 100, freq = 'D')
ts = pd.Series(np.random.randn(len(rng)), index = rng)
ts
pts = ts.to_period()
pts
# ## 6. Resampling and frequency conversion
ts.resample('M').mean()
ts.resample('M', kind = 'period').mean()
ts.resample('5min', closed = 'right').sum()
ts.resample('5min').sum()
ts.resample('5min', closed = 'right', label = 'right').sum()
ts.resample('5min', closed = 'right', label = 'right', loffset = '-1s').sum()
ts.resample('5min').ohlc()
| 01. Python_Tutorials/9_Time_Series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.graphics.tsaplots as sgt
import statsmodels.tsa.stattools as sts
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.seasonal import seasonal_decompose
plt.rcParams["figure.figsize"] = 10, 5
# +
def to_float(v):
    """Coerce *v* to ``float``, returning ``None`` when conversion fails.

    Used below to clean the ``Adj Close`` column: non-numeric entries
    become ``None`` and are dropped by the subsequent ``dropna()``.
    """
    try:
        return float(v)
    except (TypeError, ValueError):
        # Only catch conversion failures; the old bare `except` also
        # swallowed KeyboardInterrupt/SystemExit.
        return None
ftse = pd.read_csv("/data/FTSE.csv")
ftse.index = pd.to_datetime(ftse.Date)
ftse = ftse[["Adj Close"]]
ftse.columns = ["Close"]
ftse.Close = ftse.Close.apply(to_float)
ftse = ftse.dropna()
ftse = ftse.sort_index().asfreq(freq='B', method = "ffill")
ftse.head()
# -
| demos/Time series exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from scipy.optimize import linprog

# Minimize -2*x0 - x1 + x2 subject to
#   equality:      x1 + 2*x2 == 3
#   inequalities:  x0 + 2*x1 +   x2 <= 8
#                 -x0 +   x1 - 2*x2 <= 4
# with x0 free and x1, x2 >= 0.
c = [-2, -1, 1]
A_eq = [[0, 1, 2]]
b_eq = [3]
A_ub = [[1, 2, 1], [-1, 1, -2]]
b_ub = [8, 4]
bounds = [(None, None), (0, None), (0, None)]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
              bounds=bounds, options={"disp": True})
# -
res.x  # optimal solution vector (displayed when run as a notebook cell)
from scipy.optimize import linprog

# Minimize -2*x0 - x1 + x2 subject to
#    x0 + 2*x1 +   x2 <= 16
#   -x0 +   x1 - 2*x2 <=  4,  all variables non-negative.
c = [-2, -1, 1]
A = [[1, 2, 1], [-1, 1, -2]]
b = [16, 4]
nonneg = (0, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(nonneg, nonneg, nonneg),
              options={"disp": True})
res.x  # optimal solution vector (displayed when run as a notebook cell)
import numpy as np
from numpy.linalg import inv
Binv = [[1/3, 0, -2/3],[0, 1, 1], [1/3, 0, 1/3]]
Binvb = [[1/3], [6], [13/3]]
BinvA = [[1, -1/3,0, 1/3,0, -2/3],[0,2, 0,0, 1,1], [0,2/3, 1,1/3,0, 1/3]]
A = np.matmul(inv(Binv), BinvA)
A = [[1,1,2,1,0,0,], [1,1,-1,0,1,0], [-1,1,1,0,0,1]]
B = [[2,0,0],[-1,1,0],[1,0,1]]
CBT = [-4,0,0]
np.matmul(CBT, np.matmul(inv(B), A))
A = [[1,1,2,1,0,0,], [1,1,-1,0,1,0], [-1,1,1,0,0,1]]
Binv = [[1/3, 0, -2/3],[0, 1, 1], [1/3, 0, 1/3]]
Binvb = [[1/3], [6], [13/3]]
b = np.matmul(inv(Binv), Binvb)
CBT = [1,0,-4]
Cbar = [1,1,-4,0,0,0]-np.matmul([1,0,-4], np.matmul(Binv, A))
b
[1,1,-4,0,0,0]-np.matmul([1,0,-4], BinvA)
import numpy as np
from numpy.linalg import inv
Binv = [[1/4, 0],[-1/6, 1/3]]
BinvA = [[0, 1/4, 1, 1/4, 0],[1,-1/2,0,-1/6, 1/3]]
Binvb = [5/2, 2]
A = np.matmul(inv(Binv), BinvA)
b= np.matmul(inv(Binv), Binvb)
np.matmul(Binv, [-1,0,0])
b
import numpy as np
from numpy.linalg import inv
Binv = [[1/2, 1/5, -1],[-1, 0, 1/2], [5, -3/10, 2]]
Binvb = [3,1,7]
BinvA = [[1,0,0,-1,0,1/2,1/5,-1],[0,1,0,2,1,-1,0,1/2], [0,0,1,-1,-2,5,-3/10,2]]
np.matmul([2,1/10,2], inv(Binv))
b = np.matmul(inv(Binv), Binvb)
b
c_1 = np.matmul([2,1/10, 2],inv(Binv))
c_2 = [-2,0] + np.matmul(c_1, [[-1,0],[2,1],[-1,-2]])
c = np.concatenate((c_1,c_2))
c
A.shape
# +
from scipy.optimize import linprog
A = A[:,0:5]
b = np.matmul(inv(Binv), Binvb)
c = -c
x0_bounds = (0, None)
x1_bounds = (0, None)
x2_bounds = (0, None)
x3_bounds = (0, None)
x4_bounds = (0, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds,x2_bounds, x3_bounds, x4_bounds), options={"disp": True})
# -
import numpy as np
from numpy.linalg import inv
Binv = [[5/2,0,1], [-10,1,-10],[-1,0,1/2]]
b = [14/9, 110/3, 46/9]
np.matmul(Binv, [-1,0,0])
np.matmul(Binv, b)
from scipy.optimize import linprog
# Minimize -2*x0 + x1 - x2 subject to
#         x1 +   x2 <= 6
#  -x0 + 2*x1       <= 4,  all variables >= 0.
c = [-2,1,-1]
A = [[0, 1,1], [-1,2,0]]
b = [6,4]
x0_bounds = (0, None)
x1_bounds = (0, None)
x2_bounds = (0, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds,x2_bounds), options={"disp": True})
# NOTE: x0 has no upper bound and a negative cost coefficient, so this LP is
# unbounded; `res.status` reports that instead of an optimum.
# BUG FIX: removed the stray trailing `n` expression that followed this cell —
# `n` is never defined in this notebook and raised NameError at runtime.
| Python_IE411/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib as mpl
from pandas import *
import csv
import warnings as warnings
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tools.sm_exceptions import ConvergenceWarning # to ignore ConvergenceWarning
from tabulate import tabulate
# +
#READING CSV FILES AND CREATING TRAINING/TEST DATA SETS
def num_rows_and_cols(filename):
    """Return ``(row_count, column_count)`` of a CSV file.

    The column count is taken from the first row (subsequent rows are
    assumed to have the same width).  An empty file yields ``(0, 0)``.
    """
    num_rows = 0
    num_cols = 0
    # Context manager guarantees the handle is closed even if parsing
    # raises (the old open/close pair leaked on error).
    with open(filename, newline='') as cur_file:
        for row in csv.reader(cur_file):
            if num_rows == 0:
                num_cols = len(row)  # width of the first row
            num_rows += 1
    return num_rows, num_cols
def read_file(filename):
    """Parse a CSV whose rows are ``[year, city1, city2, ...]``.

    Returns ``(X, Y)`` where ``Y`` holds column 0 of every row and
    ``X[i]`` holds column ``i + 1``, all converted to float.

    Improvements over the original: the file is read once instead of a
    separate dimension-counting pass, and four unused local variables
    (Y_training/Y_test/X_training/X_test) were removed.
    """
    Y = []
    X = []  # one list per city column; sized from the first row
    with open(filename, newline='') as cur_file:
        for row in csv.reader(cur_file):
            if not X:
                # The first row fixes the number of city columns.
                X = [[] for _ in range(len(row) - 1)]
            Y.append(float(row[0]))
            for i, value in enumerate(row[1:]):
                X[i].append(float(value))
    return X, Y
X,Y = read_file("data1.csv")
x1 = X
X,Y = read_file("data2.csv")
x2 = X
X,Y = read_file("data3.csv")
x3 = X
x_combined = np.vstack((x1,x2,x3))
year = Y
Y_training = []
Y_test = []
for i in range(0, 10):
Y_test.append(float(Y[i]))
for i in range(10, 30):
Y_training.append(float(Y[i]))
def x_sets(z, data=None, n_test=10, n_total=30):
    """Split row *z* of a 2-D dataset into (training, test) float lists.

    The first *n_test* samples form the test set and samples
    ``[n_test, n_total)`` form the training set, mirroring the Y_test /
    Y_training split built above.

    Args:
        z: row index into *data*.
        data: 2-D sequence of rows; defaults to the module-level
            ``x_combined`` stack built above (backward compatible).
        n_test: number of leading samples reserved for testing.
        n_total: total number of samples used; training ends here.

    Returns:
        ``(X_training, X_test)`` lists of floats.
    """
    if data is None:
        data = x_combined
    row = data[z]
    X_test = [float(v) for v in row[:n_test]]
    X_training = [float(v) for v in row[n_test:n_total]]
    return X_training, X_test
X_training, X_test = x_sets(0)
alameda_training = X_training
alameda_test = X_test
X_training, X_test = x_sets(1)
sanmateo_training = X_training
sanmateo_test = X_test
X_training, X_test = x_sets(2)
losangeles_training = X_training
losangeles_test = X_test
X_training, X_test = x_sets(3)
merced_training = X_training
merced_test = X_test
X_training, X_test = x_sets(4)
santaclara_training = X_training
santaclara_test = X_test
X_training, X_test = x_sets(5)
riverside_training = X_training
riverside_test = X_test
X_training, X_test = x_sets(6)
sandiego_training = X_training
sandiego_test = X_test
X_training, X_test = x_sets(7)
santabarbara_training = X_training
santabarbara_test = X_test
X_training, X_test = x_sets(8)
santacruz_training = X_training
santacruz_test = X_test
x_training = np.vstack((alameda_training,sanmateo_training,losangeles_training,merced_training,santaclara_training,riverside_training,sandiego_training,santabarbara_training,santacruz_training))
# generate test stacked vectors
x_test = np.vstack((alameda_test,sanmateo_test,losangeles_test,merced_test,santaclara_test,riverside_test,sandiego_test,santabarbara_test,santacruz_test))
# +
def ARIMA_val_loss(ytrain, ytest, p, d, q):
    """Fit ARIMA(p, d, q) on *ytrain* and return the squared-error loss
    of its forecasts over the *ytest* horizon.

    Returns ``float('inf')`` when the model fails to fit, so callers can
    safely take the minimum over candidate (p, d, q) orders.
    """
    model = ARIMA(ytrain, order=(p, d, q))
    try:
        model_fit = model.fit()
        y_pred = model_fit.predict(len(ytrain), len(ytrain) + len(ytest) - 1)
        return np.sum((y_pred - ytest) ** 2)
    except Exception:
        # Some orders cannot be fit; an infinite loss keeps the caller's
        # `loss < best` comparison well-typed.  (The old `return []` made
        # that comparison raise TypeError in Python 3.)
        return float('inf')
def polynomial_regression(xtrain, ytrain, degree):
    """Least-squares fit of a degree-*degree* polynomial to the training
    pair, returning the coefficient vector (highest power first, as
    produced by ``np.polyfit``)."""
    return np.polyfit(np.asarray(xtrain), np.asarray(ytrain), degree)
# this function takes training and test data, and returns optimal loss alongside coefficients of best-fit polynomial
def validation_loss(xtrain,ytrain,xtest,ytest):
max_degree = len(xtrain)-1
for deg in range(max_degree+1):
cur_coeff = polynomial_regression(xtrain,ytrain,deg)
y_pred = np.polyval(cur_coeff,xtest)
cur_val_loss = 0
for j in range(len(xtest)):
cur_val_loss += (y_pred[j]-ytest[j])**2
if deg == 0:
best_val_loss = cur_val_loss
best_deg = deg
best_coeff = cur_coeff
else:
if cur_val_loss < best_val_loss:
best_val_loss = cur_val_loss
best_deg = deg
best_coeff = cur_coeff
return best_deg,best_val_loss,best_coeff
# disable RankWarning from np.polyfit using the following line of code
warnings.simplefilter('ignore', np.RankWarning)
# disable ConvergenceWarning from ARIMA using the following line of code
warnings.simplefilter('ignore', ConvergenceWarning)
# disable UserWarning from ARIMA using the following line of code
warnings.simplefilter('ignore', category=UserWarning)
header = ["County number", "Lowest Polynomial Validation Loss", "Lowest ARIMA Validation Loss", "Most Effective Method"]
table_info = []
for city_index in range(9): # for each of the 9 cities
    xtrain = Y_training
    xtest = Y_test
    ytrain = x_training[city_index]
    ytest = x_test[city_index]
    # Best polynomial degree (and its loss) via validation on the test split.
    poly_best_deg,poly_lowest_val_loss,poly_coeff = validation_loss(xtrain,ytrain,xtest,ytest)
    # Grid-search ARIMA orders with p, d, q each in {0, 1, 2}.
    arima_lowest_val_loss = np.inf # initially set lowest validation loss to +infinity
    arima_best_params = []
    for p in range(3):
        for d in range(3):
            for q in range(3):
                cur_arima_val_loss = ARIMA_val_loss(ytrain,ytest,p,d,q)
                # BUG FIX: ARIMA_val_loss returns [] when the fit fails,
                # and `[] < float` raises TypeError in Python 3 — skip
                # failed fits explicitly.
                if isinstance(cur_arima_val_loss, list):
                    continue
                if cur_arima_val_loss < arima_lowest_val_loss:
                    arima_lowest_val_loss = cur_arima_val_loss
                    arima_best_params = (p,d,q)
    if arima_lowest_val_loss < poly_lowest_val_loss:
        lowest_val_loss = 'ARIMA'
    else:
        lowest_val_loss = 'Polynomial Regression'
    table_row = ([city_index+1, poly_lowest_val_loss, arima_lowest_val_loss, lowest_val_loss])
    table_info.append(table_row)
    print('City number '+str(city_index+1))
    print('Best polynomial degree:')
    print(poly_best_deg)
    print('Best polynomial validation loss:')
    print(poly_lowest_val_loss)
    print('Best ARIMA parameters:')
    print(arima_best_params)
    print('Best ARIMA validation loss:')
    print(arima_lowest_val_loss)
print(tabulate(table_info, headers=header, tablefmt='fancy_grid'))
# -
| modules/module4/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use the structure and attribute to generate an embedding
#
# This notebook takes as inputs the adjacency lists generated from the OpenBEL graph and label maps, from the Open Targets database. It calculates and compares the performance of the classifications after learning the embeddings using GAT2Vec with different hyperparameters.
#
# ## Imports and definitions
# +
import warnings
from GAT2VEC.evaluation.classification import Classification
from GAT2VEC.gat2vec import Gat2Vec
from guiltytargets.constants import gat2vec_config
import pandas as pd
from ppi_network_annotation.parsers import parse_gene_list
from guilty_phewas import utils
dir_ = "/home/mau/Uni/Thesis/gat2vecdemo/GAT2VEC/data/M10"
dir_ = "/home/mau/Uni/Thesis/tau"
dir_ = "C:/Users/Mauricio/Thesis/bel_data/alzh2" # windows
dir_ = "C:/Users/Mauricio/Thesis/bel_data/alzh" # windows
# Ignore warnings
warnings.simplefilter('ignore')
# -
# ## Optimizing the hyperparameters
#
# ### The default parameters used in GuiltyTargets
print("Default parameters:")
print(f'num_walks {gat2vec_config.num_walks}\n'
f'walk_length {gat2vec_config.walk_length}\n'
f'dimension {gat2vec_config.dimension}\n'
f'window_size {gat2vec_config.window_size}\n')
# ### Training using the default parameters
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
model = g2v.train_gat2vec(
gat2vec_config.num_walks,
gat2vec_config.walk_length,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
# -
print(f"#### Initial values ####")
print(auc_df)
# ### Optimizing Number of Walks
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
optmization = pd.DataFrame()
num_walks_list = [2, 5, gat2vec_config.num_walks, 20, 30, 40]
for nw in num_walks_list:
print(f'# Training num walks {nw}')
model = g2v.train_gat2vec(
nw,
gat2vec_config.walk_length,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
optmization.loc[:, f'num_walks {nw}'] = auc_df['auc']
# -
optmization.boxplot(figsize=[15, 8]);
# ### Optimizing Walk Length
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
num_walks = 30
walk_length_list = [4, 20, 40, gat2vec_config.walk_length, 120, 160]
optmization = pd.DataFrame()
for wl in walk_length_list:
print(f'# Training walk length {wl}')
model = g2v.train_gat2vec(
num_walks,
wl,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
optmization.loc[:, f'walk_length {wl}'] = auc_df['auc']
# -
# using nw = 30
optmization.boxplot(figsize=[15, 8]);
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
num_walks = 2
# walk_length_list = [4, 20, 40, gat2vec_config.walk_length, 120, 160]
optmization2 = pd.DataFrame()
for wl in walk_length_list:
print(f'# Training walk length {wl}')
model = g2v.train_gat2vec(
num_walks,
wl,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
optmization.loc[:, f'nw 2 wl {wl}'] = auc_df['auc']
optmization2.loc[:, f'walk_length {wl}'] = auc_df['auc']
# -
optmization2.boxplot(figsize=[15, 8]);
# comparing nw = 2 and nw = 30
optmization.boxplot(figsize=[15, 8]);
# ### Optimizing Window Size
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
num_walks = 30
walk_length = 4
window_size_list = [2, gat2vec_config.window_size, 10, 20, 30]
optmization = pd.DataFrame()
for ws in window_size_list:
print(f'# Training window size {ws}')
model = g2v.train_gat2vec(
num_walks,
walk_length,
gat2vec_config.dimension,
ws,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
optmization.loc[:, f'window_size {ws}'] = auc_df['auc']
# -
# NW = 2
optmization.boxplot(figsize=[15, 8]);
# NW = 30
optmization.boxplot(figsize=[15, 8]);
# ### Optimizing Number of Dimensions
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
num_walks = 30
walk_length = 4
window_size = 10
dimension_list = [32, 64, gat2vec_config.dimension, 256, 512]
optmization = pd.DataFrame()
for dim in dimension_list:
print(f'# Training dimension {dim}')
model = g2v.train_gat2vec(
num_walks,
walk_length,
dim,
window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
optmization.loc[:, f'dimension {dim}'] = auc_df['auc']
# -
optmization.boxplot(figsize=[15, 8]);
# ## Results after optimization
# +
g2v = Gat2Vec(dir_, dir_, label=False, tr=gat2vec_config.training_ratio)
walk_length = 4
num_walks = 30
window_size = 10
dimension = 256
model = g2v.train_gat2vec(
num_walks,
walk_length,
dimension,
window_size,
output=True,
)
classifier = Classification(dir_, dir_, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
print(auc_df)
# -
# ## Print the probabilities
# +
probs_df = pd.DataFrame(classifier.get_prediction_probs_for_entire_set(model))
print(probs_df)
# -
| notebooks/rep_learn_script.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Measure the Difference between Preprint-Published similarity and Published Articles
# This notebook is designed to answer the question how much do preprints change with each new version.
# Based on results within my manuscript a review wanted to know the answer to the above question.
# Also this notebook outputs an excel file that contains all preprint published pairs and their respective publication information.
# Allows people to manually inspect practical consequences (if any) for preprints that take longer to publish.
from datetime import timedelta
import numpy as np
import pandas as pd
import plotnine as p9
from scipy.stats import linregress
# # Load the Document Distances
# Per-pair distances between each preprint and its published version,
# plus the associated publication metadata.
published_date_distances = pd.read_csv(
    "output/preprint_published_distances.tsv", sep="\t"
)
# Parse the date columns and the publication delay into proper dtypes.
for col in ["preprint_date", "published_date"]:
    published_date_distances[col] = pd.to_datetime(published_date_distances[col])
published_date_distances["time_to_published"] = pd.to_timedelta(
    published_date_distances["time_to_published"]
)
print(published_date_distances.shape)
published_date_distances.head()
# # Plot Version count against Doc Distances
# Reviewer wanted to see if there is an association between version count and document distances. (i.e. if preprints with more versions have more text changes).
# +
# BUG FIX: the original assignment ended with a stray trailing comma, which
# made `x` a 1-tuple wrapping the list instead of the list itself, so
# linregress received a (1, n)-shaped x against an n-length y.
x = published_date_distances["version_count"].values.tolist()
y = published_date_distances["doc_distances"].values.tolist()
# Linear fit of document distance against number of preprint versions.
results = linregress(x, y)
print(results)
# -
published_date_distances["version_count"] = pd.Categorical(
published_date_distances["version_count"].tolist()
)
g = (
p9.ggplot(published_date_distances, p9.aes(x="version_count", y="doc_distances"))
+ p9.geom_boxplot(fill="#b2df8a")
+ p9.geom_line(
data=pd.DataFrame(
dict(
version_count=np.arange(1, 13),
doc_distances=np.arange(1, 13) * 0.02669 + 0.8697,
)
),
linetype="dashed",
color="#1f78b4",
size=1,
)
+ p9.annotate(
"text",
label=f"y={results.slope:0.4f}*X + {results.intercept:0.4f}",
x=9,
y=7.5,
size=13,
color="#1f78b4",
)
+ p9.labs(
x="# of Preprint Versions",
y="Euclidean Distance of Preprint-Published Versions",
)
+ p9.theme_seaborn(style="white", context="notebook")
)
g.save("output/version_count_doc_distances.svg")
g.save("output/version_count_doc_distances.png", dpi=600)
print(g)
# Overall, preprints change with each new version; however, based on the magnitude of the slope I'd argue that these changes are minor compared to substantial changes (~6 distance units)
# # Output published dates to Excel
# Reviewer asked if manually pursuing preprints that take longer to publish would produce any interesting results. Great question, but not enough time to go into that; however, providing a supplementary file for others to look into could provide an in depth answer.
excel_print_df = published_date_distances.drop(
["document", "category", "pmcoa"], axis=1
).rename(
index=str,
columns={
"preprint_date": "posted_date",
"time_to_published": "days_till_published",
"doc_distances": "preprint_published_distance",
},
)[
[
"preprint_doi",
"posted_date",
"pmcid",
"published_doi",
"journal",
"published_date",
"days_till_published",
"preprint_published_distance",
"version_count",
]
]
excel_print_df
excel_print_df = excel_print_df[excel_print_df["days_till_published"] > pd.Timedelta(0)]
excel_print_df["posted_date"] = excel_print_df.posted_date.dt.date
excel_print_df["published_date"] = excel_print_df.published_date.dt.date
(
excel_print_df.sort_values("days_till_published", ascending=False).to_excel(
"output/published_preprints_information.xlsx", engine="xlsxwriter"
)
)
| biorxiv/publication_delay_experiment/03_publication_delay_experiment_reviewer_request.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
import pickle
# -
# Load the insurance dataset; each row is one policyholder and
# "insuranceclaim" is the binary target column.
df = pd.read_csv("insurance.csv")
df.head()

cat = ["sex", "children", "smoker", "region"]
num = ["age", "bmi", "charges"]

# Class balance of the target within each categorical feature.
for col in cat:
    plt.figure()
    sns.countplot(data=df, x=col, hue="insuranceclaim")
    plt.show()

# Distribution of each numeric feature split by claim outcome.
for col in num:
    plt.figure()
    sns.boxplot(data=df, y=col, x="insuranceclaim")
    plt.show()

# Last column is the target; everything before it is a feature.
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1
)

dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
print(classification_report(y_test, y_pred))

# Persist the trained model. Using `with` guarantees the file handle is
# closed even if pickling fails (the original open()/close() pair leaked
# the handle on error).
with open("model.ser", "wb") as file:
    pickle.dump(dt, file)
| Insurance Claim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''graph'': conda)'
# name: python3710jvsc74a57bd0563d867f513b1db22a50c0772d3498166496ad16228d7c1d495fd4c7e7a93ad6
# ---
# + [markdown] id="4714VKOKo8cK"
# # Modeling
#
# This notebook will guide you through the process of loading heterogeneous graphs and training models. The heterogeneous graph is based on Deep Graph Library (DGL) implementation, and the training process is based on PyTorch implementation.
#
# The CPU is recommended for training when the equipment allows it. If you need to use GPU, please install GPU-based DGL separately.
# + [markdown] id="lsBPiu9io8cR"
# ## Colab setting
#
# If you want to train in Colab, please run both cells first and mount to the corresponding path.
# + colab={"base_uri": "https://localhost:8080/"} id="0AICBHAJHa56" executionInfo={"status": "ok", "timestamp": 1620840220547, "user_tz": 300, "elapsed": 21466, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="1d9c0d9a-9dea-42cd-aeea-fe211b1b6585"
# Mount Google Drive so the Colab runtime can reach the project files.
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="LtJfGQ6GHh88" executionInfo={"status": "ok", "timestamp": 1620840221150, "user_tz": 300, "elapsed": 597, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="35cda3a1-ec52-4f5d-f037-ae6266140b64"
import os
# Project root on Drive; all relative paths below resolve from here.
# NOTE(review): assumes this Drive folder exists — confirm before running.
cur_path = "/content/drive/MyDrive/graph-fraud-detection/"
os.chdir(cur_path)
# !pwd
# + colab={"base_uri": "https://localhost:8080/"} id="aprSAaVaHiEX" executionInfo={"status": "ok", "timestamp": 1620840224088, "user_tz": 300, "elapsed": 3527, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="443432ac-762b-4b86-fcef-937bf37e0fa8"
# !pip install dgl
#-cu101
# + [markdown] id="cgVPj47IMMG4"
# ## Training (All in 1)
#
# In this part, you can use this all-in-one method to train the model easily.
# + colab={"base_uri": "https://localhost:8080/"} id="W0mfuNcvBy6n" executionInfo={"status": "ok", "timestamp": 1620886395532, "user_tz": 300, "elapsed": 45982333, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="9b56c3b7-e546-4314-e2c8-51b6a09282a2"
# !python train.py --n-epochs 1000
# + id="PVRHogOMXee2"
# + id="MZJmjKzHXepy"
# + [markdown] id="lSmqRg1eo8cT"
# ## Training (Detailed)
#
# Besides the approach mentioned before, you can also use this detailed approach.
# + [markdown] id="TZWNPZTPOJSb"
# ### Prepare environment
# + colab={"base_uri": "https://localhost:8080/"} id="E6TREv-bfXKZ" executionInfo={"status": "ok", "timestamp": 1620765396200, "user_tz": 300, "elapsed": 9124, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="27c66779-c91f-4b46-ac2a-9f5c29002c9b"
import os
import sys
import glob
os.environ['DGLBACKEND'] = 'pytorch'
import torch as th
import dgl
import numpy as np
from gnn.estimator_fns import *
from gnn.graph_utils import *
from gnn.data import *
from gnn.utils import *
from gnn.pytorch_model import *
from train import *
# + id="_9CmUjgaHXdj"
# + [markdown] id="E881gvwobfSz"
# ### Load data
#
# Recall the edges we defined before and the csv files we used to save them.
# + id="BmqGrWm-HXdk"
# Collect the basenames of all relation edgelist CSVs as one
# comma-separated string (the format expected by the training args).
file_list = glob.glob('./data/*edgelist.csv')
relation_paths = [path for path in file_list if "relation" in path]
edges = ",".join(path.split("/")[-1] for path in relation_paths)
# + [markdown] id="cE2--V7AbfS0"
# To train the graph neural network, we need to define a few hyperparameters that determine properties such as the class of graph neural network models we will be using, the network architecture and the optimizer and optimization parameters.
#
# Here we're setting only a few of the hyperparameters, to see all the hyperparameters and their default values, see `gnn/estimator_fns.py`. The parameters set below are:
#
# - **nodes** is the name of the file that contains the node_ids of the target nodes and the node features.
# - **edges** is a regular expression that when expanded lists all the filenames for the edgelists
# - **labels** is the name of the file tha contains the target node_ids and their labels
# - **model** specify which graph neural network to use, this should be set to r-gcn
#
# The following hyperparameters can be tuned and adjusted to improve model performance
#
# - **batch-size** is the number nodes that are used to compute a single forward pass of the GNN
# - **embedding-size** is the size of the embedding dimension for non target nodes
# - **n-neighbors** is the number of neighbours to sample for each target node during graph sampling for mini-batch training
# - **n-layers** is the number of GNN layers in the model
# - **n-epochs** is the number of training epochs for the model training job
# - **optimizer** is the optimization algorithm used for gradient based parameter updates
# - **lr** is the learning rate for parameter updates
# + [markdown] id="BDQKAwpYo8cW"
# ### Generate graph
# + colab={"base_uri": "https://localhost:8080/"} id="23aFsML_bfS1" executionInfo={"status": "ok", "timestamp": 1620765400708, "user_tz": 300, "elapsed": 318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="d584ec26-25d7-4c80-a41e-42785674428f"
# Report library versions for reproducibility of the training run.
print('numpy version:{} PyTorch version:{} DGL version:{}'.format(np.__version__,
                                                                  th.__version__,
                                                                  dgl.__version__))
# Parse training hyperparameters (defaults live in gnn/estimator_fns.py).
args = parse_args()
print(args)
# + colab={"base_uri": "https://localhost:8080/"} id="C4PfzLQKbfS1" executionInfo={"status": "ok", "timestamp": 1620765634870, "user_tz": 300, "elapsed": 233720, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="a00d8856-b3c7-49b4-d3ca-6f98d20620a5"
args.edges = edges
# NOTE(review): the assignment above is immediately overwritten by
# get_edgelists below, so the `edges` string built earlier appears unused —
# confirm whether this is intentional.
args.edges = get_edgelists('relation*', args.training_dir)
# Build the heterogeneous DGL graph plus node-id lookup tables from the
# edgelist/feature CSVs in the training directory.
g, features, target_id_to_node, id_to_node = construct_graph(args.training_dir,
                                                             args.edges,
                                                             args.nodes,
                                                             args.target_ntype)
# Standardize features; mean/stdev are kept so inference can reuse them.
mean, stdev, features = normalize(th.from_numpy(features))
print('feature mean shape:{}, std shape:{}'.format(mean.shape, stdev.shape))
# + colab={"base_uri": "https://localhost:8080/"} id="8Bw1ceNebfS2" executionInfo={"status": "ok", "timestamp": 1620765636388, "user_tz": 300, "elapsed": 234609, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="f56605cc-8069-4363-fdd5-df07dd4ac626"
g.nodes['target'].data['features'] = features
print("Getting labels")
n_nodes = g.number_of_nodes('target')
labels, _, test_mask = get_labels(target_id_to_node,
n_nodes,
args.target_ntype,
os.path.join(args.training_dir, args.labels),
os.path.join(args.training_dir, args.new_accounts))
print("Got labels")
labels = th.from_numpy(labels).float()
test_mask = th.from_numpy(test_mask).float()
n_nodes = th.sum(th.tensor([g.number_of_nodes(n_type) for n_type in g.ntypes]))
n_edges = th.sum(th.tensor([g.number_of_edges(e_type) for e_type in g.etypes]))
print("""----Data statistics------'
#Nodes: {}
#Edges: {}
#Features Shape: {}
#Labeled Test samples: {}""".format(n_nodes,
n_edges,
features.shape,
test_mask.sum()))
# + [markdown] id="FjhjypJEo8cX"
# ### Start training
#
# The training process and result will be saved in the same folder.
# + id="UETWzSN6bfS3"
# Select the training device; `cuda` records whether a GPU is in use.
cuda = bool(args.num_gpus)
device = th.device('cuda:0' if cuda else 'cpu')
# + id="AWQp2MxLHXdo" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1620799361346, "user_tz": 300, "elapsed": 5097409, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="5a0c315c-0e7e-4391-ec98-0411d0a4c59a"
print("Initializing Model")
in_feats = features.shape[1]
# Binary classification: fraud vs. legitimate.
n_classes = 2

# Node counts per type, used to size the learnable embeddings of
# non-target node types inside the R-GCN.
ntype_dict = {n_type: g.number_of_nodes(n_type) for n_type in g.ntypes}

model = get_model(ntype_dict, g.etypes, vars(args), in_feats, n_classes, device)
print("Initialized Model")

# Move tensors to the selected device; labels must be long for CE loss.
features = features.to(device)
labels = labels.long().to(device)
test_mask = test_mask.to(device)
# g = g.to(device)

loss = th.nn.CrossEntropyLoss()
# print(model)
optim = th.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

print("Starting Model training")

initial_record()

# Full-graph training; the same graph is passed twice (train/eval graph).
model, class_preds, pred_proba = train_fg(model, optim, loss, features, labels, g, g,
                                          test_mask, device, args.n_epochs,
                                          args.threshold,  args.compute_metrics)
print("Finished Model training")

print("Saving model")

if not os.path.exists(args.model_dir):
    os.makedirs(args.model_dir)

# Persist graph, weights, id mapping, and normalization stats for inference.
save_model(g, model, args.model_dir, id_to_node, mean, stdev)
print("Model and metadata saved")
# + id="Asfws7O3oZLb"
# + id="i8VQMM62By1k"
# + id="ZJ2Cf2ieBy37"
# + id="DsId7ZeqNQNd"
# + id="-u3-n_xdoZLc" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1618992670492, "user_tz": 300, "elapsed": 219, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh140SMb0c80XwwVdi_E7-Lh2XNkXKVWOakF5qL=s64", "userId": "05904147758498762863"}} outputId="8b9f8314-147b-44a9-9a4c-ec4f84793e9f"
# %tb
# + id="0JTmmYlyoZLc"
# + id="RUaiLCITHXdo"
| 20_modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="j4wp4Golo3gs" outputId="0b269d97-720f-4d4b-9a74-bf60fb34c88c"
# !pip install yfinance
# + id="tbhrF0Kbq8EK"
# Importando librerías
import numpy as np
import matplotlib. pyplot as plt
import pandas as pd
import yfinance as yf
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import datetime as dt
import seaborn as sns
import matplotlib.dates as mdates
import math
import tensorflow as tf
from tensorflow.keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
# + [markdown] id="6ZcXZasoZCOD"
# Se importa la librería "Import Data", la cual proporciona el precio del mercado para una acción.
# + colab={"base_uri": "https://localhost:8080/"} id="BEewjaRApebF" outputId="82d8d785-1cf0-4d43-b85f-56fe1c9fdafc"
# Importar el fichero .PY, el cual se conecta a Yahoo Finance e importa los datos
# Mount Google Drive so the data-import helper script is reachable.
from google.colab import drive
drive.mount('/content/drive')
# Run the helper in the notebook's global namespace. `execfile` was removed
# in Python 3 (Colab runs Python 3), so use the exec(open(...).read())
# equivalent instead.
script_path = '/content/drive/MyDrive/StockPrediction/import_data_stock_prediction.py'
with open(script_path) as script_file:
    exec(script_file.read())
# + [markdown] id="5WrgtRKhr1aq"
# # LSTM Univariante
# + [markdown] id="vZunosmDL3zJ"
# ## Funciones
# + [markdown] id="BgefpxaQS14F"
# ### Transformación de datos
# + id="HGDwWnAEL7Mn"
def TransformData(tickerDf, split_data=0.8, train_last_days=60):
    """Prepare LSTM training tensors from a daily price history.

    Args:
        tickerDf: DataFrame with an 'Adj Close' column (one row per day).
        split_data: Fraction of rows used for training (default 80/20).
        train_last_days: Window length — each sample is the previous
            `train_last_days` scaled prices, the label is the next price.
            Previously read from a module-level global; the default of 60
            matches that global, so existing calls behave identically.

    Returns:
        (x_train, y_train, dataset, training_data_len, scaler, scaled_data)
        where x_train has shape (samples, train_last_days, 1).
    """
    data = tickerDf.filter(['Adj Close'])
    dataset = data.values
    # Split point: first `split_data` fraction of rows is the training set.
    training_data_len = math.ceil(len(dataset) * split_data)
    # Scale prices to [0, 1]; the fitted scaler is returned so test data and
    # predictions can be (inverse-)transformed consistently.
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)
    # Training slice of the scaled series.
    train_data = scaled_data[0:training_data_len, :]
    # Sliding windows: each x is `train_last_days` consecutive scaled prices,
    # each y is the price immediately following the window.
    x_train = []
    y_train = []
    for i in range(train_last_days, len(train_data)):
        x_train.append(train_data[i - train_last_days:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = np.array(x_train), np.array(y_train)
    # Keras LSTMs expect (samples, timesteps, features).
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    return x_train, y_train, dataset, training_data_len, scaler, scaled_data
# + [markdown] id="_Y_jZP-aS5sY"
# ### Entrenamiento
# + id="1YQgFRz5MwtY"
def LSTM_Model(x_train, model_dropout=0.2, model_neuron=50, model_learning_rate=1e-4):
    """Build a stacked 3-layer LSTM regressor for next-day price prediction.

    Args:
        x_train: Training tensor of shape (samples, timesteps, 1); only the
            timestep dimension is read here, to size the input layer.
        model_dropout: Dropout rate applied after each LSTM layer.
        model_neuron: Units per LSTM layer.
        model_learning_rate: Adam learning rate.

    Returns:
        A compiled (untrained) Keras Sequential model with one linear output.
    """
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.LSTM(units = model_neuron,
                                   input_shape=(x_train.shape[1], 1),
                                   return_sequences=True))
    # Model 2 - 11  (experiment label kept from the original notebook)
    model.add(tf.keras.layers.Dropout(model_dropout))
    model.add(tf.keras.layers.LSTM(units = model_neuron, return_sequences=True))
    model.add(tf.keras.layers.Dropout(model_dropout))
    model.add(tf.keras.layers.LSTM(units = model_neuron))
    model.add(tf.keras.layers.Dropout(model_dropout))
    model.add(tf.keras.layers.Dense(units=25))
    model.add(tf.keras.layers.Dense(units=1))
    # Model 1 - 5.9  (earlier, shallower variant kept for reference)
    # model.add(tf.keras.layers.LSTM(units=50, return_sequences=False))
    # model.add(tf.keras.layers.Dense(units=25))
    # model.add(tf.keras.layers.Dense(units=1))
    # NOTE(review): 'accuracy' is not meaningful for a regression (MSE)
    # objective — consider a regression metric such as MAE/RMSE.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate = model_learning_rate),
                loss = tf.keras.losses.MeanSquaredError(),
                metrics=['accuracy']
                )
    return model
# + id="zSjmg3hANQga"
def LSTM_fit(model, x_train, y_train, model_epochs=30, model_batch_size=32):
    """Train `model` on the windowed series and return the Keras History.

    Training stops early once the training loss has not improved for 8
    epochs; 10% of the training data is held out for validation.
    verbose=0 keeps the notebook output clean.
    """
    stopper = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=8)
    fit_kwargs = {
        'epochs': model_epochs,
        'batch_size': model_batch_size,
        'validation_split': 0.1,
        'callbacks': [stopper],
        'verbose': 0,
    }
    return model.fit(x_train, y_train, **fit_kwargs)
# + id="CZbcj-fw-XvD"
def LSTM_PlotLoss(mfit, n_epochs=30):
    """Plot training vs. validation loss curves from a Keras History.

    `n_epochs` is accepted for backward compatibility but unused: the
    x-axis length is taken from the recorded history itself (early
    stopping may end training before `n_epochs`).
    """
    epochs_ran = len(mfit.history['accuracy'])
    xs = np.arange(0, epochs_ran)
    plt.style.use("ggplot")
    fig, axis = plt.subplots(1, 1, figsize=(10,5))
    fig.suptitle('Función de Pérdida')
    axis.plot(xs, mfit.history["loss"], label="Pérdida en el Entrenamiento")
    axis.plot(xs, mfit.history["val_loss"], label="Pérdida en la validación")
    axis.set_title("Pérdida")
    axis.set_xlabel("Nº Épocias")
    axis.set_ylabel("Pérdida")
    axis.legend(loc="upper right")
    plt.show()
# + [markdown] id="yhvXKbYbS87h"
# ### Testeo
# + id="XRmTsIkyNk0A"
def LSTM_Predictions(model, training_data_len, scaled_data, scaler, dataset, train_last_days=60):
    """Run the trained model over the held-out test split.

    Args:
        model: Trained Keras model.
        training_data_len: Index in `dataset` where the test split starts.
        scaled_data: Full series scaled to [0, 1] by `scaler`.
        scaler: Fitted MinMaxScaler, used to map outputs back to prices.
        dataset: Unscaled price array (source of ground truth).
        train_last_days: Window length; must match the one used for
            training. Previously read from a module-level global; the
            default of 60 matches that global, so existing calls behave
            identically.

    Returns:
        (predictions, y_test): predicted prices (unscaled) and the true
        prices for the test split.
    """
    # Include the last `train_last_days` training points so the first test
    # sample has a full window of history.
    test_data = scaled_data[training_data_len - train_last_days:, :]
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(train_last_days, len(test_data)):
        x_test.append(test_data[i - train_last_days:i, 0])
    x_test = np.array(x_test)
    # Keras LSTMs expect (samples, timesteps, features).
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    predictions = model.predict(x_test)
    # Map scaled outputs back to real price units.
    predictions = scaler.inverse_transform(predictions)
    return predictions, y_test
# + id="2UtR5jcz4EGR"
def LSTM_RMSE(predictions, y_test):
    """Root-mean-square error between predictions and ground truth.

    A value of 0 means the model is exact.

    Bug fix: the original expression `np.sqrt(np.mean(p - y)**2)` squared
    the *mean* of the errors (i.e. |mean bias|), letting positive and
    negative errors cancel. RMSE squares each error before averaging.
    """
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    return rmse
# + [markdown] id="uWXxbn6pS_hH"
# ### Predicciones
# + id="I5sN3guv4WOe"
import warnings
def LSTM_PlotPredictions(data, training_data_len, predictions):
    """Plot the train split, the test split, and the model's predictions.

    Args:
        data: DataFrame with an 'Adj Close' column for the full series.
        training_data_len: Row index where the test split begins.
        predictions: Unscaled predicted prices aligned with the test split.
    """
    train = data[:training_data_len]
    # Copy the slice before adding a column: assigning into a view raised
    # pandas' SettingWithCopyWarning, which the original silenced with a
    # blanket warnings.filterwarnings("ignore") — suppressing *every*
    # warning process-wide. Copying fixes the root cause instead.
    valid = data[training_data_len:].copy()
    valid['Predictions'] = predictions
    plt.figure(figsize=(16, 8))
    plt.title('Model')
    plt.xlabel('Date', fontsize = 18)
    plt.ylabel('Close Price', fontsize = 18)
    plt.plot(train['Adj Close'], color='RoyalBlue')
    plt.plot(valid['Adj Close'], color='Green')
    plt.plot(valid['Predictions'], color='Red')
    plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
    plt.show()
# + id="kyZtQgRl2Dmi"
def LSTM_PredictNextDay(tickerDf, scaler, model, train_last_days=60, last_day_predicted=0):
    """Predict the price for the day after the (possibly truncated) series.

    If last_day_predicted == 0, forecasts the next (unknown) trading day.
    If last_day_predicted >= 1, drops that many trailing rows and predicts
    the first dropped day, so the prediction can be compared to the known
    real price (back-testing).

    Returns:
        (day, real, prec): the predicted date as 'YYYY-MM-DD', the real
        price rounded to 2 decimals (0 when forecasting an unknown day),
        and the predicted price rounded to 2 decimals.
    """
    if last_day_predicted == 0:
        tickerDf_test = tickerDf
    else:
        tickerDf_test = tickerDf.iloc[:-last_day_predicted,:]
    last_days = tickerDf_test.filter(['Adj Close'])
    # Determine the date being predicted and, if it exists, its real price.
    day = last_days[-1:].index
    day = day[0].strftime('%Y-%m-%d')
    day = datetime.strptime(day, '%Y-%m-%d')
    if last_day_predicted == 0:
        # Next calendar day (NOTE(review): may fall on a non-trading day).
        day = day + timedelta(days=1)
        real = 0
    else:
        # Next *trading* day after the truncated series, taken from the index.
        day = tickerDf.loc[tickerDf.index >= day].iloc[1].name
        real = round(tickerDf['Adj Close'].loc[tickerDf.index == day].values[0], 2)
    # Build a single input window from the last `train_last_days` prices,
    # scaled with the same scaler used at training time.
    last_days = last_days[-train_last_days:].values
    last_days_scaled = scaler.transform(last_days)
    X_test = []
    X_test.append(last_days_scaled)
    X_test = np.array(X_test)
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
    pred_price = model.predict(X_test)
    # Back to real price units.
    pred_price = scaler.inverse_transform(pred_price)
    prec = round(pred_price[0][0], 2)
    return day.strftime('%Y-%m-%d'), real, prec
# + [markdown] id="SGcJtkoETHQR"
# ## Llamada al modelo LSTM
# + [markdown] id="M8PPuJrcTSxD"
# ### Descarga de datos y Parámetros
# + id="zGz7oJ6iY7-f"
# Ticker under study (Boeing).
tickerSymbol = 'BA'
# Fetch data from today back 5 years.
# NOTE(review): the variable name `one_year_ago` is misleading — it holds
# the date 5 years ago.
one_year_ago = datetime.now() - relativedelta(years=5)
start_date = one_year_ago.strftime('%Y-%m-%d')
#one_year_ago = datetime.now() - relativedelta(years=1)
end_date = datetime.now().strftime('%Y-%m-%d')
# Download the price dataframe for the chosen company and date range.
tickerDf = GetStockDataByTicker(tickerSymbol, start_date, end_date)
# + id="KsWQbUwxK_wI" colab={"base_uri": "https://localhost:8080/", "height": 214} outputId="baa2453f-6d87-439f-9efe-fcc88f995520"
# Show summary information about the company (helper from the Drive script).
GetStockInformationByTicker(tickerSymbol, start_date, end_date)
# + id="PzUIT4y0LBky" colab={"base_uri": "https://localhost:8080/", "height": 500} outputId="1376478f-7344-4f73-a9c7-0c94903cf79d"
# Plot the historical price evolution (helper from the Drive script).
Graph_StockEvolutionByTime(tickerSymbol, tickerDf)
# + id="-fLkWR_T2bYV"
# Length of the look-back window fed to the LSTM (in trading days).
train_last_days = 60
split_data = 0.8 #80% train, 20% test
# Units per LSTM layer.
model_neuron = 50
model_epochs = 30
model_batch_size = 32
model_learning_rate = 1e-4
# Dropout rate between stacked LSTM layers.
model_dropout = 0.2
# + [markdown] id="eHPlCgqsWj4V"
# ### Transformación de datos
# + id="dN1QEtbmTWnI"
# Windowed training tensors plus the fitted scaler for later inverse-transform.
x_train, y_train, dataset, training_data_len, scaler, scaled_data = TransformData(tickerDf, split_data)
# + [markdown] id="wENw2SiDWmCo"
# ### Entrenamiento
# + colab={"base_uri": "https://localhost:8080/", "height": 976} id="rGa4Az4dTZeO" outputId="602ad74f-ab5a-49e9-aada-352f10d21d46"
# Build, train, and visualize the network architecture.
LSTM_model = LSTM_Model(x_train, model_dropout, model_neuron, model_learning_rate)
LSTM_history = LSTM_fit(LSTM_model, x_train, y_train, model_epochs, model_batch_size)
tf.keras.utils.plot_model(LSTM_model, show_shapes=True, show_layer_names=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="vI4tVy13Tb0D" outputId="e1b856a7-2ec4-4a83-96d0-689d8551242e"
# Training vs. validation loss curves.
LSTM_PlotLoss(LSTM_history, model_epochs)
# + [markdown] id="B6UfiBzlWoRn"
# ### Testeo
# + colab={"base_uri": "https://localhost:8080/", "height": 540} id="80VpfIvdTe9t" outputId="a1a5cd12-6851-4c06-f782-4fc222c9e4c6"
# Evaluate on the held-out 20% and plot predictions against truth.
predictions, y_test = LSTM_Predictions(LSTM_model, training_data_len, scaled_data, scaler, dataset)
LSTM_PlotPredictions(tickerDf.filter(['Adj Close']), training_data_len, predictions)
# NOTE(review): label typo — "RSME" should read "RMSE".
print("RSME = ", LSTM_RMSE(predictions, y_test))
# + [markdown] id="l6lNlaHhWp36"
# ### Predicción prox día
# + colab={"base_uri": "https://localhost:8080/"} id="I7mI5vESTjRZ" outputId="ff30b20f-561e-4e70-ecc2-5a2ff018fe2e"
# If last_day_predicted == 0 we forecast tomorrow's price; if >= 1 we
# back-test: compare the predicted price against the known price from
# `last_day_predicted` trading days ago.
last_day_predicted = 0
day, real, predicted = LSTM_PredictNextDay(
    tickerDf, scaler, LSTM_model, train_last_days, last_day_predicted
)
# Report results.
if last_day_predicted == 0:
    print("Se estima que el precio de mañana de", tickerSymbol, "sea", predicted, "$")
else:
    # Accuracy expressed as the smaller/larger price ratio, in percent.
    raw_prec = abs(predicted / real * 100) if real > predicted else (real / predicted) * 100
    prec = round(raw_prec, 2)
    print("El precio real del día", day, "es", real, ", y el predicho", predicted)
    print("La precisión es de", prec, "%")
| ipynb y Resultados/3_LSTM_Stock_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering Game of Thrones Characters
#
# The goal of this project was to find 'clusters' of characters for Game of Thrones based on a character's vocabulary.
import numpy as np
import pandas as pd
import pprint
import csv
import sys
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
import matplotlib.pyplot as plt
# Map each character name to their list of spoken lines, keeping only
# characters with more than 75 lines (enough text to cluster on).
d = {}
# Some characters' concatenated lines exceed csv's default field limit.
csv.field_size_limit(sys.maxsize)
with open('got_lines.csv', 'r') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        # All of a character's lines are stored as one ';'-separated field.
        lines = row['lines'].split(';')
        if len(lines)>75:
            d[row['character']] = lines
# +
# NOTE(review): `stopwords` and `words` are not referenced in the visible
# code (the vectorizer uses its own stop_words='english') — confirm usage.
stopwords = nltk.corpus.stopwords.words('english')
words = set(nltk.corpus.words.words())
from nltk.stem.snowball import SnowballStemmer
import contractions
# Snowball stemmer shared by the two tokenizers below.
stemmer = SnowballStemmer('english')
def tokenize_and_stem(text):
    """Tokenize `text` and return the Snowball stem of each word token.

    Contractions are expanded first; the text is tokenized sentence by
    sentence so punctuation is caught as its own token. Tokens without an
    ASCII letter (numbers, raw punctuation) are discarded.
    """
    expanded = contractions.fix(text)
    raw_tokens = []
    for sentence in nltk.sent_tokenize(expanded):
        raw_tokens.extend(nltk.word_tokenize(sentence))
    word_tokens = [tok for tok in raw_tokens if re.search('[a-zA-Z]', tok)]
    return [stemmer.stem(tok) for tok in word_tokens]
def tokenize_only(text):
    """Tokenize `text` into lowercased word tokens without stemming.

    Mirrors tokenize_and_stem so stems can be mapped back to a readable
    surface form; tokens without an ASCII letter are discarded.
    """
    expanded = contractions.fix(text)
    lowered = []
    for sentence in nltk.sent_tokenize(expanded):
        lowered.extend(word.lower() for word in nltk.word_tokenize(sentence))
    return [tok for tok in lowered if re.search('[a-zA-Z]', tok)]
# Build a stem -> original-word lookup table: both tokenizers apply the
# same letter filter, so the two lists stay aligned element-for-element.
totalvocab_stemmed = []
totalvocab_tokenized = []
for i in d:
    lines = ' '.join(d[i])
    allwords_stemmed = tokenize_and_stem(lines) #for each item in tokenize/stem
    totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list
    allwords_tokenized = tokenize_only(lines)
    totalvocab_tokenized.extend(allwords_tokenized)
# Index = stem, column = first-seen surface word; used later to print
# readable cluster terms.
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)
print(vocab_frame)
# +
from sklearn.feature_extraction.text import TfidfVectorizer
#define vectorizer parameters: drop terms in >80% or <20% of documents,
#use the stemming tokenizer, and include uni/bi/tri-grams.
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
                                 min_df=0.2, stop_words='english',
                                 use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))

characters = list(d.keys())
# One "document" per character: all their lines joined together.
documents = [' '.join(d[i]) for i in d]

# The %time cell magic below performs the actual fit_transform; in plain
# Python this line is a comment (jupytext notebook magic).
# %time tfidf_matrix = tfidf_vectorizer.fit_transform(documents)

print(tfidf_matrix.shape)
# +
# TODO(review): get_feature_names() was removed in scikit-learn 1.2 —
# use get_feature_names_out() on modern versions.
terms = tfidf_vectorizer.get_feature_names()

from sklearn.metrics.pairwise import cosine_similarity
# Pairwise distance matrix (1 - cosine similarity) between characters.
dist = 1 - cosine_similarity(tfidf_matrix)
# +
from sklearn.cluster import KMeans

# Elbow method: plot within-cluster inertia for k = 1..14 to pick k.
Sum_of_squared_distances = []
K = range(1,15)
for k in K:
    km = KMeans(n_clusters=k)
    km = km.fit(tfidf_matrix)
    Sum_of_squared_distances.append(km.inertia_)
plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# +
from sklearn.cluster import KMeans
# `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and removed
# in 0.23; the standalone `joblib` package is the drop-in replacement.
import joblib

# Cluster the TF-IDF document vectors (k chosen from the elbow plot above)
# and persist the fitted model for reuse.
num_clusters = 5

km = KMeans(n_clusters=num_clusters)

# %time km.fit(tfidf_matrix)

clusters = km.labels_.tolist()

joblib.dump(km, 'doc_cluster.pkl')
# -
# Reload to demonstrate the round trip (and so later cells work from disk).
km = joblib.load('doc_cluster.pkl')
clusters = km.labels_.tolist()
# +
# Per-character cluster assignments. 'documents' goes into the dict but is
# intentionally excluded from the DataFrame via the `columns` argument.
groups = { 'character': characters, 'documents': documents, 'cluster': clusters,}

frame = pd.DataFrame(groups, index = [clusters] , columns = ['character', 'cluster'])
# -
# Cluster sizes (number of characters per cluster).
frame['cluster'].value_counts()
# +
# For each cluster: the 40 terms nearest its centroid (mapped back to a
# readable surface word via vocab_frame when possible) and its members.
print('Top terms per cluster:')
# Term indices sorted by descending centroid weight, per cluster.
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
    print(f'Cluster {i} words:')
    s = ''
    for ind in order_centroids[i, :40]:
        if terms[ind] in vocab_frame.index:
            # vocab_frame maps a stem to original tokens; take the first.
            s+=vocab_frame.loc[terms[ind]].values.tolist()[0][0] + ', '
        else:
            s+=terms[ind] + ', '
    print(s)
    print(f'Cluster {i} characters:')
    c = ''
    for character in frame.loc[i]['character'].values.tolist():
        c+=character + ', '
    print(c)
# +
import os  # for os.path.basename
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn.manifold import MDS

# (Removed a stray no-op `MDS()` instantiation that created and discarded
# an estimator.)

# Project the pairwise cosine-distance matrix into a two-dimensional plane.
# "precomputed" because we provide a distance matrix; `random_state` is
# fixed so the plot is reproducible.
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)

pos = mds.fit_transform(dist)  # shape (n_samples, n_components)

xs, ys = pos[:, 0], pos[:, 1]

# set up cluster names using a dict
cluster_names = {0: 'The Off-In-The-Middle of Nowheres',
                 1: 'The Hoity-toity Schemers',
                 2: "The I'll Do What It Takes",
                 3: 'The Targaryen Supporters',
                 4: 'The Foulmouthed Brutes'
                 }

# some ipython magic to show the matplotlib plots inline
# %matplotlib inline

# create data frame that has the result of the MDS plus the cluster numbers and titles
df = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=characters))

# group by cluster
groups = df.groupby('label')

# set up plot
fig, ax = plt.subplots(figsize=(17, 9))  # set size
ax.margins(0.05)  # Optional, just adds 5% padding to the autoscaling

# iterate through groups to layer the plot
# note the cluster_names dict with the 'name' lookup returns the appropriate label
for name, group in groups:
    ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,
            label=cluster_names[name],
            mec='none')
    ax.set_aspect('auto')
    # Boolean flags: matplotlib removed the 'on'/'off' string forms in 3.0,
    # and any non-empty string is truthy — so the old 'off' silently meant
    # *on* there. Use real booleans.
    ax.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False)
    # NOTE(review): the original passed top='off' here too; `right` was
    # probably intended for the y-axis — kept as-is to preserve behavior.
    ax.tick_params(
        axis='y',           # changes apply to the y-axis
        which='both',       # both major and minor ticks are affected
        left=False,         # ticks along the left edge are off
        top=False,          # ticks along the top edge are off
        labelleft=False)

ax.legend(numpoints=1)  # show legend with only 1 point

# add label in x,y position with the label as the character name
for i in range(len(df)):
    ax.text(df.loc[i]['x'], df.loc[i]['y'], df.loc[i]['title'], size=8)

plt.show()
# -
| _jupyter/2019-07-03-game-of-thrones-clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WZR1tYwA2o8C"
# # RDL Big Paper Plots
#
# *Licensed under the Apache License, Version 2.0.*
#
# To run this in a public Colab, change the GitHub link: replace github.com with [githubtocolab.com](http://githubtocolab.com).
#
# This colab loads raw measurements from disk and analyzes the results.
#
# ## Choosing optimal hyperparameters
# We automatically detect hyperparameter sweeps by selecting fields that don't correspond to dataset metrics but that have more than one chosen value. We choose the hyperparameters that achieve the best according a given metric (see `dataset_metric`) after averaging over random seeds. For example, if the model is trained on CIFAR-10, we use CIFAR-10's validation loss.
#
# ## Plots
# All plots report the performance of a given model according to its optimal hyperparameters chosen above. When there are runs with multiple seeds, we show the mean and standard deviation.
# + id="h3lFstNIFvry"
from typing import Dict
import itertools
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
from IPython import display
# + [markdown] id="-qpT4rtuEN14"
# ## Functions
# + id="T4JuGWk_t6FW"
#@title Choosing optimal hyperparameters
# The finetuning deterministic jobs use a fixed random seed but different
# upstream checkpoints, which themselves correspond to different random seeds.
# In this case, we thus marginalize over upstream checkpoints
# (`config.model_init`) rather than the random seed.
def is_hparam(col):
  """Return True if `col` names a hyperparameter column.

  Hyperparameters are any 'config.'-prefixed column plus the two special
  columns 'learning_rate' and 'model'.
  """
  if col.startswith('config.'):
    return True
  return col in ('learning_rate', 'model')
def _get_hparams(df, additional_marginalization_hparams: str):
  """Return the swept hyperparameter columns of `df`.

  A column counts as swept when it is a hyperparameter column (see
  is_hparam), takes more than one distinct value, and is not one of the
  marginalized-over columns ('config.seed' plus the optional extra).
  """
  ignored = {'config.seed'}
  if additional_marginalization_hparams:
    ignored.add(additional_marginalization_hparams)
  swept = []
  for column in df.columns:
    if not is_hparam(column):
      continue
    if column in ignored:
      continue
    if len(df[column].unique()) > 1:
      swept.append(column)
  return swept
def _get_best_hparams(df,
                      dataset_metric: Dict[str, str],
                      additional_marginalization_hparams: str):
  """Return the hyperparameter values that minimize the tuning metric.

  Assumes all rows of `df` share one dataset and one model. The metric is
  averaged over the marginalized dimensions (seeds/checkpoints) before
  picking the minimum, so lower-is-better metrics (e.g. validation loss)
  are expected.
  """
  hps = _get_hparams(df, additional_marginalization_hparams)
  # All rows share these, so the first row identifies dataset and model.
  ds = df['config.dataset'].iloc[0]
  model = df['model'].iloc[0]
  metric = dataset_metric[ds]
  print(f'For {model} on {ds}, found {len(hps)} hparams: {hps}')
  hps = hps + ['model'] # To ensure we don't groupby an empty list.
  # Average the metric over seeds, then take the row with the minimum.
  aggregated_loss = df.groupby(hps)[metric].agg('mean').reset_index()
  return aggregated_loss.loc[aggregated_loss[metric].idxmin()][hps]
def _get_optimal_results(df: pd.DataFrame,
                         dataset_metric: Dict[str, str],
                         additional_marginalization_hparams: str):
  """Filter `df` down to the rows matching its best hyperparameters.

  The best setting is chosen by _get_best_hparams; the returned frame
  keeps every row with exactly those values (e.g. multiple random seeds).
  """
  best_hps = _get_best_hparams(
      df,
      dataset_metric=dataset_metric,
      additional_marginalization_hparams=additional_marginalization_hparams)
  print(f'  Best hparams: {dict(best_hps)}')
  selected = df.copy()
  for hp_name, hp_value in best_hps.items():
    selected = selected[selected[hp_name] == hp_value]
  return selected
def get_optimal_results(measurements: Dict[str, pd.DataFrame],
                        dataset_metric: Dict[str, str]):
  """Returns a dataframe, typically with one result per model type.

  A model type may have multiple results that will be averaged over when
  plotting (e.g., random seeds).

  Args:
    measurements: Dictionary of dataframes to obtain best results for.
    dataset_metric: Each dataset's metric to tune for, in the format
      `{dataset: metric}`.
  """
  results = []
  for k, v in measurements.items():
    # Deterministic finetuning jobs fix the seed but vary the upstream
    # checkpoint, so for those models we marginalize over
    # `config.model_init` instead of the seed alone.
    additional_marginalization_hparams = None
    if k in ('Det', 'Det I21K', 'DE'):
      additional_marginalization_hparams = 'config.model_init'

    # Tune hyperparameters separately for each downstream dataset.
    for ds in v['config.dataset'].unique():
      df = v[v['config.dataset'] == ds]
      results.append(
          _get_optimal_results(
              df,
              dataset_metric=dataset_metric,
              additional_marginalization_hparams=additional_marginalization_hparams
          ))
    print()
  return pd.concat(results)
# + id="mMdPlsI12gec"
#@title Obtain reliability score
SPLIT_METRICS = ['loss', 'prec@1', 'ece', 'calib_auc']
IND_METRICS = [f'test_{m}' for m in SPLIT_METRICS]
FEWSHOT_DATASETS = ['imagenet', 'pets', 'birds', 'col_hist', 'cifar100', 'caltech', 'cars', 'dtd', 'uc_merced']
FEWSHOT_METRICS = [
f'z/{ds}_{f}shot' for (ds, f) in itertools.product(
FEWSHOT_DATASETS,
[1, 5, 10, 25])
]
OOD_METRICS = [
f'{ds}_{m}' for (ds, m) in itertools.product(
['cifar_10h', 'imagenet_real'],
['loss', 'prec@1', 'ece', 'calib_auc'])
]
OOD_DETECTION_METRICS = [
f'ood_{ds}_{method}_auroc'
for (ds, method) in itertools.product(
['cifar10', 'cifar100', 'svhn_cropped', 'places365_small'],
# We use just MSP following Jie's recommendation.
['msp'])
# ['entropy', 'maha', 'msp', 'rmaha'])
]
COMPUTE_METRICS = ['exaflops', 'tpu_days', 'gflops', 'ms_step']
METRICS = IND_METRICS + FEWSHOT_METRICS + OOD_METRICS + OOD_DETECTION_METRICS + COMPUTE_METRICS
CATEGORIES = {
'prediction': [
'test_loss',
'test_prec@1',
'cifar_10h_loss',
'cifar_10h_prec@1',
'imagenet_real_loss',
'imagenet_real_prec@1',
],
'uncertainty': [
'cifar_10h_calib_auc',
'cifar_10h_ece',
'imagenet_real_calib_auc',
'imagenet_real_ece',
'ood_cifar100_msp_auroc',
'ood_cifar10_msp_auroc',
'ood_places365_small_msp_auroc',
'ood_svhn_cropped_msp_auroc',
'test_calib_auc',
'test_ece',
],
'adaptation': [
'10shot_prec@1',
'25shot_prec@1',
'5shot_prec@1',
],
}
def preprocess(df,
               split_metrics=SPLIT_METRICS,
               metrics=METRICS,
               compute_metrics=COMPUTE_METRICS,
               fewshot_datasets=FEWSHOT_DATASETS):
  """Pivot raw per-run measurements into a (model x (metric, dataset)) table.

  Args:
    df: Long-format dataframe with one row per run and columns 'model',
      'config.dataset', plus metric columns.
    split_metrics: Metric suffixes that exist in both 'val_' and 'test_' form.
    metrics: Metric columns kept in the pivoted output.
    compute_metrics: Cost metrics collapsed into a single synthetic 'compute'
      dataset column.
    fewshot_datasets: Datasets whose few-shot columns are re-keyed under a
      synthetic 'few-shot <ds>' dataset.

  Returns:
    Dataframe indexed by model with MultiIndex (metric, dataset) columns.
  """
  df = df.copy()
  # Average repeated runs (e.g. seeds) of the same model/dataset pair.
  df = df.groupby(['model', 'config.dataset']).agg('mean').reset_index()
  # Set JFT/I21K upstream #s to the test set reporting since we use them that
  # way.
  for m in split_metrics:
    df.loc[df['config.dataset'] == 'jft/entity:1.0.0', f'test_{m}'] = df.loc[
        df['config.dataset'] == 'jft/entity:1.0.0', f'val_{m}']
    df.loc[df['config.dataset'] == 'imagenet21k', f'test_{m}'] = df.loc[
        df['config.dataset'] == 'imagenet21k', f'val_{m}']
  cols = ['model', 'config.dataset'] + metrics
  df = df[cols].copy()
  df = df.pivot(index='model', columns='config.dataset', values=metrics)
  # Drop columns with all NaNs, e.g., ECE for JFT. They aren't measured.
  df = df.dropna(axis=1, how='all')
  # Set few-shot imagenet metrics under a distinct dataset so later, we can
  # aggregate over few-shot metrics while excluding their original
  # config.dataset (the upstream dataset).
  for ds in fewshot_datasets:
    for f in [1, 5, 10, 25]:
      df[f'{f}shot_prec@1', f'few-shot {ds}'] = df[f'z/{ds}_{f}shot'].mean(axis=1)
      del df[f'z/{ds}_{f}shot']
  # Do same for compute and only keep upstream compute metrics.
  # NOTE(review): the np.where branch reads df[metric, 'compute'], which
  # presumably exists because 'imagenet21k' sorts before 'jft/entity:1.0.0'
  # in the pivoted columns — confirm if column ordering ever changes.
  for metric in compute_metrics:
    for ds in df[metric]:
      if ds == 'imagenet21k':
        df[metric, 'compute'] = df[metric, ds]
      elif ds == 'jft/entity:1.0.0':
        df[metric, 'compute'] = np.where(df[metric, 'compute'].isnull(), df[metric, ds], df[metric, 'compute'])
      del df[metric, ds]
  return df
def compute_score(df, datasets=None, categories=None):
  """Compute aggregate score across metrics and per-category scores.

  Args:
    df: Output of preprocess(): model-indexed dataframe with MultiIndex
      (metric, dataset) columns.
    datasets: Optional list of dataset names; if given, only metrics whose
      dataset suffix is in the list contribute to the overall score.
    categories: Mapping of category name -> metric names for the per-category
      sub-scores. Defaults to CATEGORIES (bound lazily so importing this
      function does not require CATEGORIES to exist yet).

  Returns:
    Dataframe indexed by model with an overall 'score' column (sorted
    descending) plus one 'score_<category>' column per category.
  """
  if categories is None:
    categories = CATEGORIES
  df = df.copy()
  # Scale all metrics in range [0.0, 1.0], and where higher is better.
  # Iterate a snapshot of the columns since compute columns are deleted below.
  for column in list(df.columns):
    metric, dataset = column
    if 'ece' in metric:
      df[column] = 1. - df[column]
    # Cost columns (under the synthetic 'compute' dataset) must not count
    # toward the reliability score. This previously compared the string
    # literals 'dataset' == 'compute' (always False), so the compute columns
    # were never dropped.
    if dataset == 'compute':
      del df[column]
  # Remove 1-shot for now as its #s are unreliable due to high variance.
  del df['1shot_prec@1']
  dataset_classes = {
      'cifar10': 10,
      'cifar100': 100,
      'imagenet2012': 1000,
      'imagenet21k': 21841,
      'jft/entity:1.0.0': 18291,
      'retina': 2,
  }
  for metric, dataset in df[['test_loss', 'cifar_10h_loss', 'imagenet_real_loss']]:
    # Rescale NLL under its bound [0.0, uniform entropy]. Technically I21K &
    # JFT's uniform entropy should be computed on multiclass sigmoid NLL, but
    # unlike categorical uniform, multiclass sigmoid uniform is so large it's
    # meaningless as a bound.
    num_classes = dataset_classes[dataset]
    p = 1./num_classes
    max_value = -num_classes * p * np.log(p)  # = log(num_classes)
    df.loc[:, (metric, dataset)] = 1. - df[metric][dataset] / max_value
  # Flatten multiindexes, e.g. ('test_loss', 'cifar10') -> 'test_loss_cifar10'.
  df.columns = ['_'.join(col).strip() for col in df.columns.values]
  if datasets is not None:
    # Keep only metrics whose dataset suffix is one of `datasets`.
    metrics = [m for m in df.columns if any(d == m.split('_')[-1] for d in datasets)]
    df = df[metrics]
  # Compute the score only for models that have filled in all metrics.
  subset_df = df.dropna(how='any')
  score = subset_df.mean(axis=1) * 100.0
  df_scores = score.sort_values(ascending=False).to_frame(name='score')
  for key, value in categories.items():
    metrics = [m for m in df.columns if '_'.join(m.split('_')[:-1]) in value]
    subset_df = df[metrics]
    subset_df = subset_df.dropna(how='any')
    score = subset_df.mean(axis=1) * 100.0
    df_scores[f'score_{key}'] = score
  return df_scores
def pprint(df, models=None, exclude_models=None):
  """Pretty print dataframe.

  Renders the metric table with per-metric formatting and highlights the best
  value in each row (green background).

  Args:
    df: Dataframe.
    models: Optional list of models to only show. Useful for comparing specific
      models to see which performs better (highlighted cells).
    exclude_models: Optional list of models to exclude.
  """
  def _rename(m):
    # Map raw metric names to human-friendly display names.
    m = m.replace('cifar_10h', 'cifar10h')
    m = m.replace('places365_small', 'places365')
    m = m.replace('_', ' ')
    m = m.replace('cropped ', '')
    m = m.replace('ood', '')
    m = m.replace('ece', 'ECE')
    # Replace 'auroc' before 'auc': 'auc' is a substring of 'auroc', so the
    # previous ordering turned 'auroc' into 'AUCroc' and the 'auroc' rule
    # below it never matched.
    m = m.replace('auroc', 'AUROC')
    m = m.replace('auc', 'AUC')
    m = m.replace('loss', 'NLL')
    return m
  def _formatter(metric):
    # Choose a value formatter based on the (renamed) metric name.
    if any(x in metric for x in ['AUROC', 'AUC']):
      return '{:.2f}'.format
    elif any(x in metric for x in ['prec', 'ECE']):
      return lambda x: '{:.1f}%'.format(x * 100)
    elif any(x in metric for x in ['score', 'exaflops', 'tpu days', 'gflops', 'ms step']):
      return lambda x: '{:.1f}'.format(x)
    elif 'NLL' in metric:
      return '{:.3f}'.format
    else:
      return lambda x: x
  def _highlight(data, color='#90EE90'):
    # Highlight the best value per row: min for NLL/ECE, max otherwise.
    attr = 'background-color: {}'.format(color)
    data = data.replace('%','', regex=True).astype(float)
    if any(x in data.name[1] for x in ['NLL', 'ECE']):
      is_best = data == data.min()
    elif any(x in data.name[1] for x in ['exaflops', 'tpu days', 'gflops', 'ms step']):
      # Always-False comparison: compute-cost rows never get a highlight.
      is_best = data == 'asdf'
    else:
      is_best = data == data.max()
    return [attr if v else '' for v in is_best]
  df = df.copy()
  df = df.rename(columns=_rename)
  for c in df:
    df[c] = df[c].apply(_formatter(c[0]))
  # Swap order of column's multiindex to be dataset first.
  df.columns = df.columns.swaplevel(0, 1)
  df = df.sort_index(axis=1, level=0)
  df = df.T
  if models is not None:
    df = df[[c for c in df.columns if c in models]]
  elif exclude_models is not None:
    df = df[[c for c in df.columns if c not in exclude_models]]
  return display.display(df.style.apply(_highlight, axis=1))
# + [markdown] id="GygKbZFjwiLV"
# ## Load and preprocess measurements
# + id="lFG3GNLoQN3a"
# Authenticate to GCP so gsutil can fetch the raw measurements pickle.
from google.colab import auth
auth.authenticate_user()
project_id = 'marginalization-external-xgcp'
# !gcloud config set project {project_id}
# !gsutil cp gs://ub-checkpoints/big-paper-raw-measurements.pkl /tmp/big-paper-raw-measurements.pkl
path = '/tmp/big-paper-raw-measurements.pkl'
# + id="JcnAzeyWsHu8"
# Load the raw per-run measurements and select, per dataset, the runs that
# are optimal under the validation metric below.
with tf.io.gfile.GFile(path, 'rb') as f:
  raw_measurements = pickle.load(f)
# Per-dataset metric used for hyperparameter selection.
dataset_metric = {
    'cifar10': 'val_loss',
    'cifar100': 'val_loss',
    'imagenet2012': 'val_loss',
    'imagenet21k': 'val_loss',
    'jft/entity:1.0.0': 'val_loss',
    'retina': 'in_domain_validation/auroc',
}
measurements = get_optimal_results(raw_measurements,
                                   dataset_metric=dataset_metric)
# + id="K6Bc5TFN3ZTk"
# Pivot into the (model x (metric, dataset)) table used below.
df = preprocess(measurements)
# + [markdown] id="VuV4-5wzEQiE"
# ## Compute reliability score and generate table
# + id="6kRH3CQGwaQg"
# Datasets that contribute to the overall reliability score.
datasets = [
    'cifar10',
    'cifar100',
    'imagenet2012',
]
datasets += [f'few-shot {d}' for d in FEWSHOT_DATASETS]
scores = compute_score(df, datasets=datasets)
display.display(scores)
# + id="wQS1I1pRD9rm"
# Append the score columns to the metric table and render it.
df_with_scores = df.copy()
for column in scores.columns:
  df_with_scores[column] = scores[column]
pprint(
    df_with_scores,
    models=['BE L/32', 'Det'],
    # exclude_models=['DE', 'Det->DE'],
)
# + id="2RySd-77qMBe"
# Show a subset of the table's metrics + models
metrics = ['score', 'score_prediction', 'score_uncertainty', 'score_adaptation', 'exaflops', 'test_loss', 'tpu_days']
models = ['BE L/32', 'Det', 'GP', 'Het', 'BE L/32 (I21K)', 'Det I21K']
pprint(df_with_scores.loc[models][metrics].rename(columns={'compute': 'z/compute'}))
# + [markdown] id="VBtgUevvEYrh"
# ## Plot reliability score
# + id="GsnOp4CVambb"
import colabtools.fileedit
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.reset_orig()
# High-DPI figures suitable for the paper.
matplotlib.rcParams['figure.dpi'] = 1000
matplotlib.rcParams['lines.linewidth'] = 1.25
# sns.set_style("whitegrid")
sns.set()
# + id="C5qvJhS_YWwE"
def pareto_plot(df, x, y, ax, filename=None, **kwargs):
  """Scatter models on (x, y), trace the Pareto frontier, optionally save.

  Assumes higher y is better, plots x on a log scale, and labels each model
  next to its marker. If `filename` is given, the figure is saved and
  downloaded from colab. Extra kwargs are forwarded to `ax.set`.
  """

  def _dominated(candidate, points, higher_is_better):
    # A point is dominated when another point is no worse on x and strictly
    # better on y (strictly worse y when lower is better).
    if higher_is_better:
      return any(q[0] <= candidate[0] and q[1] > candidate[1] for q in points)
    return any(q[0] <= candidate[0] and q[1] < candidate[1] for q in points)

  def _frontier(xs, ys, higher_is_better=True):
    pts = list(zip(xs, ys))
    keep = [p for p in pts if not _dominated(p, pts, higher_is_better)]
    keep.sort(key=lambda p: p[0])
    return keep

  # Annotate every model next to its point.
  for model, row in df.iterrows():
    ax.annotate(' ' + model, xy=(row[x], row[y]), ha='left', va='bottom')
  sns.scatterplot(x=df[x], y=df[y], ax=ax)
  frontier_x, frontier_y = zip(*_frontier(df[x], df[y]))
  sns.lineplot(x=frontier_x, y=frontier_y, linestyle='--', ax=ax)
  ax.set(xscale='log', **kwargs)
  if filename is not None:
    plt.tight_layout()
    plt.savefig(filename)
    colabtools.fileedit.download_file(filename)
# Overall reliability-score Pareto plot, restricted to models whose name
# starts with 'BE' (BatchEnsemble variants).
fig, ax = plt.subplots(figsize=(10.0, 5.0))
pareto_plot(
    df_with_scores[[x.startswith('BE') for x in df_with_scores.index.values]],
    ax=ax,
    y='score',
    x=('tpu_days', 'compute'),
    xlabel='Compute (TPUv3 core days)',
    ylabel='Reliability Score',
    filename='reliability.png',
)
# Per-component breakdown: prediction / uncertainty / adaptation sub-scores.
fig, axes = plt.subplots(1, 3, figsize=(3.5 * 3, 3.5))
pareto_plot(
    df_with_scores[[x.startswith('BE') for x in df_with_scores.index.values]],
    ax=axes[0],
    y='score_prediction',
    x=('tpu_days', 'compute'),
    xlabel=None,
    ylabel=None,
    title='Reliability Score (Prediction)',
)
pareto_plot(
    df_with_scores[[x.startswith('BE') for x in df_with_scores.index.values]],
    ax=axes[1],
    y='score_uncertainty',
    x=('tpu_days', 'compute'),
    xlabel=None,
    ylabel=None,
    title='Reliability Score (Uncertainty)',
)
pareto_plot(
    df_with_scores[[x.startswith('BE') for x in df_with_scores.index.values]],
    ax=axes[2],
    y='score_adaptation',
    x=('tpu_days', 'compute'),
    xlabel=None,
    ylabel=None,
    title='Reliability Score (Adaptation)',
)
filename = 'reliability_components.png'
plt.tight_layout()
plt.savefig(filename)
colabtools.fileedit.download_file(filename)
# + [markdown] id="Oj2HLvvlEg46"
# ## Analyze correlation of metrics
# + id="TIaSlPrx8JLB"
# Recompute the table including *training* metrics so we can correlate them
# with the reliability scores.
temp_df = preprocess(
    measurements,
    metrics=METRICS + ['training_loss', 'training_prec@1'])
datasets = [
    'cifar10',
    'cifar100',
    'imagenet2012',
]
datasets += [f'few-shot {d}' for d in FEWSHOT_DATASETS]
temp_scores = compute_score(temp_df, datasets=datasets)
for column in temp_scores.columns:
  temp_df[column] = temp_scores[column]
# scores correlation matrix
columns = ['score', 'score_prediction', 'score_uncertainty', 'score_adaptation']
corr_matrix = temp_df[columns]
corr_matrix.columns = [''.join(col) for col in corr_matrix.columns.values]
corr_matrix = corr_matrix.corr()
display.display(corr_matrix)
# upstream test metrics
metrics = ['score', 'score_prediction', 'score_uncertainty', 'score_adaptation']
corr_matrix = temp_df.corr()[['test_loss', 'test_prec@1']].T.xs(
    'jft/entity:1.0.0', level='config.dataset')
corr_matrix = corr_matrix[metrics]
corr_matrix.columns = [''.join(col) for col in corr_matrix.columns.values]
display.display(corr_matrix)
# imagenet 10-shot. It doesn't correlate well with reliability, mostly due to
# it not correlating well surprisingly on other few-shot tasks.
corr_matrix = temp_df.corr()[['10shot_prec@1']].T.xs(
    'few-shot imagenet', level='config.dataset')
corr_matrix = corr_matrix[metrics]
corr_matrix.columns = [''.join(col) for col in corr_matrix.columns.values]
display.display(corr_matrix)
# downstream training loss. The correlation is not nearly as tight as on
# upstream.
corr_matrix = temp_df.corr()[['training_loss']].T
corr_matrix = corr_matrix[metrics + ['test_loss']]
corr_matrix = corr_matrix.drop(index=('training_loss', 'retina'))
corr_matrix = corr_matrix.drop(index=('training_loss', 'imagenet21k'))
corr_matrix = corr_matrix.drop(columns=('test_loss', 'imagenet21k'))
# Display test loss only for training loss' same downstream dataset. Looking at
# cifar10's train loss correlation with I1K's test loss isn't meaningful.
test_loss = pd.Series(np.diag(corr_matrix['test_loss']),
                      index=corr_matrix['test_loss'].index)
corr_matrix = corr_matrix.drop(columns='test_loss')
corr_matrix['test_loss'] = test_loss
corr_matrix.columns = [''.join(col) for col in corr_matrix.columns.values]
display.display(corr_matrix)
# Similar to old plot in go/rdl-big-meeting, even generalization gap decreases.
# And downstream is not very indicative, but upstream is.
# reg_loss = generalization gap (test NLL minus training NLL) per dataset.
temp_df2 = temp_df.copy()
for d in temp_df2['test_loss'].columns:
  temp_df2['reg_loss', d] = temp_df2['test_loss', d] - temp_df2['training_loss', d]
corr_matrix = temp_df2.corr()[['reg_loss']].T
corr_matrix = corr_matrix[metrics + ['training_loss']]
corr_matrix = corr_matrix.drop(index=('reg_loss', 'imagenet21k'))
display.display(corr_matrix)
# + id="-_ShrtSoVQwh"
# Correlation of every metric with upstream (JFT) test metrics, averaged per
# generic metric name and plotted as a bar chart.
corr_matrix = temp_df.corr()[['test_loss', 'test_prec@1', 'training_loss']].T.xs('jft/entity:1.0.0', level='config.dataset')
# Rename certain task metrics to be under their generic metric name. This way,
# we can average values across that metric.
corr_matrix.columns = corr_matrix.columns.values
# Iterating the renamed frame yields its (renamed) column tuples.
corr_matrix.columns = pd.MultiIndex.from_tuples(corr_matrix.rename(columns={
    ('imagenet_real_calib_auc', 'imagenet2012'): ('test_calib_auc', 'imagenet_real'),
    ('imagenet_real_ece', 'imagenet2012'): ('test_ece', 'imagenet_real'),
    ('imagenet_real_loss', 'imagenet2012'): ('test_loss', 'imagenet_real'),
    ('imagenet_real_prec@1', 'imagenet2012'): ('test_prec@1', 'imagenet_real'),
    ('cifar_10h_calib_auc', 'cifar10'): ('test_calib_auc', 'cifar_10h'),
    ('cifar_10h_ece', 'cifar10'): ('test_ece', 'cifar_10h'),
    ('cifar_10h_loss', 'cifar10'): ('test_loss', 'cifar_10h'),
    ('cifar_10h_prec@1', 'cifar10'): ('test_prec@1', 'cifar_10h'),
    ('ood_cifar100_msp_auroc', 'cifar10'): ('msp_auroc', 'cifar10->cifar100'),
    ('ood_cifar10_msp_auroc', 'cifar100'): ('msp_auroc', 'cifar100->cifar10'),
    ('ood_places365_small_msp_auroc', 'imagenet2012'): ('msp_auroc', 'imagenet2012->places365'),
    ('ood_svhn_cropped_msp_auroc', 'cifar10'): ('msp_auroc', 'cifar10->svhn'),
    ('ood_svhn_cropped_msp_auroc', 'cifar100'): ('msp_auroc', 'cifar100->svhn'),
}))
corr_matrix = corr_matrix.sort_index(axis=1)
corr_matrix = corr_matrix.mean(level=0, axis='columns')
corr_matrix = abs(corr_matrix)
corr_matrix = corr_matrix.reindex(
    corr_matrix.mean().sort_values().index, axis=1)
# Exclude cost metrics and the scores themselves from the bar chart.
for metric in corr_matrix.columns:
  if metric in COMPUTE_METRICS or metric.startswith('score'):
    del corr_matrix[metric]
corr_matrix = corr_matrix.T.reset_index()
fig, ax = plt.subplots(figsize=(20.0, 5.0))
sns.barplot(x='index', y='test_loss', data=corr_matrix)
ax.set(xlabel=None)
ax.set(ylabel=r'$\rho(\cdot,$ test_loss)')
filename = 'correlation.png'
plt.tight_layout()
plt.savefig(filename)
colabtools.fileedit.download_file(filename)
# + [markdown] id="7MtTqEW-yu7B"
# # Plotting helpers
# + id="5wq6YIte9iIm"
#@title Bar plots
def plot_in_distribution(df, train_dataset, split):
  """Bar-plot loss/prec@1/ECE/calib AUC per model on one dataset split."""
  subset = df[df['config.dataset'] == train_dataset].copy()
  wanted = [f'{split}_{name}' for name in ['loss', 'prec@1', 'ece', 'calib_auc']]
  # Melt to long form: one row per (model, metric) for faceted bars.
  long_form = subset[['model'] + wanted].melt(
      id_vars='model', var_name='metric', value_name='value')
  sns.catplot(
      col='metric', data=long_form, y='value', kind='bar', sharey=False,
      x='model')
def plot_ood(df, train_dataset):
  """Bar-plot OOD-detection AUROC per model, method, and OOD dataset."""
  subset = df[df['config.dataset'] == train_dataset].copy()
  if train_dataset == 'imagenet2012':
    ood_datasets = {'places365_small'}
    methods = ['msp', 'entropy', 'mlogit']
  else:
    # CIFAR models: the OOD sets are the other small benchmarks.
    ood_datasets = set(['svhn_cropped', 'cifar100', 'cifar10']) - {train_dataset}
    methods = ['msp', 'entropy', 'mlogit', 'maha', 'rmaha']
  candidates = [
      f'ood_{ds}_{m}_auroc'
      for (ds, m) in itertools.product(ood_datasets, methods)
  ]
  # Keep only the AUROC columns actually present in the measurements.
  present = list(set(candidates).intersection(subset.columns))
  long_form = subset[['model'] + present].melt(
      id_vars='model', var_name='metric', value_name='AUROC')
  long_form['dataset'] = long_form['metric'].apply(lambda x: x.split('_')[1])
  long_form['metric'] = long_form['metric'].apply(lambda x: x.split('_')[-2])
  sns.catplot(
      data=long_form, x='metric', y='AUROC', hue='model', kind='bar',
      col='dataset')
  plt.ylim((0.5, 1))
def plot_corrupted(df, train_dataset):
  """Bar-plot metrics on the relabeled sets (CIFAR-10H / ImageNet-ReaL)."""
  subset = df[df['config.dataset'] == train_dataset].copy()
  relabeled = 'imagenet_real' if train_dataset == 'imagenet2012' else 'cifar_10h'
  wanted = [f'{relabeled}_{m}' for m in ['loss', 'prec@1', 'ece', 'calib_auc']]
  long_form = subset[['model'] + wanted].melt(
      id_vars='model', var_name='metric', value_name='value')
  sns.catplot(
      col='metric', data=long_form, y='value', kind='bar', sharey=False,
      x='model')
# + id="pB9ohPCduqdy"
#@title Pareto plots
def is_on_pareto_front(p, points, higher_is_better):
  """Return True iff no point in `points` dominates `p`.

  A point dominates `p` when its first coordinate is no larger than `p`'s and
  its second coordinate is strictly better (larger when `higher_is_better`,
  smaller otherwise).
  """
  if higher_is_better:
    dominated = any(q[0] <= p[0] and q[1] > p[1] for q in points)
  else:
    dominated = any(q[0] <= p[0] and q[1] < p[1] for q in points)
  return not dominated
def get_pareto_points(x, y, higher_is_better):
  """Return the Pareto-optimal (x, y) pairs, sorted by x ascending."""
  candidates = list(zip(x, y))
  front = [
      p for p in candidates
      if is_on_pareto_front(p, candidates, higher_is_better)
  ]
  front.sort(key=lambda pt: pt[0])
  return front
def plot_fn(data, x, y, **kws):
  # Intended for seaborn FacetGrid.map_dataframe: draws one facet with a
  # scatter of models, per-point model labels, and the Pareto frontier.
  ax = plt.gca()
  sns.scatterplot(data=data, x=x, y=y, hue='model')
  for _, point in data.iterrows():
    ann = ax.annotate(
        ' ' + point['model'],
        xy=(point[x], point[y]),
        ha='left',
        va='bottom',
    )
  # Frontier direction depends on the metric: precision/AUC are
  # higher-is-better; losses/ECE are lower-is-better.
  metric = data['metric'].iloc[0]
  higher_is_better = 'prec' in metric or 'auc' in metric
  pareto_frontier = get_pareto_points(
      data[x], data[y], higher_is_better=higher_is_better)
  xx, yy = zip(*pareto_frontier)
  sns.lineplot(x=xx, y=yy, linestyle='--')
def pareto_plot(df, metrics, train_dataset=None,
                xmetric='num_params', xlabel='Log # Params'):
  """Facet-per-metric Pareto plot of `metrics` vs. `xmetric` on one dataset.

  Args:
    df: Long-format measurements with 'model', 'config.dataset', `xmetric`,
      and the requested metric columns.
    metrics: Metric column names; one facet is drawn per metric.
    train_dataset: Dataset to filter the rows to.
    xmetric: Column plotted on the (log-scaled) x axis.
    xlabel: Label for the x axis.
  """
  df = df[df['config.dataset'] == train_dataset].copy()
  # Average repeated runs of the same model before plotting.
  df = df.groupby(['model', 'config.dataset', xmetric]
                  )[metrics].apply(np.mean).reset_index()
  df = df.melt(
      id_vars=['model', 'config.dataset', xmetric],
      var_name='metric',
      value_name='value')
  # `height` replaces the `size` parameter, which was deprecated in seaborn
  # 0.9 (the version that introduced catplot, used elsewhere here) and later
  # removed.
  g = sns.FacetGrid(data=df, col='metric', sharey=False, height=5)
  g.map_dataframe(plot_fn, x=xmetric, y='value')
  g.set_xlabels(xlabel)
  g.set(xscale='log')
# + [markdown] id="cBld21j5yx4I"
# # Results
# + id="186VNwRThIhE"
#@title Upstream JFT
# Per-model bars and Pareto plots for the upstream (JFT) validation metrics.
# NOTE: pareto_plot returns None, so the `g =` assignments are vestigial.
df = measurements.copy()
df = df[df['config.dataset'] == 'jft/entity:1.0.0']
df = df[['model', 'val_loss', 'val_prec@1', 'a/imagenet_10shot']].melt(
    id_vars='model', var_name='metric', value_name='value')
sns.catplot(
    col='metric', data=df, x='model', y='value', kind='bar', sharey=False)
g = pareto_plot(
    measurements,
    train_dataset='jft/entity:1.0.0',
    metrics=['val_loss', 'val_prec@1', 'a/imagenet_10shot'],
)
g = pareto_plot(
    measurements,
    train_dataset='jft/entity:1.0.0',
    metrics=['val_loss', 'val_prec@1', 'a/imagenet_10shot'],
    xmetric='tpu_days',
    xlabel='Compute (TPUv3 core days)',
)
# + [markdown] id="xl7rOkhsuFm0"
# ## Cifar 10
# + id="mzz9lwisoKL3"
#@title In-distribution
plot_in_distribution(measurements, train_dataset='cifar10', split='test')
g = pareto_plot(
    measurements,
    train_dataset='cifar10',
    metrics=['test_loss', 'test_prec@1', 'test_ece', 'test_calib_auc'])
# + id="Oote6WoS_QOd"
#@title Cifar10h
plot_corrupted(measurements, train_dataset='cifar10')
g = pareto_plot(
    measurements,
    train_dataset='cifar10',
    metrics=['cifar_10h_loss', 'cifar_10h_prec@1', 'cifar_10h_ece', 'cifar_10h_calib_auc'])
# + id="A0Afa3nr-8ri"
#@title OOD
plot_ood(measurements, train_dataset='cifar10')
# + [markdown] id="eZ2bY0aPlQ5e"
# ## Cifar100
# + id="-Q_wtXR-9CU0"
#@title In-distribution
plot_in_distribution(measurements, train_dataset='cifar100', split='test')
g = pareto_plot(
    measurements,
    train_dataset='cifar100',
    metrics=['test_loss', 'test_prec@1', 'test_ece', 'test_calib_auc'])
# + cellView="form" id="j8jKADGx_ZyV"
#@title OOD
plot_ood(measurements, train_dataset='cifar100')
# + [markdown] id="Kd4Ub0YclSmZ"
# ## Imagenet
# + id="Gokyk90V_nYw"
#@title In-distribution
plot_in_distribution(measurements, train_dataset='imagenet2012', split='test')
g = pareto_plot(
    measurements,
    train_dataset='imagenet2012',
    metrics=['test_loss', 'test_prec@1', 'test_ece', 'test_calib_auc'])
# + id="tcx57p3a_r7d"
#@title Imagenet Real
plot_corrupted(measurements, train_dataset='imagenet2012')
g = pareto_plot(
    measurements,
    train_dataset='imagenet2012',
    metrics=[
        'imagenet_real_loss', 'imagenet_real_prec@1', 'imagenet_real_ece',
        'imagenet_real_calib_auc'
    ])
| experimental/big_paper/plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../images/banners/python-advanced.png" width="600"/>
# # <img src="../../images/logos/python.png" width="23"/> Effective Python Testing With Pytest
#
# ## <img src="../../images/logos/toc.png" width="20"/> Table of Contents
# * [How to Install `pytest`](#how_to_install_`pytest`)
# * [What Makes `pytest` So Useful?](#what_makes_`pytest`_so_useful?)
# * [Less Boilerplate](#less_boilerplate)
# * [State and Dependency Management](#state_and_dependency_management)
# * [Test Filtering](#test_filtering)
# * [Test Parametrization](#test_parametrization)
# * [Plugin-Based Architecture](#plugin-based_architecture)
# * [Fixtures: Managing State and Dependencies](#fixtures:_managing_state_and_dependencies)
# * [When to Create Fixtures](#when_to_create_fixtures)
# * [When to Avoid Fixtures](#when_to_avoid_fixtures)
# * [Fixtures at Scale](#fixtures_at_scale)
# * [Marks: Categorizing Tests](#marks:_categorizing_tests)
# * [Parametrization: Combining Tests](#parametrization:_combining_tests)
# * [Durations Reports: Fighting Slow Tests](#durations_reports:_fighting_slow_tests)
# * [Useful `pytest` Plugins](#useful_`pytest`_plugins)
# * [`pytest-randomly`](#`pytest-randomly`)
# * [`pytest-cov`](#`pytest-cov`)
# * [`pytest-django`](#`pytest-django`)
# * [`pytest-bdd`](#`pytest-bdd`)
# * [Conclusion](#conclusion)
#
# ---
# [Testing your code](https://realpython.com/python-testing/) brings a wide variety of benefits. It increases your confidence that the code behaves as you expect and ensures that changes to your code won’t cause regressions. Writing and maintaining tests is hard work, so you should leverage all the tools at your disposal to make it as painless as possible. [`pytest`](https://docs.pytest.org/) is one of the best tools you can use to boost your testing productivity.
# **In this tutorial, you’ll learn:**
# - What **benefits** `pytest` offers
# - How to ensure your tests are **stateless**
# - How to make repetitious tests more **comprehensible**
# - How to run **subsets** of tests by name or custom groups
# - How to create and maintain **reusable** testing utilities
#
# <a class="anchor" id="how_to_install_`pytest`"></a>
#
# ## How to Install `pytest`
# To follow along with some of the examples in this tutorial, you’ll need to install `pytest`. As with most [Python packages](https://realpython.com/python-modules-packages/), you can install `pytest` in a [virtual environment](https://realpython.com/python-virtual-environments-a-primer/) from [PyPI](https://realpython.com/pypi-publish-python-package/) using [`pip`](https://realpython.com/what-is-pip/):
# ```sh
# $ python -m pip install pytest
# ```
# The `pytest` command will now be available in your installation environment.
# <a class="anchor" id="what_makes_`pytest`_so_useful?"></a>
#
# ## What Makes `pytest` So Useful?
# If you’ve written unit tests for your Python code before, then you may have used Python’s built-in **`unittest`** module. `unittest` provides a solid base on which to build your test suite, but it has a few shortcomings.
# A number of third-party testing frameworks attempt to address some of the issues with `unittest`, and [`pytest` has proven to be one of the most popular](https://realpython.com/courses/test-driven-development-pytest/). `pytest` is a feature-rich, plugin-based ecosystem for testing your Python code.
# If you haven’t had the pleasure of using `pytest` yet, then you’re in for a treat! Its philosophy and features will make your testing experience more productive and enjoyable. With `pytest`, common tasks require less code and advanced tasks can be achieved through a variety of time-saving commands and plugins. It will even run your existing tests out of the box, including those written with `unittest`.
# As with most frameworks, some development patterns that make sense when you first start using `pytest` can start causing pains as your test suite grows. This tutorial will help you understand some of the tools `pytest` provides to keep your testing efficient and effective even as it scales.
# <a class="anchor" id="less_boilerplate"></a>
#
# ### Less Boilerplate
# Most functional tests follow the Arrange-Act-Assert model:
# 1. **Arrange**, or set up, the conditions for the test
# 2. **Act** by calling some function or method
# 3. **Assert** that some end condition is true
#
# Testing frameworks typically hook into your test’s [assertions](https://realpython.com/lessons/assertions-and-tryexcept/) so that they can provide information when an assertion fails. `unittest`, for example, provides a number of helpful assertion utilities out of the box. However, even a small set of tests requires a fair amount of [boilerplate code](https://en.wikipedia.org/wiki/Boilerplate_code).
# Imagine you’d like to write a test suite just to make sure `unittest` is working properly in your project. You might want to write one test that always passes and one that always fails:
# +
# test_with_unittest.py
from unittest import TestCase
class TryTesting(TestCase):
    """Minimal unittest suite: one test that always passes, one that fails."""

    def test_always_passes(self):
        # assertTrue(True) can never fail.
        self.assertTrue(True)

    def test_always_fails(self):
        # assertTrue(False) always raises AssertionError.
        self.assertTrue(False)
# -
# You can then run those tests from the command line using the `discover` option of `unittest`:
# ```sh
# $ python -m unittest discover
# F.
# ============================================================
# FAIL: test_always_fails (test_with_unittest.TryTesting)
# ------------------------------------------------------------
# Traceback (most recent call last):
# File "/.../test_with_unittest.py", line 9, in test_always_fails
# self.assertTrue(False)
# AssertionError: False is not True
#
# ------------------------------------------------------------
# Ran 2 tests in 0.001s
#
# FAILED (failures=1)
# ```
# As expected, one test passed and one failed. You’ve proven that `unittest` is working, but look at what you had to do:
# 1. Import the `TestCase` class from `unittest`
# 2. Create `TryTesting`, a [subclass](https://realpython.com/python3-object-oriented-programming/) of `TestCase`
# 3. Write a method in `TryTesting` for each test
# 4. Use one of the `self.assert*` methods from `unittest.TestCase` to make assertions
#
# That’s a significant amount of code to write, and because it’s the minimum you need for *any* test, you’d end up writing the same code over and over. `pytest` simplifies this workflow by allowing you to use Python’s `assert` keyword directly:
# +
# test_with_pytest.py
def test_always_passes():
    # A bare `assert True` can never fail, so pytest reports this as passing.
    assert True
def test_always_fails():
    # `assert False` always fails, demonstrating pytest's failure report.
    assert False
# -
# That’s it. You don’t have to deal with any imports or classes. Because you can use the `assert` keyword, you don’t need to learn or remember all the different `self.assert*` methods in `unittest`, either. If you can write an expression that you expect to evaluate to `True`, then `pytest` will test it for you. You can run it using the `pytest` command:
# ```sh
# $ pytest
# ================== test session starts =============================
# platform darwin -- Python 3.7.3, pytest-5.3.0, py-1.8.0, pluggy-0.13.0
# rootdir: /.../effective-python-testing-with-pytest
# collected 2 items
#
# test_with_pytest.py .F [100%]
#
# ======================== FAILURES ==================================
# ___________________ test_always_fails ______________________________
#
# def test_always_fails():
# > assert False
# E assert False
#
# test_with_pytest.py:5: AssertionError
# ============== 1 failed, 1 passed in 0.07s =========================
# ```
# `pytest` presents the test results differently than `unittest`. The report shows:
# 1. The system state, including which versions of Python, `pytest`, and any plugins you have installed
# 2. The `rootdir`, or the directory to search under for configuration and tests
# 3. The number of tests the runner discovered
#
# The output then indicates the status of each test using a syntax similar to `unittest`:
# - **A dot (`.`)** means that the test passed.
# - **An `F`** means that the test has failed.
# - **An `E`** means that the test raised an unexpected exception.
#
# For tests that fail, the report gives a detailed breakdown of the failure. In the example above, the test failed because `assert False` always fails. Finally, the report gives an overall status report of the test suite.
# Here are a few more quick assertion examples:
# +
def test_uppercase():
    # str.upper() uppercases every character.
    assert "loud noises".upper() == "LOUD NOISES"

def test_reversed():
    # reversed() returns an iterator, so materialize it with list().
    assert list(reversed([1, 2, 3, 4])) == [4, 3, 2, 1]

def test_some_primes():
    # Membership test against a set comprehension of the primes below 50.
    assert 37 in {
        num
        for num in range(1, 50)
        if num != 1 and not any([num % div == 0 for div in range(2, num)])
    }
# -
# The learning curve for `pytest` is shallower than it is for `unittest` because you don’t need to learn new constructs for most tests. Also, the use of `assert`, which you may have used before in your implementation code, makes your tests more understandable.
# <a class="anchor" id="state_and_dependency_management"></a>
#
# ### State and Dependency Management
# Your tests will often depend on pieces of data or [test doubles](https://en.wikipedia.org/wiki/Test_double) for some of the objects in your code. In `unittest`, you might extract these dependencies into `setUp()` and `tearDown()` methods so each test in the class can make use of them. But in doing so, you may inadvertently make the test’s dependence on a particular piece of data or object entirely **implicit**.
# Over time, implicit dependencies can lead to a complex tangle of code that you have to unwind to make sense of your tests. Tests should help you make your code more understandable. If the tests themselves are difficult to understand, then you may be in trouble!
# `pytest` takes a different approach. It leads you toward **explicit** dependency declarations that are still reusable thanks to the availability of [fixtures](https://docs.pytest.org/en/latest/fixture.html). `pytest` fixtures are functions that create data or test doubles or initialize some system state for the test suite. Any test that wants to use a fixture must explicitly accept it as an argument, so dependencies are always stated up front.
# Fixtures can also make use of other fixtures, again by declaring them explicitly as dependencies. That means that, over time, your fixtures can become bulky and modular. Although the ability to insert fixtures into other fixtures provides enormous flexibility, it can also make managing dependencies more challenging as your test suite grows. Later in this tutorial, you’ll learn [more about fixtures](#fixtures-managing-state-and-dependencies) and try a few techniques for handling these challenges.
# <a class="anchor" id="test_filtering"></a>
#
# ### Test Filtering
# As your test suite grows, you may find that you want to run just a few tests on a feature and save the full suite for later. `pytest` provides a few ways of doing this:
# - **Name-based filtering**: You can limit `pytest` to running only those tests whose fully qualified names match a particular expression. You can do this with the `-k` parameter.
# - **Directory scoping**: By default, `pytest` will run only those tests that are in or under the current directory.
# - **Test categorization**: `pytest` can include or exclude tests from particular categories that you define. You can do this with the `-m` parameter.
#
# Test categorization in particular is a subtly powerful tool. `pytest` enables you to create **marks**, or custom labels, for any test you like. A test may have multiple labels, and you can use them for granular control over which tests to run. Later in this tutorial, you’ll see an example of [how `pytest` marks work](#marks-categorizing-tests) and learn how to make use of them in a large test suite.
# <a class="anchor" id="test_parametrization"></a>
#
# ### Test Parametrization
# When you’re testing functions that process data or perform generic transformations, you’ll find yourself writing many similar tests. They may differ only in the [input or output](https://realpython.com/python-input-output/) of the code being tested. This requires duplicating test code, and doing so can sometimes obscure the behavior you’re trying to test.
# `unittest` offers a way of collecting several tests into one, but they don’t show up as individual tests in result reports. If one test fails and the rest pass, then the entire group will still return a single failing result. `pytest` offers its own solution in which each test can pass or fail independently. You’ll see [how to parametrize tests](#parametrization-combining-tests) with `pytest` later in this tutorial.
# <a class="anchor" id="plugin-based_architecture"></a>
#
# ### Plugin-Based Architecture
# One of the most beautiful features of `pytest` is its openness to customization and new features. Almost every piece of the program can be cracked open and changed. As a result, `pytest` users have developed a rich ecosystem of helpful plugins.
# Although some `pytest` plugins focus on specific frameworks like [Django](https://www.djangoproject.com/), others are applicable to most test suites. You’ll see [details on some specific plugins](#useful-pytest-plugins) later in this tutorial.
# <a class="anchor" id="fixtures:_managing_state_and_dependencies"></a>
#
# ## Fixtures: Managing State and Dependencies
# `pytest` fixtures are a way of providing data, test doubles, or state setup to your tests. Fixtures are functions that can return a wide range of values. Each test that depends on a fixture must explicitly accept that fixture as an argument.
# <a class="anchor" id="when_to_create_fixtures"></a>
#
# ### When to Create Fixtures
# Imagine you’re writing a function, `format_data_for_display()`, to process the data returned by an API endpoint. The data represents a list of people, each with a given name, family name, and job title. The function should output a list of strings that include each person’s full name (their `given_name` followed by their `family_name`), a colon, and their `title`. To test this, you might write the following code:
# +
def format_data_for_display(people):
    """Render each person dict as '<given_name> <family_name>: <title>'.

    people: list of dicts with keys 'given_name', 'family_name', 'title'.
    Returns a list of formatted strings, one per person, in input order.
    (Implements the placeholder the tutorial asks the reader to fill in.)
    """
    return [
        f"{person['given_name']} {person['family_name']}: {person['title']}"
        for person in people
    ]
def test_format_data_for_display():
    """Pin the 'given family: title' contract described in the prose above."""
    people = [
        {
            "given_name": "Alfonsa",
            "family_name": "Ruiz",
            "title": "Senior Software Engineer",
        },
        {
            "given_name": "Sayid",
            "family_name": "Khan",
            "title": "Project Manager",
        },
    ]
    # FIX: the expected names had been replaced by '<NAME>' placeholders;
    # restored from the spec (given_name, family_name, colon, title).
    assert format_data_for_display(people) == [
        "Alfonsa Ruiz: Senior Software Engineer",
        "Sayid Khan: Project Manager",
    ]
# -
# Now suppose you need to write another function to transform the data into comma-separated values for use in [Excel](https://realpython.com/openpyxl-excel-spreadsheets-python/). The test would look awfully similar:
# +
def format_data_for_excel(people):
... # Implement this!
def test_format_data_for_excel():
    # Same two-person payload as the display test; expected output is CSV text
    # with a header row and a trailing newline.
    people = [
        {
            "given_name": "Alfonsa",
            "family_name": "Ruiz",
            "title": "Senior Software Engineer",
        },
        {
            "given_name": "Sayid",
            "family_name": "Khan",
            "title": "Project Manager",
        },
    ]
    assert format_data_for_excel(people) == """given,family,title
Alfonsa,Ruiz,Senior Software Engineer
Sayid,Khan,Project Manager
"""
# -
# If you find yourself writing several tests that all make use of the same underlying test data, then a fixture may be in your future. You can pull the repeated data into a single function decorated with `@pytest.fixture` to indicate that the function is a `pytest` fixture:
# +
import pytest
@pytest.fixture
def example_people_data():
    """Canonical two-person API payload shared by the formatting tests."""
    return [
        {
            "given_name": "Alfonsa",
            "family_name": "Ruiz",
            "title": "Senior Software Engineer",
        },
        {
            "given_name": "Sayid",
            "family_name": "Khan",
            "title": "Project Manager",
        },
    ]
# -
# You can use the fixture by adding it as an argument to your tests. Its value will be the return value of the fixture function:
# +
def test_format_data_for_display(example_people_data):
    # FIX: expected names had been replaced by '<NAME>' placeholders; restored
    # from the fixture data ('given_name family_name: title').
    assert format_data_for_display(example_people_data) == [
        "Alfonsa Ruiz: Senior Software Engineer",
        "Sayid Khan: Project Manager",
    ]
def test_format_data_for_excel(example_people_data):
    assert format_data_for_excel(example_people_data) == """given,family,title
Alfonsa,Ruiz,Senior Software Engineer
Sayid,Khan,Project Manager
"""
# -
# Each test is now notably shorter but still has a clear path back to the data it depends on. Be sure to name your fixture something specific. That way, you can quickly determine if you want to use it when writing new tests in the future!
# <a class="anchor" id="when_to_avoid_fixtures"></a>
#
# ### When to Avoid Fixtures
# Fixtures are great for extracting data or objects that you use across multiple tests. They aren’t always as good for tests that require slight variations in the data. Littering your test suite with fixtures is no better than littering it with plain data or objects. It might even be worse because of the added layer of indirection.
# As with most abstractions, it takes some practice and thought to find the right level of fixture use.
# <a class="anchor" id="fixtures_at_scale"></a>
#
# ### Fixtures at Scale
# As you extract more fixtures from your tests, you might see that some fixtures could benefit from further extraction. Fixtures are **modular**, so they can depend on other fixtures. You may find that fixtures in two separate test modules share a common dependency. What can you do in this case?
# You can move fixtures from test [modules](https://realpython.com/python-modules-packages/) into more general fixture-related modules. That way, you can import them back into any test modules that need them. This is a good approach when you find yourself using a fixture repeatedly throughout your project.
# `pytest` looks for `conftest.py` modules throughout the directory structure. Each `conftest.py` provides configuration for the file tree `pytest` finds it in. You can use any fixtures that are defined in a particular `conftest.py` throughout the file’s parent directory and in any subdirectories. This is a great place to put your most widely used fixtures.
# Another interesting use case for fixtures is in guarding access to resources. Imagine that you’ve written a test suite for code that deals with [API calls](https://realpython.com/api-integration-in-python/). You want to ensure that the test suite doesn’t make any real network calls, even if a test accidentally executes the real network call code. `pytest` provides a [`monkeypatch`](https://docs.pytest.org/en/latest/monkeypatch.html) fixture to replace values and behaviors, which you can use to great effect:
# +
# conftest.py
import pytest
import requests
@pytest.fixture(autouse=True)
def disable_network_calls(monkeypatch):
def stunted_get():
raise RuntimeError("Network access not allowed during testing!")
monkeypatch.setattr(requests, "get", lambda *args, **kwargs: stunted_get())
# -
# By placing `disable_network_calls()` in `conftest.py` and adding the `autouse=True` option, you ensure that network calls will be disabled in every test across the suite. Any test that executes code calling `requests.get()` will raise a `RuntimeError` indicating that an unexpected network call would have occurred.
# <a class="anchor" id="marks:_categorizing_tests"></a>
#
# ## Marks: Categorizing Tests
# In any large test suite, some of the tests will inevitably be slow. They might test timeout behavior, for example, or they might exercise a broad area of the code. Whatever the reason, it would be nice to avoid running *all* the slow tests when you’re trying to iterate quickly on a new feature.
# `pytest` enables you to define categories for your tests and provides options for including or excluding categories when you run your suite. You can mark a test with any number of categories.
# Marking tests is useful for categorizing tests by subsystem or dependencies. If some of your tests require access to a database, for example, then you could create a `@pytest.mark.database_access` mark for them.
# When the time comes to run your tests, you can still run them all by default with the `pytest` command. If you’d like to run only those tests that require database access, then you can use `pytest -m database_access`. To run all tests *except* those that require database access, you can use `pytest -m "not database_access"`. You can even use an `autouse` fixture to limit database access to those tests marked with `database_access`.
# Some plugins expand on the functionality of marks by guarding access to resources. The [`pytest-django`](https://pytest-django.readthedocs.io/en/latest/) plugin provides a `django_db` mark. Any tests without this mark that try to access the database will fail. The first test that tries to access the database will trigger the creation of Django’s test database.
# The requirement that you add the `django_db` mark nudges you toward stating your dependencies explicitly. That’s the `pytest` philosophy, after all! It also means that you can run tests that don’t rely on the database much more quickly, because `pytest -m "not django_db"` will prevent the test from triggering database creation. The time savings really add up, especially if you’re diligent about running your tests frequently.
# `pytest` provides a few marks out of the box:
# - **`skip`** skips a test unconditionally.
# - **`skipif`** skips a test if the expression passed to it evaluates to `True`.
# - **`xfail`** indicates that a test is expected to fail, so if the test *does* fail, the overall suite can still result in a passing status.
# - **`parametrize`** (note the spelling) creates multiple variants of a test with different values as arguments. You’ll learn more about this mark shortly.
#
# You can see a list of all the marks `pytest` knows about by running `pytest --markers`.
# <a class="anchor" id="parametrization:_combining_tests"></a>
#
# ## Parametrization: Combining Tests
# You saw earlier in this tutorial how `pytest` fixtures can be used to reduce code duplication by extracting common dependencies. Fixtures aren’t quite as useful when you have several tests with slightly different inputs and expected outputs. In these cases, you can [**parametrize**](http://doc.pytest.org/en/latest/example/parametrize.html) a single test definition, and `pytest` will create variants of the test for you with the parameters you specify.
# Imagine you’ve written a function to tell if a string is a [palindrome](https://en.wikipedia.org/wiki/Palindrome). An initial set of tests could look like this:
# +
# Baseline suite for an is_palindrome() helper (defined in the tutorial, not
# in this file): one test per input category, later collapsed by parametrize.
def test_is_palindrome_empty_string():
    assert is_palindrome("")
def test_is_palindrome_single_character():
    assert is_palindrome("a")
def test_is_palindrome_mixed_casing():
    assert is_palindrome("Bob")
def test_is_palindrome_with_spaces():
    assert is_palindrome("Never odd or even")
def test_is_palindrome_with_punctuation():
    assert is_palindrome("Do geese see God?")
def test_is_palindrome_not_palindrome():
    assert not is_palindrome("abc")
def test_is_palindrome_not_quite():
    assert not is_palindrome("abab")
# -
# All of these tests except the last two have the same shape:
# Illustrative template only (angle-bracket placeholders) — not runnable Python.
def test_is_palindrome_<in some situation>():
    assert is_palindrome("<some string>")
# You can use `@pytest.mark.parametrize()` to fill in this shape with different values, reducing your test code significantly:
# +
# Parametrized rewrite of the suite above: pytest generates one independent
# test case per listed value.
@pytest.mark.parametrize("palindrome", [
    "",
    "a",
    "Bob",
    "Never odd or even",
    "Do geese see God?",
])
def test_is_palindrome(palindrome):
    assert is_palindrome(palindrome)
@pytest.mark.parametrize("non_palindrome", [
    "abc",
    "abab",
])
def test_is_palindrome_not_palindrome(non_palindrome):
    assert not is_palindrome(non_palindrome)
# -
# The first argument to `parametrize()` is a comma-delimited string of parameter names. The second argument is a [list](https://realpython.com/courses/lists-tuples-python/) of either [tuples](https://realpython.com/python-lists-tuples/) or single values that represent the parameter value(s). You could take your parametrization a step further to combine all your tests into one:
# Single fully-parametrized test: each tuple is (input, expected_result).
@pytest.mark.parametrize("maybe_palindrome, expected_result", [
    ("", True),
    ("a", True),
    ("Bob", True),
    ("Never odd or even", True),
    ("Do geese see God?", True),
    ("abc", False),
    ("abab", False),
])
def test_is_palindrome(maybe_palindrome, expected_result):
    assert is_palindrome(maybe_palindrome) == expected_result
# Even though this shortened your code, it’s important to note that in this case, it didn’t do much to clarify your test code. Use parametrization to separate the test data from the test behavior so that it’s clear what the test is testing!
# <a class="anchor" id="durations_reports:_fighting_slow_tests"></a>
#
# ## Durations Reports: Fighting Slow Tests
# Each time you switch contexts from implementation code to test code, you incur some [overhead](https://en.wikipedia.org/wiki/Overhead_(computing)). If your tests are slow to begin with, then overhead can cause friction and frustration.
# You read earlier about using marks to filter out slow tests when you run your suite. If you want to improve the speed of your tests, then it’s useful to know which tests might offer the biggest improvements. `pytest` can automatically record test durations for you and report the top offenders.
# Use the `--durations` option to the `pytest` command to include a duration report in your test results. `--durations` expects an integer value `n` and will report the slowest `n` number of tests. The output will follow your test results:
# ```sh
# $ pytest --durations=3
# 3.03s call test_code.py::test_request_read_timeout
# 1.07s call test_code.py::test_request_connection_timeout
# 0.57s call test_code.py::test_database_read
# ======================== 7 passed in 10.06s ==============================
# ```
# Each test that shows up in the durations report is a good candidate to speed up because it takes an above-average amount of the total testing time.
# Be aware that some tests may have an invisible setup overhead. You read earlier about how the first test marked with `django_db` will trigger the creation of the Django test database. The `durations` report reflects the time it takes to set up the database in the test that triggered the database creation, which can be misleading.
# <a class="anchor" id="useful_`pytest`_plugins"></a>
#
# ## Useful `pytest` Plugins
# You learned about a few valuable `pytest` plugins earlier in this tutorial. You can explore those and a few others in more depth below.
# <a class="anchor" id="`pytest-randomly`"></a>
#
# ### `pytest-randomly`
# [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly) does something seemingly simple but with valuable effect: It forces your tests to run in a random order. `pytest` always collects all the tests it can find before running them, so `pytest-randomly` shuffles that list of tests just before execution.
# This is a great way to uncover tests that depend on running in a specific order, which means they have a **stateful dependency** on some other test. If you built your test suite from scratch in `pytest`, then this isn’t very likely. It’s more likely to happen in test suites that you migrate to `pytest`.
# The plugin will print a seed value in the configuration description. You can use that value to run the tests in the same order as you try to fix the issue.
# <a class="anchor" id="`pytest-cov`"></a>
#
# ### `pytest-cov`
# If you measure how well your tests cover your implementation code, you likely use the [coverage](https://coverage.readthedocs.io/) package. [`pytest-cov`](https://pytest-cov.readthedocs.io/en/latest/) integrates coverage, so you can run `pytest --cov` to see the test coverage report.
# <a class="anchor" id="`pytest-django`"></a>
#
# ### `pytest-django`
# [`pytest-django`](https://pytest-django.readthedocs.io/en/latest/) provides a handful of useful fixtures and marks for dealing with Django tests. You saw the `django_db` mark earlier in this tutorial, and the `rf` fixture provides direct access to an instance of Django’s [`RequestFactory`](https://docs.djangoproject.com/en/3.0/topics/testing/advanced/#django.test.RequestFactory). The `settings` fixture provides a quick way to set or override Django settings. This is a great boost to your Django testing productivity!
# If you’re interested in learning more about using `pytest` with Django, then check out [How to Provide Test Fixtures for Django Models in Pytest](https://realpython.com/django-pytest-fixtures/).
# <a class="anchor" id="`pytest-bdd`"></a>
#
# ### `pytest-bdd`
# `pytest` can be used to run tests that fall outside the traditional scope of unit testing. [Behavior-driven development](https://en.wikipedia.org/wiki/Behavior-driven_development) (BDD) encourages writing plain-language descriptions of likely user actions and expectations, which you can then use to determine whether to implement a given feature. [pytest-bdd](https://pytest-bdd.readthedocs.io/en/latest/) helps you use [Gherkin](http://docs.behat.org/en/v2.5/guides/1.gherkin.html) to write feature tests for your code.
# You can see which other plugins are available for `pytest` with this extensive [list of third-party plugins](http://plugincompat.herokuapp.com/).
# <a class="anchor" id="conclusion"></a>
#
# ## Conclusion
# `pytest` offers a core set of productivity features to filter and optimize your tests along with a flexible plugin system that extends its value even further. Whether you have a huge legacy `unittest` suite or you’re starting a new project from scratch, `pytest` has something to offer you.
# In this tutorial, you learned how to use:
# - **Fixtures** for handling test dependencies, state, and reusable functionality
# - **Marks** for categorizing tests and limiting access to external resources
# - **Parametrization** for reducing duplicated code between tests
# - **Durations** to identify your slowest tests
# - **Plugins** for integrating with other frameworks and testing tools
#
# Install `pytest` and give it a try. You’ll be glad you did. Happy testing!
| Python/04. Advanced/08.2 Effective Python Testing With Pytest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
sys.path.insert(0, "../turk/analysis")
from analysis import parse_csv
from collections import defaultdict, OrderedDict
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
def group_by(res_lines, variable_name):
    """Bucket *res_lines* (a list of dicts) by the value stored under
    *variable_name*; returns a defaultdict mapping value -> list of records."""
    grouped = defaultdict(list)
    for record in res_lines:
        grouped[record[variable_name]].append(record)
    return grouped
def sort_by_fxn(res_dict, fxn):
    """Order *res_dict* entries by fxn applied to their 'p_true' values.

    res_dict maps key -> list of dicts carrying 'p_true'.  Returns an
    OrderedDict with the same key/value pairs, ascending by score.
    """
    scored = [
        (fxn([line['p_true'] for line in values]), key, values)
        for key, values in res_dict.items()
    ]
    scored.sort(key=lambda item: item[0])
    ordered = OrderedDict()
    for _score, key, values in scored:
        ordered[key] = values
    return ordered
# Load the cleaned/normalised Turk judgements; each row is one annotation dict.
res_lines = parse_csv("/Users/Elias/vagueness/results/turk/gqa/clean_and_norm_young.csv")
figure_path = "/Users/Elias//vagueness-2020/figures/"
# print(res_lines[0].keys())
# by_sent = group_by(res_lines, "sent")
# add IDs
# for i, line in enumerate(res_lines):
# unique_id = f"{line['assignment_id']}-{line['sent']}"
# line["unique_id"] = unique_id
# res_lines[i] = line
# Group annotations per question, then order questions by the mean and by the
# standard deviation of their p_true judgements.
by_sent = group_by(res_lines, "question_id")
by_sent_sorted_avg = sort_by_fxn(by_sent, lambda x: np.mean(x))
by_sent_sorted_std_dev = sort_by_fxn(by_sent, lambda x: np.std(x))
# print(by_ass)
# print(len(by_ass["3HVVDCPGTFX008I22DLYWJIDB3ZYTB-Is the sky cloudy?"]))
# +
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
font = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 28}
matplotlib.rc('font', **font)
import os
from analysis import parse_csv
from collections import defaultdict, OrderedDict
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
font = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
def plot_data(human_data):
    """Scatter-plot the mean p_true per question with a standard-error band.

    human_data: OrderedDict question_id -> list of annotation dicts carrying
    'p_true' (0-100 scale) and 'sent', assumed pre-sorted.  Shows the figure
    and returns the pyplot module.

    BUG FIX: the body previously referenced a global `data` instead of the
    `human_data` parameter (NameError when no such global exists), and the
    error band was computed from np.mean instead of np.std.
    """
    xs = np.arange(len(human_data.keys()))
    # mean human truth rating per question
    human_ys = np.array([np.mean([line['p_true'] for line in human_data[key]]) for key in human_data.keys()])
    # standard deviation (was np.mean), turned into a standard error below
    human_std_dev = np.array([np.std([line['p_true'] for line in human_data[key]]) for key in human_data.keys()])
    human_std_err = human_std_dev / np.sqrt(len(human_ys))
    sents = [line[0]['sent'] for line in human_data.values()]
    all_lines = [lines[0] for lines in human_data.values()]
    # colour by majority judgement: blue if the mean rating exceeds 50, else red
    true_colors = ["True" if np.mean([line['p_true'] for line in human_data[key]]) > 50 else "False"
                   for key in human_data.keys()]
    palette = {"True": '#4575b4',
               "False": '#d73027'}
    markers = {"True": ".",
               "False": "."}
    plt.figure()
    fig, axs = plt.subplots(1, 1, sharex='col', figsize=(6, 3))
    # translucent band of +/- one standard error around the means
    axs.fill_between(xs, human_ys - human_std_err, human_ys + human_std_err, color=(0, 0, 0, 0.3))
    for x, y, c, s in zip(xs, human_ys, true_colors, sents):
        axs.scatter([x], [y], s=150, marker=markers[c], color=palette[c])
    axs.set_yticks([0.0, 100])
    axs.set_yticklabels([0.0, 100])
    axs.set_xticks([])
    axs.set_xticklabels([])
    axs.spines['right'].set_visible(False)
    axs.spines['top'].set_visible(False)
    # axs.spines['left'].set_visible(False)
    axs.spines['bottom'].set_visible(True)
    plt.tight_layout()
    # plt.savefig(os.path.join(figure_path, "sunn_normalized.pdf"))
    return plt
plot_data(by_sent_sorted_avg)
# +
from scipy.optimize import curve_fit
import numpy as np
def sigmoid(x, x0, k):
    """Logistic curve centred at x0 with steepness k; maps R -> (0, 1)."""
    return 1.0 / (1.0 + np.exp(-k * (x - x0)))
def fit_sigmoid(data):
    """Fit a logistic curve to the per-question mean p_true values.

    data: OrderedDict question_id -> list of dicts with 'p_true' on a 0-100
    scale (keys assumed already sorted by mean).  Returns (rmse, popt) where
    popt = (x0, k) of the fitted sigmoid.
    """
    xs = np.arange(len(data.keys()))
    ys = [np.mean([line['p_true'] for line in data[key] ] ) for key in data.keys() ]
    ys = np.array(ys)/100
    # (removed an unused `bounds` local that was never passed to curve_fit)
    popt, pcov = curve_fit(sigmoid, xs, ys, p0= [20, 0.1], method='dogbox')
    pred_ys = np.array([sigmoid(x, popt[0], popt[1]) for x in xs])
    # root-mean-squared error between fitted curve and observed means
    goodness_of_fit = np.sqrt(np.sum((pred_ys - ys)**2) / len(pred_ys))
    return goodness_of_fit, popt
# -
# Fit the sigmoid to the mean-sorted data and report the fit quality (RMSE).
gof, popt = fit_sigmoid(by_sent_sorted_avg)
print(f"goodness of fit: {gof}")
# +
# Recompute plotting inputs from the mean-sorted judgements, then overlay the
# fitted sigmoid (popt from the cell above, rescaled to the 0-100 axis).
xs = np.arange(len(by_sent_sorted_avg.keys()))
human_ys = [np.mean([line['p_true'] for line in by_sent_sorted_avg[key] ] ) for key in by_sent_sorted_avg.keys() ]
sents = [line[0]['sent'] for line in by_sent_sorted_avg.values()]
all_lines = [lines[0] for lines in by_sent_sorted_avg.values() ]
true_colors = ["True" if np.mean([line['p_true'] for line in by_sent_sorted_avg[key] ]) > 50 else "False" for key in by_sent_sorted_avg.keys() ]
palette = {"True": '#4575b4' ,
           "False": '#d73027',
           "tshirt1": '#e0f3f8',
           "tshirt2": '#fee090',
           "journalism": '#fc8d59',
           "teammate": '#91bfdb'}
markers = {"True": "." ,
           "False": "."}
# VERSION 1
plt.figure()
fig, axs = plt.subplots(1, 1, sharex='col', figsize=(6,3))
for x, y, c, s in zip(xs, human_ys, true_colors, sents):
    color =c
    axs.scatter([x], [y], s = 150, marker = markers[c], color = palette[color])
axs.set_yticks([0.0, 100])
axs.set_yticklabels([0.0, 100])
# add predicted sigmoid
sig_xs = np.linspace(xs[0], xs[-1])
sig_ys = np.array([sigmoid(x, popt[0], popt[1]) for x in sig_xs]) * 100
plt.plot(sig_xs, sig_ys, '-')
plt.tight_layout()
# plt.savefig(os.path.join(figure_path, "scores_version_1.pdf"))
plt.show()
# +
# baseline fitting
# Sanity check: fit the same sigmoid to uniform random noise so we know what a
# "bad" goodness-of-fit value looks like for comparison with the real data.
xs = np.linspace(0, len(sig_xs))
ys = np.random.uniform(0, 1, len(xs))
plt.plot(xs, ys, ".")
popt, pcov = curve_fit(sigmoid, xs, ys, p0= [20, 0.1], method='dogbox')
sig_xs = np.linspace(xs[0], xs[-1])
sig_ys = np.array([sigmoid(x, popt[0], popt[1]) for x in sig_xs])
plt.plot(sig_xs, sig_ys, '-')
pred_ys = np.array([sigmoid(x, popt[0], popt[1]) for x in xs])
# FIX: take the square root so this is an RMSE, directly comparable with the
# value returned by fit_sigmoid() (which was already root-mean-squared).
baseline_gof = np.sqrt(np.sum((pred_ys - ys)**2) / len(pred_ys))
print(f"baseline goodness {baseline_gof}")
# +
## quantifying all predicates
## fit a sigmoid to k-1 predicates, measure gof on held-out
## repeat k times and average
# -
| src/plots/.ipynb_checkpoints/plot_gqa_turk_results-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ltrSfsQp7B3b" colab_type="text"
# # Henry's model select demo
#
# The follow is the model select program base on Decision Tree Classfier.
# Will be run on .py file instead of Jupyter Noteboook (for CSE server)
# + id="PLxLGXpZ7B3d" colab_type="code" colab={}
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.base import TransformerMixin
from sklearn import tree
from sklearn import preprocessing
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve, StratifiedKFold
from sklearn.metrics import roc_auc_score, roc_curve, classification_report, confusion_matrix, plot_confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline, Pipeline
import joblib
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
import datetime
# + [markdown] id="4qXyq4vx9yRo" colab_type="text"
# ## Mount the Google Drive to the system
# + id="Ftz-X_T09x4d" colab_type="code" outputId="fded6752-564b-4e60-b95a-1cff6a94345b" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="5CUM16J6-X8z" colab_type="code" outputId="a92548d7-bb51-4df3-e26a-f240e647e712" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls
# + [markdown] id="NQ2_sE9q7B3f" colab_type="text"
# ## Pre-define basic parameter for system adjustment
# + id="hzFtylSB7B3g" colab_type="code" colab={}
np.random.seed(1)  # fixed seed for reproducible sampling
# Input CSVs (Google Drive paths) and output locations for models/reports.
TRAININGFILE = '/content/gdrive/My Drive/Colab Notebooks/COMP9417/keyword.csv'
TESTFILE = '/content/gdrive/My Drive/Colab Notebooks/COMP9417/key_word_test.csv'
TESTSIZE = 0.1
REPORTPATH = '/content/gdrive/My Drive/Colab Notebooks/COMP9417/model/'
MODELPATH = '/content/gdrive/My Drive/Colab Notebooks/COMP9417/model/'
# Candidate feature columns and the prediction target column.
x_label_list = ['key_word_50', 'key_word_100','article_words']
y_label_list = ['topic']
# Reference numbering of the 11 topics (0 = irrelevant).  NOTE(review): the
# LabelEncoder fitted elsewhere derives its own (alphabetical) codes, so this
# dict is used for its keys (topic names), not for these numeric values.
topic_code = {
    'ARTS CULTURE ENTERTAINMENT': 1,
    'BIOGRAPHIES PERSONALITIES PEOPLE': 2,
    'DEFENCE': 3,
    'DOMESTIC MARKETS': 4,
    'FOREX MARKETS': 5,
    'HEALTH': 6,
    'MONEY MARKETS': 7,
    'SCIENCE AND TECHNOLOGY': 8,
    'SHARE LISTINGS': 9,
    'SPORTS': 10,
    'IRRELEVANT': 0
}
def time_stamp():
    """Current wall-clock time rendered as 'HH:MM:SS >' for log-line prefixes."""
    now = datetime.datetime.now()
    return f"{now:%H:%M:%S} >"
# + [markdown] id="Wm65QTC97B3i" colab_type="text"
# ## Pre-processing the data_set from the CSV file
# + id="29lX2Mmx7B3i" colab_type="code" colab={}
def preprocess(df, x_label, y_label):
    """Project *df* down to just the feature column and the target column."""
    wanted_columns = [x_label, y_label]
    return df[wanted_columns]
# for the bag of word and label encode process
def convert_word(bag_of_word_model, label_model, data_set, x_label, y_label='topic'):
'''
bow model need to be pre-fit when call current function
'''
act_x = bag_of_word_model.transform(data_set[x_label].values)
act_y = label_model.transform(data_set[y_label])
return act_x, act_y
# + [markdown] id="4StjfZQD7B3k" colab_type="text"
# ## SMOTE with different *Bag of Word* model:
# 1. CountVectorizer()
# 2. TfidfVectorizer()
# + id="NmEiHfcF7B3l" colab_type="code" colab={}
def smote_with_vector(df, vector_model, label_model, x_label):
    '''
    Oversample minority topics with SMOTE, working in bag-of-words space.

    df            data set containing column x_label and 'topic'
    vector_model  unfitted Bag of Word model (Count/Tfidf vectorizer)
    label_model   fitted LabelEncoder for the 'topic' column
    x_label       name of the text column to vectorise

    Returns (new_x, sm_y): new_x is a Series of comma-joined token strings
    reconstructed from the oversampled matrix, sm_y the encoded labels.
    NOTE(review): inverse_transform discards token counts and order, so new_x
    is a lossy reconstruction of the SMOTEd vectors — confirm downstream
    vectorisers tolerate this.
    '''
    count = vector_model.fit(df[x_label])
    # convert the data
    train_x, train_y = convert_word(count, label_model, df, x_label)
    # start to SMOTE (fixed random_state for reproducibility)
    smote = SMOTE(random_state=1)
    sm_x, sm_y = smote.fit_sample(train_x, train_y)
    # re-cover the data: map the synthetic vectors back to token strings
    new_x = count.inverse_transform(sm_x)
    new_x = pd.Series([','.join(item) for item in new_x])
    return new_x, sm_y
# + [markdown] id="fHHtZ0DI7B3n" colab_type="text"
# ## Implement the model pre-processing
#
# For **GridSearch** and also implement *StratifiedKFold* for cross-vaildation
# + id="jVJyKF-u7B3n" colab_type="code" colab={}
def grid_search(vector, model, train_x, train_y):
    """Tune tree hyper-parameters inside a vectoriser+classifier pipeline.

    vector : unfitted text vectoriser used as the first pipeline step
    model  : estimator (DecisionTreeClassifier) used as the second step
    Uses 10-fold stratified CV.  Returns (best_estimator, best_cv_score).
    """
    kfold = StratifiedKFold(n_splits=10,shuffle=True,random_state=1)
    pipe = Pipeline([
        ('vector', vector),
        ('model', model)
    ])
    param_grid = {
        'model__max_depth': [16, 32, 64],
        'model__min_samples_split': [2, 4],
        'model__min_samples_leaf': [1, 2, 4],
    }
    # FIX: local variable renamed from `grid_search`, which shadowed this
    # function and would break any recursive/re-entrant use.
    search = GridSearchCV(pipe, param_grid, cv=kfold, n_jobs=-1)
    grid_result = search.fit(train_x, train_y)
    return (grid_result.best_estimator_,grid_result.best_score_)
# + [markdown] id="USEx3F0L7B3p" colab_type="text"
# ## Implement the Score function for model evaluate
# base on the topic on each topic
# + id="aD8M-Ak17B3q" colab_type="code" colab={}
def topic_score(model, label_model, data_set, topic_name, x_label):
    """Score *model* on only the rows of *data_set* labelled *topic_name*.

    Returns {'f1', 'accuarcy', 'recall_score'} rounded to 4 decimal places.
    NOTE(review): every row in the subset shares one true class, so the
    macro-averaged f1/recall over all predicted classes can be deflated —
    confirm this is the intended per-topic metric.
    """
    test_data_set = data_set[data_set['topic'] == topic_name]
    test_x = test_data_set[x_label]
    test_y = test_data_set['topic']
    pred_y = model.predict(test_x)
    # encode string topics to the integer codes the model was trained on
    en_test_y = label_model.transform(test_y)
    f1_score = metrics.f1_score(en_test_y, pred_y, average='macro')
    accuarcy = metrics.accuracy_score(en_test_y, pred_y)
    recall_score = metrics.recall_score(en_test_y, pred_y, average='macro')
    return {
        'f1': round(f1_score, 4),
        'accuarcy': round(accuarcy, 4),
        'recall_score': round(recall_score, 4)
    }
def model_score(model, label_model, x_label, test_df):
    '''
    Print and return the per-topic score table for *model*.

    model    the fitted dt pipeline
    test_df  testing DataFrame (projected to [x_label, 'topic'] internally)
    Scores every topic listed in topic_code and returns the records sorted
    by accuracy, best first.
    '''
    print('Topic\tf1\taccuarcy\trecall_score')
    test_report = []
    test_df = preprocess(test_df, x_label, 'topic')
    for topic in topic_code.keys():
        result = [topic]
        result.append(topic_score(model, label_model, test_df, topic, x_label))
        test_report.append(result)
    # best-accuracy topics first
    test_report.sort(reverse=True, key=lambda x: x[1]['accuarcy'])
    for record in test_report:
        print(record)
    return test_report
def model_score_no_en(model, label_model, x_label, test_df):
    '''
    Same as model_score(), but *test_df* is assumed to already contain only
    the [x_label, 'topic'] columns (no preprocess() projection is applied).
    Returns the per-topic records sorted by accuracy, best first.
    '''
    print('Topic\tf1\taccuarcy\trecall_score')
    test_report = []
    for topic in topic_code.keys():
        result = [topic]
        result.append(topic_score(model, label_model, test_df, topic, x_label))
        test_report.append(result)
    # best-accuracy topics first
    test_report.sort(reverse=True, key=lambda x: x[1]['accuarcy'])
    for record in test_report:
        print(record)
    return test_report
def merge_x_y(data_x, data_y, x_label, y_label):
    """Zip two parallel sequences into a two-column DataFrame.

    FIX: the original called Series.rename_axis() without keeping the result;
    rename_axis returns a new object, so those calls had no effect and were
    removed.
    """
    df_x = pd.Series(data_x)
    df_y = pd.Series(data_y)
    return pd.DataFrame(list(zip(df_x, df_y)), columns=[x_label, y_label])
# + [markdown] id="0w8kMuaI7B3s" colab_type="text"
# ## Define the model save function
# The function will automatically save each trainning model and result report wait for further choose
# + id="CAocm4697B3s" colab_type="code" colab={}
def save_job(model, test_report, pre_vector, feature_name):
    """Persist a trained model and its per-topic report under REPORTPATH.

    Writes '<pre_vector>_<feature_name>.model' (joblib) and a matching '.txt'
    report with one record per line.
    """
    filename = REPORTPATH+str(pre_vector)+'_'+feature_name
    joblib.dump(model, filename+'.model')
    with open(filename+'.txt', 'w') as fp:
        fp.write('Topic\tf1\taccuarcy\trecall_score\n')
        for record in test_report:
            fp.write(str(record)+'\n')
# + [markdown] id="oJ1jcRrp7B3u" colab_type="text"
# ## Start to implement the main function
# ---
# + id="0Efx_fEk7B3v" colab_type="code" colab={}
def model_compile(df, x_label, vector_num):
    """Train and evaluate a decision-tree topic classifier on column *x_label*.

    df          raw training DataFrame (needs columns x_label and 'topic')
    x_label     feature column to train on
    vector_num  1 -> SMOTE in TF-IDF space, otherwise in raw-count space

    Side effects: prints progress and metrics, shows a confusion-matrix plot.
    Returns (model, label_model, encode_mapping).
    """
    print(time_stamp()+'Trainning topic', x_label, 'with vector num', vector_num)
    df = preprocess(df, x_label, 'topic')
    label_model = preprocessing.LabelEncoder().fit(df['topic'])
    encode_mapping = dict(zip(label_model.classes_, range(len(label_model.classes_))))
    # Oversample minority topics with SMOTE before splitting train/test
    if vector_num == 1:
        print(time_stamp()+'Smoting word to matrix using TF-IDF...', end=' ')
        x, y = smote_with_vector(df, TfidfVectorizer(), label_model, x_label)
    else:
        print(time_stamp()+'Smoting word to matrix using Count...', end=' ')
        x, y = smote_with_vector(df, CountVectorizer(), label_model, x_label)
    print('Done!')
    new_df = merge_x_y(x, y, x_label, 'topic')
    train_df, test_df = train_test_split(new_df, test_size=0.3)
    train_x = train_df[x_label]
    train_y = train_df['topic']
    test_x = test_df[x_label]
    test_y = test_df['topic']
    # Grid-search the same tree with two vectorisers; keep the better pipeline
    print(time_stamp()+'Starting Grid-Search with Count...', end=' ')
    count_dt_model, count_dt_accuarcy = grid_search(CountVectorizer(), DecisionTreeClassifier(), train_x, train_y)
    print('Done!')
    print(time_stamp()+'Starting Grid-Search with Tfidf...', end=' ')
    tfidf_dt_model, tfidf_dt_accuarcy = grid_search(TfidfVectorizer(norm=None), DecisionTreeClassifier(), train_x, train_y)
    print('Done!')
    print(time_stamp(), end='')
    if count_dt_accuarcy >= tfidf_dt_accuarcy:
        print(f'*************************************************************')
        print(f'Now the training set is {x_label}, and the model chosen is count')
        print(f'The accuracy is {count_dt_accuarcy}')
        model = count_dt_model
    else:
        print(f'*************************************************************')
        print(f'Now the training set is {x_label}, and the model chosen is tfidf')
        print(f'The accuracy is {tfidf_dt_accuarcy}')
        model = tfidf_dt_model
    # Overall held-out performance (labels are already integer-encoded here)
    pred_y = model.predict(test_x)
    en_test_y = test_y
    print(time_stamp()+'Total proformance')
    print('F1 score:', metrics.f1_score(en_test_y, pred_y, average='macro'))
    print('Accuarcy:', metrics.accuracy_score(en_test_y, pred_y))
    print('Recall score:', metrics.recall_score(en_test_y, pred_y, average='macro'))
    print('-'*15)
    print('Classification Report:')
    print(classification_report(en_test_y, pred_y))
    # Per-topic breakdown (decode labels back to strings for topic filtering)
    test_df['topic'] = label_model.inverse_transform(test_df['topic'])
    model_score_no_en(model, label_model, x_label, test_df)
    # Confusion-matrix heatmap
    conf_matrix = confusion_matrix(en_test_y, pred_y)
    fig1 = plt.figure(figsize=(13,6))
    sns.heatmap(conf_matrix,
                annot=True,  # show numbers in each cell
                fmt='d',  # set number format to integer in each cell
                yticklabels=label_model.classes_,
                xticklabels=model.classes_,
                cmap="Blues",
                linewidths=.1,
                )
    plt.title(
        f"Confusion Matrix on Test Set | "
        f"Classifier: {'+'.join([step for step in model.named_steps.keys()])}",
        fontsize=14)
    plt.xlabel("Actual: False positives for y != x", fontsize=12)
    plt.ylabel("Prediction: False negatives for x != y", fontsize=12)
    plt.show()
    # BUG FIX: callers unpack three values from this function, but the original
    # implementation fell off the end and returned None.
    return model, label_model, encode_mapping
def model_evaluate(model, x_label, label_model, df, encode_mapping, vector_num):
    """Evaluate a fitted classification pipeline on a held-out dataframe.

    Prints overall macro F1 / accuracy / recall plus a full classification
    report, computes per-topic scores, persists the model via save_job, and
    renders a confusion-matrix heatmap.

    Parameters
    ----------
    model : fitted sklearn Pipeline (vectorizer + classifier).
    x_label : str, name of the text column used as model input.
    label_model : fitted LabelEncoder for the 'topic' column.
    df : DataFrame holding the evaluation data (needs `x_label` and 'topic').
    encode_mapping : unused; kept so existing call sites keep working.
    vector_num : vectorizer variant id, forwarded to save_job for bookkeeping.
    """
    print('Start to evaluate', x_label, 'model')
    test_set = preprocess(df, x_label, 'topic')
    test_x = test_set[x_label]
    test_y = test_set['topic']
    # Overall performance on the label-encoded targets.
    pred_y = model.predict(test_x)
    en_test_y = label_model.transform(test_y)
    print('Total performance')
    print('F1 score:', metrics.f1_score(en_test_y, pred_y, average='macro'))
    print('Accuracy:', metrics.accuracy_score(en_test_y, pred_y))
    print('Recall score:', metrics.recall_score(en_test_y, pred_y, average='macro'))
    print('-'*15)
    print('Classification Report:')
    print(classification_report(en_test_y, pred_y))
    # Per-topic performance.
    model_report = model_score(model, label_model, x_label, df)
    # Persist the current model together with its report.
    save_job(model, model_report, vector_num, x_label)
    # Confusion-matrix heatmap (rows: predictions, columns: actual classes).
    conf_matrix = confusion_matrix(en_test_y, pred_y)
    plt.figure(figsize=(13, 6))
    sns.heatmap(conf_matrix,
                annot=True,  # show counts in each cell
                fmt='d',     # integer format for the counts
                yticklabels=label_model.classes_,
                xticklabels=model.classes_,
                cmap="Blues",
                linewidths=.1,
                )
    plt.title(
        f"Confusion Matrix on Test Set | "
        f"Classifier: {'+'.join([step for step in model.named_steps.keys()])}",
        fontsize=14)
    plt.xlabel("Actual: False positives for y != x", fontsize=12)
    plt.ylabel("Prediction: False negatives for x != y", fontsize=12)
    plt.show()
# + [markdown] id="53ImoTayiRR-" colab_type="text"
# ### For test/debug
# + id="QVMYO2ATiUVr" colab_type="code" colab={}
# %%time
# Quick smoke test: compile a single model on the 'key_word_50' column.
# Relies on model_compile and TRAININGFILE defined earlier in this notebook.
x_label = 'key_word_50'
vector_num = 2  # vectorizer variant id consumed by model_compile
df = pd.read_csv(TRAININGFILE)
model, label_model, encode_mapping = model_compile(df, x_label, vector_num)
# + [markdown] id="shOchmF77B3w" colab_type="text"
# ## start to test different model
# ---
#
# For one topic testing
# + id="IW-k_Q8V7B3x" colab_type="code" outputId="d8106a14-5f20-4338-c921-2ed533e12ed7" colab={}
# %%time
# Train on the training file and evaluate on the held-out test file
# for a single input-column / vectorizer combination.
x_label = 'key_word_50'
vector_num = 1
df = pd.read_csv(TRAININGFILE)
test_df = pd.read_csv(TESTFILE)
model, label_model, encode_mapping = model_compile(df, x_label, vector_num)
model_evaluate(model, x_label, label_model, test_df, encode_mapping, vector_num)
# + [markdown] id="saHkyyQe7B3z" colab_type="text"
# For mult-topic testing
# + id="h3EkFQfL7B30" colab_type="code" colab={}
# %%time
# load data
# Sweep every candidate input column (x_label_list, defined elsewhere)
# against both vectorizer variants, evaluating each trained model.
df = pd.read_csv(TRAININGFILE)
test_df = pd.read_csv(TESTFILE)
for x_label in x_label_list:
    for vector_num in [1, 2]:
        model, label_model, encode_mapping = model_compile(df, x_label, vector_num)
        model_evaluate(model, x_label, label_model, test_df, encode_mapping, vector_num)
# + id="6pVXTKeq7B32" colab_type="code" outputId="ee60ce4e-b1f1-4f9e-86ba-b67240ae3dc4" colab={}
# Sanity-check the preprocess() output for one column.
df = pd.read_csv(TRAININGFILE)
train = preprocess(df, 'key_word_50', 'topic')
print(train)
# + [markdown] id="ego4oU8-_4MG" colab_type="text"
# ## For Google Colab running
# + id="4T7MrI-P7B35" colab_type="code" outputId="8bcea9b1-4a19-40a3-8aad-8fc418896336" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
# Full-text run: train on the 'article_words' column with both vectorizer
# variants, evaluating each model on the separate test file.
x_label = 'article_words'
# load data
df = pd.read_csv(TRAININGFILE)
test_df = pd.read_csv(TESTFILE)
for vector_num in [1, 2]:
    model, label_model, encode_mapping = model_compile(df, x_label, vector_num)
    model_evaluate(model, x_label, label_model, test_df, encode_mapping, vector_num)
# + id="HucvOlezAfbd" colab_type="code" colab={}
# %%time
# Same sweep as above, but validate on a random 80/20 split of the
# training file instead of the separate test file.
x_label = 'article_words'
# load data
df = pd.read_csv(TRAININGFILE)
train_df, test_df = train_test_split(df, test_size=0.2)
for vector_num in [1, 2]:
    # Fix: train on train_df. Previously the full df was passed to
    # model_compile, so the 20% hold-out rows leaked into training and
    # train_df was never used.
    model, label_model, encode_mapping = model_compile(train_df, x_label, vector_num)
    model_evaluate(model, x_label, label_model, test_df, encode_mapping, vector_num)
# + [markdown] id="9Xn2BqSAoltf" colab_type="text"
# ## For model using training file to do validation, results as follows
# ---
# + id="iF6tK-Cdt_F3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="de06c143-7df5-4429-f491-8bd7d9324ebc"
# %%time
# Validation-only runs: compile a model per column / vectorizer
# combination; no separate evaluation step here.
x_label = 'key_word_100'
vector_num = 1
df = pd.read_csv(TRAININGFILE)
model_compile(df, x_label, vector_num)
# + id="dY7spSfdwOfo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c768b6b0-4b17-45fe-bbfb-74c00daab2fe"
# %%time
x_label = 'article_words'
vector_num = 1
df = pd.read_csv(TRAININGFILE)
model_compile(df, x_label, vector_num)
# + id="4ljx9aP9RIf6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e65d36de-f4d9-43d5-d1c2-13114889e3ef"
# %%time
x_label = 'key_word_100'
vector_num = 2
df = pd.read_csv(TRAININGFILE)
model_compile(df, x_label, vector_num)
# + id="LH36OElmpW-g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f05d0799-5dd8-47ea-d40f-802db050a337"
# %%time
x_label = 'article_words'
vector_num = 2
df = pd.read_csv(TRAININGFILE)
model_compile(df, x_label, vector_num)
# + id="HfWUkObQuj1A" colab_type="code" colab={}
| Henry_s_work/Henry_s_model_select_vaildation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
# 1.Capomulin and Ramicane demonstrated more effectiveness than any other treatment regimen. When analyzing the final tumor volume by regimen, Capomulin is the first best result, with most mice showing the lowest tumor volume and,
# Ramicane comes in second place.
#
# 2.Capomulin mice showed a continuous reduction in tumor volume during the treatment period; the tumor decreased by about
# 90% by the end. There is also a positive correlation between mouse weight and average tumor volume: mice with lower weight also had smaller tumor volumes.
#
# 3.There were outliers that affected the results of the Infubinol regimen; however, this might not be the reason for
# its negative results. Both Infubinol and Ceftamin showed a median tumor volume around 40% greater than Capomulin and Ramicane.
#
# 4.There were no problems in the data set, except for a mouse that had duplicate entries and therefore this mouse was removed from the set.
#
# %matplotlib inline
# +
# Dependencies and Setup
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pyplot as plt  # NOTE(review): duplicate of the pyplot import above
from scipy import stats
import scipy.stats as st  # NOTE(review): scipy.stats is imported twice (as `stats` and `st`)
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
# Outer merge on "Mouse ID" keeps rows from both tables.
combined_df = pd.merge(mouse_metadata,study_results,how="outer",on="Mouse ID")
# Display the data table for preview
combined_df.head()
# -
# Checking the number of mice.
# NOTE: this counts rows (one per mouse per timepoint), not unique mice.
count_mice = combined_df["Mouse ID"].count()
count_mice
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_mouse_id = combined_df.loc[combined_df.duplicated(subset=['Mouse ID',"Timepoint"]),"Mouse ID"].unique()
duplicate_mouse_id
# Optional: Get all the data for the duplicate mouse ID.
duplicated_mouse = combined_df.loc[combined_df['Mouse ID'] == 'g989',:]
duplicated_mouse
# +
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
#cleaned_df = combined_df[~combined_df.duplicated(subset=['Mouse ID',"Timepoint"])]
#cleaned_df = combined_df[combined_df.duplicated['Mouse ID'].isin(duplicate_mouse_id)==False]
# Drop every row for mouse 'g989' (the duplicate flagged above).
cleaned_df = combined_df.loc[combined_df['Mouse ID'] != 'g989',:]
cleaned_df.head()
# -
# Checking the number of mice in the clean DataFrame.
count_mice_cleaned_df = cleaned_df["Mouse ID"].count()
count_mice_cleaned_df
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
grouped_drug = cleaned_df.groupby("Drug Regimen")
regimen_mean = grouped_drug["Tumor Volume (mm3)"].mean()
regimen_median = grouped_drug["Tumor Volume (mm3)"].median()
regimen_var = grouped_drug["Tumor Volume (mm3)"].var()
regimen_std = grouped_drug["Tumor Volume (mm3)"].std()
regimen_sem = grouped_drug["Tumor Volume (mm3)"].sem()
summary_stats = pd.DataFrame({"Mean":regimen_mean,
                              "Median":regimen_median,
                              "Variance":regimen_var,
                              "Std Deviation":regimen_std,
                              "SEM":regimen_sem})
summary_stats
# -
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
# Fix: this result was previously assigned to a variable named `stats`,
# which shadowed `from scipy import stats` and broke the later call to
# stats.linregress() in the regression section. Use a distinct name.
summary_stats_agg = cleaned_df.groupby("Drug Regimen")["Tumor Volume (mm3)"].aggregate(['mean','median','var','std','sem'])
summary_stats_agg
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
total_mice_per_drug = cleaned_df.groupby("Drug Regimen")["Mouse ID"].nunique()
# Generate the bar plot
total_mice_per_drug.plot(kind='bar', color="red", title="Number of Unique Mice per Drug Regimen",figsize=(7,5))
plt.ylabel("Number of Unique Mice")
plt.tight_layout()
# Save the figure
plt.savefig("output_data/total_mice_per_drug.png")
# Display plot
plt.show()
# +
# Converting series to DF
# Reset the index so "Drug Regimen" becomes an ordinary column.
df_mice = total_mice_per_drug.to_frame()
df_mice.index.name = 'Drug Regimen'
df_mice.reset_index(level=None, drop=False, inplace=True)
df_mice.head()
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.
x_axis = np.arange(0,len(df_mice))
ticks = [value for value in x_axis]  # one tick position per regimen
plt.figure(figsize=(8,4))
plt.bar(x_axis, df_mice["Mouse ID"], color="r", align="center")
plt.xticks(ticks,df_mice["Drug Regimen"], rotation="vertical")
# Set the limits of the x and y axis
plt.xlim(-0.75, len(x_axis))
plt.ylim(0, max(df_mice["Mouse ID"])+5)
# Give the chart a title, x label, and y label, give proper layout
# NOTE(review): the title string misspells "Mice" as "Mince".
plt.title("Number of Unique Mince per Drug Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number Unique of Mice")
plt.tight_layout()
plt.show()
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
# Row counts per sex (one row per mouse per timepoint, not unique mice).
gender_distr = cleaned_df.groupby("Sex")["Mouse ID"].count()
# Set details for the plot
colors=['red','blue']
plt.figure()
gender_distr.plot(kind='pie', figsize=(5, 5),title="Distribution of Female Vs. Male Mice",autopct="%1.1F%%", colors=colors)
plt.show()
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# Convert series into dataframe
sex_df = gender_distr.to_frame()
sex_df.index.name = 'Sex'
sex_df.reset_index(level=None, drop=False, inplace=True)
renamed_sex_df = sex_df.rename(columns={"Sex":"Sex", "Mouse ID":"Distribution of Mice"})
renamed_sex_df
# Passing plot details
# Fix: derive labels and counts from the data instead of hard-coding
# [930, 958]; groupby sorts alphabetically, so the order is Female, Male.
sex = renamed_sex_df["Sex"].tolist()
count = renamed_sex_df["Distribution of Mice"].tolist()
x_axis = np.arange(0,len(sex))
explode = (0.1,0)  # offset the first (Female) wedge
# Tell matplotlib to create a pie chart based upon the above data
plt.figure()
colors= ['red','blue']
plt.title("Distribution of Female Vs. Male Mice")
plt.pie(count,labels=sex,colors=colors,autopct="%1.1f%%",shadow=True, explode=explode)
# Create axes which are equal so we have a perfect circle
plt.axis('equal')
# Save the figure
plt.savefig("output_data/distribution_by_gender.png")
# Display plot
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
max_timepoint = cleaned_df.groupby(["Mouse ID"])['Timepoint'].max()
max_timepoint = max_timepoint.reset_index()
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# Left merge keeps exactly one row per mouse: its final timepoint.
max_tp_and_tumor_vol = max_timepoint.merge(cleaned_df, on=["Mouse ID","Timepoint"], how='left')
max_tp_and_tumor_vol
# +
# Put treatments into a list for for loop (and later for plot labels)
drug_regimen = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
# Create empty list to fill with tumor vol data (for plotting)
tumor_volume = []
# Calculate the IQR and quantitatively determine if there are any potential outliers.
for drug in drug_regimen:
    # Final tumor volumes for every mouse on this regimen.
    final_tumor_volume = max_tp_and_tumor_vol.loc[max_tp_and_tumor_vol['Drug Regimen'] == drug, "Tumor Volume (mm3)"]
    tumor_volume.append(final_tumor_volume)
    quartiles = final_tumor_volume.quantile([.25,.5,.75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq-lowerq
    # Standard 1.5*IQR fences for outlier detection.
    lower_bound = lowerq - (1.5*iqr)
    upper_bound = upperq + (1.5*iqr)
    outlier_tumor_vol = final_tumor_volume.loc[(final_tumor_volume < lower_bound) | (final_tumor_volume > upper_bound)]
    print(f"The lower quartile of tumor volume is: {lowerq}")
    print(f"The upper quartile of tumor volume is: {upperq}")
    print(f"The interquartile range of tumor volume is: {iqr}")
    print(f"The median of tumor volume is: {quartiles[0.5]} ")
    print(f"Outliers using upper and lower bounds: {outlier_tumor_vol}")
    print('-------------------------')
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
red_square = dict(markerfacecolor='r', marker='s')  # style for outlier markers
# NOTE(review): notch="True" is a (truthy) string; the boolean True is the intended type.
plt.boxplot(tumor_volume, labels=drug_regimen, notch="True", flierprops=red_square)
plt.title("Final Tumor Volume by Regimens ")
plt.ylabel('Final Tumor Volume')
# Save the figure
plt.savefig("output_data/finalTumor_byRegimen.png")
# Display plot
plt.show()
# -
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Select a mouse treated with Capomulin
a_mouse_capomulin = cleaned_df.loc[cleaned_df['Mouse ID'] == 's185',:]
# Getting data for the plot
a_mouse_capomulin = a_mouse_capomulin[['Tumor Volume (mm3)','Timepoint']]
# Set variables
avg_tumor = a_mouse_capomulin['Tumor Volume (mm3)']
timepoint = a_mouse_capomulin['Timepoint']
# Plot the line that will be used to track a mouse's treatment over the days
plt.plot(timepoint,avg_tumor, c='y')
# Give the plot a title, x label, and y label, give proper layout
plt.title('Capomulin: Tumor Volume Vs. Timepoint')
plt.xlabel('Days')
plt.ylabel('Tumor Volume')
# Tick every 5 days across the observed treatment window.
plt.xticks(np.arange(min(timepoint), max(timepoint)+1, 5))
plt.legend(['Tumor Volume (mm3)'])
plt.tight_layout()
# Save the figure
plt.savefig("output_data/capomulin_result.png")
# Display plot
plt.show()
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# Getting data for the plot
capomulin_vol_weight = cleaned_df.loc[(cleaned_df['Drug Regimen'] == 'Capomulin')]
# Per-mouse averages across timepoints.
# NOTE(review): .mean() over all columns assumes numeric-only data; newer
# pandas may require numeric_only=True here — confirm pandas version.
capomulin_avg = capomulin_vol_weight.groupby(['Mouse ID']).mean()
# Set variables
mouse_weight = capomulin_avg['Weight (g)']
avg_tumor= capomulin_avg['Tumor Volume (mm3)']
# Generate the scatter plot
plt.scatter(mouse_weight,avg_tumor,marker="o", color='orange')
# Give the plot a legend, a title, x label, and y label
plt.legend(['Tumor Volume (mm3)'],loc='lower right')
plt.title('Capomulin: Average Tumor Volume Vs. Mouse Weight')
plt.xlabel('Mouse Weight (g)')
plt.ylabel('Avg Tumor Volume (mm3)')
# Set the limits of the x and y axis
plt.xlim(min(mouse_weight) -2, max(mouse_weight)+2)
plt.ylim(min(avg_tumor) -2, max(avg_tumor)+2)
plt.tight_layout()
# Save the figure
plt.savefig("output_data/capomulin_result2.png")
# Display plot
plt.show()
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
print(f'The correlation coefficient between mouse weight and average tumor volume is {round(st.pearsonr(mouse_weight,avg_tumor)[0],2)} , for the Capomulin regimen.')
# -
# Perform a linear regression on mouse weight and average tumor volume
# Fix: use the scipy.stats alias `st` here. The module name `stats` was
# reassigned to a DataFrame in the Summary Statistics section, so
# stats.linregress() would raise AttributeError.
slope, intercept, rvalue, pvalue, std_err = st.linregress(mouse_weight,avg_tumor)
# Create equation of line
line_eq = slope * mouse_weight + intercept
# +
# Plotting scatter and linear model for weight versus tumor volume
plt.scatter(mouse_weight,avg_tumor, marker="o", color='orange')
# Overlay the fitted regression line from the previous cell.
plt.plot(mouse_weight,line_eq,"--",linewidth=1, color="g")
# Give the plot a legend, a title, x label, and y label
plt.legend(['Tumor Volume (mm3)'],loc='lower right')
plt.title('Capomulin: Average Tumor Volume Vs. Mouse Weight')
plt.xlabel('Mouse Weight (g)')
plt.ylabel('Avg Tumor Volume (mm3)')
# Set the limits of the x and y axis
plt.xlim(min(mouse_weight) -2, max(mouse_weight)+2)
plt.ylim(min(avg_tumor) -2, max(avg_tumor)+2)
plt.tight_layout()
# Save the figure
plt.savefig("output_data/capomulin_correlation.png")
# Display plot
plt.show()
# -
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.1 64-bit
# name: python_defaultSpec_1599785368296
# ---
# # Let's Do Some Algebra
# In a Jupyter notebook, with sympy.
# +
# First we import sympy:
from sympy import *
# Init pretty printing
init_printing()
# -
# Then we define some symbols:
x, a, b, c, d = symbols('x, a, b, c, d')
# +
# Write some expressions with our symbols:
expr1 = a + x * c
# NOTE: this rebinds the Python name `d` from the Symbol above to the
# expression (b - a); the Symbol d is no longer referenced afterwards.
d = (b - a)
# Do some substitution:
expr2 = expr1.subs(c, d)  # a + x*(b - a): the linear interpolation formula
# -
expr1 # Looks like:
expr2 # Looks like:
# +
# Which looks familiar
# Not sure why -a + b rather than b - a, but that's easily fixed later
# As C code:
ccode(expr2)
# + tags=[]
# Equation time!
u = symbols('u')
eq = Eq(u, expr2)  # u == a + x*(b - a)
eq
# +
# Solve for x
sol = solve((eq),(x))
# Output
sol
# -
# As code
ccode(sol)
# The "inverse" of the linear interpolation formula!
#
# Easy enough to do by hand, but for demonstration purposes...
#
#
# This could be useful.
| JupyterAlgebraTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:napari-dev]
# language: python
# name: conda-env-napari-dev-py
# ---
# ## Simple demonstration of calculating segmentation metrics
# ### And interactive visualisation with Napari
# *NOTE(arl)*: the metrics here are calculated as follows in batch mode:
# + `n_true_labels` is the sum of all true labels, etc
# + `IoU` is the mean IoU of all found objects
# + `Jaccard` is the Jaccard index over all found objects
# + `localization_error` is the mean error for all found objects
# + `pixel_identity` is the per image pixel identity
# +
import os
import sys
sys.path.append('..')
import umetrics
import numpy as np
from skimage.io import imread
import napari
from napari.utils import nbscreenshot
# -
# load a ground truth - prediction image pair
# NOTE(review): hard-coded local path; only the first frame ([0, ...]) of
# each stack is used.
p = '/media/quantumjot/Data/TrainingData/UNet_training_scribble_v2b/set14/labels'
y_true = imread(os.path.join(p, '0014_mask.tif.modified.tif'))[0, ...]
y_pred = imread(os.path.join(p, '0014_mask.tif'))[0, ...]
# ### now perform the calculation with strict matching only
# Strict matching: objects must overlap with IoU >= 0.7 to count as a match.
result = umetrics.calculate(y_true, y_pred, strict=True, iou_threshold=0.7)
# ### render these interactively with napari
bboxes, bbox_properties, text_parameters = result.to_napari()
# with napari.gui_qt():
# %gui qt
viewer = napari.Viewer()
viewer.add_image(y_pred, colormap='magenta', contrast_limits=[0, 1], name='y_pred')
viewer.add_image(y_true, colormap='green', contrast_limits=[0, 1], name='y_true', blending='additive')
# Overlay the per-object bounding boxes produced by the metrics calculation.
viewer.add_shapes(
    bboxes,
    face_color='transparent',
    edge_color='green',
    properties=bbox_properties,
    text=text_parameters,
    name='bounding box',
)
nbscreenshot(viewer)
print(result.results)
| notebooks/unet_segmentation_metrics-napari.ipynb |