code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from itertools import combinations
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Build the example graph: a triangle on {0,1,2} plus extra edges so that
# nodes 2..6 form further cliques ({2,3}, {3,4,6}, {3,5,6}).
G = nx.complete_graph(3)
G.add_edges_from([(2, 3), (3, 4), (3, 6), (3,5), (4, 6), (5,6)])
nx.draw_networkx(G)
# Maximal cliques are the simplices used by the Q-analysis routines below.
maximal_cliques_of_G = list(nx.find_cliques(G))
maximal_cliques_of_G
def make_first_structure_vector(list_of_maximal_cliques):
    """Compute the first structure vector (Q-vector) of a simplicial complex.

    In Q-analysis, two maximal cliques are q-near when they share at least
    q+1 vertices (a common face of dimension >= q); the q-connected
    components are the transitive closure of q-nearness over all cliques
    with more than q vertices.

    Parameters
    ----------
    list_of_maximal_cliques : list of lists of (hashable) vertices.

    Returns
    -------
    (Q_vector, components_per_level) where Q_vector[q] is the number of
    q-connected components and components_per_level[q] lists them.
    """
    components_per_level = []  # qth entry is the list of q-connected components
    q_max = max(len(c) for c in list_of_maximal_cliques) - 1
    for q in range(q_max + 1):
        # Only cliques of dimension >= q (more than q vertices) participate.
        remaining = [c for c in list_of_maximal_cliques if len(c) > q]
        level_components = []
        while remaining:
            # Grow one component from a seed clique until nothing else attaches.
            # (The original removed items from the list it was iterating over,
            # which is fragile; we rebuild the unassigned list each pass.)
            component = [remaining.pop(0)]
            grew = True
            while grew:
                grew = False
                still_unassigned = []
                for clique in remaining:
                    # q-nearness: a shared face of dimension >= q means at
                    # least q+1 common vertices. BUG FIX: the original tested
                    # == q+1, which wrongly split cliques overlapping in MORE
                    # than q+1 vertices.
                    if any(len(set(member) & set(clique)) >= q + 1
                           for member in component):
                        component.append(clique)
                        grew = True
                    else:
                        still_unassigned.append(clique)
                remaining = still_unassigned
            level_components.append(component)
        components_per_level.append(level_components)
    Q_vector = [len(x) for x in components_per_level]
    return Q_vector, components_per_level
# +
# Compute and display the first structure vector of G, level by level.
Q_vector, q_connected_components = make_first_structure_vector(maximal_cliques_of_G)
print("The Q_vector is", Q_vector)
for level, components in enumerate(q_connected_components):
    print("The {}^th connected components at the {}^th level is {}".format(level, level, components))
# -
# Add edge (4,5): nodes {3,4,5,6} now form a 4-clique, changing the structure.
G1 = G.copy()
G1.add_edge(4, 5)
maximal_cliques_of_G1 = list(nx.find_cliques(G1))
nx.draw_networkx(G1)
# +
Q_vector_of_G1, q_connected_components_f_G1 = make_first_structure_vector(maximal_cliques_of_G1)
# BUG FIX: the original printed Q_vector / q_connected_components (computed
# for G above) instead of the newly computed results for G1.
print("The Q_vector is", Q_vector_of_G1)
for level, components in enumerate(q_connected_components_f_G1):
    print("The {}^th connected components at the {}^th level is {}".format(level, level, components))
# +
def make_axuillary_vector(list_of_maximal_cliques):
    """Return the auxiliary vector: entry q counts the maximal cliques with
    exactly q+1 vertices (i.e. the simplices of dimension q)."""
    q_max = max(len(clique) for clique in list_of_maximal_cliques) - 1
    return [
        sum(1 for clique in list_of_maximal_cliques if len(clique) == q + 1)
        for q in range(q_max + 1)
    ]
def make_second_structure_vector(list_of_maximal_cliques):
    """Return the second structure vector N: N[q] is the number of maximal
    cliques of dimension >= q (the suffix sums of the auxiliary vector)."""
    # Removed an unused q_max computation present in the original.
    f_hat_vector = make_axuillary_vector(list_of_maximal_cliques)
    # N[q] = sum of f_hat[q:], i.e. how many cliques are "big enough" at level q.
    return [sum(f_hat_vector[index:]) for index in range(len(f_hat_vector))]
def make_third_structure_vector(N_vector, Q_vector):
    """Return Q-hat, the third structure vector: Q_hat[q] = 1 - Q[q]/N[q],
    a per-level measure of how integrated the complex is."""
    if len(N_vector) != len(Q_vector):
        # Warn but continue, matching the original behaviour.
        print("mismatch of the length of the vectors")
    return [1 - (Q_vector[i] / N_vector[i]) for i in range(len(N_vector))]
# -
# Compute the remaining structure vectors for G and report all of them.
f_hat_vector = make_axuillary_vector(maximal_cliques_of_G)
N_vector = make_second_structure_vector(maximal_cliques_of_G)
Q_hat_vector = make_third_structure_vector(N_vector, Q_vector)
print("The first structure vector of G is: ", Q_vector)
print("The axuillary vector of G is: ", f_hat_vector)
print("The second structure vector of G is: ", N_vector)
print("The third sturcture vector of G is: ", Q_hat_vector)
|
Algebraic_topological_measures.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import math
import json
import matplotlib.pyplot as plt
# %matplotlib inline
from clean_data_2 import *
import plotly.express as px
# -
from jupyterthemes import jtplot
jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)
# Load the three Starbucks capstone datasets (line-delimited JSON records).
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
# Clean each dataset with the helpers imported from clean_data_2.
portfolio = clean_portfolio(portfolio)
portfolio
profile = clean_profile(profile)
profile.head()
transcript.head()
transcript = clean_transcript(transcript)
transcript
# Combine offers, demographics and events into a single analysis dataframe.
df = merge_datasets(portfolio, profile, transcript)
df
# ## Analysis of our data set
# our dataset has 306,534 rows. These rows combine event and offer data in a single dataset to make it easier for analysis
df.shape
# ### Story of 1 customer
#
# Below we see the story of 1 customer, the behaviour of a customer for the duration of the test. Time 0 indicates the start of the test and the last row is the last event that happened. We have randomly chosen a customer at the age of 75 that has received multiple offers and we have a historic view of how she responded. Ideally we want to replicate that pythonically for every customer
df[df.customer_id =='78afa995795e4d85b5d9ceeca43f5fef']
# In the below table we see the behaviour of our selected customer for one specific offer. We can see that the offer was successful
df[(df.offer_id=='B3') & (df.customer_id=='78afa995795e4d85b5d9ceeca43f5fef')]
# #### Which offers get completed more
# NOTE(review): this plot counts offers *received*, not completed — the
# heading and the chart appear inconsistent; confirm intent.
df[df.event_offer_received == 1].offer_type.value_counts().plot(kind='bar', rot=45, figsize=(6,6), alpha=0.6)
plt.title('Offer type received')
# #### What is the distribution of the amount people are spending on a transaction
df.amount.hist(bins = 25, range=[0, 50], alpha = 0.7)
plt.title('Average Spend')
df.income.hist()
plt.title('Income Distribution')
plt.scatter(x='income', y='amount',data=df)
# Interactive look at the high spenders (> $200 per transaction).
df_amount = df[df.amount > 200]
px.scatter(df_amount,x='income', y='amount',)
# Per-customer transaction counts, merged back for a count-vs-income view.
df_customer = pd.DataFrame(df.groupby('customer_id')['event_transaction'].sum())
result = df_customer.merge(df, on= 'customer_id')
df_scatter= pd.DataFrame(result.groupby('customer_id')[['event_transaction_x','income']].mean())
df_scatter
# BUG FIX: the original called get_most_popular_offers(customers, ...) one
# line BEFORE `customers` was defined (NameError when run top-to-bottom);
# build the per-customer table first.
customers = per_customer_data(df, profile)
offers = get_most_popular_offers(customers, n_top=10)
customers
# Average total spend per demographic group.
customers.groupby('income_group')['total_expense'].mean().plot(kind='bar')
customers.groupby('age_group')['total_expense'].mean().plot(kind='bar')
customers.groupby('gender')['total_expense'].mean().plot(kind='bar')
|
.ipynb_checkpoints/starbucks_analysis-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data description & Problem statement:
# The IMDB movie reviews dataset is a set of 50,000 reviews, half of which are positive and the other half negative. This dataset is widely used in sentiment analysis benchmarks, which makes it a convenient way to evaluate our own performance against existing models. The dataset is available online and can be either directly downloaded from Stanford’s website.
#
# # Workflow:
# - Load the training and test datasets
# - Data cleaning (e.g. remove formats and punctuations)
# - Text vectorization, using "Bag of Words" technique
# - Use "tf-idf transformation", and find the "N-grams" to improve the model performance
# - Use a supervised classifier (e.g. Logistic Regression, Naive Bayes, etc.) for text classification: Use Grid-Serach along with Cross-Validation technique for finding the optimal hyper-parameters of best classifier
# - Evaluate the performance of best classifier on the test data set, by calculating:
# - Accuracy
# - f1, Precision, Recall scores
# - Confusion matrix
# - ROC curve
# - Finally, determine most important words/features during semantic analysis for both positive and negative reviews
#
#
# * Note: I repeat the above-mentioned process with and without word normalization (i.e. using lemmatization/stemming) for the sake of comparison. For the word normalization I use the "spaCy" library.
# +
import sklearn
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
# %matplotlib inline
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
# we install and import the spacy package for some advanced tokenization techniques:
import spacy
# we also install and import mglearn package (using !pip install mglearn) for some interesting visualization of results:
import mglearn
# -
# !tree aclImdb
# # load and prepare the text data:
# +
# load the training data:
from sklearn.datasets import load_files
reviews_train = load_files("aclImdb/train/") # load_files returns a bunch, containing training texts and training labels
text_train, y_train = reviews_train.data, reviews_train.target
print("type of text_train: {}".format(type(text_train)), "\n")
print("length of text_train: {}".format(len(text_train)), "\n")
print("Samples per class (training): {}".format(np.bincount(y_train)), "\n")
print("text_train[0]:\n{}".format(text_train[0]))
# -
# load the test data too:
reviews_test = load_files("aclImdb/test/")
text_test, y_test = reviews_test.data, reviews_test.target
print("Number of documents in test data: {}".format(len(text_test)))
print("Samples per class (test): {}".format(np.bincount(y_test)))
# +
# text_train contains some HTML line breaks (<br />).
# It is better to clean the data and remove this formatting before we proceed:
# (documents are bytes objects, hence the b"..." literals)
text_train = [doc.replace(b"<br />", b" ") for doc in text_train]
text_test = [doc.replace(b"<br />", b" ") for doc in text_test]
# -
# # Semantic analysis with tf-idf and n-grams techniques using LR model:
# * Approach 1: without word normalization (i.e. lammatization or stemming)
# +
# We find the best setting of n-gram range and logistic regression parameter using a grid search:
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
pipe = make_pipeline(TfidfVectorizer(min_df=5),
LogisticRegression())
# running the grid search takes a long time because of the relatively large grid and the inclusion of trigrams
param_grid = {"logisticregression__C": [0.001, 0.01, 0.1, 1, 10, 100],
"tfidfvectorizer__ngram_range": [(1, 1), (1, 3)]}
grid1 = GridSearchCV(pipe, param_grid, cv=2, n_jobs=-1)
grid1.fit(text_train, y_train)
# Summarise cross-validation results, best configurations first.
G=pd.DataFrame(grid1.cv_results_)
G.sort_values("rank_test_score").head(3)
# -
print("Best parameters:\n{}".format(grid1.best_params_), '\n')
print("Best cross-validation score: {:.2f}".format(grid1.best_score_))
print("Test score: {:.2f}".format(grid1.score(text_test, y_test)))
# extract scores from grid_search and visualize them for ranges of parametrs:
plt.figure().set_size_inches(12, 3)
h=G[["param_logisticregression__C", "param_tfidfvectorizer__ngram_range", "mean_test_score"]] .pivot_table(index="param_tfidfvectorizer__ngram_range", columns="param_logisticregression__C", values="mean_test_score")
sns.heatmap(h, annot=True)
plt.show()
# +
# Classification report (on the first 1000 test documents only, for speed):
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
report=classification_report(y_test[0:1000], grid1.predict(text_test[0:1000]))
print(report)
# +
# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render *cm* as a colour-mapped image with class-name tick labels."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_positions = np.arange(len(names))
    plt.xticks(tick_positions, names, rotation=45)
    plt.yticks(tick_positions, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
class_names=["0", "1"]
# Compute confusion matrix
cm = confusion_matrix(y_test[0:1000], grid1.predict(text_test[0:1000]))
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, class_names, title='Normalized confusion matrix')
plt.show()
# +
# ROC curve & auc:
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
# Use the positive-class probabilities (column 1) for the ROC computation.
fpr, tpr, thresholds=roc_curve(np.array(y_test[0:1000]),grid1.predict_proba(text_test[0:1000])[:, 1] , pos_label=1)
roc_auc=roc_auc_score(np.array(y_test[0:1000]), grid1.predict_proba(text_test[0:1000])[:, 1])
plt.figure()
plt.step(fpr, tpr, color='darkorange', lw=2, label='ROC curve (auc = %0.2f)' % roc_auc)
# Diagonal = performance of a random classifier.
plt.plot([0, 1], [0, 1], color='navy', alpha=0.4, lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
# Mark the operating point implied by the normalized confusion matrix.
plt.plot([cm_normalized[0,1]], [cm_normalized[1,1]], 'or')
plt.show()
# -
# # Semantic analysis with tf-idf and n-grams techniques using LR model:
# * Approach 2: with word normalization (i.e. using lammatization or stemming)
# +
# run following script in the command line with admin privilage, to load the english package in spaCy:
# python -m spacy download en
# +
# Technicality: we want to use the regexp-based tokenizer that is used by CountVectorizer
# and only use the lemmatization from spacy.
# We replace en_nlp.tokenizer (the spacy tokenizer) with the regexp-based tokenization:
from sklearn.feature_extraction.text import CountVectorizer
import re
# regexp used in CountVectorizer
regexp = re.compile('(?u)\\b\\w\\w+\\b')
# load spacy language model and save old tokenizer
# NOTE(review): spacy.load('en') and tokens_from_list are from spaCy v1/v2
# APIs; this will not run on spaCy v3+ — confirm the pinned spaCy version.
en_nlp = spacy.load('en')
old_tokenizer = en_nlp.tokenizer
# replace the tokenizer with the preceding regexp
en_nlp.tokenizer = lambda string: old_tokenizer.tokens_from_list(regexp.findall(string))
# Custom tokenizer: run the (regexp-patched) spaCy pipeline and keep lemmas.
def custom_tokenizer(document):
    """Tokenize *document* with the patched spaCy pipeline, returning lemmas."""
    return [token.lemma_ for token in en_nlp(document)]
# Count vectorizer that lemmatizes via the custom tokenizer above.
lemma_vect = CountVectorizer(
    tokenizer=custom_tokenizer,
    min_df=5,
    max_features=10000,
    ngram_range=(1, 1),
).fit(text_train)
# +
# transform text_train using CountVectorizer with lemmatization
X_train_lemma = lemma_vect.transform(text_train)
# Only the first 2000 test documents are transformed, for speed.
X_test_lemma = lemma_vect.transform(text_test[0:2000])
print("X_train_lemma.shape: {}".format(X_train_lemma.shape))
# +
# We find the best logistic regression parameter using a grid search:
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
pipe = make_pipeline(TfidfTransformer(),
LogisticRegression())
# running the grid search takes a long time because of the relatively large grid and the inclusion of trigrams
param_grid = {"logisticregression__C": [0.001, 0.01, 0.1, 1, 10, 100]}
grid2 = GridSearchCV(pipe, param_grid, cv=2, n_jobs=-1)
grid2.fit(X_train_lemma, y_train)
G=pd.DataFrame(grid2.cv_results_)
G.sort_values("rank_test_score").head(3)
# -
print("Best parameters:\n{}".format(grid2.best_params_), '\n')
print("Best cross-validation score: {:.2f}".format(grid2.best_score_))
print("Test score: {:.2f}".format(grid2.score(X_test_lemma, y_test[0:2000])))
# +
# Classification report:
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
# NOTE(review): target_names ['spam', 'ham'] look copy-pasted from a spam
# task; the IMDB classes are negative/positive — confirm and relabel.
report=classification_report(y_test[0:2000], grid2.predict(X_test_lemma), target_names=['spam', 'ham'])
print(report)
# +
# Plot a confusion matrix (re-definition of the helper used earlier).
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render *cm* as a colour-mapped image with class-name tick labels."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(names))
    plt.xticks(positions, names, rotation=45)
    plt.yticks(positions, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# NOTE(review): class names 'spam'/'ham' look copy-pasted from a spam task;
# IMDB classes are negative/positive — confirm.
class_names=["spam", "ham"]
# Compute confusion matrix
cm = confusion_matrix(y_test[0:2000], grid2.predict(X_test_lemma))
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, class_names, title='Normalized confusion matrix')
plt.show()
# -
# Most important features when using unigrams, bigrams:
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() — confirm the pinned sklearn version.
feature_names = np.array(lemma_vect.get_feature_names())
coef = grid2.best_estimator_.named_steps['logisticregression'].coef_
mglearn.tools.visualize_coefficients(coef, feature_names, n_top_features=40)
print('Most important features when using unigrams, bigrams, and trigrams with tf-idf rescaling')
|
NLP projects in Python and Spark/IMDb review classification (Sentiment analysis with Scikit-learn).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from autocnet.examples import get_path
from autocnet.graph.network import CandidateGraph
from autocnet.matcher.feature import FlannMatcher
from autocnet.matcher.suppression_funcs import distance
from IPython.display import display
# %pylab inline
# +
#Point to the adjacency Graph
adjacency = get_path('two_image_adjacency.json')
basepath = get_path('Apollo15')
cg = CandidateGraph.from_adjacency(adjacency, basepath=basepath)
#Apply SIFT to extract features
cg.extract_features(method='sift')
#Match
cg.match_features()
#Apply outlier detection
cg.symmetry_checks()
cg.ratio_checks()
# Per-edge boolean masks recording which matches survived each check.
m = cg.edge[0][1].masks
#Compute a fundamental matrix
cg.compute_fundamental_matrices(clean_keys=['ratio', 'symmetry'])
# -
# ## Suppression
# Create a suppression object using a default error tolerance and count. Supply a custom function that suppresses based upon the distance between matches.
# +
#figsize(12,12)
#cg.edge[0][1].suppress(clean_keys=['fundamental'], func=distance)
# Plot, in blue the points that passed all outlier detectors so far
#cg.edge[0][1].plot(clean_keys=['fundamental'], line_kwargs={'linewidth':0})
# Overlay, in red, the points that remain after suppression
#cg.edge[0][1].plot(clean_keys=['suppression'], line_kwargs={'linewidth':0}, scatter_kwargs={'color':'red'})
# -
# ### Suppression and Do/Undo
# The suppression object, associated with each edge is a stateful observable. This means that other objects can observe the suppression object. If the suppression object changes, all of the observers are notified and can take whatever action they have registered. In addition to being observable, the suppression object keeps a history of itself. This supports do/undo functionality (that alerts observers).
#
# The cell above created that object with a custom distance function. The cells below alter $k$, the desired number of points, and $k_{error}$, the acceptable percentage of error in $k$. These changes are then rolled back and forth.
#
# The plotting calls remain the same for all of these example, only the first line of each is altered.
#
# --------
# $k=10$ and $k_{error}$ defaults to 10%
#
# *Take note of the bad point, in the left image, that has made it through the ratio, symmetry, and fundamental matrix computation tests.*
# +
#cg.edge[0][1].suppress(clean_keys=['fundamental'], func=distance, k=10)
# Plot, in blue the points that passed all outlier detectors so far
#cg.edge[0][1].plot(clean_keys=['fundamental'], line_kwargs={'linewidth':0})
# Overlay, in red, the points that remain after suppression
#cg.edge[0][1].plot(clean_keys=['suppression'], line_kwargs={'linewidth':0}, scatter_kwargs={'color':'red'})
# -
# $k = 50$
# +
#cg.edge[0][1].suppress(clean_keys=['fundamental'], func=distance, k=50)
# Plot, in blue the points that passed all outlier detectors so far
#cg.edge[0][1].plot(clean_keys=['fundamental'], line_kwargs={'linewidth':0})
# Overlay, in red, the points that remain after suppression
#cg.edge[0][1].plot(clean_keys=['suppression'], line_kwargs={'linewidth':0}, scatter_kwargs={'color':'red'})
# -
# $k=100$ and $k_{error} = 25%$
# +
#cg.edge[0][1].suppress(clean_keys=['fundamental'], func=distance, k=100, k_error=0.25)
# Plot, in blue the points that passed all outlier detectors so far
#cg.edge[0][1].plot(clean_keys=['fundamental'], line_kwargs={'linewidth':0})
# Overlay, in red, the points that remain after suppression
#cg.edge[0][1].plot(clean_keys=['suppression'], line_kwargs={'linewidth':0}, scatter_kwargs={'color':'red'})
# -
# Using the suppression object we can access some attributes to see how many valid points.
# +
#cg.edge[0][1].suppression.nvalid
# -
# ## Rollback
# Now we will undo that last change. Perhaps the error was just too high (it was not near 25% in this case, but imagine it was).
# +
#cg.edge[0][1].suppression.rollback()
# Plot, in blue the points that passed all outlier detectors so far
#cg.edge[0][1].plot(clean_keys=['fundamental'], line_kwargs={'linewidth':0})
# Overlay, in red, the points that remain after suppression
#cg.edge[0][1].plot(clean_keys=['suppression'], line_kwargs={'linewidth':0}, scatter_kwargs={'color':'red'})
# -
|
notebooks/Suppression vis Disk Covering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# %matplotlib inline
import pylab as plt
import numpy as np
# # Distance from high school to college campuses
#
# **Use: `easy_install googlemaps` to prepare required Python dependencies. (pip doesn't work apparently...)**
#
# **Please note, this notebook is not finished yet. We are still trying to fix the issues we encountered while using the Google Maps API. This notebook is meant to show our work and progress**.
#
# While exploring our dataset we realized we need additional data to perform meaningful yield prediction. One of the features we think might be meaningful in predicting admission yield is the distance from a student's high school to the college campus.
#
# We will user the google maps API to find the locations of high schools and the distance from the schools to each UC campus.
# +
import googlemaps
# Key left as a courtesy to the instructor.
# key = '<KEY>' #Michal's
# key = '<KEY>' #Nick's
key = '<KEY>' #Nelson's
# Authenticated Google Maps client used by get_distance() below.
gmaps = googlemaps.Client(key=key)
# -
# ## Finding location and distance using Google Maps API
#
# Next, we will define functions to find the following for each high school/UC campus combo:
# - Location of the high school
# - Distance between the high school and the campus
# +
import time
def get_distance(campus_abbr, school_strings):
    """Return distances (metres) from a UC campus to each school address.

    Parameters
    ----------
    campus_abbr : str — UC campus name suffix, e.g. "Berkeley".
    school_strings : str or list of str — free-text school addresses.

    Returns
    -------
    list of distances, with np.nan where Google Maps found no route.

    Raises
    ------
    ValueError for the system-wide pseudo-campus; RuntimeError (chained to
    the underlying error) when the distance-matrix API call fails.
    """
    if isinstance(school_strings, str):
        school_strings = [school_strings]
    if campus_abbr == 'Universitywide':
        raise ValueError("Can't get the distance to the entire university system")
    campus_str = 'University of California, {}'.format(campus_abbr)
    # The API accepts at most 25 destinations per request, so chunk the input.
    N = 25
    chunks = [school_strings[i:i + N] for i in range(0, len(school_strings), N)]
    results = []
    for chunk in chunks:
        time.sleep(1)  # stay under the 100-elements/second rate limit
        try:
            response = gmaps.distance_matrix(origins=campus_str, destinations=chunk)
            by_hs = response['rows'][0]['elements']
        except Exception as e:
            # BUG FIX: chain the original exception instead of discarding it,
            # so failures other than timeouts stay diagnosable.
            raise RuntimeError("API timeout") from e
        for entry in by_hs:
            if 'distance' in entry:
                results.append(entry['distance']['value'])
            else:
                # Google Maps could not resolve that school's address.
                results.append(np.nan)
    return results
def get_school_loc_str(df):
    """Build 'school, city, state, country' lookup strings per row, skipping
    any component that is missing (null) for that row."""
    parts = df['school'].values.copy()
    for column in ('city', 'state', 'country'):
        parts += np.where(df[column].notnull(), ', ' + df[column], '')
    return parts
# -
get_distance("Berkeley", ["ABRAHAM LINCOLN HIGH SCHOOL, Los Angeles", "LAWRENCEVILLE SCHOOL, Lawrenceville, New jersey"])
# Next, we will use the above functions on our main dataset.
# Main admissions dataset (one row per campus/school/ethnicity combination).
data = pd.read_csv('../data/processed.csv')
data
# ## Issues
#
# We have encountered multiple issues while trying to collect our location data. The main problems were:
#
# - The Google Maps API only allows for a small number of API calls per day
# - The API crashed repeatedly
#
# Below, one can find different attempts we made to query the API and deduplicate the location results.
# Deduplicate (campus, school) pairs before querying the API.
# BUG FIX: the original re-assigned from `data` on the second line, silently
# discarding the ethnicity == 'All' filter; chain the filters instead.
no_dups = data[data['ethnicity'] == 'All']
no_dups = no_dups.drop_duplicates(subset=['campus', 'school_num'])
no_dups['school_loc_str'] = get_school_loc_str(no_dups)
# BUG FIX: 'Univeristywide' was misspelled, so this filter never matched the
# system-wide pseudo-campus (spelled 'Universitywide' elsewhere in this file).
no_dups = no_dups[no_dups['campus'] != 'Universitywide']
no_dups = no_dups[no_dups['state'] == 'California']
no_dups.head()
# +
# no_dups[no_dups['school_num'] == '']
# -
# ## Saving results
#
# Unfortunately, the googlemaps API crashed repeatedly. We decided, to save the results in a persistent dictionary. This way we were able to save results even if our API calls crashed.
def load_distances():
    """Read back the persisted campus-to-school distance mapping from disk."""
    import json
    with open('../data/distances.json') as handle:
        payload = json.load(handle)
    return payload
# Group remaining rows by campus; all campuses except the system-wide
# pseudo-entry still need distances fetched.
gb = no_dups.groupby('campus')
to_be_done = gb.groups.keys() - {'Universitywide'}
campus_distances = load_distances()
print(campus_distances)
# +
# for campus in to_be_done:
# group = gb.get_group(campus)
# found_distances = campus_distances[campus].keys()
# not_found_schools = ~group['school_num'].isin(found_distances)
# not_found = group[not_found_schools]
# to_do = not_found[:100]
# print("getting the distance from UC " + campus + " to " + str(len(to_do)) + " schools out of " + str(len(not_found)))
# schools = to_do['school_loc_str'].values
# distances = get_distance(campus, schools)
# new_distances = dict( zip(to_do['school_num'], distances) )
# campus_distances[campus].update(new_distances)
# print('saving...')
# import json
# with open('../data/distances.json', 'w') as fp:
# json.dump(campus_distances, fp)
# print("DONE")
# -
# Preview the saved distances: per campus, print the count and the first
# five (school_id, distance) entries.
for campus, dict_ in campus_distances.items():
    print(campus, len(dict_))
    for shown, (school_id, distance) in enumerate(dict_.items()):
        if shown == 5:
            print('...')
            break
        print(school_id, distance)
# ## Add the distance data to our dataframe
# +
# campuses = [campus for school in dict_ for dict_ in distances]
# -
# Start from a fresh copy of the full dataset and attach a distance column.
final_data = data.copy()
final_data['distance'] = np.nan #fill with NaNs to start
final_data
final_data.describe().T
# final_data[final_data['school_num'].isnull()]
campus_distances = load_distances()
# Fill final_data['distance'] by matching each saved (campus, school_num)
# pair against the dataframe rows.
for campus, dict_ in campus_distances.items():
    print(campus, len(dict_))
    campus_matches = final_data['campus'] == campus
    for i, (num, dist) in enumerate(dict_.items()):
        # Lightweight in-place progress indicator.
        print("\r{}/{}".format(i, len(dict_)), end='', flush=True)
        # JSON keys are strings; school_num in the dataframe is numeric.
        school_matches = final_data['school_num'] == int(num)
        final_data.loc[school_matches & campus_matches, 'distance'] = dist
    print()
final_data.describe().T
final_data
# Persist the merged dataset before filtering it down.
final_data.to_csv('../data/distances.csv', sep=',', index=False)
# BUG FIX: 'Univeristywide' was misspelled, so system-wide rows (spelled
# 'Universitywide' elsewhere in this notebook) were never filtered out.
final = final_data[final_data['campus'] != 'Universitywide']
final = final[final['state'] == 'California']
final = final[final['ethnicity'] == 'All']
final = final[final['distance'].notnull()]
final['desc'] = get_school_loc_str(final)
final
final.describe()
final['desc'].values
|
experiments/maps.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data_Retrieval_and_Plotting
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
import openweathermapy as owm
from scipy.stats import linregress
from datetime import datetime
# Import API keys
from api_keys import (weather_api_key, g_key)
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "../Output/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
#To display all the output in a cell (not just the last expression)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# ## Generate Cities List
# +
# Holders for the random coordinates and the resulting unique city names.
lat_lngs = []
cities = []
# Fixed seed so the random sample is reproducible while testing.
np.random.seed(1000)
# Draw 1600 random latitude/longitude pairs across the globe.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1600)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1600)
lat_lngs = zip(lats, lngs)
# Map every coordinate to its nearest city, de-duplicating as we go.
for lat, lng in lat_lngs:
    nearest = citipy.nearest_city(lat, lng).city_name
    if nearest not in cities:
        cities.append(nearest)
# Print the city count to confirm a sufficient sample size.
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Placeholder dataframe for the per-city weather extracted from the API.
weather_DF = pd.DataFrame(columns=['City', 'Lat', 'Lng', 'Max Temp', 'Humidity',
                                   'Cloudiness', 'Wind Speed', 'Country', 'Date'])
# Response fields to pull out of each openweathermap record.
summary = ['name', 'coord.lat', 'coord.lon', 'main.temp_max', 'main.humidity',
           'clouds.all', 'wind.speed', 'sys.country', 'dt']
# Query parameters passed on every API call.
params = {'units': 'imperial',
          'appid': weather_api_key}
# Iteratively call the openweathermap API via its Python wrapper.
print("Beginning Data Retrieval\n-----------------------------")
count = 0  # number of successful queries / next row index
for index, city in enumerate(cities):
    try:
        result = owm.get_current(city, **params)
        weather_DF.loc[count] = result(*summary)
        print(f"Processed Record {index} | {city}")
        count += 1
    except:
        print(f"Record {index}: City {city} not found. Skipping...")
    # NOTE(review): sleep assumed to run on every iteration (the source had
    # lost its indentation) — confirm against the original notebook.
    time.sleep(1)  # 1 sec delay between API calls
print("-----------------------------\nData Retrieval Complete\n-----------------------------")
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
weather_DF.info()
# Persist the raw retrieval results before any cleaning.
weather_DF.to_csv('../Output/cities_weather.csv', index=False, encoding='utf-8')
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# Coerce the numeric columns (loaded as objects) so describe()/comparisons work.
weather_DF[['Lat', 'Lng', 'Max Temp', 'Humidity', 'Cloudiness',
'Wind Speed']] = weather_DF[['Lat', 'Lng', 'Max Temp', 'Humidity', 'Cloudiness',
'Wind Speed']].astype('float')
weather_DF.describe()
# +
# Get the indices of cities that have humidity over 100%.
#remove the cities where the humidity > 100%
indices_above_100 = weather_DF[weather_DF['Humidity']>100].index
indices_above_100
# -
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
clean_city_data = weather_DF.drop(indices_above_100, axis=0) #By default "inplace=False"
# +
# Extract relevant fields from the data frame
#Keeping ['City', 'Lat', 'Lng', 'Max Temp', 'Humidity', 'Cloudiness','Wind Speed', 'Country']
clean_city_data = clean_city_data[['City', 'Country', 'Lat', 'Lng', 'Max Temp', 'Humidity', 'Cloudiness','Wind Speed']]
# Export the City_Data into a csv
clean_city_data.to_csv('../Output/cleaned_cities_weather.csv', index=False, encoding='utf-8')
# -
clean_city_data.head()
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# %matplotlib inline
from matplotlib import style
# Expression only — in a notebook this displays the list of available styles.
plt.style.available
# NOTE(review): the bare 'seaborn' style name is deprecated/removed in newer
# matplotlib releases ('seaborn-v0_8') — confirm the pinned matplotlib version.
plt.style.use('seaborn')
#The date of retrieving the data
# Take the first unique retrieval date (epoch seconds -> dd/mm/YYYY, UTC)
# to stamp the plot titles. Assumes all rows were fetched on the same day.
utc_date = weather_DF['Date'].map(lambda x: datetime.utcfromtimestamp(int(x)).strftime('%d/%m/%Y')).unique()[0]
# ## Latitude vs. Temperature Plot
# +
# Scatter plot: city latitude vs. max temperature, saved as plot1.png.
fig, ax1 = plt.subplots(figsize=(8,8))
_ = clean_city_data.plot(x='Lat',
                         y='Max Temp',
                         kind='scatter',
                         grid=True,
                         edgecolor='k',
                         s=30,
                         fontsize=18,
                         ylim=(min(clean_city_data['Max Temp'])-5, max(clean_city_data['Max Temp'])+5),
                         ax=ax1
                        )
_=ax1.set_xlabel('Latitude', fontsize=18)
_=ax1.set_facecolor('lightcoral')
# Raw string: '\c' in a normal literal is an invalid escape sequence
# (DeprecationWarning/SyntaxWarning); the rendered label is unchanged.
_=ax1.set_ylabel(r'Max Temperature $^\circ$(F)', fontsize=18)
_=ax1.set_title(f"City Latitude vs. Max Temperature ({utc_date})", fontsize=20)
plt.tight_layout()
plt.savefig('../Resources/assets/images/plot1.png',facecolor='white',edgecolor='white', bbox_inches='tight')
# -
# ## Latitude vs. Humidity Plot
# +
# Scatter plot: city latitude vs. humidity, saved as plot2.png.
fig, ax2 = plt.subplots(figsize=(8,8))
# Consistency with the sibling plot cells: discard .plot()'s return via
# `_ =` instead of rebinding ax2 (it returns the same Axes passed in).
_ = clean_city_data.plot(x='Lat',
                         y='Humidity',
                         kind='scatter',
                         grid=True,
                         edgecolor='k',
                         s=30,
                         ylim=(min(clean_city_data['Humidity'])-5, max(clean_city_data['Humidity'])+5),
                         fontsize=18,
                         ax=ax2
                        )
_=ax2.set_xlabel('Latitude', fontsize=18)
_=ax2.set_ylabel('Humidity (%)', fontsize=18)
_=ax2.set_facecolor('lightcoral')
_=ax2.set_title(f"City Latitude vs. Humidity ({utc_date})", fontsize=20)
plt.tight_layout()
plt.savefig('../Resources/assets/images/plot2.png',facecolor='white',edgecolor='white', bbox_inches='tight')
# -
# ## Latitude vs. Cloudiness Plot
# +
# Scatter plot: city latitude vs. cloudiness, saved as plot3.png.
fig, ax3 = plt.subplots(figsize=(8,8))
# Pad the y-axis by 5 percentage points on each side.
cloud_low = min(clean_city_data['Cloudiness']) - 5
cloud_high = max(clean_city_data['Cloudiness']) + 5
_ = clean_city_data.plot(x='Lat', y='Cloudiness', kind='scatter', grid=True,
                         edgecolor='k', s=30, ylim=(cloud_low, cloud_high),
                         fontsize=18, ax=ax3)
_ = ax3.set_xlabel('Latitude', fontsize=18)
_ = ax3.set_ylabel('Cloudiness (%)', fontsize=18)
_ = ax3.set_title(f"City Latitude vs. Cloudiness ({utc_date})", fontsize=20)
_ = ax3.set_facecolor('lightcoral')
plt.tight_layout()
plt.savefig('../Resources/assets/images/plot3.png',facecolor='white',edgecolor='white', bbox_inches='tight')
# -
# ## Latitude vs. Wind Speed Plot
# +
# Scatter plot: city latitude vs. wind speed, saved as plot4.png.
fig, ax4 = plt.subplots(figsize=(8,8))
# Pad the y-axis by 2 mph on each side.
wind_low = min(clean_city_data['Wind Speed']) - 2
wind_high = max(clean_city_data['Wind Speed']) + 2
_ = clean_city_data.plot(x='Lat', y='Wind Speed', kind='scatter', grid=True,
                         edgecolor='k', s=30, ylim=(wind_low, wind_high),
                         fontsize=18, ax=ax4)
_ = ax4.set_xlabel('Latitude', fontsize=18)
_ = ax4.set_ylabel('Wind Speed (mph)', fontsize=18)
_ = ax4.set_title(f"City Latitude vs. Wind Speed ({utc_date})", fontsize=20)
_ = ax4.set_facecolor('lightcoral')
plt.tight_layout()
plt.savefig('../Resources/assets/images/plot4.png', facecolor='white',edgecolor='white', bbox_inches='tight')
# -
# ## Linear Regression
# +
# OPTIONAL: Create a function to create Linear Regression plots
def linregress_plots(DF, xl, yl, xlabel='Latitude', ylabel='', title='', figname='plot.png'):
m, c, r, p, _ = linregress(DF[xl], DF[yl])
print(f"The r-squared is: {r**2}")
#Create a new figure
fig, ax =plt.subplots(figsize=(6,6))
#Scatter plot
_ = DF.plot(x=xl,
y=yl,
kind='scatter',
s=30,
title=title,
ylim = (min(DF[yl])-5, max(DF[yl]+15)),
ax=ax
)
_=ax.set_xlabel(xlabel)
_=ax.set_ylabel(ylabel)
#Regression Line
y=m*DF[xl] + c
_=ax.plot(DF[xl], y, 'purple', linewidth=2)
pos=((0.15, 0.2) if m<=-0.4 else ((0.15, 0.75) if m>0.4 else (0.5, 0.85))) #Annotate position
#A way to dynamically finds the number of decimal positions if there is avery small value Eg:- 0.000000067
#We don't want to denote it as 0.00
val = m*100
digits = 2
while int(val)==0:
val*=10
digits+=1
s = "{:."+f"{digits}"+"f}"
format_string = "y = "+s+"x + {:.2f}"
linear_eqn = format_string.format(m, c)
_=ax.annotate(linear_eqn,
xy=pos, xycoords='figure fraction', fontsize=15, color='purple')
_=ax.set_facecolor('#FFD1C1')
plt.tight_layout()
plt.savefig(f"../Resources/assets/images/{figname}",facecolor='white',edgecolor='white', bbox_inches='tight')
_=plt.show()
return(r, p)
#This function returns the r value, and p value
#r value: Pearson Correlation Coefficient
#p value: is a measure of the significance of the gradient. If p value is < 0.01 (Significance level),
#it means that, we cannot independent variable affects dependant variable
# +
# Create Northern and Southern Hemisphere DataFrames
# Split the cleaned data by hemisphere; rows on the equator (Lat == 0)
# are counted as northern (tie-breaking).
NHS = clean_city_data.loc[clean_city_data['Lat'] >= 0]  # Northern Hemisphere
SHS = clean_city_data.loc[clean_city_data['Lat'] < 0]   # Southern Hemisphere
NHS.info()
SHS.info()
# -
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# Each call below prints r-squared, saves the regression figure, and
# returns (r, p) from linregress_plots.
linregress_plots(NHS, 'Lat', 'Max Temp', ylabel='Max Temperature (F)', title=f"Northern Hemi Sphere: \nCity Latitude vs. Max Temperature ({utc_date})", figname='plot5.png')
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
linregress_plots(SHS, 'Lat', 'Max Temp', ylabel='Max Temperature (F)', title=f"Southern Hemi Sphere: \nCity Latitude vs. Max Temperature ({utc_date})", figname='plot6.png')
# * Temperature depends on the distance from equator.
# * Please observe the p value of the linear regression estimator << 0. This means that slope is NOT zero
# * In both hemispheres, a high correlation between latitude and temperature
# * We can observe a pattern in scatter plot also
# * As we move towards equator, temperature increases in both sides of the hemisphere
# * From the data, it looks like, temperatures at cities equidistant from equator in both the sides might not be same.
# * For instance,
# * At latitude +30, temperature is approximated as -0.45*30+89.53=76.03 F
# * At latitude -30, temperature is approximated as 0.75*-30+78.58 = 56.08F.
# * This is because, most of the northern hemisphere is land and most of the southern hemisphere is ocean and ocean is likely to be colder
#
#
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
linregress_plots(NHS, 'Lat', 'Humidity', ylabel='Humidity (%)', title=f"Northern Hemi Sphere: \nCity Latitude vs. Humidity ({utc_date})", figname='plot7.png')
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
linregress_plots(SHS, 'Lat', 'Humidity', ylabel='Humidity (%)', title=f"Southern Hemi Sphere: \nCity Latitude vs. Humidity ({utc_date})", figname='plot8.png')
# * Humidity(%) doesn't correlate with the distance from equator.
# * Please observe that p value of the linear regression estimator >> 0 (>significance level(typically 0.05)). This means that WE CANNOT say that slope is NOT zero.
# * In both hemispheres, a near to ZERO correlation between latitude and humidity.
# * No pattern in scatter plot.
# * At the time of data is taken, humidity is centred around almost the same value in both hemispheres.
# * In northern hemisphere, most of the cities are having humidity around 72%.
# * In southern hemisphere, most of the cities are having humidity around 70%.
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
linregress_plots(NHS, 'Lat', 'Cloudiness', ylabel='Cloudiness (%)', title=f"Northern Hemi Sphere: \nCity Latitude vs. Cloudiness ({utc_date})", figname='plot9.png')
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
linregress_plots(SHS, 'Lat', 'Cloudiness', ylabel='Cloudiness (%)', title=f"Southern Hemi Sphere: \nCity Latitude vs. Cloudiness ({utc_date})", figname='plot10.png')
# * Cloudiness(%) doesn't correlate with the distance from equator.
# * Please observe that p value of the linear regression estimator > significance level (typically 0.05). This means that WE CANNOT say that slope is NOT zero.
# * In both hemispheres, a weak correlation between latitude and cloudiness.
# * No pattern in scatter plot.
# * Cloudiness is centered around different values in both hemispheres.
# * Northern hemisphere has average cloudiness around 54%.
# * Southern hemisphere has average cloudiness around 46%.
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
linregress_plots(NHS, 'Lat', 'Wind Speed', ylabel='Wind Speed (mph)', title=f"Northern Hemi Sphere: \nCity Latitude vs. Wind Speed ({utc_date})", figname='plot11.png')
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
linregress_plots(SHS, 'Lat', 'Wind Speed', ylabel='Wind Speed (mph)', title=f"Southern Hemi Sphere: \nCity Latitude vs. Wind Speed ({utc_date})", figname='plot12.png')
# * Windspeed doesn't correlate with the distance from equator.
# * Please observe that p value of the linear regression estimator > significance level (typically 0.05).
# This means that WE CANNOT say that slope is NOT zero.
# * In both hemispheres, a weak correlation between latitude and Windspeed.
# * No pattern in scatter plot.
# * At the time of data is taken, windspeed is centred around almost the same value in both hemispheres.
# * Northern hemisphere has average windspeed around 6.9 mph.
# * Southern hemisphere has average windspeed around 7.1 mph.
# ## Convert DF to HTML
# Render the cleaned table as HTML for the project web page.
clean_city_data.to_html('../Resources/assets/tables/data.html')
|
Code/Data_Retrieval_and_Plotting.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala
// language: scala
// name: scala
// ---
// # Bubble Sort
// In-place bubble sort: on pass i, adjacent swaps from the right end
// bubble the smallest remaining element down to position i.
def bubbleSort[A](a: Array[A])(implicit ord: Ordering[A]): Unit = {
  var i = 0
  while (i <= a.length - 2) {
    var j = a.length - 1
    while (j >= i + 1) {
      if (ord.lt(a(j), a(j - 1))) {
        val tmp = a(j)
        a(j) = a(j - 1)
        a(j - 1) = tmp
      }
      j -= 1
    }
    i += 1
  }
}
// Return a bubble-sorted copy of `a`, leaving the input array untouched.
def bubbleSorted[A](a: Array[A])(implicit ord: Ordering[A]): Array[A] = {
  val copy = a.clone
  bubbleSort(copy)(ord)
  copy
}
bubbleSorted(Array(31, 41, 59, 26, 41, 58))
// Ordering that reverses T's natural (ascending) order — pass it
// explicitly to bubbleSorted to sort in descending order.
def desc[T : Ordering] = implicitly[Ordering[T]].reverse
bubbleSorted(Array(31, 41, 59, 26, 41, 58))(desc)
|
Getting Started/Bubble Sort.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (cvxpy)
# language: python
# name: cvxpy
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Метод сопряжённых градиентов (Conjugate gradient method)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Система линейных уравнений vs. задача безусловной минимизации
# Рассмотрим задачу
#
# $$
# \min_{x \in \mathbb{R}^n} \frac{1}{2}x^{\top}Ax - b^{\top}x,
# $$
#
# где $A \in \mathbb{S}^n_{++}$.
# Из необходимого условия экстремума имеем
#
# $$
# Ax^* = b
# $$
#
# Также обозначим $f'(x_k) = Ax_k - b = r_k$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Как решить систему $Ax = b$?
#
# - Прямые методы основаны на матричных разложениях:
# - Плотная матрица $A$: для размерностей не больше нескольких тысяч
# - Разреженная (sparse) матрица $A$: для размерностей порядка $10^4 - 10^5$
# - Итерационные методы: хороши во многих случаях, единственный подход для задач с размерностью $ > 10^6$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Немного истории...
#
# Магнус Хестенес (Magnus Hestenes) и Эдуард Штифель (Eduard Stiefel) предложили *метод сопряжённых градиентов* для решения систем линейных уравнений в 1952 году как **прямой** метод.
#
# Также долгое время считалось, что метод представляет только теоретический интерес поскольку
# - метод сопряжённых градиентов не работает на логарифмической линейке
# - метод сопряжённых градиентов имеет небольшое преимущество перед исключением Гаусса при вычислениях на калькуляторе
# - для вычислений на "human computers" слишком много обменов данными
#
# <img src="./human_computer.jpeg">
#
# Метод сопряжённых градиентов необходимо рассматривать как **итерационный метод**, то есть останавливаться до точной сходимости!
#
# Подробнее [здесь](https://www.siam.org/meetings/la09/talks/oleary.pdf)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Метод сопряжённых направлений
#
# В градиентном спуске направления убывания - анти-градиенты, но для функций с плохо обусловленным гессианом сходимость **медленная**.
#
# **Идея:** двигаться вдоль направлений, которые гарантируют сходимость за $n$ шагов.
#
# **Определение.** Множество ненулевых векторов $\{p_0, \ldots, p_l\}$ называется *сопряжённым* относительно матрицы $A \in \mathbb{S}^n_{++}$, если
#
# $$
# p^{\top}_iAp_j = 0, \qquad i \neq j
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# **Утверждение.** Для любой $x_0 \in \mathbb{R}^n$ последовательность $\{x_k\}$, генерируемая методом сопряжённых направлений, сходится к решению системы $Ax = b$ максимум за $n$ шагов.
#
# ```python
# def ConjugateDirections(x0, A, b, p):
#
# x = x0
#
# r = A.dot(x) - b
#
# for i in range(len(p)):
#
# alpha = - (r.dot(p[i])) / (p[i].dot(A.dot(p[i])))
#
# x = x + alpha * p[i]
#
# r = A.dot(x) - b
#
# return x
#
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### Примеры сопряжённых направлений
#
# - Собственные векторы матрицы $A$
# - Для любого набора из $n$ векторов можно провести аналог ортогонализации Грама-Шмидта и получить сопряжённые направления
#
# **Вопрос:** что такое ортогонализация Грама-Шмидта? :)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Геометрическая интерпретация (Mathematics Stack Exchange)
#
# <center><img src="./cg.png" ></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Метод сопряжённых градиентов
#
# **Идея:** новое направление $p_k$ ищется в виде $p_k = -r_k + \beta_k p_{k-1}$, где $\beta_k$ выбирается, исходя из требования сопряжённости $p_k$ и $p_{k-1}$:
#
# $$
# \beta_k = \dfrac{p^{\top}_{k-1}Ar_k}{p^{\top}_{k-1}Ap_{k-1}}
# $$
#
# Таким образом, для получения следующего сопряжённого направления $p_k$ необходимо хранить только сопряжённое направление $p_{k-1}$ и остаток $r_k$ с предыдущей итерации.
#
# **Вопрос:** как находить размер шага $\alpha_k$?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Сопряжённость сопряжённых градиентов
#
# **Теорема**
# Пусть после $k$ итераций $x_k \neq x^*$. Тогда
#
# - $\langle r_k, r_i \rangle = 0, \; i = 1, \ldots k - 1$
# - $\mathtt{span}(r_0, \ldots, r_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$
# - $\mathtt{span}(p_0, \ldots, p_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$
# - $p_k^{\top}Ap_i = 0$, $i = 1,\ldots,k-1$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Теоремы сходимости
#
# **Теорема 1.** Если матрица $A$ имеет только $r$ различных собственных значений, то метод сопряжённых градиентов cойдётся за $r$ итераций.
#
# **Теорема 2.** Имеет место следующая оценка сходимости
#
# $$
# \| x_{k} - x^* \|_A \leq 2\left( \dfrac{\sqrt{\kappa(A)} - 1}{\sqrt{\kappa(A)} + 1} \right)^k \|x_0 - x^*\|_A,
# $$
#
# где $\|x\|_A = x^{\top}Ax$ и $\kappa(A) = \frac{\lambda_1(A)}{\lambda_n(A)}$ - число обусловленности матрицы $A$, $\lambda_1(A) \geq ... \geq \lambda_n(A)$ - собственные значения матрицы $A$
#
# **Замечание:** сравните коэффициент геометрической прогрессии с аналогом в градиентном спуске.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Интерпретации метода сопряжённых градиентов
#
# - Градиентный спуск в пространстве $y = Sx$, где $S = [p_0, \ldots, p_n]$, в котором матрица $A$ становится диагональной (или единичной в случае ортонормированности сопряжённых направлений)
# - Поиск оптимального решения в [Крыловском подпространстве](https://stanford.edu/class/ee364b/lectures/conj_grad_slides.pdf) $\mathcal{K}_k(A) = \{b, Ab, A^2b, \ldots A^{k-1}b\}$
#
# $$
# x_k = \arg\min_{x \in \mathcal{K}_k} f(x)
# $$
#
# - Однако естественный базис Крыловского пространства неортогональный и, более того, **плохо обусловлен**.
#
# **Упражнение** Проверьте численно, насколько быстро растёт обусловленность матрицы из векторов $\{b, Ab, ... \}$
#
# - Поэтому его необходимо ортогонализовать, что и происходит в методе сопряжённых градиентов
# + [markdown] slideshow={"slide_type": "slide"}
# ### Основное свойство
# $$
# A^{-1}b \in \mathcal{K}_n(A)
# $$
#
# Доказательство
#
# - Теорема Гамильтона — Кэли: $p(A) = 0$, где $p(\lambda) = \det(A - \lambda I)$
# - $p(A)b = A^nb + a_1A^{n-1}b + \ldots + a_{n-1}Ab + a_n b = 0$
# - $A^{-1}p(A)b = A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b + a_nA^{-1}b = 0$
# - $A^{-1}b = -\frac{1}{a_n}(A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b)$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Сходимость по функции и по аргументу
#
# - Решение: $x^* = A^{-1}b$
# - Минимум функции:
#
# $$
# f^* = \frac{1}{2}b^{\top}A^{-\top}AA^{-1}b - b^{\top}A^{-1}b = -\frac{1}{2}b^{\top}A^{-1}b = -\frac{1}{2}\|x^*\|^2_A
# $$
#
# - Оценка сходимости по функции:
#
# $$
# f(x) - f^* = \frac{1}{2}x^{\top}Ax - b^{\top}x + \frac{1}{2}\|x^*\|_A^2 =\frac{1}{2}\|x\|_A^2 - x^{\top}Ax^* + \frac{1}{2}\|x^*\|_A^2 = \frac{1}{2}\|x - x^*\|_A^2
# $$
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Доказательство сходимости
#
# - $x_k$ лежит в $\mathcal{K}_k$
# - $x_k = \sum\limits_{i=1}^k c_i A^{i-1}b = p(A)b$, где $p(x)$ некоторый полином степени не выше $k-1$
# - $x_k$ минимизирует $f$ на $\mathcal{K}_k$, отсюда
#
# $$
# 2(f_k - f^*) = \inf_{x \in \mathcal{K}_k} \|x - x^* \|^2_A = \inf_{\mathrm{deg}(p) < k} \|(p(A) - A^{-1})b\|^2_A
# $$
#
# - Спектральное разложение $A = U\Lambda U^*$ даёт
#
# $$
# 2(f_k - f^*) = \inf_{\mathrm{deg}(p) < k} \|(p(\Lambda) - \Lambda^{-1})d\|^2_{\Lambda} = \inf_{\mathrm{deg}(p) < k} \sum_{i=1}^n\frac{d_i^2 (\lambda_ip(\lambda_i) - 1)^2}{\lambda_i} = \inf_{\mathrm{deg}(q) \leq k, q(0) = 1} \sum_{i=1}^n\frac{d_i^2 q(\lambda_i)^2}{\lambda_i}
# $$
#
# - Сведём задачу к поиску некоторого многочлена
# $$
# f_k - f^* \leq \left(\sum_{i=1}^n \frac{d_i^2}{2\lambda_i}\right) \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right) = \frac{1}{2}\|x^*\|^2_A \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right)
# $$
#
# - Пусть $A$ имеет $m$ различных собственных значений, тогда для
#
# $$
# r(y) = \frac{(-1)^m}{\lambda_1 \cdot \ldots \cdot \lambda_m}(y - \lambda_1)\cdot \ldots \cdot (y - \lambda_m)
# $$
#
# выполнено $\mathrm{deg}(r) = m$ и $r(0) = 1$
# - Значение для оптимального полинома степени не выше $k$ оценим сверху значением для полинома $r$ степени $m$
#
# $$
# 0 \leq f_k - f^* \leq \frac{1}{2}\|x^*\|_A^2 \max_{i=1,\ldots,m} r(\lambda_i) = 0
# $$
# - Метод сопряжённых градиентов сошёлся за $m$ итераций
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Улучшенная версия метода сопряжённых градиентов
# На практике используются следующие формулы для шага $\alpha_k$ и коэффициента $\beta_{k}$:
#
# $$
# \alpha_k = \dfrac{r^{\top}_k r_k}{p^{\top}_{k}Ap_{k}} \qquad \beta_k = \dfrac{r^{\top}_k r_k}{r^{\top}_{k-1} r_{k-1}}
# $$
#
# **Вопрос:** чем они лучше базовой версии?
# + [markdown] slideshow={"slide_type": "slide"}
# ### Псевдокод метода сопряжённых градиентов
# ```python
# def ConjugateGradientQuadratic(x0, A, b, eps):
#
# r = A.dot(x0) - b
#
# p = -r
#
# while np.linalg.norm(r) > eps:
#
# alpha = r.dot(r) / p.dot(A.dot(p))
#
# x = x + alpha * p
#
# r_next = r + alpha * A.dot(p)
#
# beta = r_next.dot(r_next) / r.dot(r)
#
# p = -r_next + beta * p
#
# r = r_next
#
# return x
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ## Метод сопряжённых градиентов для неквадратичной функции
# **Идея:** использовать градиенты $f'(x_k)$ неквадратичной функции вместо остатков $r_k$ и линейный поиск шага $\alpha_k$ вместо аналитического вычисления. Получим метод Флетчера-Ривса.
#
# ```python
# def ConjugateGradientFR(f, gradf, x0, eps):
#
# x = x0
#
# grad = gradf(x)
#
# p = -grad
#
# while np.linalg.norm(gradf(x)) > eps:
#
# alpha = StepSearch(x, f, gradf, **kwargs)
#
# x = x + alpha * p
#
# grad_next = gradf(x)
#
# beta = grad_next.dot(grad_next) / grad.dot(grad)
#
# p = -grad_next + beta * p
#
# grad = grad_next
#
# if restart_condition:
#
# p = -gradf(x)
#
# return x
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### Теорема сходимости
#
# **Теорема.** Пусть
# - множество уровней $\mathcal{L}$ ограничено
# - существует $\gamma > 0$: $\| f'(x) \|_2 \leq \gamma$ для $x \in \mathcal{L}$
# Тогда
#
# $$
# \lim_{j \to \infty} \| f'(x_{k_j}) \|_2 = 0
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Перезапуск (restart)
#
# 1. Для ускорения метода сопряжённых градиентов используют технику перезапусков: удаление ранее накопленной истории и перезапуск метода с текущей точки, как будто это точка $x_0$
# 2. Существуют разные условия, сигнализирующие о том, что надо делать перезапуск, например
# - $k = n$
# - $\dfrac{|\langle f'(x_k), f'(x_{k-1}) \rangle |}{\| f'(x_k) \|_2^2} \geq \nu \approx 0.1$
# 3. Можно показать (см. Nocedal, Wright Numerical Optimization, Ch. 5, p. 125), что запуск метода Флетчера-Ривза без использования перезапусков на некоторых итерациях может приводить к крайне медленной сходимости!
# 4. Метод Полака-Рибьера и его модификации лишены подобного недостатка.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Комментарии
# - Замечательная методичка "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" размещена [тут](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)
# - Помимо метода Флетчера-Ривса существуют другие способы вычисления $\beta_k$: метод Полака-Рибьера, метод Хестенса-Штифеля...
# - Для метода сопряжённых градиентов требуется 4 вектора: каких?
# - Самой дорогой операцией является умножение матрицы на вектор
# + [markdown] slideshow={"slide_type": "slide"}
# ## Эксперименты
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Квадратичная целевая функция
# + slideshow={"slide_type": "fragment"}
import numpy as np
# Problem size
n = 100
# Random
# NOTE: this random A is immediately overwritten by the clustered-eigenvalue
# construction below; it is kept only as an alternative test case.
A = np.random.randn(n, n)
# A = A.T.dot(A)
# Clustered eigenvalues
# Build A = Q diag(1,10,100,1000-blocks) Q^T: symmetric positive definite
# with exactly four distinct eigenvalues (CG should converge in 4 steps).
A = np.diagflat([np.ones(n//4), 10 * np.ones(n//4), 100*np.ones(n//4), 1000* np.ones(n//4)])
U = np.random.rand(n, n)
Q, _ = np.linalg.qr(U)
A = Q.dot(A).dot(Q.T)
# A = (A + A.T) * 0.5
print("A is normal matrix: ||AA* - A*A|| =", np.linalg.norm(A.dot(A.T) - A.T.dot(A)))
b = np.random.randn(n)
# Hilbert matrix
# A = np.array([[1.0 / (i+j - 1) for i in range(1, n+1)] for j in range(1, n+1)]) + 1e-3 * np.eye(n)
# b = np.ones(n)
# Quadratic objective f(x) = 0.5 x^T A x - b^T x and its gradient Ax - b.
f = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x)
grad_f = lambda x: A.dot(x) - b
x0 = np.zeros(n)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Распределение собственных значений
# + slideshow={"slide_type": "fragment"}
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rc("text", usetex=True)
plt.rc("font", family='serif')
import seaborn as sns
sns.set_context("talk")
eigs = np.linalg.eigvalsh(A)
cond_A = np.linalg.cond(A)
# Convergence factors: (sqrt(k)-1)/(sqrt(k)+1) for CG vs (k-1)/(k+1)
# for gradient descent, where k is the condition number of A.
print((np.sqrt(cond_A) - 1) / (np.sqrt(cond_A) + 1))
print((cond_A - 1) / (cond_A + 1))
# Plot the distinct eigenvalues on a log scale to show the clustering.
plt.plot(np.unique(eigs))
plt.yscale("log")
plt.ylabel("Eigenvalues", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Правильный ответ
# + slideshow={"slide_type": "fragment"}
import scipy.optimize as scopt
def callback(x, array):
    """Record the current iterate *x* in *array* (scipy.optimize callback)."""
    array += [x]
# + slideshow={"slide_type": "fragment"}
# Reference solution: scipy's nonlinear CG (Polak-Ribiere variant),
# recording every iterate via the callback for later convergence plots.
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, method="CG", jac=grad_f, callback=scopt_cg_callback)
x = x.x
# Residual norm ||Ax - b|| and objective value at the computed minimizer.
print("||f'(x*)|| =", np.linalg.norm(A.dot(x) - b))
print("f* =", f(x))
# + [markdown] slideshow={"slide_type": "slide"}
# #### Реализация метода сопряжённых градиентов
# + slideshow={"slide_type": "fragment"}
def ConjugateGradientQuadratic(x0, A, b, tol=1e-8, callback=None):
    """Solve Ax = b for symmetric positive definite A by the conjugate
    gradient method, starting from x0.

    Iterates until the residual norm ||Ax - b|| drops to `tol`;
    `callback(x)` is invoked after every update. Returns the solution.
    """
    x = x0
    residual = A.dot(x0) - b
    direction = -residual
    while np.linalg.norm(residual) > tol:
        # Exact step length along the current conjugate direction.
        step = residual.dot(residual) / direction.dot(A.dot(direction))
        x = x + step * direction
        if callback is not None:
            callback(x)
        # Cheap residual update: r_{k+1} = r_k + alpha * A p_k.
        new_residual = residual + step * A.dot(direction)
        # Fletcher-Reeves coefficient ||r_{k+1}||^2 / ||r_k||^2.
        beta = new_residual.dot(new_residual) / residual.dot(residual)
        direction = -new_residual + beta * direction
        residual = new_residual
    return x
# + slideshow={"slide_type": "slide"}
# liboptpy is a third-party teaching library providing the solver classes
# used below — assumed available in the course environment (TODO confirm).
import liboptpy.unconstr_solvers as methods
import liboptpy.step_size as ss
print("\t CG quadratic")
cg_quad = methods.fo.ConjugateGradientQuad(A, b)
x_cg = cg_quad.solve(x0, max_iter=1000, tol=1e-7, disp=True)
print("\t Gradient Descent")
# Gradient descent with the exact line search available for quadratics.
gd = methods.fo.GradientDescent(f, grad_f, ss.ExactLineSearch4Quad(A, b))
x_gd = gd.solve(x0, tol=1e-7, max_iter=1000, disp=True)
print("Condition number of A =", abs(max(eigs)) / abs(min(eigs)))
# + [markdown] slideshow={"slide_type": "slide"}
# #### График сходимости
# + slideshow={"slide_type": "fragment"}
# Gradient-norm convergence of CG vs scipy's Polak-Ribiere CG (log scale).
plt.figure(figsize=(8,6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()], label=r"$\|f'(x_k)\|^{CG}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array[:5000]], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
# plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + slideshow={"slide_type": "fragment"}
print([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()])
# + slideshow={"slide_type": "slide"}
# Objective-value convergence of the same runs (linear scale).
plt.figure(figsize=(8,6))
plt.plot([f(x) for x in cg_quad.get_convergence()], label=r"$f(x^{CG}_k)$", linewidth=2)
plt.plot([f(x) for x in scopt_cg_array], label=r"$f(x^{CG_{PR}}_k)$", linewidth=2)
# plt.plot([f(x) for x in gd.get_convergence()], label=r"$f(x^{G}_k)$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Function value", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Неквадратичная функция
# + slideshow={"slide_type": "fragment"}
import numpy as np
import sklearn.datasets as skldata
import scipy.special as scspec
# n features, m samples for a synthetic binary classification problem.
n = 300
m = 1000
X, y = skldata.make_classification(n_classes=2, n_features=n, n_samples=m, n_informative=n//3)
# Regularization trade-off between the l2 term and the data-fit term.
C = 1
def f(w):
    """l2-regularized logistic loss: ||w||^2/2 + C * mean(log(1 + exp(-y Xw))).

    Assumes labels y are +/-1 — TODO confirm make_classification output is
    mapped accordingly before relying on this."""
    return np.linalg.norm(w)**2 / 2 + C * np.mean(np.logaddexp(np.zeros(X.shape[0]), -y * X.dot(w)))
def grad_f(w):
    """Gradient of f: w - C * X^T (y * sigmoid(-y Xw)) / m."""
    denom = scspec.expit(-y * X.dot(w))
    return w - C * X.T.dot(y * denom) / X.shape[0]
# f = lambda x: -np.sum(np.log(1 - A.T.dot(x))) - np.sum(np.log(1 - x*x))
# grad_f = lambda x: np.sum(A.dot(np.diagflat(1 / (1 - A.T.dot(x)))), axis=1) + 2 * x / (1 - np.power(x, 2))
x0 = np.zeros(n)
print("Initial function value = {}".format(f(x0)))
print("Initial gradient norm = {}".format(np.linalg.norm(grad_f(x0))))
# + [markdown] slideshow={"slide_type": "slide"}
# #### Реализация метода Флетчера-Ривса
# + slideshow={"slide_type": "fragment"}
def ConjugateGradientFR(f, gradf, x0, num_iter=100, tol=1e-8, callback=None, restart=False):
    """Fletcher-Reeves nonlinear conjugate gradient method.

    Minimizes `f` from `x0` using gradient `gradf` and a Wolfe line search
    (utils.backtracking — defined elsewhere in the course utilities; TODO
    confirm its exact signature). Stops when the gradient norm falls below
    `tol` or after `num_iter` iterations, and returns the final iterate.
    `restart` is either False (never restart) or an int restart period.
    """
    x = x0
    grad = gradf(x)
    p = -grad
    it = 0
    while np.linalg.norm(gradf(x)) > tol and it < num_iter:
        alpha = utils.backtracking(x, p, method="Wolfe", beta1=0.1, beta2=0.4, rho=0.5, f=f, grad_f=gradf)
        # Bail out if the line search collapses to a numerically zero step.
        if alpha < 1e-18:
            break
        x = x + alpha * p
        if callback is not None:
            callback(x)
        grad_next = gradf(x)
        # Fletcher-Reeves coefficient: ||g_{k+1}||^2 / ||g_k||^2.
        beta = grad_next.dot(grad_next) / grad.dot(grad)
        p = -grad_next + beta * p
        grad = grad_next.copy()
        it += 1
        # When the restart period divides `it`, drop the accumulated CG
        # history and take a plain steepest-descent direction.
        if restart and it % restart == 0:
            grad = gradf(x)
            p = -grad
    return x
# + [markdown] slideshow={"slide_type": "slide"}
# #### График сходимости
# + slideshow={"slide_type": "fragment"}
import scipy.optimize as scopt
import liboptpy.restarts as restarts
# Restart period n // n_restart for the Fletcher-Reeves run below.
n_restart = 30
tol = 1e-5
max_iter = 600
# Reference run: scipy's Polak-Ribiere CG, recording every iterate.
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, tol=tol, method="CG", jac=grad_f, callback=scopt_cg_callback, options={"maxiter": max_iter})
x = x.x
# Typos fixed in the status messages ("Polak-Rebiere" -> "Polak-Ribiere",
# "garient" -> "gradient").
print("\t CG by Polak-Ribiere")
print("Norm of gradient = {}".format(np.linalg.norm(grad_f(x))))
print("Function value = {}".format(f(x)))
print("\t CG by Fletcher-Reeves")
cg_fr = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))
x = cg_fr.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t CG by Fletcher-Reeves with restart n")
cg_fr_rest = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4,
                                            init_alpha=1.), restarts.Restart(n // n_restart))
x = cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t Gradient Descent")
gd = methods.fo.GradientDescent(f, grad_f, ss.Backtracking("Wolfe", rho=0.5, beta1=0.1, beta2=0.4, init_alpha=1.))
x = gd.solve(x0, max_iter=max_iter, tol=tol, disp=True)
# + slideshow={"slide_type": "fragment"}
# Gradient-norm convergence of all four solvers on the logistic problem.
plt.figure(figsize=(8, 6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ no restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr_rest.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=16)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Время выполнения
# + slideshow={"slide_type": "fragment"}
# Wall-clock comparison of the same four solvers (notebook timeit magics).
# %timeit scopt.minimize(f, x0, method="CG", tol=tol, jac=grad_f, options={"maxiter": max_iter})
# %timeit cg_fr.solve(x0, tol=tol, max_iter=max_iter)
# %timeit cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter)
# %timeit gd.solve(x0, tol=tol, max_iter=max_iter)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Резюме
#
# 1. Сопряжённые направления
# 2. Метод сопряжённых градиентов
# 3. Сходимость
# 4. Эксперименты
|
Spring2021/09-AccGd/cg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Work with Data
#
# Data is the foundation on which machine learning models are built. Managing data centrally in the cloud, and making it accessible to teams of data scientists who are running experiments and training models on multiple workstations and compute targets is an important part of any professional data science solution.
#
# In this notebook, you'll explore two Azure Machine Learning objects for working with data: *datastores*, and *datasets*.
# ## Connect to your workspace
#
# To get started, connect to your workspace.
#
# > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
# +
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
# (may trigger an interactive Azure sign-in prompt on first use).
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -
# ## Work with datastores
#
# In Azure ML, *datastores* are references to storage locations, such as Azure Storage blob containers. Every workspace has a default datastore - usually the Azure storage blob container that was created with the workspace. If you need to work with data that is stored in different locations, you can add custom datastores to your workspace and set any of them to be the default.
#
# ### View datastores
#
# Run the following code to determine the datastores in your workspace:
# +
# Get the default datastore
default_ds = ws.get_default_datastore()
# List every datastore registered in the workspace and flag the default one.
for store_name in ws.datastores:
    is_default = (store_name == default_ds.name)
    print(store_name, "- Default =", is_default)
# -
# You can also view and manage datastores in your workspace on the **Datastores** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com).
#
# ### Upload data to a datastore
#
# Now that you have determined the available datastores, you can upload files from your local file system to a datastore so that it will be accessible to experiments running in the workspace, regardless of where the experiment script is actually being run.
# Copy the two local diabetes CSVs into the default datastore so any
# compute target in the workspace can read them.
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
                       target_path='diabetes-data/', # Put it in a folder path in the datastore
                       overwrite=True, # Replace existing files of the same name
                       show_progress=True)
# ## Work with datasets
#
# Azure Machine Learning provides an abstraction for data in the form of *datasets*. A dataset is a versioned reference to a specific set of data that you may want to use in an experiment. Datasets can be *tabular* or *file*-based.
#
# ### Create a tabular dataset
#
# Let's create a dataset from the diabetes data you uploaded to the datastore, and view the first 20 records. In this case, the data is in a structured format in a CSV file, so we'll use a *tabular* dataset.
# +
from azureml.core import Dataset

# Get the default datastore
default_ds = ws.get_default_datastore()

# Create a tabular dataset from the path on the datastore (this may take a short while).
# The (datastore, path) tuple resolves the glob against the datastore's storage.
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))

# Display the first 20 rows as a Pandas dataframe
tab_data_set.take(20).to_pandas_dataframe()
# -
# As you can see in the code above, it's easy to convert a tabular dataset to a Pandas dataframe, enabling you to work with the data using common python techniques.
#
# ### Create a file Dataset
#
# The dataset you created is a *tabular* dataset that can be read as a dataframe containing all of the data in the structured files that are included in the dataset definition. This works well for tabular data, but in some machine learning scenarios you might need to work with data that is unstructured; or you may simply want to handle reading the data from files in your own code. To accomplish this, you can use a *file* dataset, which creates a list of file paths in a virtual mount point, which you can use to read the data in the files.
# +
# Create a file dataset from the path on the datastore (this may take a short while).
# A file dataset tracks file paths rather than parsed tabular content.
file_data_set = Dataset.File.from_files(path=(default_ds, 'diabetes-data/*.csv'))

# Get the files in the dataset
for file_path in file_data_set.to_path():
    print(file_path)
# -
# ### Register datasets
#
# Now that you have created datasets that reference the diabetes data, you can register them to make them easily accessible to any experiment being run in the workspace.
#
# We'll register the tabular dataset as **diabetes dataset**, and the file dataset as **diabetes files**.
# +
# Register the tabular dataset.
# create_new_version=True bumps the dataset version if the name already exists
# instead of failing; registration errors are reported but not fatal.
try:
    tab_data_set = tab_data_set.register(workspace=ws,
                            name='diabetes dataset',
                            description='diabetes data',
                            tags = {'format':'CSV'},
                            create_new_version=True)
except Exception as ex:
    print(ex)

# Register the file dataset
try:
    file_data_set = file_data_set.register(workspace=ws,
                            name='diabetes file dataset',
                            description='diabetes files',
                            tags = {'format':'CSV'},
                            create_new_version=True)
except Exception as ex:
    print(ex)

print('Datasets registered')
# -
# You can view and manage datasets on the **Datasets** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com). You can also get a list of datasets from the workspace object:
# Enumerate every registered dataset in the workspace with its latest version.
print("Datasets:")
for dataset_name in list(ws.datasets.keys()):
    dataset = Dataset.get_by_name(ws, dataset_name)
    print("\t", dataset.name, 'version', dataset.version)
# The ability to version datasets enables you to redefine datasets without breaking existing experiments or pipelines that rely on previous definitions. By default, the latest version of a named dataset is returned, but you can retrieve a specific version of a dataset by specifying the version number, like this:
#
# ```python
# dataset_v1 = Dataset.get_by_name(ws, 'diabetes dataset', version = 1)
# ```
#
#
# ### Train a model from a tabular dataset
#
# Now that you have datasets, you're ready to start training models from them. You can pass datasets to scripts as *inputs* in the estimator being used to run the script.
#
# Run the following two code cells to create:
#
# 1. A folder named **diabetes_training_from_tab_dataset**
# 2. A script that trains a classification model by using a tabular dataset that is passed to it as an argument.
# +
import os

# Create a folder for the experiment files; the training script written below
# is placed here and the whole folder is uploaded with the run.
experiment_folder = 'diabetes_training_from_tab_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
# +
# %%writefile $experiment_folder/diabetes_training.py
# Training script: fits a logistic-regression classifier on the diabetes
# tabular dataset passed in as the named input 'training_data', logging
# the regularization rate, accuracy, and AUC to the run.
# Import libraries
import os
import argparse
from azureml.core import Run, Dataset
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# Get the script arguments (regularization rate and training dataset ID)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument("--input-data", type=str, dest='training_dataset_id', help='training dataset')
args = parser.parse_args()

# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate

# Get the experiment run context
run = Run.get_context()

# Get the training dataset (registered under the friendly name 'training_data'
# by the submitting notebook's as_named_input call)
print("Loading Data...")
diabetes = run.input_datasets['training_data'].to_pandas_dataframe()

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
# Use the builtin float(): the np.float alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24, so np.float(...) raises AttributeError there.
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)

# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -
# > **Note**: In the script, the dataset is passed as a parameter (or argument). In the case of a tabular dataset, this argument will contain the ID of the registered dataset; so you could write code in the script to get the experiment's workspace from the run context, and then get the dataset using its ID; like this:
# >
# > ```
# > run = Run.get_context()
# > ws = run.experiment.workspace
# > dataset = Dataset.get_by_id(ws, id=args.training_dataset_id)
# > diabetes = dataset.to_pandas_dataframe()
# > ```
# >
# > However, Azure Machine Learning runs automatically identify arguments that reference named datasets and add them to the run's **input_datasets** collection, so you can also retrieve the dataset from this collection by specifying its "friendly name" (which as you'll see shortly, is specified in the argument definition in the script run configuration for the experiment). This is the approach taken in the script above.
#
# Now you can run a script as an experiment, defining an argument for the training dataset, which is read by the script.
#
# > **Note**: The **Dataset** class depends on some components in the **azureml-dataprep** package, which includes optional support for **pandas** that is used by the **to_pandas_dataframe()** method. So you need to include this package in the environment where the training experiment will be run.
# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.widgets import RunDetails
# Create a Python environment for the experiment
sklearn_env = Environment("sklearn-env")
# Ensure the required packages are installed (we need scikit-learn, Azure ML defaults, and Azure ML dataprep)
packages = CondaDependencies.create(conda_packages=['scikit-learn','pip'],
pip_packages=['azureml-defaults','azureml-dataprep[pandas]'])
sklearn_env.python.conda_dependencies = packages
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='diabetes_training.py',
arguments = ['--regularization', 0.1, # Regularizaton rate parameter
'--input-data', diabetes_ds.as_named_input('training_data')], # Reference to dataset
environment=sklearn_env)
# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
# -
# > **Note:** The **--input-data** argument passes the dataset as a *named input* that includes a *friendly name* for the dataset, which is used by the script to read it from the **input_datasets** collection in the experiment run. The string value in the **--input-data** argument is actually the registered dataset's ID. As an alternative approach, you could simply pass `diabetes_ds.id`, in which case the script can access the dataset ID from the script arguments and use it to get the dataset from the workspace, but not from the **input_datasets** collection.
#
# The first time the experiment is run, it may take some time to set up the Python environment - subsequent runs will be quicker.
#
# When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log and the metrics generated by the run.
#
# ### Register the trained model
#
# As with any training experiment, you can retrieve the trained model and register it in your Azure Machine Learning workspace.
# +
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'Tabular dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
# -
# ### Train a model from a file dataset
#
# You've seen how to train a model using training data in a *tabular* dataset; but what about a *file* dataset?
#
# When you're using a file dataset, the dataset argument passed to the script represents a mount point containing file paths. How you read the data from these files depends on the kind of data in the files and what you want to do with it. In the case of the diabetes CSV files, you can use the Python **glob** module to create a list of files in the virtual mount point defined by the dataset, and read them all into Pandas dataframes that are concatenated into a single dataframe.
#
# Run the following two code cells to create:
#
# 1. A folder named **diabetes_training_from_file_dataset**
# 2. A script that trains a classification model by using a file dataset that is passed to it as an *input*.
# +
import os

# Create a folder for the experiment files (file-dataset variant of the
# training experiment).
experiment_folder = 'diabetes_training_from_file_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
# +
# %%writefile $experiment_folder/diabetes_training.py
# Training script: fits a logistic-regression classifier on CSV files from a
# *file* dataset mounted/downloaded at the path passed via --input-data.
# Import libraries
import os
import argparse
from azureml.core import Dataset, Run
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import glob

# Get script arguments (regularization rate and file dataset mount point)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument('--input-data', type=str, dest='dataset_folder', help='data mount point')
args = parser.parse_args()

# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate

# Get the experiment run context
run = Run.get_context()

# load the diabetes dataset
print("Loading Data...")
data_path = run.input_datasets['training_files'] # Get the training data path from the input
# (You could also just use args.dataset_folder if you don't want to rely on a hard-coded friendly name)

# Read the files: concatenate every CSV under the mount point into one frame.
all_files = glob.glob(data_path + "/*.csv")
diabetes = pd.concat((pd.read_csv(f) for f in all_files), sort=False)

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
# Use the builtin float(): the np.float alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24, so np.float(...) raises AttributeError there.
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)

# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -
# Just as with tabular datasets, you can retrieve a file dataset from the **input_datasets** collection by using its friendly name. You can also retrieve it from the script argument, which in the case of a file dataset contains a mount path to the files (rather than the dataset ID passed for a tabular dataset).
#
# Next we need to change the way we pass the dataset to the script - it needs to define a path from which the script can read the files. You can use either the **as_download** or **as_mount** method to do this. Using **as_download** causes the files in the file dataset to be downloaded to a temporary location on the compute where the script is being run, while **as_mount** creates a mount point from which the files can be streamed directly from the datastore.
#
# You can combine the access method with the **as_named_input** method to include the dataset in the **input_datasets** collection in the experiment run (if you omit this, for example by setting the argument to `diabetes_ds.as_mount()`, the script will be able to access the dataset mount point from the script arguments, but not from the **input_datasets** collection).
# +
from azureml.core import Experiment
from azureml.widgets import RunDetails
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes file dataset")
# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='diabetes_training.py',
arguments = ['--regularization', 0.1, # Regularizaton rate parameter
'--input-data', diabetes_ds.as_named_input('training_files').as_download()], # Reference to dataset location
environment=sklearn_env) # Use the environment created previously
# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
# -
# When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log to verify that the files in the file dataset were downloaded to a temporary folder to enable the script to read the files.
#
# ### Register the trained model
#
# Once again, you can register the model that was trained by the experiment.
# +
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'File dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
# -
# > **More Information**: For more information about training with datasets, see [Training with Datasets](https://docs.microsoft.com/azure/machine-learning/how-to-train-with-datasets) in the Azure ML documentation.
|
06 - Work with Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_latest_p36
# language: python
# name: conda_pytorch_latest_p36
# ---
# # Data Preprocessing and Feature Engineering
#
# Upload raw data to S3
# The dataset we use is the IEEE-CIS Fraud Detection dataset which is a typical example of financial transactions dataset that many companies have. The dataset consists of two tables:
#
# Transactions: Records transactions and metadata about transactions between two users. Examples of columns include the product code for the transaction and features on the card used for the transaction.
# Identity: Contains information about the identity users performing transactions. Examples of columns here include the device type and device ids used.
# We will go over the specific data schema in subsequent cells but now let's move the raw data to a convenient location in the S3 bucket for this project, where it will be picked up by the preprocessing job and training job.
#
# If you would like to use your own dataset for this demonstration. Replace the raw_data_location with the s3 path or local path of your dataset, and modify the data preprocessing step as needed.
# ### Prerequisites
#
# - AWS account
# - make sure your env with memory 32G+
# - install python 3.6+, boto3, sagemaker>=2.41.0, pyyaml, pandas, requests(such as `pip install boto3 'sagemaker>=2.41.0,<3.0' pyyaml pandas requests`)
# - install supported version of [awscli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html)
# - configure credential of aws cli with s3, sagemaker permissions
# - install tar and gzip in your OS
import json
import os
import boto3
import sagemaker
import tempfile
# +
# Copy the IEEE-CIS fraud-detection CSVs from the public bucket into a local
# temp directory (the shell magics below are executed by the notebook kernel).
raw_data_location = 's3://aws-gcr-solutions-assets/open-dataset/ieee-fraud-detection/'
session_prefix = 'realtime-fraud-detection-on-dgl'

dest_dir = tempfile.mkdtemp()
transaction_source = f'{raw_data_location}train_transaction.csv'
transaction_dest = f'{dest_dir}/transaction.csv'
# !aws s3 cp $transaction_source $transaction_dest
identity_source = f'{raw_data_location}train_identity.csv'
identity_dest = f'{dest_dir}/identity.csv'
# !aws s3 cp $identity_source $identity_dest
# +
# Run the graph preprocessing script, writing its output to another temp dir.
output_dir = tempfile.mkdtemp()
# ! python ./data-preprocessing/graph_data_preprocessor.py --data-dir $dest_dir --output-dir $output_dir --id-cols 'card1,card2,card3,card4,card5,card6,ProductCD,addr1,addr2,P_emaildomain,R_emaildomain' '--cat-cols' 'M1,M2,M3,M4,M5,M6,M7,M8,M9'
# +
import requests
def detect_running_region():
    """Dynamically determine the AWS region this code is running in.

    Checks, in order: the AWS_REGION / AWS_DEFAULT_REGION environment
    variables, the default boto3 session (if one exists), a fresh boto3
    session, and finally the EC2 instance metadata service (IMDS).

    Returns:
        The region name string, or None if it cannot be determined.
    """
    easy_checks = [
        # check if set through ENV vars
        os.environ.get('AWS_REGION'),
        os.environ.get('AWS_DEFAULT_REGION'),
        # else check if set in config or in boto already
        boto3.DEFAULT_SESSION.region_name if boto3.DEFAULT_SESSION else None,
        boto3.Session().region_name,
    ]
    for region in easy_checks:
        if region:
            return region

    # else query an external service
    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
    # The timeout is essential: outside EC2 the link-local address is
    # unroutable and requests.get would otherwise block indefinitely.
    r = requests.get(
        "http://169.254.169.254/latest/dynamic/instance-identity/document",
        timeout=5)
    response_json = r.json()
    return response_json.get('region')
current_region = detect_running_region()
print(f'current AWS region is {current_region}')

# Resolve the account's default SageMaker bucket in the detected region and
# sync the preprocessed output there for downstream jobs.
default_bucket = sagemaker.session.Session(boto3.session.Session(region_name=current_region)).default_bucket()
processed_data = f's3://{default_bucket}/{session_prefix}/processed-data'
print(processed_data)
# ! export AWS_DEFAULT_REGION=$current_region && aws s3 sync $output_dir $processed_data
# -
# Persist these values for the next notebooks in the series.
# %store processed_data
# %store default_bucket
# %store current_region
|
src/sagemaker/01.FD_SL_Process_IEEE-CIS_Dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Introduction to Earth Engine and TensorFlow in Cloud Datalab
#
# This notebook walks you through a simple example of using Earth Engine and TensorFlow together in Cloud Datalab.
#
# Specifically, we will train a neural network to recognize cloudy pixels in a Landsat scene. For this simple example we will use the output of the Fmask cloud detection algorithm as training data.
# ## Configure the Environment
# We begin by importing a number of useful libraries.
# %matplotlib inline
import ee
from IPython import display
import math
from matplotlib import pyplot
import numpy
from osgeo import gdal
import tempfile
import tensorflow as tf
import urllib
import zipfile
# Initialize the Earth Engine client. This assumes that you have already configured Earth Engine credentials in this Datalab instance. If not, see the "`Earth Engine Datalab Initialization.ipynb`" notebook.
ee.Initialize()
# ## Inspect the Input Data
#
# Load a Landsat image with corresponding Fmask label data.
input_image = ee.Image('LANDSAT/LT5_L1T_TOA_FMASK/LT50100551998003CPE00')
# Let's define a helper function to make it easier to print thumbnails of Earth Engine images. (We'll be adding a library with utility functions like this one to the Earth Engine Python SDK, but for now we can do it by hand.)
def print_image(image):
    """Display an Earth Engine image inline as a 360-pixel thumbnail."""
    thumb_request = {
        'image': image.serialize(),
        'dimensions': '360',
    }
    thumbnail = ee.data.getThumbnail(thumb_request)
    display.display(display.Image(thumbnail))
# Now we can use our helper function to quickly visualize the image and label data. The Fmask values are:
#
# 0 | 1 | 2 | 3 | 4
# :---:|:---:|:---:|:---:|:---:
# Clear | Water | Shadow | Snow | Cloud
# True-color composite (bands 3/2/1), stretched to [0, 0.3] reflectance.
print_image(input_image.visualize(
    bands=['B3', 'B2', 'B1'],
    min=0,
    max=0.3,
))

# Fmask labels rendered with one palette color per class (0..4).
print_image(input_image.visualize(
    bands=['fmask'],
    min=0,
    max=4,
    palette=['808080', '0000C0', '404040', '00FFFF', 'FFFFFF'],
))
# ## Fetch the Input Data
#
# First we define some helper functions to download raw data from Earth Engine as `numpy` arrays.
#
# We use the `getDownloadId()` function, which only works for modestly sized datasets. For larger datasets, a better approach would be to initiate a batch Export from Earth Engine to Cloud Storage, which you could easily manage right here in Datalab too.
# +
def download_tif(image, scale):
    # Download an Earth Engine image as a single GeoTIFF and return the
    # local path of the extracted file.  getDownloadId only works for
    # modestly sized exports; larger data should use a batch Export.
    url = ee.data.makeDownloadUrl(ee.data.getDownloadId({
        'image': image.serialize(),
        'scale': '%d' % scale,
        'filePerBand': 'false',
        'name': 'data',
    }))
    # Python 2 urllib: fetch the zip archive to a temp file, then extract
    # the single 'data.tif' entry into a fresh temporary directory.
    local_zip, headers = urllib.urlretrieve(url)
    with zipfile.ZipFile(local_zip) as local_zipfile:
        return local_zipfile.extract('data.tif', tempfile.mkdtemp())
def load_image(image, scale):
    # Download the image as a GeoTIFF and read every band through GDAL,
    # returning a (rows, cols, bands) numpy array.
    local_tif_filename = download_tif(image, scale)
    dataset = gdal.Open(local_tif_filename, gdal.GA_ReadOnly)
    bands = [dataset.GetRasterBand(i + 1).ReadAsArray() for i in range(dataset.RasterCount)]
    return numpy.stack(bands, 2)
# -
# Now we can use that function to load the data from Earth Engine, including a valid data band, as a `numpy` array. This may take a few seconds. We also convert the Fmask band to a binary cloud label (i.e. `fmask`=4).
# Valid-data mask: min over the per-band masks, appended as the last band.
mask = input_image.mask().reduce('min')
data = load_image(input_image.addBands(mask), scale=240)
# Convert the Fmask band (index 7) to a binary cloud label (1 iff fmask == 4).
data[:,:,7] = numpy.equal(data[:,:,7], 4)
# Display the local data. This time, for variety, we display it as an NRG false-color image. We can use `pyplot` to display local `numpy` arrays.
pyplot.imshow(numpy.clip(data[:,:,[3,2,1]] * 3, 0, 1))
pyplot.show()
# ## Preprocess the Input Data
#
# Select the valid pixels and hold out a fraction for use as validation data. Compute per-band means and standard deviations of the training data for normalization.
# +
HOLDOUT_FRACTION = 0.1

# Reshape into a single vector of pixels.
data_vector = data.reshape([data.shape[0] * data.shape[1], data.shape[2]])

# Select only the valid data (band 8 is the validity mask) and shuffle it.
valid_data = data_vector[numpy.equal(data_vector[:,8], 1)]
numpy.random.shuffle(valid_data)

# Hold out a fraction of the labeled data for validation.
training_size = int(valid_data.shape[0] * (1 - HOLDOUT_FRACTION))
training_data = valid_data[0:training_size,:]
# Slice to the end: the original 'training_size:-1' silently dropped the
# last validation sample.
validation_data = valid_data[training_size:,:]

# Compute per-band means and standard deviations of the input bands.
data_mean = training_data[:,0:7].mean(0)
data_std = training_data[:,0:7].std(0)
# -
valid_data.shape
# ## Build the TensorFlow Model
#
# We start with a helper function to build a simple TensorFlow neural network layer.
def make_nn_layer(input, output_size):
    """Build a fully connected layer: input @ W + b (no nonlinearity).

    Weights are initialized from a truncated normal scaled by
    1/sqrt(fan_in); biases start at zero.
    """
    fan_in = input.get_shape().as_list()[1]
    init_stddev = 1.0 / math.sqrt(float(fan_in))
    w = tf.Variable(tf.truncated_normal([fan_in, output_size], stddev=init_stddev))
    b = tf.Variable(tf.zeros([output_size]))
    return tf.matmul(input, w) + b
# Here we define our TensorFlow model, a neural network with two hidden layers with tanh() nonlinearities. The main network has two outputs, continuous-valued “logits” representing non-cloud and cloud, respectively. The binary output is intepreted as the argmax of these outputs.
#
# We define a training step, which uses Kingma and Ba's Adam algorithm to minimize the cross-entropy between the logits and the training data. Finally, we define a simple overall percentage accuracy measure.
# +
NUM_INPUT_BANDS = 7
NUM_HIDDEN_1 = 20
NUM_HIDDEN_2 = 20
NUM_CLASSES = 2

# NOTE: 'input' shadows the Python builtin; kept because the training and
# inference cells below feed this placeholder by that name.
input = tf.placeholder(tf.float32, shape=[None, NUM_INPUT_BANDS])
labels = tf.placeholder(tf.float32, shape=[None])

# Normalize each band using the training-set statistics computed above.
normalized = (input - data_mean) / data_std
hidden1 = tf.nn.tanh(make_nn_layer(normalized, NUM_HIDDEN_1))
hidden2 = tf.nn.tanh(make_nn_layer(hidden1, NUM_HIDDEN_2))
logits = make_nn_layer(hidden2, NUM_CLASSES)
# Predicted class = argmax over the two logits (0 = clear, 1 = cloud).
outputs = tf.argmax(logits, 1)

int_labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels= int_labels, name='xentropy')
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

correct_prediction = tf.equal(outputs, int_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# -
# ## Train the Neural Network
#
# Now train the neural network, using batches of training data drawn randomly from the training data pool. We periodically compute the accuracy against the validation data. When we're done training, we apply the model to the complete input data set.
#
# This simple notebook performs all TensorFlow operations locally. However, for larger analyses you could bring up a cluster of TensorFlow workers to parallelize the computation, all controlled from within Datalab.
# +
BATCH_SIZE = 1000
NUM_BATCHES = 1000

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())

    validation_dict = {
        input: validation_data[:,0:7],
        labels: validation_data[:,7],
    }

    for i in range(NUM_BATCHES):
        # Sample a batch of training pixels without replacement.
        batch = training_data[numpy.random.choice(training_size, BATCH_SIZE, False),:]
        train_step.run({input: batch[:,0:7], labels: batch[:,7]})

        if i % 100 == 0 or i == NUM_BATCHES - 1:
            print('Accuracy %.2f%% at step %d' % (accuracy.eval(validation_dict) * 100, i))

    # Apply the trained model to every pixel in the scene.
    output_data = outputs.eval({input: data_vector[:,0:7]})
# -
# ## Inspect the Results
#
# Here we display the results. The red band corresponds to the TensorFlow output and the blue band corresponds to the labeled training data, so pixels that are red and blue correspond to disagreements between the model and the training data. (There aren't many: look carefully around the fringes of the clouds.)
# +
# Reshape per-pixel predictions back into the scene's 2-D layout.
output_image = output_data.reshape([data.shape[0], data.shape[1]])

# Red = model prediction, blue = label, grey (0.5) outside the valid mask;
# green = min(red, blue) so agreement renders white/black and disagreement
# renders pure red or blue.
red = numpy.where(data[:,:,8], output_image, 0.5)
blue = numpy.where(data[:,:,8], data[:,:,7], 0.5)
green = numpy.minimum(red, blue)
comparison_image = numpy.dstack((red, green, blue))

pyplot.figure(figsize = (12,12))
pyplot.imshow(comparison_image)
pyplot.show()
# -
# We can zoom in on a particular region over on the right side of the image to see some of the disagreements. Red pixels represent commission errors and blue pixels represent omission errors relative to the labeled input data.
# Crop to rows 300-500 and columns 600+ to inspect disagreements up close.
pyplot.figure(figsize = (12,12))
pyplot.imshow(comparison_image[300:500,600:,:], interpolation='nearest')
pyplot.show()
|
src/tfexample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HCA paper
#
# - Purpose: Reproduce Fig4(left), returns vs. episodes in Delayed Effect Environment
# - Compare training curves for policy gradient (baseline), return HCA, and state HCA. All employing 3-step bootstrapping, where aliased chain is size 4. Expect policy gradient to fail, but HCA to figure this out.
#
# Resulting plot reproduces qualitative behavior of return HCA vs. policy gradient, but not state HCA (which isn't learning => need to debug).
# +
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import hca.tabular_actor_critic as tabular_actor_critic
from hca.tabular_vpg import vpg
from hca.envs.delayed_effect_env import DelayedEffectEnv
# %matplotlib inline
# -
# Per-algorithm configurations: plain policy gradient (vpg) baseline,
# return-conditional HCA, and state-conditional HCA.
vpg_config = dict(
    actor_critic=tabular_actor_critic.TabularVPGActorCritic,
    ac_kwargs={'pi_lr': 0.2, 'vf_lr': 0.2},
    algo='vpg',
)
returnHCA_config = dict(
    actor_critic=tabular_actor_critic.TabularReturnHCA,
    ac_kwargs={'pi_lr': 0.2, 'vf_lr': 0.2, 'h_lr': 0.2,
               'return_bins': np.array([-1,0,1])},
    algo='returnHCA',
)
stateHCA_config = dict(
    actor_critic=tabular_actor_critic.TabularStateHCA,
    ac_kwargs={'pi_lr': 0.2, 'vf_lr': 0.2, 'h_lr': 0.2},
    algo='stateHCA',
)
# Shared settings: Delayed Effect Environment with aliased chain of size 4,
# undiscounted returns, 3-step bootstrapping.
config = dict(
    env_kwargs={'OHE_obs': False,
                'n': 4,
                'final_reward': 1.0,
                'sigma': 0.0},
    n_episodes=500,
    n_test_episodes=100,
    gamma=1.0,
    lam=1.0,
    bootstrap_n=3
)

num_runs = 100
algo_configs = [vpg_config, returnHCA_config, stateHCA_config]

# Run each algorithm num_runs times; each run logs to ./<algo>/<run index>/.
# NOTE: pop('algo') mutates the config dicts, so this loop is single-shot —
# re-running the cell requires re-running the config cells above.
for algo_conf in algo_configs:
    algo_name = algo_conf.pop('algo')
    for i in range(num_runs):
        logger_out_dir = os.path.join('.', algo_name, f"{i:03}")
        logger_kwargs = {'exp_name': 'hca', 'output_dir': logger_out_dir}
        vpg(env_fn=DelayedEffectEnv, **config, **algo_conf,
            logger_kwargs=logger_kwargs)
# # Read in results for the 100 independent runs per algo (over 500 episodes), calculate means and standard deviations, and plot against each other
# +
import glob

# Collect the progress.txt written by each of the 100 runs per algorithm.
vpg_logs = glob.glob('vpg/*/progress.txt')
returnHCA_logs = glob.glob('returnHCA/*/progress.txt')
stateHCA_logs = glob.glob('stateHCA/*/progress.txt')
# -
def run_results(n_episodes, num_runs, log_names):
    """Load per-run progress logs into two (n_episodes x num_runs) DataFrames.

    Each file in log_names is a tab-separated log with columns
    AverageTestEpRet and EpRet. Returns (test returns, training returns),
    one column per run.
    """
    assert len(log_names) == num_runs
    test_returns = np.zeros((n_episodes, num_runs))
    train_returns = np.zeros((n_episodes, num_runs))
    for col, log_file in enumerate(log_names):
        progress = pd.read_table(log_file)
        test_returns[:, col] = progress.AverageTestEpRet.values
        train_returns[:, col] = progress.EpRet.values
    return pd.DataFrame(test_returns), pd.DataFrame(train_returns)
# +
# Number of logged episodes per run (must match the training config above).
n_episodes = config['n_episodes']
# Load the (episodes x runs) test/train return matrices for each algorithm.
vpg_test_ep_ret, vpg_ep_ret = run_results(n_episodes, num_runs, vpg_logs)
returnHCA_test_ep_ret, returnHCA_ep_ret = run_results(n_episodes, num_runs, returnHCA_logs)
stateHCA_test_ep_ret, stateHCA_ep_ret = run_results(n_episodes, num_runs, stateHCA_logs)
# -
# Per-episode mean and standard deviation (across the independent runs) of the
# greedy-evaluation returns for each algorithm.
vpg_mean_test_ep_ret = vpg_test_ep_ret.mean(axis=1)
vpg_std_test_ep_ret = vpg_test_ep_ret.std(axis=1)
returnHCA_mean_test_ep_ret = returnHCA_test_ep_ret.mean(axis=1)
returnHCA_std_test_ep_ret = returnHCA_test_ep_ret.std(axis=1)
stateHCA_mean_test_ep_ret = stateHCA_test_ep_ret.mean(axis=1)
stateHCA_std_test_ep_ret = stateHCA_test_ep_ret.std(axis=1)
# Quick preview of the last few mean test returns. Bug fix: the original
# called .tail() *before* returnHCA_mean_test_ep_ret was assigned, which
# raised a NameError on a fresh kernel run.
returnHCA_mean_test_ep_ret.tail()
# +
# Plot the mean *test* returns per algorithm with a +/- 1 std band.
plt.rcParams["figure.figsize"] = (12,8)
epochs = range(n_episodes)
alpha=0.3
fig, ax = plt.subplots(1)
# (mean series, std series, legend label, color) per algorithm.
curves = [
    (vpg_mean_test_ep_ret, vpg_std_test_ep_ret, 'vpg', 'red'),
    (returnHCA_mean_test_ep_ret, returnHCA_std_test_ep_ret, 'returnHCA', 'blue'),
    (stateHCA_mean_test_ep_ret, stateHCA_std_test_ep_ret, 'stateHCA', 'green'),
]
# Draw all mean curves first, then all std bands, matching the original
# call order exactly.
for mean_ret, _std_ret, algo_label, algo_color in curves:
    ax.plot(epochs, mean_ret, lw=2, label=algo_label, color=algo_color)
for mean_ret, std_ret, _algo_label, algo_color in curves:
    ax.fill_between(epochs,
                    mean_ret + std_ret,
                    mean_ret - std_ret,
                    facecolor=algo_color, alpha=alpha)
ax.legend(loc='upper left')
ax.set_xlabel('episode')
ax.set_ylabel('Test episode returns')
ax.grid()
# +
# Per-episode mean and standard deviation of the *training* episode returns.
vpg_mean_ep_ret = vpg_ep_ret.mean(axis=1)
vpg_std_ep_ret = vpg_ep_ret.std(axis=1)
returnHCA_mean_ep_ret = returnHCA_ep_ret.mean(axis=1)
returnHCA_std_ep_ret = returnHCA_ep_ret.std(axis=1)
stateHCA_mean_ep_ret = stateHCA_ep_ret.mean(axis=1)
stateHCA_std_ep_ret = stateHCA_ep_ret.std(axis=1)
# plot training returns
plt.rcParams["figure.figsize"] = (12,8)
epochs = range(n_episodes)
alpha=0.3
fig, ax = plt.subplots(1)
ax.plot(epochs, vpg_mean_ep_ret, lw=2, label='vpg', color='red')
ax.plot(epochs, returnHCA_mean_ep_ret, lw=2, label='returnHCA', color='blue')
# Bug fix: the original plotted stateHCA_mean_test_ep_ret (the *test* curve)
# on this training-returns figure; use the training mean instead.
ax.plot(epochs, stateHCA_mean_ep_ret, lw=2, label='stateHCA', color='green')
ax.fill_between(epochs,
                vpg_mean_ep_ret+vpg_std_ep_ret,
                vpg_mean_ep_ret-vpg_std_ep_ret,
                facecolor='red', alpha=alpha)
ax.fill_between(epochs,
                returnHCA_mean_ep_ret+returnHCA_std_ep_ret,
                returnHCA_mean_ep_ret-returnHCA_std_ep_ret,
                facecolor='blue', alpha=alpha)
ax.fill_between(epochs,
                stateHCA_mean_ep_ret+stateHCA_std_ep_ret,
                stateHCA_mean_ep_ret-stateHCA_std_ep_ret,
                facecolor='green', alpha=alpha)
ax.legend(loc='upper left')
ax.set_xlabel('episode')
ax.set_ylabel('episode returns')
ax.grid()
# -
|
HCA/hca/scripts/hca_paper_fig4_left.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
# #### Propriva had the fewest tests. What did the tumor volume change over time look like? What is the average number of tests per mouse compared to other drugs?
#
# #### Capomulin and Ramicane had the highest number of tests with the lowest tumor volume at the last recorded reading, interested in starting tumor volume to the ending tumor volume for those two drug regimens.
#
# #### A few additional questions I would be interested in answering:
# * Metastatic Sites changes per Drug Regimen?
# * Avg # tests per mouse per Drug Regimen
# * Summary stats based on Starting Tumor Volume to Last Measurement per Drug Regimen
# * Tumor volume vs. time point Average per Mouse per Drug Regimen (see line plot below, this would be an expansion of that)
# * Any correlations pertaining to age?
# * Ketapril- had the highest tumor volume variance as well as std error, deeper dive into start to end tumor volume
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "./Mouse_metadata.csv"
study_results_path = "./Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset.
# how="right" keeps every row of study_results even if a mouse is missing
# from the metadata file.
merge_df = pd.merge(mouse_metadata, study_results, on="Mouse ID", how="right")
# Display the data table for preview
merge_df
# -
# -
# Checking the number of mice.
len(merge_df["Mouse ID"].unique())
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# Idiom fix: duplicated() already returns a boolean mask, so the redundant
# "== True" comparison is dropped.
dup_mice = merge_df[merge_df[['Mouse ID', 'Timepoint']].duplicated()]
dup_mice
# Optional: Get all the data for the duplicate mouse ID.
dup_mouse = merge_df.loc[merge_df["Mouse ID"] == "g989", :]
print(dup_mouse)
# +
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
indexNames = merge_df[ merge_df['Mouse ID'] == 'g989' ].index
# Delete these row indexes from dataFrame (in place: merge_df itself is cleaned,
# so all later cells operate on the de-duplicated data).
merge_df.drop(indexNames , inplace=True)
print(merge_df)
# Checking the number of mice in the clean DataFrame.
# Sanity check: mouse g989 should no longer appear.
check_me = merge_df.loc[merge_df["Mouse ID"] == "g989", :]
check_me
len(merge_df["Mouse ID"].unique())
# -
merge_df
# ## Summary Statistics
# +
from scipy.stats import sem
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
drug_group = merge_df.groupby(['Drug Regimen'])
dg_mean = drug_group["Tumor Volume (mm3)"].mean()
dg_median = drug_group["Tumor Volume (mm3)"].median()
dg_std_dev = drug_group["Tumor Volume (mm3)"].std()
dg_var = drug_group["Tumor Volume (mm3)"].var()
dg_sem = drug_group["Tumor Volume (mm3)"].sem()
# print(dg_mean)
# print(dg_median)
# print(dg_std_dev)
# print(dg_var)
# print(dg_sem)
# All series share the Drug Regimen index, so they align into one table.
drug_summary = pd.DataFrame({"Mean Tumor Volume": dg_mean,
                             "Median Tumor Volume": dg_median,
                             "Tumor Volume Variance": dg_var,
                             "Tumor Volume Std Dev": dg_std_dev,
                             "Tumor Volume Std Err": dg_sem})
drug_summary
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
drug_group.agg({"Tumor Volume (mm3)":['mean', 'median', 'std', 'var', 'sem']})
# -
# -
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.
drug_group = merge_df.groupby(['Drug Regimen'])
# One count per (regimen, timepoint) measurement row.
tests = drug_group["Timepoint"].count()
tests_summary = pd.DataFrame({"Number of Mice Tested": tests})
tests_summary
#Plot with Pandas
tests_summary.plot(kind="bar", figsize=(15,5))
# Set x and y limits
plt.ylim(0, max(tests_summary["Number of Mice Tested"])+10)
# Set a Title and labels
plt.title("Tests Per Drug Regimen")
plt.ylabel("Number of Mice Tested")
# Bug fix: tight_layout() must run before show(); in the original it was
# called after show(), where it has no effect on the rendered figure.
plt.tight_layout()
plt.show()
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
drug_group = merge_df.groupby(['Drug Regimen'])
tests = drug_group["Timepoint"].count()
tests_summary = pd.DataFrame({"Number of Mice Tested": tests})
# Bar positions/labels come straight from the regimen index.
drug_list = tests_summary.index.tolist()
#print(drug_list)
nums= tests_summary["Number of Mice Tested"].tolist()
#print(nums)
x_axis = drug_list
tick_locations = [value for value in x_axis]
# Create a list indicating where to write x labels and set figure size to adjust for space
plt.figure(figsize=(15,5))
plt.bar(x_axis, nums, color='blue', alpha=0.5, align="edge")
plt.xticks(tick_locations, x_axis, rotation="vertical")
# Set x and y limits
plt.xlim(-0.25, len(x_axis))
plt.ylim(0, max(nums)+30)
# Set a Title and labels
plt.title("Tests Per Drug Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Mice Tested")
# Show the graph
plt.tight_layout()
plt.show()
#tests_summary
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
gender_only= merge_df.loc[:, ["Sex", "Timepoint"]]
gender_only
# Column-rename trick: the counted column ends up titled "Sex" so the pie's
# y-label reads "Sex", while the groupby key column is relabelled "Gender".
gender_only.rename(columns = {'Timepoint' : 'Sex', "Sex": "Gender"}, inplace = True)
#print(gender_only)
gender_count = gender_only.groupby(["Gender"]).count()
#print(gender_count)
gender_summed = gender_count.sum()
#print(gender_summed)
# Convert counts to percentages of the total.
gender_percent = (gender_count / gender_summed) *100
#print(gender_percent)
#Plot
gender_pie = gender_percent.plot.pie(y="Sex", autopct='%1.1f%%', explode=(0, 0.05),
                                     shadow=True, figsize=(5, 5))
plt.show()
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
gender_only= merge_df.loc[:, ["Sex", "Timepoint"]]
gender_only
gender_count = gender_only.groupby(["Sex"]).count()
#print(gender_count)
gender_summed = gender_count.sum()
#print(gender_summed)
gender_percent = (gender_count / gender_summed) *100
#print(gender_percent)
gender = gender_percent["Timepoint"]
#print(gender)
# NOTE(review): hard-coded labels assume the groupby index orders Female
# before Male (alphabetical) — confirm against the data.
labels = ["Female", "Male"]
colors = ["blue", "orange"]
plt.pie(gender, labels=labels, colors=colors, autopct="%1.1f%%", startangle=100)
plt.title("Sex")
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# Note: .max() is taken column-wise per mouse, but only the max Timepoint
# column is used in the merge below, so the other maxed columns are ignored.
max_tumor_df= merge_df.groupby(["Mouse ID"]).max()
max_tumor_df = max_tumor_df.reset_index()
merged_data_df = max_tumor_df[['Mouse ID','Timepoint']].merge(merge_df,on=['Mouse ID','Timepoint'],how="left")
merged_data_df
# +
# Put treatments into a list for for loop (and later for plot labels)
treatment_list = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
# Create empty list to fill with tumor vol data (for plotting)
tumor_vol_list = []
# Calculate the IQR and quantitatively determine if there are any potential outliers.
for drug in treatment_list: #list of drugs to check for
    # Locate the rows which contain mice on each drug and get the tumor volumes
    final_tumor_vol = merged_data_df.loc[merged_data_df["Drug Regimen"] == drug, 'Tumor Volume (mm3)']
    #print(final_tumor_vol)
    #add subset
    tumor_vol_list.append(final_tumor_vol)
    # Determine outliers using upper and lower bounds (1.5 * IQR rule)
    quartiles = final_tumor_vol.quantile([.25,.5,.75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq-lowerq
    lower_bound = lowerq - (1.5*iqr)
    upper_bound = upperq + (1.5*iqr)
    print(f"{drug}'s potential outliers: {final_tumor_vol.loc[(final_tumor_vol < lower_bound) | (final_tumor_vol > upper_bound)]}")
#tumor_vol_list
# -
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# flierprops styles potential outliers as large red markers.
outliers = dict(markerfacecolor='red',markersize=10)
plt.boxplot(tumor_vol_list,labels= treatment_list,flierprops=outliers)
plt.xlabel('Drug Regimen')
plt.ylabel('Final Tumor Volume (mm3)')
plt.tight_layout()
plt.show()
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
mouse = merge_df.loc[merge_df["Mouse ID"] == "r554", :]
#mouse
mouse_reduce = mouse[["Timepoint", "Tumor Volume (mm3)"]]
#mouse_reduce
# Note: mouse_reduce is rebound here to the Axes object returned by plot.line.
mouse_reduce = mouse_reduce.plot.line(x='Timepoint', y='Tumor Volume (mm3)', color="blue")
plt.title("Capomulin Treatment of Mouse r554")
plt.xlabel("Timepoint (Days)")
plt.ylabel("Tumor Volume (mm3)")
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
cap_only = merge_df.loc[merge_df["Drug Regimen"] == "Capomulin", :]
cap_only
# Mean weight and mean tumor volume per mouse (averaged over all timepoints).
scatter_df = cap_only[["Mouse ID", "Weight (g)", "Tumor Volume (mm3)"]].groupby(["Mouse ID"]).mean()
scatter_df = scatter_df.reset_index()
scatter_df
plt.scatter(scatter_df.loc[:,"Weight (g)"],scatter_df.loc[:,"Tumor Volume (mm3)"])
plt.xlabel("Weight (g)")
plt.ylabel('Average Tumor Volume (mm3)')
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
weight = scatter_df.iloc[:,1]  # column 1 = "Weight (g)"
#print(weight)
avg_tumor_vol = scatter_df.iloc[:, 2]  # column 2 = "Tumor Volume (mm3)"
avg_tumor_vol
# pearsonr returns (r, p-value); [0] is the correlation coefficient.
correlation = st.pearsonr(weight,avg_tumor_vol)
print(f"The correlation between the mouse weight and average tumor volume for the Capomulin Drug Regimen is {round(correlation[0],2)}")
# +
x_values = scatter_df.loc[:,'Weight (g)']
x_values
y_values = scatter_df['Tumor Volume (mm3)']
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
# NOTE(review): the annotation anchor (6,10) looks outside the plotted data
# range (weights are larger than 6 g) — confirm the equation text is visible.
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Weight (g)')
plt.ylabel('Average Tumor Volume (mm3)')
plt.title('Correlation Coefficient & Linear Regression Model')
plt.show()
# -
# -
|
matplotlib-challenge/pymaceuticals_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import math
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
from sklearn import neighbors
import torch
import torch.nn as nn
import torch.optim as optim
import scikit_wrappers
# +
# Device selection: fall back to CPU when CUDA is unavailable.
cuda = False
# GPU number. Bug fix: defined unconditionally — the original only assigned
# `gpu` inside the CUDA branch, so building `hyperparameters` later
# ("gpu": gpu) raised a NameError on CPU-only machines.
gpu = 0
if torch.cuda.is_available():
    print("Using CUDA...")
    cuda = True
# -
# ### Dataset
ds = pd.read_csv("Data/owid-covid-data.csv")
# ISO codes of the training countries.
# Except France, China, USA, Nepal — those four are held out as test series.
# (Variable name "contries" is a typo for "countries", kept for compatibility.)
contries =['AFG', 'ALB', 'DZA', 'AND', 'AGO', 'AIA', 'ATG', 'ARG', 'ARM',
           'ABW', 'AUS', 'AUT', 'AZE', 'BHS', 'BHR', 'BGD', 'BRB', 'BLR',
           'BEL', 'BLZ', 'BEN', 'BMU', 'BTN', 'BOL', 'BES', 'BIH', 'BWA',
           'BRA', 'VGB', 'BRN', 'BGR', 'BFA', 'BDI', 'KHM', 'CMR', 'CAN',
           'CPV', 'CYM', 'CAF', 'TCD', 'CHL', 'COL', 'COM', 'COG',
           'CRI', 'CIV', 'HRV', 'CUB', 'CUW', 'CYP', 'CZE', 'COD', 'DNK',
           'DJI', 'DMA', 'DOM', 'ECU', 'EGY', 'SLV', 'GNQ', 'ERI', 'EST',
           'ETH', 'FRO', 'FLK', 'FJI', 'FIN', 'PYF', 'GAB', 'GMB',
           'GEO', 'DEU', 'GHA', 'GIB', 'GRC', 'GRL', 'GRD', 'GUM', 'GTM',
           'GGY', 'GIN', 'GNB', 'GUY', 'HTI', 'HND', 'HKG', 'HUN', 'ISL',
           'IND', 'IDN', 'IRN', 'IRQ', 'IRL', 'IMN', 'ISR', 'ITA', 'JAM',
           'JPN', 'JEY', 'JOR', 'KAZ', 'KEN', 'KWT', 'KGZ', 'LAO',
           'LVA', 'LBN', 'LSO', 'LBR', 'LBY', 'LIE', 'LTU', 'LUX', 'MKD',
           'MDG', 'MWI', 'MYS', 'MDV', 'MLI', 'MLT', 'MRT', 'MUS', 'MEX',
           'MDA', 'MCO', 'MNG', 'MNE', 'MSR', 'MAR', 'MOZ', 'MMR', 'NAM',
           'NLD', 'NCL', 'NZL', 'NIC', 'NER', 'NGA', 'MNP', 'NOR',
           'OMN', 'PAK', 'PSE', 'PAN', 'PNG', 'PRY', 'PER', 'PHL', 'POL',
           'PRT', 'PRI', 'QAT', 'ROU', 'RUS', 'RWA', 'KNA', 'LCA', 'VCT',
           'SMR', 'STP', 'SAU', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', 'SXM',
           'SVK', 'SVN', 'SOM', 'ZAF', 'KOR', 'SSD', 'ESP', 'LKA', 'SDN',
           'SUR', 'SWZ', 'SWE', 'CHE', 'SYR', 'TWN', 'TJK', 'TZA', 'THA',
           'TLS', 'TGO', 'TTO', 'TUN', 'TUR', 'TCA', 'UGA', 'UKR', 'ARE',
           'GBR', 'VIR', 'URY', 'UZB', 'VAT', 'VEN', 'VNM', 'ESH',
           'YEM', 'ZMB', 'ZWE']
# Daily new cases for all training countries, concatenated as one Series.
data = ds[ds['iso_code'].isin(contries)]['new_cases']
#ds[ds['iso_code'] in contries]['new_cases'].reset_index().drop(columns=['index']).plot()
# Fill missing values by nearest-neighbour interpolation.
data = data.interpolate(method='nearest')
# +
#train, test = sklearn.model_selection.train_test_split(data.to_numpy(), test_size=0.2, shuffle=False)
# +
# Preprocessing: normalization (zero mean, unit variance)
mean = np.mean(data)
var = np.var(data)
data = (data - mean)/math.sqrt(var)
#train = (train - mean)/math.sqrt(var)
#test = (test - mean)/math.sqrt(var)
# Sanity check: should print approximately 0 mean and 1 variance.
print('Mean: ', np.mean(data))
print('Variance: ', np.var(data))
# -
# ### Meat
# Hyperparameters for the causal CNN contrastive encoder
# (see scikit_wrappers.CausalCNNEncoderClassifier for parameter meanings).
hyperparameters = {
    "batch_size": 1,
    "channels": 30,
    "compared_length": None,
    "depth": 10,
    "nb_steps": 100,
    "in_channels": 1,
    "kernel_size": 3,
    "penalty": None,
    "early_stopping": None,
    "lr": 0.001,
    "nb_random_samples": 10,
    "negative_penalty": 1,
    "out_channels": 160,
    "reduced_size": 80,
    "cuda": cuda,
    "gpu": gpu
}
encoder = scikit_wrappers.CausalCNNEncoderClassifier()
encoder.set_params(**hyperparameters)
# Directory holding the pretrained encoder checkpoint.
model = 'COVIDMODELS/'
# Reshape the series to the (batch, channel, time) layout the encoder expects.
data = np.array([[data.to_numpy()]])
#encoder.fit_encoder(data, save_memory=True, verbose=True)
# Load pretrained weights instead of retraining (fit_encoder is commented out).
encoder.load_encoder(model)
# +
#encoder.save_encoder(model)
# -
# Held-out test series (excluded from the training-country list above):
# France, China, USA, Nepal. Nepal needs interpolation for missing values.
test_fra = np.array([[ds[ds['iso_code'].isin(['FRA'])]['new_cases'].to_numpy()]])
test_chn = np.array([[ds[ds['iso_code'].isin(['CHN'])]['new_cases'].to_numpy()]])
test_usa = np.array([[ds[ds['iso_code'].isin(['USA'])]['new_cases'].to_numpy()]])
test_npl = np.array([[ds[ds['iso_code'].isin(['NPL'])]['new_cases'].interpolate(method='nearest').to_numpy()]])
# Encode each test series into per-timestep feature vectors (window size 1).
test_features_fra = encoder.encode_window(test_fra, 1)
test_features_chn = encoder.encode_window(test_chn, 1)
test_features_usa = encoder.encode_window(test_usa, 1)
test_features_npl = encoder.encode_window(test_npl, 1)
def find_contiguous_colors(colors):
    """Split *colors* into runs of consecutive identical values.

    Returns a list of segments (lists) whose concatenation equals *colors*.
    An empty input yields [[]], matching the historical behavior.
    """
    segments = []
    current_run = []
    last_color = ''
    for color in colors:
        # Start a new run whenever the color changes (the '' sentinel marks
        # "no previous color yet", so the first element never splits).
        if color != last_color and last_color != '':
            segments.append(current_run)
            current_run = [color]
        else:
            current_run.append(color)
        last_color = color
    segments.append(current_run)  # the final run
    return segments
# ### Results Visualization
# Cluster the per-timestep feature vectors of the Nepal series into 3 regimes.
kmeans = cluster.KMeans(n_clusters=3).fit(np.swapaxes(test_features_npl[0, :, :], 0, 1))
# Fixed palette so cluster labels map to stable colors across runs.
associated_colors = {0: 'blue', 1: 'green', 2: 'red', 3: 'yellow', 4: 'magenta', 5: 'black', 6: 'purple', 7: 'cyan', 8: 'pink', 9: 'orange', 10: 'grey', 11: 'fuchsia', 12: 'maroon', 13: 'navy'}
colors = [associated_colors[l] for l in kmeans.labels_]
segments = find_contiguous_colors(colors)
plt.figure(figsize=(30,10))
# Flattening the segments restores the original per-timestep color list.
flat_seg = [item for sublist in segments for item in sublist]
# +
y = range(0,len(flat_seg))
# Scatter the raw series, colored by cluster assignment.
plt.scatter(y,test_npl,color=flat_seg)
# -
plt.plot(test_npl.flatten())
|
TSLR/COVID19.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Realtime ASR
#
# Let say you want to transcribe realtime recording / input, malaya-speech able to do that.
# <div class="alert alert-info">
#
# This tutorial is available as an IPython notebook at [malaya-speech/example/realtime-asr](https://github.com/huseinzol05/malaya-speech/tree/master/example/realtime-asr).
#
# </div>
# <div class="alert alert-warning">
#
# This module is not language independent, so it is not safe to use on other languages. The pretrained models were trained on hyperlocal languages.
#
# </div>
# <div class="alert alert-warning">
#
# This is an application of malaya-speech Pipeline, read more about malaya-speech Pipeline at [malaya-speech/example/pipeline](https://github.com/huseinzol05/malaya-speech/tree/master/example/pipeline).
#
# </div>
import malaya_speech
from malaya_speech import Pipeline
# ### Load VAD model
#
# We are going to use WebRTC VAD model, read more about VAD at https://malaya-speech.readthedocs.io/en/latest/load-vad.html
# WebRTC-based voice activity detector, used to segment the mic stream.
vad_model = malaya_speech.vad.webrtc()
# ### Recording interface
#
# So, to start recording audio including realtime VAD and Classification, we need to use `malaya_speech.streaming.record`. We use `pyaudio` library as the backend.
#
# ```python
# def record(
# vad,
# asr_model = None,
# classification_model = None,
# device = None,
# input_rate: int = 16000,
# sample_rate: int = 16000,
# blocks_per_second: int = 50,
# padding_ms: int = 300,
# ratio: float = 0.75,
# min_length: float = 0.1,
# filename: str = None,
# spinner: bool = False,
# ):
# """
# Record an audio using pyaudio library. This record interface required a VAD model.
#
# Parameters
# ----------
# vad: object
# vad model / pipeline.
# asr_model: object
# ASR model / pipeline, will transcribe each subsamples realtime.
# classification_model: object
# classification pipeline, will classify each subsamples realtime.
# device: None
# `device` parameter for pyaudio, check available devices from `sounddevice.query_devices()`.
# input_rate: int, optional (default = 16000)
# sample rate from input device, this will auto resampling.
# sample_rate: int, optional (default = 16000)
# output sample rate.
# blocks_per_second: int, optional (default = 50)
# size of frame returned from pyaudio, frame size = sample rate / (blocks_per_second / 2).
# 50 is good for WebRTC, 30 or less is good for Malaya Speech VAD.
# padding_ms: int, optional (default = 300)
# size of queue to store frames, size = padding_ms // (1000 * blocks_per_second // sample_rate)
# ratio: float, optional (default = 0.75)
# if 75% of the queue is positive, assumed it is a voice activity.
# min_length: float, optional (default=0.1)
# minimum length (s) to accept a subsample.
# filename: str, optional (default=None)
# if None, will auto generate name based on timestamp.
# spinner: bool, optional (default=False)
# if True, will use spinner object from halo library.
#
#
# Returns
# -------
# result : [filename, samples]
# """
# ```
# **pyaudio will returned int16 bytes, so we need to change to numpy array, normalize it to -1 and +1 floating point**.
# ### Check available devices
# +
import sounddevice
# List the audio input/output devices pyaudio can use.
sounddevice.query_devices()
# -
# By default it will use the `0` index.
# ### Load ASR model
# Quantized small Conformer transducer for speech-to-text.
quantized_model = malaya_speech.stt.deep_transducer(model = 'small-conformer', quantized = True)
# ### ASR Pipeline
#
# Because pyaudio will return int16 bytes, we need to change them to a numpy array then normalize to float. Feel free to add speech enhancement or any function, but in this example, I just keep it simple.
p_asr = Pipeline()
pipeline_asr = (
    p_asr.map(malaya_speech.astype.to_ndarray)
    .map(malaya_speech.astype.int_to_float)
    .map(quantized_model)
)
p_asr.visualize()
# **Again, once you start to run the code below, it will straight away start recording your voice**.
#
# If you run in a jupyter notebook, press the stop button up there to stop recording; in a terminal, press `CTRL + c`.
file, samples = malaya_speech.streaming.record(vad = vad_model, asr_model = p_asr, spinner = False)
file
# Actually it is pretty nice. As you can see, it is able to transcribe in real time; you can try it yourself.
# +
import IPython.display as ipd
ipd.Audio(file)
# -
len(samples[0])
type(samples[0][0]), samples[0][1]
# I do not want to print `samples[0][0]`, it is very long.
|
docs/realtime-asr.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
# Take all 4 images
filename1="1.jpg"
filename2="2.jpg"
filename3="3.jpg"
filename4="4.jpg"
image1 = Image.open(filename1)
image2 = Image.open(filename2)
image3 = Image.open(filename3)
image4 = Image.open(filename4)
# Downsample all images to 50x50 so the pixel loops below stay cheap.
image1 =image1.resize((50,50))
image2 =image2.resize((50,50))
image3 =image3.resize((50,50))
image4 =image4.resize((50,50))
# Probe one pixel: getpixel returns a per-channel tuple (e.g. RGB).
coordinate = x1,y1 = 10, 10
points=image1.getpixel (coordinate)
print (points[1])
print(points)
# +
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.cluster import KMeans
width1 , height1 = image1.size
width2 , height2 = image2.size
width3 , height3 = image3.size
width4 , height4 = image4.size
# One matplotlib color/marker shorthand per image / cluster.
colors= ["g.","r.","y.","b.","m.","c."]
# Scatter red channel (x) vs. green channel (y) for every pixel of image 1.
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points=image1.getpixel (coordinate)
        plt.plot(points[0],points[1],colors[0])
axes = plt.gca()
axes.set_xlim([0,180])
axes.set_ylim([0,150])
plt.show()
# +
# Same red-vs-green scatter for image 2.
for i in range(width2):
    for j in range(height2):
        coordinate = x,y=i,j
        points=image2.getpixel (coordinate)
        plt.plot(points[0],points[1],colors[1])
axes = plt.gca()
axes.set_xlim([0,180])
axes.set_ylim([0,150])
plt.show()
# +
# Same red-vs-green scatter for image 3.
for i in range(width3):
    for j in range(height3):
        coordinate = x,y=i,j
        points=image3.getpixel (coordinate)
        plt.plot(points[0],points[1],colors[2])
axes = plt.gca()
axes.set_xlim([0,180])
axes.set_ylim([0,150])
plt.show()
# +
# Same red-vs-green scatter for image 4.
for i in range(width4):
    for j in range(height4):
        coordinate = x,y=i,j
        points=image4.getpixel (coordinate)
        plt.plot(points[0],points[1],colors[3])
axes = plt.gca()
axes.set_xlim([0,180])
axes.set_ylim([0,150])
plt.show()
# +
import numpy as np
from sklearn.cluster import KMeans
# K-means on (red, green) pairs pooled from all four images, k=4.
kmeans = KMeans(n_clusters=4)
# NOTE(review): the [0,0] seed row stays in the dataset (np.insert inserts at
# index 1), so one spurious point participates in clustering — confirm intended.
# Repeated np.insert is also O(n^2); a list + vstack would be linear.
a=[[0,0]]
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points1=image1.getpixel(coordinate)
        points2=image2.getpixel(coordinate)
        points3=image3.getpixel(coordinate)
        points4=image4.getpixel(coordinate)
        a=np.insert(a,1,[points1[0],points1[1]],axis=0)
        a=np.insert(a,1,[points2[0],points2[1]],axis=0)
        a=np.insert(a,1,[points3[0],points3[1]],axis=0)
        a=np.insert(a,1,[points4[0],points4[1]],axis=0)
kmeans.fit(a)
centroids=kmeans.cluster_centers_
labels=kmeans.labels_
# Color each point by its assigned cluster; mark centroids with an X.
for n in range(len(a)):
    plt.plot(a[n][0],a[n][1],colors[labels[n]])
plt.scatter(centroids[:,0],centroids[:,1],marker="x",s=150)
# NOTE(review): plt.show is referenced but not called — likely plt.show().
plt.show
# +
# Same pooled (red, green) clustering with k=6.
kmeans = KMeans(n_clusters=6)
colors= ["g.","r.","y.","b.","m.","c."]
d=[[0,0]]
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points1=image1.getpixel(coordinate)
        points2=image2.getpixel(coordinate)
        points3=image3.getpixel(coordinate)
        points4=image4.getpixel(coordinate)
        d=np.insert(d,1,[points1[0],points1[1]],axis=0)
        d=np.insert(d,1,[points2[0],points2[1]],axis=0)
        d=np.insert(d,1,[points3[0],points3[1]],axis=0)
        d=np.insert(d,1,[points4[0],points4[1]],axis=0)
kmeans.fit(d)
centroids=kmeans.cluster_centers_
labels=kmeans.labels_
for n in range(len(d)):
    plt.plot(d[n][0],d[n][1],colors[labels[n]])
plt.scatter(centroids[:,0],centroids[:,1],marker="x",s=150)
# NOTE(review): plt.show not called here either.
plt.show
# +
# Scatter red channel (x) vs. blue channel (y) for every pixel of image 1.
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points=image1.getpixel (coordinate)
        plt.scatter(points[0],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,150])
plt.show()
# +
# Same red-vs-blue scatter for image 2.
for i in range(width2):
    for j in range(height2):
        coordinate = x,y=i,j
        points=image2.getpixel (coordinate)
        plt.scatter(points[0],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,150])
plt.show()
# -
# Same red-vs-blue scatter for image 3.
for i in range(width3):
    for j in range(height3):
        coordinate = x,y=i,j
        points=image3.getpixel (coordinate)
        plt.scatter(points[0],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,150])
plt.show()
# Same red-vs-blue scatter for image 4.
for i in range(width4):
    for j in range(height4):
        coordinate = x,y=i,j
        points=image4.getpixel (coordinate)
        plt.scatter(points[0],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,150])
plt.show()
# +
# K-means on pooled (red, blue) pairs of all four images, k=4.
kmeans = KMeans(n_clusters=4)
b=[[0,0]]
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points1=image1.getpixel(coordinate)
        points2=image2.getpixel(coordinate)
        points3=image3.getpixel(coordinate)
        points4=image4.getpixel(coordinate)
        b=np.insert(b,1,[points1[0],points1[2]],axis=0)
        b=np.insert(b,1,[points2[0],points2[2]],axis=0)
        b=np.insert(b,1,[points3[0],points3[2]],axis=0)
        b=np.insert(b,1,[points4[0],points4[2]],axis=0)
kmeans.fit(b)
centroids=kmeans.cluster_centers_
labels=kmeans.labels_
for n in range(len(b)):
    plt.plot(b[n][0],b[n][1],colors[labels[n]])
plt.scatter(centroids[:,0],centroids[:,1],marker="x",s=150)
# NOTE(review): plt.show is referenced but not called — likely plt.show().
plt.show
# -
# Scatter green channel (x) vs. blue channel (y) for every pixel of image 1.
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points=image1.getpixel (coordinate)
        plt.scatter(points[1],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,170])
plt.show()
# Same green-vs-blue scatter for image 2.
for i in range(width2):
    for j in range(height2):
        coordinate = x,y=i,j
        points=image2.getpixel (coordinate)
        plt.scatter(points[1],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,170])
plt.show()
# Same green-vs-blue scatter for image 3.
for i in range(width3):
    for j in range(height3):
        coordinate = x,y=i,j
        points=image3.getpixel (coordinate)
        plt.scatter(points[1],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,170])
plt.show()
# Same green-vs-blue scatter for image 4.
for i in range(width4):
    for j in range(height4):
        coordinate = x,y=i,j
        points=image4.getpixel (coordinate)
        plt.scatter(points[1],points[2])
axes = plt.gca()
axes.set_ylim([0,80])
axes.set_xlim([0,170])
plt.show()
# +
# K-means on pooled (green, blue) pairs of all four images, k=4.
kmeans = KMeans(n_clusters=4)
c=[[0,0]]
for i in range(width1):
    for j in range(height1):
        coordinate = x,y=i,j
        points1=image1.getpixel(coordinate)
        points2=image2.getpixel(coordinate)
        points3=image3.getpixel(coordinate)
        points4=image4.getpixel(coordinate)
        # Bug fix: this section analyses green (index 1) vs. blue (index 2),
        # matching the four scatter cells directly above, but the original
        # inserted [points[0], points[2]] (red vs. blue), which merely
        # duplicated the `b` dataset.
        c=np.insert(c,1,[points1[1],points1[2]],axis=0)
        c=np.insert(c,1,[points2[1],points2[2]],axis=0)
        c=np.insert(c,1,[points3[1],points3[2]],axis=0)
        c=np.insert(c,1,[points4[1],points4[2]],axis=0)
kmeans.fit(c)
centroids=kmeans.cluster_centers_
labels=kmeans.labels_
for n in range(len(c)):
    plt.plot(c[n][0],c[n][1],colors[labels[n]])
plt.scatter(centroids[:,0],centroids[:,1],marker="x",s=150)
# Bug fix: plt.show was referenced but never called.
plt.show()
|
maintest.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# # Exercícios
# Execute a célula a seguir para configurar a verificação de código, que verificará seu trabalho conforme você avança.
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex2 import *
print("Setup Complete")
# ## Step 1: Loading Data
# Read the Iowa data file into a Pandas DataFrame called home_data.
# +
import pandas as pd
# Path of the file to read.
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
# Fill in the line below to read the file into a variable home_data
home_data = pd.read_csv(iowa_file_path)
# Call the line below with no arguments to check that you loaded the data correctly
step_1.check()
# -
# Lines below will give you a hint or solution code
step_1.hint()
step_1.solution()
|
kanggle/IntroMachineLearn/Exercices/exercise001.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Force CPU execution: hide every CUDA device before TensorFlow is imported.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# -
import tensorflow as tf
import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import revsic_multispeakerglowtts as glowtts
import numpy as np
# +
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_rejected = '\'():;"'
MALAYA_SPEECH_SYMBOLS = (
[_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
# -
speaker_model = malaya_speech.speaker_vector.deep_model('vggvox-v2')
stats = {
'male': np.load('/home/husein/speech-bahasa/male-stats-v3/stats.npy'),
'female': np.load('/home/husein/speech-bahasa/female-stats-v3/stats.npy'),
'husein': np.load('/home/husein/speech-bahasa/husein-stats-v3/stats.npy'),
'haqkiem': np.load('/home/husein/speech-bahasa/haqkiem-stats/stats.npy'),
}
mel_stats = np.load('universal-stats/stats.npy')
speaker_size = 512
input_ids = tf.placeholder(tf.int32, [None, None], name = 'input_ids')
lens = tf.placeholder(tf.int32, [None], name = 'lens')
mel_outputs = tf.placeholder(tf.float32, [None, None, 80])
mel_lengths = tf.placeholder(tf.int32, [None])
temperature = tf.placeholder(tf.float32, shape=(), name = 'temperature')
length_scale = tf.placeholder(tf.float32, shape=(), name = 'length_ratio')
speakers = tf.placeholder(tf.float32, [None, speaker_size], name = 'speakers')
speakers_right = tf.placeholder(tf.float32, [None, speaker_size], name = 'speakers_right')
config = glowtts.Config(mel = 80, vocabs = len(MALAYA_SPEECH_SYMBOLS), gin_channels=speaker_size)
config.temperature = temperature
config.length_scale = length_scale
config.norm_g = False
config.block_num = 8
config.flow_block_num = 16
config.channels = 256
config.block_ffn = config.channels * 4
model = glowtts.Model(config)
# +
instance = malaya_speech.train.model.revsic_multispeakerglowtts.flow.actnorm.ActNorm
for k in range(len(model.decoder.flows)):
if isinstance(model.decoder.flows[k], instance):
model.decoder.flows[k].init = 1
# -
loss, losses, attn = model.compute_loss(text = input_ids,
textlen = lens,
mel = mel_outputs, mellen = mel_lengths,
g = speakers)
loss, losses, attn
mel, mellen, attn_out = model(inputs = input_ids, lengths = lens, g = speakers, g_right = speakers_right)
mel, mellen, attn_out
mel = tf.identity(mel, name = 'mel_output')
attn_out = tf.identity(attn_out, name = 'alignment_histories')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
path = 'glowtts-multispeaker-scale'
ckpt_path = tf.train.latest_checkpoint(path)
ckpt_path
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
# +
import re
from unidecode import unidecode
import malaya
normalizer = malaya.normalize.normalizer(date = False, time = False)
pad_to = 2
def tts_encode(string: str, add_eos: bool = True):
    """Map each known character of *string* to its index in MALAYA_SPEECH_SYMBOLS.

    Characters absent from the symbol table are silently dropped; when
    *add_eos* is True the end-of-sequence token id is appended.
    """
    ids = []
    for ch in string:
        if ch in MALAYA_SPEECH_SYMBOLS:
            ids.append(MALAYA_SPEECH_SYMBOLS.index(ch))
    if add_eos:
        ids.append(MALAYA_SPEECH_SYMBOLS.index('eos'))
    return ids
def put_spacing_num(string):
    """Insert single spaces around every alphabetic run in *string*.

    Separates letters from adjacent digits (e.g. '12ab34' -> '12 ab 34'),
    then collapses repeated spaces and strips the ends.
    """
    spaced = re.sub('[A-Za-z]+', lambda m: ' ' + m.group(0) + ' ', string)
    return re.sub(r'[ ]+', ' ', spaced).strip()
def convert_to_ascii(string):
    """Return *string* transliterated to its closest plain-ASCII form via unidecode."""
    return unidecode(string)
def collapse_whitespace(string):
    """Collapse every run of whitespace in *string* into a single space.

    Fix: the original referenced ``_whitespace_re``, a name that is never
    defined in this notebook, so any call raised NameError.  The standard
    ``\\s+`` pattern is inlined instead.
    """
    return re.sub(r'\s+', ' ', string)
def cleaning(string, normalize = True, add_eos = False):
    """Normalize Malay text and encode it to a padded TTS id array.

    Returns a tuple of (cleaned lowercase string, int id array).  The id
    array is wrapped with start (1) / eos (2) marker values and then
    zero-padded so its final length is a multiple of ``pad_to``.
    """
    sequence = []  # NOTE(review): unused — appears to be leftover; confirm before removing
    string = convert_to_ascii(string)
    # Drop a trailing dash/comma, then guarantee sentence-final punctuation.
    if string[-1] in '-,':
        string = string[:-1]
    if string[-1] not in '.,?!':
        string = string + '.'
    # '&' is spoken as the Malay word 'dan'; colons/semicolons become commas.
    string = string.replace('&', ' dan ')
    string = string.replace(':', ',').replace(';', ',')
    if normalize:
        t = normalizer._tokenizer(string)
        # Standalone dashes are mapped to commas (spoken pauses).
        for i in range(len(t)):
            if t[i] == '-':
                t[i] = ','
        string = ' '.join(t)
        string = normalizer.normalize(string,
                                     check_english = False,
                                     normalize_entity = False,
                                     normalize_text = False,
                                     normalize_url = True,
                                     normalize_email = True,
                                     normalize_year = True)
        # malaya's normalizer returns a dict; the text lives under 'normalize'.
        string = string['normalize']
    else:
        string = string
    string = put_spacing_num(string)
    # Keep only characters the model vocabulary knows, minus rejected ones.
    string = ''.join([c for c in string if c in MALAYA_SPEECH_SYMBOLS and c not in _rejected])
    string = re.sub(r'[ ]+', ' ', string).strip()
    string = string.lower()
    ids = tts_encode(string, add_eos = add_eos)
    text_input = np.array(ids)
    # The +2 accounts for the start/eos markers added by the first np.pad below.
    num_pad = pad_to - ((len(text_input) + 2) % pad_to)
    text_input = np.pad(
        text_input, ((1, 1)), 'constant', constant_values = ((1, 2))
    )
    text_input = np.pad(
        text_input, ((0, num_pad)), 'constant', constant_values = 0
    )
    return string, text_input
# -
import matplotlib.pyplot as plt
from glob import glob
files = glob('/home/husein/speech-bahasa/*/audios/*.npy')
len(files)
from collections import defaultdict
speakers_ = defaultdict(list)
for f in files:
speaker = f.split('/')[-3].replace('output-', '').split('-')[0]
speakers_[speaker].append(f)
speakers_.keys()
speakers_['haqkiem'][0]
wav = np.load(speakers_['female'][0])
audio_16k = malaya_speech.resample(wav, 22050, 16000)
v = speaker_model([audio_16k])
v.shape
y, _ = malaya_speech.load('mas-aisyah.wav')
v_right = speaker_model([y])
v_right.shape
t, ids = cleaning('PUTRAJAYA: Datuk Seri Ismail Sabri Yaakob menyerahkan kepada Presiden UMNO, Datuk Seri Dr <NAME>, untuk menyelesaikan isu pergolakan Kerajaan Melaka.')
t, ids
# +
# %%time
o = sess.run([mel, mellen, attn_out], feed_dict = {input_ids: [ids],
lens: [len(ids)],
temperature: 0.333, length_scale: 1.2,
speakers: v, speakers_right: v_right})
# -
o[0][:,:-].shape
mel_outputs_ = np.reshape(o[0][:,:-16], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title('Alignment steps')
im = ax.imshow(
o[-1][0].T,
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.show()
# +
import pickle
with open('a.pkl', 'wb') as fopen:
pickle.dump([np.reshape(o[0][:,:-8], [-1, 80])], fopen)
# -
saver = tf.train.Saver()
saver.save(sess, 'glowtts-multispeaker-scale-output/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'gather' in n.op.lower()
or 'input_ids' in n.name
or 'lens' in n.name
or 'temperature' in n.name
or 'length_ratio' in n.name
or 'mel_output' in n.name
or 'alignment_histories' in n.name
or 'speakers' in n.name)
and 'adam' not in n.name
and 'global_step' not in n.name
and 'Assign' not in n.name
and 'AssignVariableOp' not in n.name
and 'ReadVariableOp' not in n.name
and 'Gather' not in n.name
and 'IsVariableInitialized' not in n.name
]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in *model_dir* into a single .pb file.

    Restores the graph from the checkpoint's .meta file, converts every
    variable reachable from *output_node_names* (a comma-separated string)
    into constants, and writes the result to ``<model_dir>/frozen_model.pb``.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    # Strip device placements so the frozen graph loads on any machine.
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        # Replace each variable node with a constant holding its current value.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('glowtts-multispeaker-scale-output', strings)
# +
import struct
unknown = b'\xff\xff\xff\xff'
def load_graph(frozen_graph_filename, return_def = False):
    """Load a frozen GraphDef, rewriting training-only ops for inference.

    The node surgery below converts leftover assignment/ref ops from
    training into plain inference ops so the frozen graph imports cleanly.
    When *return_def* is True the (patched) GraphDef is returned instead
    of an imported tf.Graph.
    """
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    for node in graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            # Route moving-average inputs through their '/read' op so the
            # plain Switch receives a value, not a variable ref.
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr:
                del node.attr['use_locking']
        elif node.op == 'AssignAdd':
            node.op = 'Add'
            if 'use_locking' in node.attr:
                del node.attr['use_locking']
        elif node.op in ['Assign', 'AssignVariableOp']:
            if node.op == 'AssignVariableOp':
                # AssignVariableOp carries 'dtype'; Identity expects 'T'.
                node.attr.setdefault('T')
                node.attr['T'].type = node.attr['dtype'].type
                del node.attr['dtype']
            node.op = 'Identity'
            if 'use_locking' in node.attr:
                del node.attr['use_locking']
            if 'validate_shape' in node.attr:
                del node.attr['validate_shape']
            if len(node.input) == 2:
                # Identity takes one input: keep the assigned value, drop the ref.
                node.input[0] = node.input[1]
                del node.input[1]
        elif node.op == 'Switch' and 'wave_net' in node.name and '/weight_normalization_' in node.name and 'AssignVariableOp_' in node.name:
            # NOTE(review): forces T to type enum 1 (DT_FLOAT) for these
            # weight-norm switches — presumably a targeted workaround; confirm.
            node.attr['T'].type = 1
    if return_def:
        return graph_def
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph
# -
g = load_graph('glowtts-multispeaker-scale-output/frozen_model.pb')
test_sess = tf.InteractiveSession(graph = g)
input_nodes = ['input_ids', 'lens', 'temperature', 'length_ratio', 'speakers', 'speakers_right']
inputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in input_nodes}
output_nodes = ['mel_output','alignment_histories']
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
# +
# %%time
o = test_sess.run(outputs, feed_dict = {inputs['input_ids']: [ids],
inputs['lens']: [len(ids)],
inputs['temperature']: 0.3333,
inputs['length_ratio']: 1.0,
inputs['speakers']: v,
inputs['speakers_right']: v_right})
# -
mel_outputs_ = np.reshape(o['mel_output'][:,:-16], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title('Alignment steps')
im = ax.imshow(
o['alignment_histories'][0],
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.show()
from tensorflow.tools.graph_transforms import TransformGraph
transforms = ['add_default_attributes',
'remove_nodes(op=Identity, op=CheckNumerics)',
'fold_batch_norms',
'fold_old_batch_norms',
'quantize_weights(fallback_min=-1024, fallback_max=1024)',
'strip_unused_nodes',
'sort_by_execution_order']
pb = 'glowtts-multispeaker-scale-output/frozen_model.pb'
# +
input_graph_def = tf.GraphDef()
with tf.gfile.FastGFile(pb, 'rb') as f:
input_graph_def.ParseFromString(f.read())
transformed_graph_def = TransformGraph(input_graph_def,
input_nodes,
output_nodes, transforms)
with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
# -
g = load_graph('glowtts-multispeaker-scale-output/frozen_model.pb.quantized')
test_sess = tf.InteractiveSession(graph = g)
inputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in input_nodes}
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
# +
# %%time
o = test_sess.run(outputs, feed_dict = {inputs['input_ids']: [ids],
inputs['lens']: [len(ids)],
inputs['temperature']: 0.3333,
inputs['length_ratio']: 1.0,
inputs['speakers']: v,
inputs['speakers_right']: v_right})
# -
mel_outputs_ = np.reshape(o['mel_output'][:,:-16], [-1, 80])
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-before-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
b2_application_key_id = os.environ['b2_application_key_id']
b2_application_key = os.environ['b2_application_key']
from b2sdk.v1 import *
info = InMemoryAccountInfo()
b2_api = B2Api(info)
application_key_id = b2_application_key_id
application_key = b2_application_key
b2_api.authorize_account("production", application_key_id, application_key)
file_info = {'how': 'good-file'}
b2_bucket = b2_api.get_bucket_by_name('malaya-speech-model')
file = 'glowtts-multispeaker-scale-output/frozen_model.pb'
outPutname = 'v2/tts/glowtts-multispeaker.pb'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
file = 'glowtts-multispeaker-scale-output/frozen_model.pb.quantized'
outPutname = 'v2/tts/glowtts-multispeaker.pb.quantized'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
# !tar -zcvf glowtts-multispeaker-scale-output.tar.gz glowtts-multispeaker-scale-output
file = 'glowtts-multispeaker-scale-output.tar.gz'
outPutname = 'pretrained/glowtts-multispeaker-scale-output.tar.gz'
b2_bucket.upload_local_file(
local_file=file,
file_name=outPutname,
file_infos=file_info,
)
|
pretrained-model/tts/glowtts/export/glowtts-multispeaker.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.10 64-bit
# name: python3
# ---
import json
json_dir = "/home/samuelkim/.kaggle/data/sartorius/json_kaggle"
# +
# '['a172', 'bt474', 'bv2', 'huh7', 'mcf7', 'shsy5y', 'skbr3', 'skov3']'
# '['shsy5y', 'astro', 'cort']'
def convert_label(anno_dict):
    """Remap LIVECell category ids onto the 3-class Sartorius scheme, in place.

    The eight LIVECell cell-line ids (0..7) collapse to three classes:
    shsy5y-like -> 1, astro-like -> 2, cort-like -> 3.  The COCO-style
    ``categories`` list is replaced to match.

    Parameters
    ----------
    anno_dict : dict
        COCO-style annotation dict with "annotations" (each carrying a
        "category_id") — mutated in place.

    Returns
    -------
    dict
        The same *anno_dict*, for convenience.

    Note: the original also built unused ``label_map``/``id_map`` dicts
    (``label_map`` even contained a typo'd key "b5474"); they were dead
    code and have been removed.
    """
    # a172->astro(2), bt474->cort(3), bv2->cort(3), huh7->astro(2),
    # mcf7->astro(2), shsy5y->shsy5y(1), skbr3->shsy5y(1), skov3->shsy5y(1)
    inv_id_map = {0: 2,
                  1: 3,
                  2: 3,
                  3: 2,
                  4: 2,
                  5: 1,
                  6: 1,
                  7: 1}
    categories = [{'name': 'shsy5y', 'id': 1}, {'name': 'astro', 'id': 2}, {'name': 'cort', 'id': 3}]
    for anno in anno_dict["annotations"]:
        anno["category_id"] = inv_id_map[anno["category_id"]]
    anno_dict["categories"] = categories
    return anno_dict
# +
# Curate Train annotations
with open(f'{json_dir}/livecell_annotations_train.json', "r") as f:
train_anno = json.load(f)
train_anno = convert_label(train_anno)
with open(f'{json_dir}/livecell_annotations_train_curated.json', "w") as f:
json.dump(train_anno, f)
# Curate Valid annotations
with open(f'{json_dir}/livecell_annotations_val.json', "r") as f:
valid_anno = json.load(f)
valid_anno = convert_label(valid_anno)
with open(f'{json_dir}/livecell_annotations_val_curated.json', "w") as f:
json.dump(valid_anno, f)
# Curate Test annotations
with open(f'{json_dir}/livecell_annotations_test.json', "r") as f:
test_anno = json.load(f)
test_anno = convert_label(test_anno)
with open(f'{json_dir}/livecell_annotations_test_curated.json', "w") as f:
json.dump(test_anno, f)
# -
|
create_datasets/revise_json_livecell.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dictionary
# Python dictionary is an unordered collection of items. While other compound data types have only value as an element, a dictionary has a key: value pair.
# # Dict Creation
# +
#empty dictionary
my_dict = {}
#dictionary with integer keys
my_dict = {1: 'abc', 2: 'xyz'}
print(my_dict)
#dictionary with mixed keys
my_dict = {'name': 'satish', 1: ['abc', 'xyz']}
print(my_dict)
#create empty dictionary using dict()
my_dict = dict()
my_dict = dict([(1, 'abc'), (2, 'xyz')]) #create a dict with list of tuples
print(my_dict)
# -
# # Dict Access
# +
my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'}
#get name
print(my_dict['name'])
# -
#if key is not present it gives KeyError
print(my_dict['degree'])
#another way of accessing key
print(my_dict.get('address'))
#if key is not present it will give None using get method
print(my_dict.get('degree'))
# # Dict Add or Modify Elements
# +
my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'}
#update name
my_dict['name'] = 'raju'
print(my_dict)
# +
#add new key
my_dict['degree'] = 'M.Tech'
print(my_dict)
# -
# # Dict Delete or Remove Element
# +
#create a dictionary
my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'}
#remove a particular item
print(my_dict.pop('age'))
print(my_dict)
# +
my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'}
#remove an arbitrary (key, value) item
my_dict.popitem()
print(my_dict)
# +
squares = {2: 4, 3: 9, 4: 16, 5: 25}
#delete particular key
del squares[2]
print(squares)
# +
#remove all items
squares.clear()
print(squares)
# +
squares = {2: 4, 3: 9, 4: 16, 5: 25}
#delete dictionary itself
del squares
print(squares) #NameError because dict is deleted
# -
# # Dictionary Methods
# +
squares = {2: 4, 3: 9, 4: 16, 5: 25}
my_dict = squares.copy()
print(my_dict)
# -
#fromkeys[seq[, v]] -> Return a new dictionary with keys from seq and value equal to v (defaults to None).
subjects = {}.fromkeys(['Math', 'English', 'Hindi'], 0)
print(subjects)
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.items()) #return a new view of the dictionary items (key, value)
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.keys()) #return a new view of the dictionary keys
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.values()) #return a new view of the dictionary values
#get list of all available methods and attributes of dictionary
d = {}
print(dir(d))
# # Dict Comprehension
# +
#Dict comprehensions are just like list comprehensions but for dictionaries
d = {'a': 1, 'b': 2, 'c': 3}
for pair in d.items():
print(pair)
# -
#Creating a new dictionary with only pairs where the value is larger than 2
d = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
new_dict = {k:v for k, v in d.items() if v > 2}
print(new_dict)
#We can also perform operations on the key value pairs
d = {'a':1,'b':2,'c':3,'d':4,'e':5}
d = {k + 'c':v * 2 for k, v in d.items() if v > 2}
print(d)
|
Python Data Structure/3.5 Dictonary.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import time
import matplotlib
matplotlib.use('TkAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate as ing
#Circle trajectory adjustments:
xcirc=3950 #xcenter
ycirc=14550 #ycenter
radc=3000 #radius center
xstart=0. #start x val
xend=10000. #ending x val
ystart=ycirc - radc
nc=1000 #data points in circle
#get circl points starting at bottom
def circlepts(xc, yc, r, frac):
    """Return the (x, y) point a fraction *frac* of the way around a circle.

    The circle is centred at (xc, yc) with radius r; frac=0 is the bottom
    of the circle and increasing frac walks it counter-clockwise
    (frac=0.25 is the rightmost point).
    """
    angle = (frac - 0.25) * 2 * np.pi
    return (r * np.cos(angle) + xc, r * np.sin(angle) + yc)
# Sample the circle densely, then build the straight lead-in/lead-out
# segments and plot the concatenated trajectory.
xpts = np.empty(nc)
ypts = np.empty(nc)
for i in range(0, nc):
    xpts[i], ypts[i] = circlepts(xcirc, ycirc, radc, float(i) / float(nc))
# Fix: nc/10 is a float on Python 3, which np.empty/range reject; use
# integer division (identical value for nc=1000).
nlin = nc // 10
xlin1 = np.empty(nlin)
ylin1 = np.empty(nlin)
xlin2 = np.empty(nlin)
ylin2 = np.empty(nlin)
delx = float(xcirc - xstart) / float(nlin)
delx2 = float(xend - xcirc) / float(nlin)
for i in range(0, nlin):
    xlin1[i] = xstart + i * delx
    ylin1[i] = ystart
    xlin2[i] = xcirc + (i + 1) * delx2
    ylin2[i] = ystart
xtraj = np.concatenate((xlin1, xpts, xlin2))
ytraj = np.concatenate((ylin1, ypts, ylin2))
plt.plot(xtraj, ytraj)
plt.axis("equal")
plt.show()
# +
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
def animate():
    """Redraw a 10x10 random image 200 times and print the achieved FPS.

    Fix: the original used a Python 2 ``print`` statement, which is a
    SyntaxError on Python 3; converted to the ``print()`` function.
    """
    tstart = time.time()  # for profiling
    data = np.random.randn(10, 10)
    im = plt.imshow(data)
    for i in np.arange(1, 200):
        data = np.random.randn(10, 10)
        im.set_data(data)
        fig.canvas.draw()  # redraw the canvas
    print('FPS:', 200 / (time.time() - tstart))
win = fig.canvas.manager.window
fig.canvas.manager.window.after(100, animate)
plt.show()
# -
|
testcode/smartLoop/circletraj-Copy1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
df=pd.DataFrame({"y1":[5,4],"y2":[10,11]}, index=["flat1","flat2"])
plt.hlines("flat1",5,10)
plt.hlines("flat2",4,11)
plt.hlines("flat3",8,11)
# +
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks")
# Initialize the figure with a logarithmic x axis
f, ax = plt.subplots(figsize=(7, 6))
ax.set_xscale("log")
# Load the example planets dataset
planets = sns.load_dataset("planets")
# Plot the orbital period with horizontal boxes
sns.boxplot(x="distance", y="method", data=planets,
whis="range", palette="vlag")
# Add in points to show each observation
sns.swarmplot(x="distance", y="method", data=planets,
size=2, color=".3", linewidth=0)
# Tweak the visual presentation
ax.xaxis.grid(True)
ax.set(ylabel="")
sns.despine(trim=True, left=True)
# -
planets
|
ipynb/matplot divakar.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The world of developers
# ## Section 1: Business Understanding
# Stackoverflow’s 2017 Annual Developer Survey aims to understand various aspects of jobs that relate to data analysis and software development. It has around 64,000 records spread over about 213 countries and regions.
#
# Based on the survey, we want to predict job satisfaction through a sample, in addition to answering the following business questions:
# #### Question 1: How is job satisfaction related to earned salary and hours worked per week for professional developers?
# #### Question 2: On average, how much do professional developers earn and what size of company do they work most?
# #### Question 3: On average, how is job satisfaction and salary according to the importance of each person’s education?
# #### Question 4: How is the job satisfaction ratio for professional developers who can work at home?
# ### Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# %matplotlib inline
# ## Section 2: Data Understanding
# ### Import Data - Stack Overflow Data - 2017 Survey
df = pd.read_csv('C://Users//<NAME> S//Desktop//survey_results_public.csv')
df.head()
# +
## To know the number of records that the base has
df.shape[0]
# -
# Base for modeling
df = df[['CareerSatisfaction', 'JobSatisfaction', 'HoursPerWeek', 'StackOverflowSatisfaction', 'Salary',
'ExpectedSalary', 'Professional', 'University', 'FormalEducation', 'EmploymentStatus', 'DiversityImportant',
'HomeRemote', 'EducationImportant', 'CompanySize']]
# +
# Describe numerical variables
df.describe()
# +
## Histogram to analyze numerical variables
df.hist()
# -
# ## Section 3: Prepare Data
# +
# Missing values of the target variable are cleared because null variables cannot be predicted
df = df.dropna(axis = 0, subset = ['JobSatisfaction'])
df.shape[0]
# +
# Fill numeric columns with the mean, to ensure consistency of the data and that the model works well. Data with missing
# values can't be processed by machine learning algorithms, so it is necessary to replace missing values with an
# approximation. In this case I used the mean because it is the best approximation.
def add_mean(df):
    '''
    INPUT:
    df - Sample of the survey data
    OUTPUT:
    df - The same sample, with each numeric column's missing values
         replaced in place by that column's mean
    '''
    numeric_cols = df.select_dtypes(include = ['float', 'int']).columns
    for col in numeric_cols:
        col_mean = df[col].mean()
        df[col].fillna(col_mean, inplace = True)
    return df
# +
# The function add_mean replaces missing values with the column mean
df = add_mean(df)
# +
# Dummy the categorical variables, so that categorical variables can be consumed by the model.
# A matrix with categorical variables can't be processed by certain algorithms such as linear regression, so it is
# necessary to encode the categorical variables as dummies
def dummies(df):
    '''
    INPUT:
    df - Sample of the survey data
    OUTPUT:
    df - Sample survey data where every categorical (object-dtype) column
         has been replaced by its dummy columns (first level dropped)
    '''
    cat_cols = df.select_dtypes(include = ['object']).copy().columns
    for col in cat_cols:
        encoded = pd.get_dummies(df[col], prefix = col, prefix_sep = '_', drop_first = True)
        df = pd.concat([df.drop(col, axis = 1), encoded], axis = 1)
    return df
# +
# The function dummies replaces each categorical variable with its dummy columns
df = dummies(df)
# -
# ## Section 4: Modeling
# +
# Split into explanatory and response variables
def split_data(df):
    '''
    INPUT:
    df - Sample of the survey data
    OUTPUT:
    X - Explanatory variables (everything except the target)
    Y - Target variable (JobSatisfaction)
    '''
    target = 'JobSatisfaction'
    Y = df[target]
    X = df.drop([target], axis = 1)
    return X, Y
# +
# Function split_data split the data in explanatory and target variable
X, Y = split_data(df)
# +
# Split into train and test - Fit model
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size = 0.3, random_state = 48)
mco_model = LinearRegression(normalize=True)
mco_model.fit(X_Train, Y_Train)
# -
# ## Section 5: Evaluation
# +
# Score using the model
pred_test = mco_model.predict(X_Test)
r2 = r2_score(Y_Test, pred_test)
r2
# +
# Is overfitting generated?
pred_train = mco_model.predict(X_Train)
test_sc = r2_score(Y_Test, pred_test)
train_sc = r2_score(Y_Train, pred_train)
train_sc
# -
# Overfitting is not generated because the metrics evaluated on train and test are similar.
# ## Section 6: Evaluate the Results
# ### 1. How does job satisfaction relate to earned salary and hours worked per week for professional developers?
# +
## A correlation matrix is done
df[df['Professional'] == 'Professional developer'][['JobSatisfaction', 'Salary', 'HoursPerWeek']].corr()
# +
# A Heat Map is done to analize the correlation between interest variables
sns.heatmap(df[df['Professional'] == 'Professional developer'][['JobSatisfaction', 'Salary', 'HoursPerWeek']].corr(),
annot = True)
# -
# According to the previous results for the year 2017, the job satisfaction of professional developers would have a positive relationship with salary, which means that with a salary increase there would eventually be greater job satisfaction. However, as expected in the face of an increase in working hours, job satisfaction would tend to decrease, this means that they have a negative correlation.
# ### 2. On average, how much do professional developers earn and what size of company do they work most?
# +
# Only professional developers
PD = pd.DataFrame(df[df['Professional'] == 'Professional developer'][['Professional','Salary', 'CompanySize']])
PD = PD.dropna(axis = 0, subset = ['Salary'], how = 'all')
PD['Salary'].mean()
# +
# Histogram is done to analize the wage distribution
plt.hist(PD['Salary'], color = 'purple', alpha = 0.5)
plt.title('Wage Distribution')
# +
# Proportion of proffesional developer workers by size company
((PD['CompanySize'].value_counts())/PD.shape[0]).plot(kind='bar', color= 'red',
title='Proportion of proffesional developer workers by size company',
alpha = 0.7)
# -
# Professional developers have an average salary of 56,298, and around 43% are employed in companies with fewer than 500 employees.
# ### 3. On average, how is job satisfaction and salary according to the importance of each person's education?
# +
# Filter Data Frame
IE = pd.DataFrame(df[['JobSatisfaction', 'EducationImportant', 'Salary']])
# +
# Comparison in a table
A = IE.groupby(['EducationImportant']).mean()['JobSatisfaction'].reset_index()
B = IE.groupby(['EducationImportant']).mean()['Salary'].reset_index()
C = A.merge(B, how = 'left', left_on = 'EducationImportant', right_on = 'EducationImportant')
C
# -
# On average, people who place the highest importance on education have a higher salary and greater job satisfaction than people who place less importance on education.
# ### 4. How is the job satisfaction ratio for professional developers who can work at home?
# +
# New filter data frame
HO = df[df['Professional'] == 'Professional developer'][['JobSatisfaction', 'HomeRemote']]
# -
HO.groupby(['HomeRemote']).mean()['JobSatisfaction'].plot(kind='bar', color= 'grey',
title='Job satisfaction by type of job',
alpha = 0.7)
# According to the previous graph, for professional developers there is greater job satisfaction if they always or almost always work remotely at home.
|
Proyecto.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # 2D free support Wasserstein barycenters of distributions
#
#
# Illustration of 2D Wasserstein barycenters of distributions that are
# weighted sums of Diracs.
#
#
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
# -
# Generate data
# -------------
# %% parameters and data generation
#
#
# +
N = 3
d = 2
measures_locations = []
measures_weights = []
for i in range(N):
n_i = np.random.randint(low=1, high=20) # nb samples
mu_i = np.random.normal(0., 4., (d,)) # Gaussian mean
A_i = np.random.rand(d, d)
cov_i = np.dot(A_i, A_i.transpose()) # Gaussian covariance matrix
x_i = ot.datasets.make_2D_samples_gauss(n_i, mu_i, cov_i) # Dirac locations
b_i = np.random.uniform(0., 1., (n_i,))
b_i = b_i / np.sum(b_i) # Dirac weights
measures_locations.append(x_i)
measures_weights.append(b_i)
# -
# Compute free support barycenter
# -------------
#
#
# +
k = 10 # number of Diracs of the barycenter
X_init = np.random.normal(0., 1., (k, d)) # initial Dirac locations
b = np.ones((k,)) / k # weights of the barycenter (it will not be optimized, only the locations are optimized)
X = ot.lp.free_support_barycenter(measures_locations, measures_weights, X_init, b)
# -
# Plot data
# ---------
#
#
pl.figure(1)
for (x_i, b_i) in zip(measures_locations, measures_weights):
color = np.random.randint(low=1, high=10 * N)
pl.scatter(x_i[:, 0], x_i[:, 1], s=b * 1000, label='input measure')
pl.scatter(X[:, 0], X[:, 1], s=b * 1000, c='black', marker='^', label='2-Wasserstein barycenter')
pl.title('Data measures and their barycenter')
pl.legend(loc=0)
pl.show()
|
docs/source/auto_examples/plot_free_support_barycenter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('data/mnist_train.csv')
train.head(5)
# ## saveing the label
label = train["label"]
label
#
data = train.drop("label", axis=1)
data
print(train.shape)
print(label.shape)
plt.figure(figsize=(8,8))
x = 170
grid_data = data.iloc[x].values.reshape(28,28)
plt.imshow(grid_data,interpolation="none", cmap="gray")
plt.show()
print(label[x])
# ### Getting 15k data points to work on, because of time efficiency
labels = label.head(15000)
datas = data.head(15000)
print(labels.shape)
print(datas.shape)
# ## Step1
# Data preprocessing: Standardizing the data
from sklearn.preprocessing import StandardScaler
stand_data = StandardScaler().fit_transform(datas)
print(stand_data.shape)
# finding the covariance matrix: A^T * A
sample_data = stand_data
covar_matrix = np.matmul(sample_data.T, sample_data)
print(covar_matrix.shape)
# +
from scipy.linalg import eigh
values, vectors = eigh(covar_matrix, eigvals=(782,783))
print(values)
print(vectors.shape)
vectors = vectors.T
print(vectors.shape)
# -
new_coordinates = np.matmul(vectors, sample_data.T)
print(new_coordinates.shape)
print(labels.shape)
new_coordinates = np.vstack((new_coordinates,labels)).T
#print(new_coordinates.shape)
dataFrame = pd.DataFrame(data=new_coordinates, columns=("1st-principal", "2nd-principal", "labels"))
print(dataFrame.head())
sns.set_style("whitegrid")
sns.FacetGrid(dataFrame, hue="labels", height=6).map(plt.scatter, "1st-principal", "2nd-principal").add_legend()
plt.show()
# # PCA Using Scikit-Learn
from sklearn import decomposition
pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(sample_data)
pca_data.shape
# +
pca_data = np.vstack((pca_data.T, labels)).T
pca_pd = dataFrame = pd.DataFrame(data=pca_data, columns=("1st-principal", "2nd-principal", "labels"))
print(pca_pd.shape)
# -
sns.FacetGrid(pca_pd, hue="labels", height=6).map(plt.scatter, "1st-principal", "2nd-principal").add_legend()
plt.show()
sns.pairplot(pca_pd, hue="labels")
# ## PCA for Dimensionality reduction ( Not For Vizuliztion)
# +
pca.n_components = 784
pca_data = pca.fit_transform(sample_data)
perc_var_explin = pca.explained_variance_ / np.sum(pca.explained_variance_)
cum_var_expla = np.cumsum(perc_var_explin)
plt.figure(1, figsize=(6,4))
plt.clf()
plt.plot(cum_var_expla, linewidth=2)
plt.axis('tight')
plt.grid()
plt.xlabel('n_components')
plt.ylabel('cum_exp_var')
plt.show()
# -
|
Random Projects/PCA/PCA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Prepare Data and Evaluate the Results
#
# Now we have had a closer look at the data, we need to clean it, and create functions to get data by ethnicity or gender.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Stack Overflow Developer Survey results, one CSV per year (2018-2020).
df18 = pd.read_csv('./survey_results_public18.csv', low_memory=False)
df18.head()
# -
df19 = pd.read_csv('./survey_results_public19.csv', low_memory=False)
df19.head()
df20 = pd.read_csv('./survey_results_public20.csv', low_memory=False)
df20.head()
# First we will handle just respondents from the US who answered about ethnicity, gender, employment and job seeking status. Job satisfaction and annual salary cannot be used as mandatory fields, because someone who is not working probably did not answer those fields.
# +
# `col == col` is a pandas.query idiom that keeps only non-NaN rows
# (NaN != NaN), i.e. respondents who actually answered that question.
sample18 = df18.query('Country == "United States" & RaceEthnicity == RaceEthnicity & Gender == Gender & JobSearchStatus == JobSearchStatus & Employment == Employment')
sample19 = df19.query('Country == "United States" & Ethnicity == Ethnicity & Gender == Gender & JobSeek == JobSeek & Employment == Employment')
sample20 = df20.query('Country == "United States" & Ethnicity == Ethnicity & Gender == Gender & JobSeek == JobSeek & Employment == Employment')
print('Total clean sample 2018: %s'%sample18.shape[0])
print('Total clean sample 2019: %s'%sample19.shape[0])
print('Total clean sample 2020: %s'%sample20.shape[0])
# -
# Below, we have the function used in data understanding to extract the list of possible values used in a column.
def extract_feature_list(column):
    '''
    INPUT
    column - a dataframe series
    OUTPUT
    return - a list of possible values to a given column
    '''
    # Multi-select survey answers are ";"-joined; non-string entries are
    # missing data (NaN) and are skipped.
    found = set()
    for row in column:
        if type(row) is str:
            for item in row.split(';'):
                # Escape parentheses, which would otherwise be treated as
                # regex metacharacters when filtering the dataframe later.
                found.add(item.replace('(', '\\(').replace(')', '\\)'))
    return list(found)
# ### Question 1: How diverse is the tech workforce in the US?
# In order to understand how diverse the US tech workforce is, it's necessary to understand the distribution by ethnicity and gender.
# All questions we have, will be answered from ***ethnicity*** or ***gender***. To do this more easily, we can adapt the function created in data understanding (distribution_by_feature), to always provide a filtered dataset by gender or ethnicity and pass this dataset to a callback function to do some extraction, preparing the data to evaluate the results.
def findings_by_feature(years_df, feature, years_feature, label, func):
    '''
    INPUT
    years_df - a dict with dataframes for each one of the study years;
    feature - a dict with strings representing the name of the studied column for each one of
        the study years;
        Example: the 2018 dataset uses JobSatisfaction, whereas 2019 and 2020 use JobSat
    years_feature - a dict with each possible value for the column set above;
        * The values can assume different strings to say the same thing across the years;
        * A value of None/empty means the group was not surveyed that year (NaN result);
    label - a string representing the name used to group the "feature";
    func - a callback returning the statistic computed for one (group, year) slice;
    OUTPUT
    return - a dataframe with one row per entry of years_feature and one column per year,
        holding the statistic calculated by "func", sorted descending by the latest year;
    '''
    years = list(years_df.keys())
    minorities = years_feature.keys()
    # Loop-invariant: the most recent year's label names each row.
    last_year = years[-1]
    data = []
    for item in minorities:
        row = []
        for year in years:
            # NaN marks groups that were not surveyed in a given year.
            num = np.nan
            df = years_df[year]
            feature_name = feature[year]
            if years_feature[item][year]:
                num = func(df[df[feature_name].str.contains(years_feature[item][year])], year, feature_name)
            row.append(num)
        row.insert(0, years_feature[item][last_year])
        data.append(row)
    years.insert(0, label)
    new_df = pd.DataFrame(data, columns=years)
    new_df = new_df.set_index(label)
    # Bug fix: sort_values returns a new frame; the result was previously
    # discarded, so the advertised ordering never happened.
    new_df = new_df.sort_values(by=years[-1], ascending=False)
    return new_df
# Now let's create the callback function to get the distribution by ethnicity and gender.
def get_distribution_by_years(df, year, feature):
    '''
    Callback function to findings_by_feature
    INPUT
    df - a dataframe of a specific year, result of a previous query (one group's rows)
    year - year of the dataframe (2018 | 2019 | 2020)
    feature - name of the ethnicity/gender column for that year
    OUTPUT
    return - proportion of a specific group (Ethnicity | Gender), in the US for a specific year
    '''
    # Denominator: all clean US respondents of that year who answered `feature`.
    # (The unused per-year `query_by_country` re-filters of df18/df19/df20
    # were dead code and have been removed.)
    if year == 2018:
        return df.shape[0]/sample18[feature].count()
    elif year == 2019:
        return df.shape[0]/sample19[feature].count()
    elif year == 2020:
        return df.shape[0]/sample20[feature].count()
    # Unknown year: no data available.
    return np.nan
# +
df_list = {2018: sample18, 2019: sample19, 2020: sample20}
feature = {2018: 'RaceEthnicity', 2019: 'Ethnicity', 2020: 'Ethnicity'}
ethnicity_list = {
'White': {2018: 'White or of European descent', 2019: 'White or of European descent', 2020: 'White or of European descent'},
'South': {2018: 'South Asian', 2019: 'South Asian', 2020: 'South Asian'},
'East': {2018: 'East Asian', 2019: 'East Asian', 2020: 'East Asian'},
'Southeast': {2018: None, 2019: None, 2020: 'Southeast Asian'},
'Middle': {2018: 'Middle Eastern', 2019: 'Middle Eastern', 2020: 'Middle Eastern'},
'Hispanic': {2018: 'Hispanic or Latino/Latina', 2019: 'Hispanic or Latino/Latina', 2020: 'Hispanic or Latino/a/x'},
'Black': {2018: 'Black or of African descent', 2019: 'Black or of African descent', 2020: 'Black or of African descent'},
'Multiracial': {2018: None, 2019: 'Multiracial', 2020: 'Multiracial'},
'Biracial': {2018: None, 2019: 'Biracial', 2020: 'Biracial'},
'Native': {2018: 'Native American, Pacific Islander, or Indigenous Australian', 2019: 'Native American, Pacific Islander, or Indigenous Australian', 2020: 'Indigenous \(such as Native American, Pacific Islander, or Indigenous Australian\)'}
}
ethnicities = findings_by_feature(df_list, feature, ethnicity_list, 'Ethnicity', get_distribution_by_years)
ethnicities.plot(kind='bar', title='Distribution by ethnicity')
# -
ethnicities
# What was the increase or decrease comparing 2020 to 2018 in ethnicity?
(ethnicities[2020]-ethnicities[2018])*100
asians = ethnicities.loc['South Asian'][2020]+ethnicities.loc['East Asian'][2020]+ethnicities.loc['Southeast Asian'][2020]
print('Total Asian proportion in 2020: %.4f'%asians)
multi = ethnicities.loc['Multiracial'][2020]+ethnicities.loc['Biracial'][2020]
print('Total Multiracial proportion in 2020: %.4f'%multi)
# The [2019 US Census](https://www.census.gov/quickfacts/fact/dashboard/US/SEX255219#SEX255219) detected 60.1% of Non-Hispanic white people, but they were overrepresented in the tech workforce in 2020 with 85.8%, losing just 1.7 percentual points since 2018.
#
# As well as white developers, Asian ones are overrepresented too. The Stackoverflow’s survey divides Asians in three different ethnicities being East Asian, South Asian and Southeast Asian, together in the US they sum 9.7% workforce representation in 2020. It’s little less than double identified by the 2019 US Census.
#
# Hispanics and Latinos have increased almost one percentual point since 2018, reaching 6.3% representation in 2020 US tech workforce, which represents little more than a third of the value identified by the 2019 US Census.
#
# Middle eastern and Black or African descent had the lowest increase between 2018 to 2020, achieving 1.7% and 2.5% respectively of the workforce. Despite the US Census accounts 13.4% of the population being Black or African descent and Middle eastern was not covered by the 2019 US Census.
#
# Multiracial and Native American seem to have the most proportional representation in the US tech workforce. Stackoverflow surveys started counting Biracial and Multiracial just in 2019 separately, and in 2020 together they achieved 3.5% of the developers representation, against 2.8% of Multiracial detected by the US Census.
#
# Native American ethnicity is divided by two in the 2019 US Census as Native Hawaiian and American Indian, together they sum 1.5% of US population representation. Curiously, they had the same representation in 2018 US tech workforce, losing 0.19% of representation in the two subsequent years.
# How are the tech workforce distributed by gender in the last years?
# +
feature = {2018: 'Gender', 2019: 'Gender', 2020: 'Gender'}
gender_list = {
'Man': {2018: 'Male', 2019: 'Man', 2020: 'Man'},
'Woman': {2018: 'Female', 2019: 'Woman', 2020: 'Woman'},
'Non-binary': {2018: 'Non-binary, genderqueer, or gender non-conforming', 2019: 'Non-binary, genderqueer, or gender non-conforming', 2020: 'Non-binary, genderqueer, or gender non-conforming'},
'Transgender': {2018: 'Transgender', 2019: None, 2020: None}
}
genres = findings_by_feature(df_list, feature, gender_list, 'Gender', get_distribution_by_years)
genres
# -
genres = genres.rename(index={None: 'Transgender'})
genres.plot(kind='bar', title='Distribution by gender')
genres
# What was the increase or decrease comparing 2020 to 2018 in gender?
(genres[2020]-genres[2018])*100
# According to the 2019 US Census, it’s possible to assume that minorities in the US tech workforce are ***Hispanics or Latinos***, ***Middle eastern*** and ***Black or African descent*** by ethnicity and ***Women*** and ***Non-binary or non-conforming*** by gender.
#
# This study will not consider ***Native American*** as minority in this case, to be very close to representation
# ### Question 2: How satisfied are minorities among their peers?
# According to the above data crossing, it’s possible to assume that minorities in the US tech workforce are Hispanics or Latinos, Middle eastern and Black or African descent by ethny and Women and Non-binary or non-conforming by gender.
#
# The 2018 Stackoverflow's survey used 7 scales of “Job Satisfaction”, whereas in the following years they used 5 scales.
#
# How satisfied are minorities ethnicities among their peers?
# To answer this question we need to create a function crossing ethnicity and gender with the scales of job satisfaction. Since 2018 has 7 scales, we will use just the 2019 and 2020 dataset samples.
def data_crossing(df, factor1, factor_list, factor2):
    '''
    INPUT
    df - a dataframe representing the result of a specific query;
    factor1 - a string representing the first study column name;
    factor_list - a list with each possible value of study for the column set above;
    factor2 - a string representing the second study column name;
    OUTPUT
    return - a dataframe of proportions for the data crossing, indexed by the
        values of factor1, with one column per value of factor2 plus a
        "Not answered" column for missing data;
        Example: Ethnicities X Job Satisfaction
    '''
    miss_data_label = 'Not answered'
    columns = extract_feature_list(df[factor2])
    columns.append(miss_data_label)
    data = []
    for item in factor_list:
        minority_df = df[df[factor1].str.contains(item)]
        # Share of each factor2 answer within this group. Kept as a Series so
        # the lookups below do not depend on the column names produced by
        # value_counts().reset_index(), which changed in pandas 2.0 and broke
        # the previous set_index('index') call.
        proportions = minority_df[factor2].value_counts() / minority_df.shape[0]
        # Share of respondents in the group who skipped the factor2 question.
        proportions.loc[miss_data_label] = (minority_df.shape[0] - minority_df[factor2].count()) / minority_df.shape[0]
        row = [item]
        for column in columns:
            row.append(float(proportions.loc[column]))
        data.append(row)
    columns.insert(0, factor1)
    new_df = pd.DataFrame(data, columns=columns)
    new_df.set_index(factor1, inplace=True)
    return new_df
# To evaluate Job Satisfaction, we need to ensure we'll use just respondents who are currently working.
# +
# Keep only respondents who are currently working (full-time, part-time, or
# self-employed) -- job satisfaction only makes sense for them.
employment_query = 'Employment == "Employed full-time" | Employment == "Employed part-time" | Employment == "Independent contractor, freelancer, or self-employed"'
employed_sample19 = sample19.query(employment_query)
employed_sample20 = sample20.query(employment_query)
print('Proportion respondents working in 2019: %.4f'%(employed_sample19.shape[0]/sample19.shape[0]))
print('Proportion respondents working in 2020: %.4f'%(employed_sample20.shape[0]/sample20.shape[0]))
# +
#Job satisfaction by minorities ethnicities 2019
ethnicity_list19 = ['White or of European descent', 'Hispanic or Latino/Latina', 'Middle Eastern', 'Black or of African descent']
minority_ethny19 = data_crossing(employed_sample19, 'Ethnicity', ethnicity_list19, 'JobSat')
minority_ethny19 = minority_ethny19[['Very satisfied', 'Slightly satisfied', 'Neither satisfied nor dissatisfied', 'Slightly dissatisfied', 'Very dissatisfied', 'Not answered']]
minority_ethny19
# -
def extract_to_csv(df):
    '''
    Print df in a European-spreadsheet CSV style: ";" as the field
    separator and "," as the decimal mark, one print call per line.
    '''
    text = df.to_csv().replace(',', ';').replace('.', ',')
    for line in text.split('\n'):
        print(line)
extract_to_csv(minority_ethny19)
# In the column of ***Very satisfied*** we can see a great difference between ethnicities. How much is this difference?
for ethny in ethnicity_list19:
diff = minority_ethny19.loc['White or of European descent']['Very satisfied'] - minority_ethny19.loc[ethny]['Very satisfied']
print('Difference between White or of European descent and {}: {}'.format(ethny, diff))
# +
#Job satisfaction by minorities ethnicities 2020
ethnicity_list20 = ['White or of European descent', 'Hispanic or Latino/a/x', 'Middle Eastern', 'Black or of African descent']
minority_ethny20 = data_crossing(employed_sample20, 'Ethnicity', ethnicity_list20, 'JobSat')
minority_ethny20 = minority_ethny20[['Very satisfied', 'Slightly satisfied', 'Neither satisfied nor dissatisfied', 'Slightly dissatisfied', 'Very dissatisfied', 'Not answered']]
minority_ethny20
# -
extract_to_csv(minority_ethny20)
# The difference seems to decrease in 2020.
for ethny in ethnicity_list20:
diff = minority_ethny20.loc['White or of European descent']['Very satisfied'] - minority_ethny20.loc[ethny]['Very satisfied']
print('Difference between White or of European descent and {}: {}'.format(ethny, diff))
# What is the difference to 2019 to 2020 in ***Very satisfied*** column?
minority_ethny19 = minority_ethny19.rename(index={'Hispanic or Latino/Latina': 'Hispanic or Latino/a/x'})
(minority_ethny20['Very satisfied']-minority_ethny19['Very satisfied'])*100
# What is the job satisfaction distribution by each gender in 2019 and 2020?
# +
#Job satisfaction by minorities genders 2019
gender_list19 = extract_feature_list(df19['Gender'])
minority_gender19 = data_crossing(employed_sample19, 'Gender', gender_list19, 'JobSat')
minority_gender19 = minority_gender19[['Very satisfied', 'Slightly satisfied', 'Neither satisfied nor dissatisfied', 'Slightly dissatisfied', 'Very dissatisfied', 'Not answered']]
minority_gender19
# -
extract_to_csv(minority_gender19)
# The same difference can be noted in Very satisfied column.
for gender in gender_list19:
diff = minority_gender19.loc['Woman']['Very satisfied'] - minority_gender19.loc[gender]['Very satisfied']
print('Difference between Woman and {}: {}'.format(gender, diff))
# +
#Job satisfaction by minorities genders 2020
gender_list20 = extract_feature_list(df20['Gender'])
minority_gender20 = data_crossing(employed_sample20, 'Gender', gender_list20, 'JobSat')
minority_gender20 = minority_gender20[['Very satisfied', 'Slightly satisfied', 'Neither satisfied nor dissatisfied', 'Slightly dissatisfied', 'Very dissatisfied', 'Not answered']]
minority_gender20
# -
extract_to_csv(minority_gender20)
for gender in gender_list20:
diff = minority_gender20.loc['Man']['Very satisfied'] - minority_gender20.loc[gender]['Very satisfied']
print('Difference between Man and {}: {}'.format(gender, diff))
# What is the difference to 2019 to 2020 in Very satisfied column for gender?
(minority_gender20['Very satisfied']-minority_gender19['Very satisfied'])*100
# Stackoverflow asked developers what their job seeking status was in 2019 and 2020. So, it’s possible to find some false positives, i.e., developers who said they were ***very satisfied*** with their jobs and yet ***were actively looking for a job***. So, let's create a callback function to extract these false positives by gender and ethnicity.
def get_false_positive(df, year, feature):
    '''
    Callback function to findings_by_feature
    Returns the share of respondents in df who are actively looking for a
    new job (the "false positives" among the very-satisfied).
    INPUT
    df - a dataframe of a specific year, result of a previous query
    year - year of the dataframe (unused here, part of the callback contract)
    feature - feature column name (unused here, part of the callback contract)
    OUTPUT
    return - proportion of active job seekers within df
    '''
    seekers = df.query('JobSeek == "I am actively looking for a job"')
    return len(seekers) / len(df)
# Extracting the false positives related to respondents who felt very satisfied and were actively looking for a new job by ethnicity.
# +
satisfaction_query = 'JobSat == "Very satisfied"'
df_list = {2019: employed_sample19.query(satisfaction_query), 2020: employed_sample20.query(satisfaction_query)}
feature = {2019: 'Ethnicity', 2020: 'Ethnicity'}
minority_ethny_list = {
'White': {2019: 'White or of European descent', 2020: 'White or of European descent'},
'Middle': {2019: 'Middle Eastern', 2020: 'Middle Eastern'},
'Hispanic': {2019: 'Hispanic or Latino/Latina', 2020: 'Hispanic or Latino/a/x'},
'Black': {2019: 'Black or of African descent', 2020: 'Black or of African descent'},
}
fp_by_ethny = findings_by_feature(df_list, feature, minority_ethny_list, 'Ethnicity', get_false_positive)
fp_by_ethny
# -
extract_to_csv(fp_by_ethny)
# Let's remove the false positives
minority_ethny19['Very satisfied']-fp_by_ethny[2019]
minority_ethny20['Very satisfied']-fp_by_ethny[2020]
# How much false positives has gender?
# +
feature = {2019: 'Gender', 2020: 'Gender'}
gender_list = {
'Man': {2019: 'Man', 2020: 'Man'},
'Woman': {2019: 'Woman', 2020: 'Woman'},
'Non-binary': {2019: 'Non-binary, genderqueer, or gender non-conforming', 2020: 'Non-binary, genderqueer, or gender non-conforming'},
}
fp_gender = findings_by_feature(df_list, feature, gender_list, 'Gender', get_false_positive)
fp_gender
# -
extract_to_csv(fp_gender)
# Let's remove the false positives
minority_gender19['Very satisfied']-fp_gender[2019]
minority_gender20['Very satisfied']-fp_gender[2020]
# ### Question 3: Do minorities have been compensated equally in the US tech industry?
#
# According to Forbes, salary is correlated to job satisfaction. So, it’s important to understand the difference in salaries across the last years. To evaluate these salary differences, we need to ensure we'll use just respondents who are currently working and answered about salary.
# +
# Restrict to working respondents who reported an annual salary; the
# `col == col` query keeps only non-NaN salary rows.
employed_sample18 = sample18.query(employment_query)
salary_sample18 = employed_sample18.query('ConvertedSalary == ConvertedSalary')
salary_sample19 = employed_sample19.query('ConvertedComp == ConvertedComp')
salary_sample20 = employed_sample20.query('ConvertedComp == ConvertedComp')
# Bug fix: the 2018 proportion was previously divided by the 2019 sample size.
print('Proportion repondents with missing data about compensation in 2018: %.4f'%(1-salary_sample18.shape[0]/sample18.shape[0]))
print('Proportion repondents with missing data about compensation in 2019: %.4f'%(1-salary_sample19.shape[0]/sample19.shape[0]))
print('Proportion repondents with missing data about compensation in 2020: %.4f'%(1-salary_sample20.shape[0]/sample20.shape[0]))
# -
# Let's create a callback function to extract the mean salary.
def get_salary_by_years(df, year, feature):
    '''
    Callback function to findings_by_feature
    Returns the MEAN annual salary. (The previous docstring claimed the
    median was used; the median variant is defined separately below.)
    INPUT
    df - a dataframe of specific year, result of a previous query
    year - year of the dataframe (2018 | 2019 | 2020)
    feature - feature column name (unused here, part of the callback contract)
    OUTPUT
    return - mean annual salary for a specific group (Ethnicity | Gender), in the US for a specific year
    '''
    # The USD-converted salary column is named differently across survey years.
    salary_list = {2018: 'ConvertedSalary', 2019: 'ConvertedComp', 2020: 'ConvertedComp'}
    salary_feature_name = salary_list[year]
    # Select the column before aggregating: DataFrame.mean() over mixed-dtype
    # frames raises on pandas >= 2.0 and wastes work on every other column.
    return df[salary_feature_name].mean()
# Let's extract the annual mean salary by ethnicity of all employed respondents.
# +
df_list = {2018: salary_sample18, 2019: salary_sample19, 2020: salary_sample20}
feature = {2018: 'RaceEthnicity', 2019: 'Ethnicity', 2020: 'Ethnicity'}
minority_ethny_list = {
'White': {2018: 'White or of European descent', 2019: 'White or of European descent', 2020: 'White or of European descent'},
'Middle': {2018: 'Middle Eastern', 2019: 'Middle Eastern', 2020: 'Middle Eastern'},
'Hispanic': {2018: 'Hispanic or Latino/Latina', 2019: 'Hispanic or Latino/Latina', 2020: 'Hispanic or Latino/a/x'},
'Black': {2018: 'Black or of African descent', 2019: 'Black or of African descent', 2020: 'Black or of African descent'},
}
salary_by_ethny = findings_by_feature(df_list, feature, minority_ethny_list, 'Ethnicity', get_salary_by_years)
salary_by_ethny
# -
# These annual mean salaries are very high; let's look at the standard deviation.
def get_std_salary_by_years(df, year, feature):
    '''
    Callback function to findings_by_feature
    Measures the spread of annual salary, used to justify reporting the
    median instead of the mean. (The previous docstring wrongly described
    the median calculation.)
    INPUT
    df - a dataframe of specific year, result of a previous query
    year - year of the dataframe (2018 | 2019 | 2020)
    feature - feature column name (unused here, part of the callback contract)
    OUTPUT
    return - standard deviation of annual salary for a specific group (Ethnicity | Gender), in the US for a specific year
    '''
    # The USD-converted salary column is named differently across survey years.
    salary_list = {2018: 'ConvertedSalary', 2019: 'ConvertedComp', 2020: 'ConvertedComp'}
    salary_feature_name = salary_list[year]
    # Select the column before aggregating: DataFrame.std() over mixed-dtype
    # frames raises on pandas >= 2.0.
    return df[salary_feature_name].std()
std_salary_by_ethny = findings_by_feature(df_list, feature, minority_ethny_list, 'Ethnicity', get_std_salary_by_years)
std_salary_by_ethny
# Annual salary has huge variability. Probably the respondents with the highest salaries are compensated through businesses or products of their own. So, let's use the median.
def get_salary_by_years(df, year, feature):
    '''
    Callback function to findings_by_feature
    It's important to stand out the high variability of annual salary,
    so the median is used for a better representation of the samples.
    INPUT
    df - a dataframe of specific year, result of a previous query
    year - year of the dataframe (2018 | 2019 | 2020)
    feature - feature column name (unused here, part of the callback contract)
    OUTPUT
    return - median annual salary for a specific group (Ethnicity | Gender), in the US for a specific year
    '''
    # The USD-converted salary column is named differently across survey years.
    salary_list = {2018: 'ConvertedSalary', 2019: 'ConvertedComp', 2020: 'ConvertedComp'}
    salary_feature_name = salary_list[year]
    # Select the column before aggregating: DataFrame.median() over
    # mixed-dtype frames raises on pandas >= 2.0.
    return df[salary_feature_name].median()
salary_by_ethny = findings_by_feature(df_list, feature, minority_ethny_list, 'Ethnicity', get_salary_by_years)
salary_by_ethny
extract_to_csv(salary_by_ethny)
salary_by_ethny.plot(kind='bar', title='Mean annual salary by ethnicity')
years = salary_by_ethny.columns
for year in years:
white_salary = salary_by_ethny.loc['White or of European descent'][year]
for ethny in ethnicity_list20:
diff = white_salary - salary_by_ethny.loc[ethny][year]
print('Annual salary gap between White or of European descent and %s in %s: %s'%(ethny, year, diff))
print('##################')
# Here we are with more realistic salaries. Let's see the salaries by gender.
# +
feature = {2018: 'Gender', 2019: 'Gender', 2020: 'Gender'}
gender_list = {
'Man': {2018: 'Male', 2019: 'Man', 2020: 'Man'},
'Woman': {2018: 'Female', 2019: 'Woman', 2020: 'Woman'},
'Non-binary': {2018: 'Non-binary, genderqueer, or gender non-conforming', 2019: 'Non-binary, genderqueer, or gender non-conforming', 2020: 'Non-binary, genderqueer, or gender non-conforming'},
}
salary_by_gender = findings_by_feature(df_list, feature, gender_list, 'Gender', get_salary_by_years)
salary_by_gender
# -
salary_by_gender.plot(kind='bar', title='Mean annual salary by gender')
years = salary_by_gender.columns
for year in years:
men_salary = salary_by_gender.loc['Man'][year]
for gender in gender_list20:
diff = men_salary - salary_by_gender.loc[gender][year]
print('Annual salary gap between Man and %s in %s: %s'%(gender, year, diff))
print('##################')
# ### Question 4: The US tech industry has been absorbing the minorities's labor?
#
# One of the questions in Stackoverflow’s survey was related to the current employment status of the respondent.
#
# In order to understand better if the tech industry has been absorbing the minorities’s labor, we need to understand the unemployment rate, i.e, people that are not employed, but are looking for work. Let's create a callback function to extract unemployment rate by ethnicity and gender.
def get_umployment_rate(df, year, feature):
    '''
    Callback function to findings_by_feature
    Returns the unemployment rate of the group: the share of respondents
    in df who are not employed but actively looking for work.
    INPUT
    df - a dataframe of a specific year, result of a previous query
    year - year of the dataframe (unused here, part of the callback contract)
    feature - feature column name (unused here, part of the callback contract)
    OUTPUT
    return - proportion of unemployed job seekers within df
    '''
    seekers = df.query('Employment == "Not employed, but looking for work"')
    return len(seekers) / len(df)
# +
df_list = {2018: sample18, 2019: sample19, 2020: sample20}
feature = {2018: 'RaceEthnicity', 2019: 'Ethnicity', 2020: 'Ethnicity'}
unemployment_by_ethny = findings_by_feature(df_list, feature, minority_ethny_list, 'Ethnicity', get_umployment_rate)
unemployment_by_ethny
# -
extract_to_csv(unemployment_by_ethny)
feature = {2018: 'Gender', 2019: 'Gender', 2020: 'Gender'}
unemployment_by_gender = findings_by_feature(df_list, feature, gender_list, 'Gender', get_umployment_rate)
unemployment_by_gender
extract_to_csv(unemployment_by_gender)
# How much has the unemployment rate decreased in the last few years?
(unemployment_by_ethny[2018]-unemployment_by_ethny[2020])*100
(unemployment_by_gender[2018]-unemployment_by_gender[2020])*100
|
PrepareData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# # CS 458
# # Lecture 1: May 2, 2018
#
# ## What is security
#
# In the context of computers, usually means:
#
# 1. **C**onfidentiality
# * Access to systems or data is limited to authorized parties
# 2. **I**ntegrity
# * Receive the "right" data
# 3. **A**vailability
# * System or data is there when needed
#
# A computing system is _usually_ said to be secure if it has all three properties.
#
# ### Security and reliability
#
# Security goes hand in hand with "reliability."
# A secure system can be relied on to:
#
# 1. Keep personal data confidential
# 2. Allow only authorized access or modifications to resources
# 3. Ensure that any produced results are correct
# 4. Give correct and meaningful results _on demand_
#
# ## What is privacy
#
# A useful definition of privacy: "informational self-determination"
#
# * _user_ gets to _control_ information _about the user_
# * "Control" means:
# * Who gets to see it
# * Who gets to use it
# * What they can use it for
# * Who they can give it to
# * etc
#
# ### PIPEDA
#
# Personal Information Protection and Electronic Documents Act.
# Canada's private-sector privacy legislation.
#
# Ten Fair Information Principles companies need to abide by:
#
# 1. Identify the purpose of data collection
# 2. Obtain consent
# 3. Limit collection
# 4. Limit use, disclosure and retention
# 5. Use appropriate safeguards
# 6. Give individuals access
# 7. Be accurate
# 8. Be open
# 9. Be accountable
# 10. Provide recourse
#
# ## Security vs. privacy
#
# Sometimes people place security and privacy against each other as if they're opposing forces.
# Are they really? Do we have to give up one to get the other?
#
# ## Who are the adversaries
#
# Who's trying to mess with us?
# Various groups:
#
# * Murphy
# * Amateurs
# * "Script kiddies"
# * Crackers
# * Organised crime
# * Government "cyberwarriors"
# * Terrorists
#
# ## Terminologies
#
# ### Assets
#
# Things we might want to protect, such as:
#
# * Hardware
# * Software
# * Data
#
# ### Vulnerabilities
#
# Weaknesses in a system that may be able to be **exploited** in order to cause loss or harm
#
# ### Threats
#
# A loss or harm that might befall a system
#
# Four major categories of threats:
#
# 1. Interception
# 2. Interruption
# 3. Modification
# 4. Fabrication
#
# When designing a system, we need to state the **threat model**,
#
# * Set of threats we are undertaking to defend against
# * **Whom** do we want to prevent from doing **what**
#
# ### Attack
#
# An action which **exploits** a **vulnerability** to **execute** a **threat**
#
# ### Control/Defence
#
# Removing or reducing a vulnerability.
# You **control** a **vulnerability** to prevent an **attack** and defense against a **threat**.
#
# ## Methods of defence
#
# * **Prevent it**: prevent the attack*
# * **Deter it**: make the attack harder or more expensive
# * **Deflect it**: make yourself less attractive to attacker
# * **Detect it**: notice that attack is occurring (or has occurred)
# * **Recover from it**: mitigate the effects of the attack
#
# Often, we'll want to do many things to defend against the same threat: "**Defense in depth**"
#
# ## How secure should we make it?
#
# Principle of Easiest Penetration
#
# * "A system is only as strong as its weakest link"
# * The attacker will go after whatever part of the system is easiest for attacker, not most convenient for defender
# * In order to build secure systems, we need to **learn how to think like an attacker**
#
# Principle of Adequate Protection
#
# * "Security is economics"
# * Don't spent a lot of money to protect a system that can only cause a little damage
#
# ## Defence of computer systems
#
# We may want to protect any of our **assets**: hardware, software, data.
#
# Many ways to achieve this (not exhaustive).
#
# ### Cryptography
#
# Protecting data by making it unreadable to an attacker.
# Authenticating users with digital signatures.
# Authenticating transactions with cryptographic protocols.
# Ensuring the integrity of stored data.
# Aid customers' privacy by having their personal information automatically become unreadable after a certain length of time.
#
# ### Software controls
#
# Passwords and other forms of access control.
# Operating systems separate users' actions from each other.
# Virus scanners watch for some kinds of malware.
# Development controls enforce quality measures on the original source code.
# Personal firewalls that run on your desktop.
#
# ### Hardware controls
#
# Not usually protection of the hardware itself, but rather using separate hardware to protect the system as a whole.
# Fingerprint readers.
# Smart tokens.
# Firewalls.
# Intrusion detection systems.
#
# ### Physical controls
#
# Protection of the hardware itself, as well as physical access to the console, storage media, etc.
# Locks.
# Guards.
# Off-site backups.
# Don't put your data centre on a fault line in California.
# Don't put your nuclear power plant in a tsunami zone.
#
# ### Policies and procedures
#
# Non-technical means can be used to protect against some classes of attack.
#
# If an employee connects his own Wi-Fi access point to the internal company network, that can accidentally open the network to outside attack, so don't allow the employee to do that!
#
# Rules about choosing passwords.
# Training in best security practices.
# # Lecture 2: May 7, 2018
#
# ## Secure programs
#
# Why is it so hard to write secure programs?
# A simple answer:
#
# * Axiom (Murphy): programs have bugs
# * Corollary: security-relevant programs have security bugs
#
# ## Flaw, faults, and failures
#
# A **flaw** is a problem in a program.
# A **security flaw** is a problem that affects security in some way; confidentiality, integrity, and availability.
#
# Flaws come in two types: **faults** and **failures**.
#
# A **fault** is a mistake, "behind the scenes"
#
# * An error in the code, data, specification, process, etc.
# * A fault is a **potential problem**
#
# A **failure** is when something _actually_ goes wrong
#
# * You log in to the library’s web site, and it shows you someone else’s account
# * "Goes wrong" means a deviation from the desired behaviour, not necessarily from the specified behaviour!
# * The specification itself may be wrong
#
# A fault is the programmer/specifier/inside view.
# A failure is the user/outside view.
#
# ### Finding and fixing faults
#
# How do you find a fault?
#
# * If a user experiences a failure, you can try to work backwards to uncover the underlying fault
# * What about faults that haven't (yet) led to failures?
# * Intentionally try to _cause_ failures, then proceed as above
# * Think like an attacker!
#
# Fixing faults:
#
# * Usually by making small edits (**patches**) to the program; this process is "penetrate and patch"
# * ex. Microsoft's "Patch Tuesday"
#
# ### Problems with patching
#
# Sometimes patching makes things _worse_!
#
# * Pressure to patch a fault is often high, causing a narrow focus on the observed failure, instead of a broad look at what may be a more serious underlying problem
# * The fault may have caused other, unnoticed failures, and a partial fix may cause inconsistencies or other problem
# * The patch for this fault may be introducing new faults
#
# Alternatives to patching?
#
# * Very difficult... How can programmers inform the users in the best way possible to avoid exposing the fault
#
# ### Unexpected behaviour
#
# When a behaviour is specified, the spec usually lists the things the program must do; e.g.
#
# * `ls` must list the names of the files in the directory whose name is given, if the user has permissions to read that directory
#
# Most implementors wouldn't care if it did additional things as well
#
# * Sorting the list alphabetically before outputting them is fine
#
# But from a security/privacy point of view, extra behaviour could be bad
#
# * After displaying the filenames, post the list to a public web site
# * After displaying the filenames, delete the files
#
# When implementing a security or privacy relevant program, you should consider "and nothing else" to be implicitly added to the spec
#
# * "should do" vs. "shouldn't do"
# * Testing for "shouldn't do"
#
# ### Types of security flaws
#
# A way to divide up security flaws is by genesis (where they came from).
#
# Some flaws are **intentional**/**inherent**
#
# * **Malicious** flaws are intentionally inserted to attack systems, either in general, or certain systems in particular
# * If it's meant to attack some particular system, we call it a targeted malicious flaw
# * Otherwise, it's a general flaw
# * **Nonmalicious** (but intentional or inherent) flaws are often features that are meant to be in the system, and are correctly implemented, but nonetheless can cause a failure when used by an attacker
#
# Most security flaws are caused by **unintentional** program errors.
#
# ## Unintentional security flaw
#
# ### The Heartbleed bug in OpenSSL (April 2014)
#
# The **TLS Heartbeat mechanism** is designed to keep SSL/TLS connections alive even when no data is being transmitted.
# Heartbeat messages sent by one peer contain random data and a payload length.
# The other peer is supposed to respond with a mirror of exactly the same data.
#
# User $\rightarrow$ server:
# `Type | Length | Payload`
# e.g. `HB_RQST | 64KB | H`
#
# Relevant [xkcd](http://imgs.xkcd.com/comics/heartbleed_explanation.png).
#
# There was a **missing bounds check**!
# An attacker can request that a TLS server hand over a relatively large slice (up to 64KB) of its private memory space.
# This is the _same_ memory space where OpenSSL also stores the server's private key material as well as TLS session keys.
#
# ### Apple's SSL/TLS bug (February 2014)
#
# The bug occurs in the code used to check the validity of the server's signature on a key used in an SSL/TLS connection.
# Bug existed in certain versions of OSX 10.9 and iOS 6.1 and 7.0.
# An attacker ("man in the middle") could potentially exploit the flaw to get a user to accept a counterfeit key chosen by the attacker.
#
# #### Buggy Code
#
# ```swift
# static OSStatus
# SSLVerifySignedServerKeyExchange(SSLContext *ctx, bool isRsa, SSLBuffer signedParams,
# uint8_t *signature, UInt16 signatureLen)
# {
# OSStatus err;
# ...
#
# if ((err = SSLHashSHA1.update(&hashCtx, &serverRandom)) != 0)
# goto fail;
# if ((err = SSLHashSHA1.update(&hashCtx, &signedParams)) != 0)
# goto fail;
# goto fail;
# if ((err = SSLHashSHA1.final(&hashCtx, &hashOut)) != 0)
# goto fail;
# ...
#
# fail:
# SSLFreeBuffer(&signedHashes);
# SSLFreeBuffer(&hashCtx);
# return err;
# }
# ```
#
# #### Problem
#
# There are two consecutive `goto fail` statements!
# The second `goto fail` statement is always executed _if_ the first two checks succeeded.
# In this case, the third check is bypassed and $0$ is returned as the value of `err`.
#
# ### Types of unintentional flaws
#
# #### Buffer overflows
#
# Most commonly exploited type of security flaw.
#
# Upshot: if the attacker can write data past the end of an array on the stack, attacker can _usually_ overwrite things like the saved return address.
# When the function returns, it will jump to any address of the attacker's choosing.
#
# Targets: programs on a local machine that run with setuid (superuser) privileges, or a network daemons on a remote machine.
#
# ##### Kinds of buffer overflows
#
# In addition to the classic attack which overflows a buffer on the stack to jump to shellcode, there are many variants:
#
# * Attack which work when a **single byte** can be written past the end of the buffer (often caused by a common off-by-one error)
# * Overflows of buffers on the heap instead of the stack
# * Jump to other parts of the program, or parts of standard libraries, instead of shellcode
#
# ##### Defences against buffer overflows
#
# Programmer: use a language with bounds checking
#
# * Also catch those exceptions
#
# Compiler: place padding between data and return address ("canaries")
#
# * Detect if the stack has been overwritten before the return from each function
#
# Memory: non-executable stack
#
# * "W$\oplus$X" (memory page is either writable or executable, but never both)
#
# OS: stack (and sometimes code, heap, libraries) at random virtual addresses for each process
#
# * All mainstream OSes do this
#
# #### Integer overflows
#
# Machine integers represents a finite set of numbers.
# This may not correspond to a programmer's mental model.
#
# Suppose Program assumes that integer is always positive, overflow will make (signed) integer wrap and become negative, which will violate assumption!
#
# * Program casts large unsigned integers to signed integer
# * Result of a mathematical operation causes overflow
#
# Attack can pass values to program that will trigger overflow.
#
# #### Format string vulnerabilities
#
# Class of vulnerabilities discovered only in 2000.
#
# Unfiltered user input is used as format string in `printf()`, `fprintf()`, `sprintf()`, ...
# e.g. `printf(buffer)` instead of `printf("%s", buffer)`
#
# * `printf(buffer)` will parse buffer for %'s and use whatever is currently on the stack to process found format parameters
#
# `printf("%s%s%s%s")` likely crashes your program.
# `printf("%x%x%x%x")` dumps parts of the stack.
# `%n` will **write** to an address found on the stack.
#
# ##### Example expansion code
#
# ```c
# char output [44];
# char buffer [44];
#
# snprintf(buffer, sizeof(buffer), "Input %s", input);
# sprintf(output, buffer);
# ```
#
# What happens if input=%48d+(address of a libc routine)?
#
# #### Incomplete mediation
#
# Inputs to programs are often specified by untrusted users,
#
# * web-based applications are a common example
#
# Users sometimes mistype data in web forms, e.g.:
#
# * phone number: 51998884567
# * email: <EMAIL>
#
# The web application needs to ensure that what the user has entered constitutes a **meaningful** request.
# This is called **mediation**.
#
# Incomplete mediation occurs when the application accepts incorrect data from the user.
# Sometimes this is hard to avoid, e.g. `519-886-4567` as a phone number which is a reasonable entry.
#
# Focus on catching entries that are clearly wrong, e.g.:
#
# * not well formed; DOB: 1980-04-31
# * unreasonable values; DOB: 1876-10-12
# * inconsistent with other entries
#
# ##### Why do we care?
#
# Security concerns:
#
# * Buffer overflow
# * SQL injection; [relevant xkcd](https://xkcd.com/327/)
#
# Any user-supplied input falls within well-specified values, known to be safe.
#
# ##### Client-side mediation
#
# There are some web sites with form that do **client-side** mediation (via Javascript).
# If invalid data is entered, a popup will prevent the user from submitting it.
#
# Related issues: client-side state
#
# * Many web sites rely on the client to keep state for them
# * They will put hidden fields in the form which are passed back to the server when the user submits the form
#
# Problem: what if the user
#
# * turns off Javascript
# * edits the form before submitting it
# * writes a script that interacts with the web server instead of using a web browser at all
# * connects to the server "manually"
#
# Note that the user can send arbitrary (unmediated) values to the server this way.
# The user can also modify any client-side state.
#
# ##### Defences against incomplete mediation
#
# Client-side mediation is an okay method to use in order to have a friendlier user interface, but is useless for security purposes.
#
# **server-side mediation** is required regardless of whether client-side mediation is used.
#
# For values entered by the user:
#
# * Always check carefully on the values of all fields
# * These values can potentially contain completely arbitrary 8-bit data (including accented chars, control chars, etc.) and be of any length
#
# For state stored by the client:
#
# * Ensure client has not modified the data in any way
#
# #### TOCTTOU errors
#
# TOCTTOU ("TOCK-too") errors
#
# * Time-Of-Check To Time-Of-Use
# * Also known as "race condition" errors
#
# These errors may occur when the following happens:
#
# * User requests the system to perform an action
# * The system verifies the user is allowed to perform the action
# * The system performs the action
#
# What happens if the state of the system changes between steps 2 and 3?
#
# ##### Example problem
#
# A particular Unix terminal program is `setuid` (runs with superuser privileges) so that it can allocate terminals to users (a privileged operation).
#
# It supports a command to write the contents of the terminal to a log file.
# It first checks if the user has permissions to write to the requested file; if so, it opens the file for writing.
#
# The attacker makes a symbolic link:
# `logfile -> file_she_owns`
#
# Between the "check" and the "open", the attacker changes it
# `logfile -> /etc/passwd`
#
# The state of the system _changed_ between the check for permission and the execution of the operation.
# The file whose permissions were checked for writeability by the user (`file_she_owns`) wasn't (`/etc/passwd`).
#
# Can the attacker really "win this race"? **Yes**.
#
# ##### Defences against TOCTTOU errors
#
# When performing a privileged action on behalf of another party, make sure all information relevant to the access control decisions is **constant** between the time of the check and the time of the action ("the race")
#
# * Keep a private copy of the request itself so that the request can't be altered during the race
# * Where possible, act on the object itself, and not on some level of indirection
# * e.g. make access control decisions based on filehandles, not filenames
# * If that's not possible, use locks to ensure the object is not changed during the race
#
# ## Malicious code: Malware
#
# Various forms of software written with malicious content.
# A common characteristic is that it needs to be executed in order to cause harm.
#
# Ways a malware get executed:
#
# * User action
# * Downloading and running malicious software
# * Viewing a web page containing malicious code
# * Opening an executable email attachment
# * Inserting a CD/DVD or USB flash drive
# * Exploiting an existing flaw in a system
# * Buffer overflows in network daemons
# * Buffer overflows in email clients or web browser
#
# ### Types of malware
#
# * Virus
# * Malicious code that adds itself to benign programs/files
# * Code for spreading + code for actual attack
# * _Usually_ activated by users
# * Worms
# * Malicious code spreading with no or little user involvement
# * Trojans
# * Malicious code hidden in seemingly innocent program that you downloaded
# * Logic Bombs
# * Malicious code hidden in programs already on your machine
#
# ### Virus
#
# A **virus** is a particular kind of malware that infects other files
#
# * Traditionally, a virus could infect only executable programs
# * Nowadays, many data document formats can contain executable code (such as macros)
# * Many different types of files can be infected with viruses now
#
# Typically, when the file is executed (or sometimes just opened), the virus activates, and tries to infect other files with copies of itself.
#
# In this way, the virus can spread between files, or between computers.
#
# #### Infection
#
# The virus wants to modify an existing (non-malicious) program or document (the **host**) in such a way that executing or opening it will transfer control to the virus
#
# * The virus can do its "dirty work" and then transfer control back to the host
#
# For executable programs:
# Typically, the virus will modify other programs and copy itself to the beginning of the targets' program code.
#
# For documents with macros:
# The virus will edit other documents to add itself as a macro which starts automatically when the file is opened.
#
# In addition to infecting other files, a virus will often try to infect the computer itself
#
# * This way, every time the computer is booted, the virus is automatically activated
#
# It might put itself in the boot sector of the hard disk.
#
# It might add itself to the list of programs the OS runs at boot time.
#
# It might infect one or more of the programs the OS runs at boot time.
#
# It might try many of these strategies, but it is still trying to _evade detection_.
#
# #### Spreading
#
# For a virus to spread between computers,
#
# * Usually, when the user sends infected files (hopefully not knowing they're infected!) to others
# * Or puts them on a p2p network
# * A virus usually requires some kind of user action in order to spread to another machine
# * If it can spread on its own (e.g. via email), it's more likely to be a worm than a virus
#
# #### Payload
#
# In addition to trying to spread, what else might a virus try to do?
#
# Some viruses try to evade detection by disabling any active virus scanning software.
#
# Most viruses have some sort of **payload**.
# At some point, the payload of an infected machine will activate, and something (usually bad) will happen
#
# * Erase hard drive
# * Subtly corrupt some of your spreadsheets
# * Install a keystroke logger to capture online banking password
# * Start attacking a particular target website
#
# #### Spotting viruses
#
# When to look for viruses:
#
# * As files are added to the computer
# * via portable media
# * via a network
# * From time to time, scan the entire state of the computer
# * to catch anything missed when on its way in
# * however, any damage the virus have done may not be reversible
#
# How to look for viruses:
#
# * Signature-based protection
# * Behaviour-based protection
#
# #### Signature-based protection
#
# Keep a list of all known viruses.
# Each virus has some characteristic feature (the **signature**),
#
# * Most signature-based systems use features of the virus code itself
# * the infection code
# * the payload code
# * Can also try to identify other patterns characteristics of a particular virus
# * where on the system it tries to hide itself
# * how it propagates from one place to another
#
# ##### Polymorphism
#
# To try to evade signature-based virus scanners, some viruses are **polymorphic**,
#
# * The virus makes a _modified_ copy instead of a _perfect_ copy every time it infects a new file
# * Often done by having most of the virus code encrypted
# * The virus starts with a decryption routine which decrypts the rest of the virus, which is then executed
# * When the virus spreads, it encrypts the new copy with a newly chosen random key
#
# #### Behaviour-based protection
#
# Signature-based protection systems have a major limitation
#
# * Can only scan for viruses that are in the list
# * Several new viruses identified _every day_
# * one anti-virus recognizes over _36 million_ signatures
#
# Behaviour-based systems look for suspicious patterns of behaviour, rather than for specific code fragments; some systems run suspicious code in a sandbox first.
#
# ##### False negatives and positives
#
# Any kind of test or scanner can have two types of errors:
#
# 1. False negatives: fail to identify a threat that is present
# 2. False positives: claim a threat is present when it is not
#
# Which is worse? How do you think signature-based and behaviour-based systems compare?
#
# ##### Base rate fallacy
#
# Suppose a breathalyzer reports false drunkenness in 5% of cases, but never fails to detect true drunkenness.
# Suppose that 1 in every 1000 drivers is drunk (the **base rate**).
# If a breathalyzer test of a random driver indicates that he or she is drunk, what is the probability that he or she really is drunk?
#
# Applied to a virus scanner, these numbers imply that there will be many more false positives than true positives, potentially causing the true positives to be overlooked or the scanner disabled.
#
# ### Worms
#
# A **worm** is a self-contained piece of code that can replicate with little or no user involvement.
# Worms often exploit security flaws in widely deployed software as a path to infection.
#
# Typically:
#
# * A worm exploits a security flaw in some software on the computer, infecting it
# * The worm immediately starts searching for other computers to infect
# * There may or may not be a payload that activates at a certain time, or by another trigger
#
# #### The Morris worm
#
# First Internet worm, launched by a graduate student at Cornell in 1988.
# Once infected, a machine would try to infect other machines in three ways:
#
# 1. Exploit a buffer overflow in the "finger" daemon
# 2. Use a back door left in the "sendmail" mail daemon
# 3. Try a "dictionary attack" against local users' passwords.
# If successful, log in as them, and spread to other machines they can access without requiring a password
#
# All three methods were well known!
# First example of buffer overflow exploit in the wild.
# Thousands of systems were offline for several days.
#
# #### The Code Red worm
#
# Launched in 2001, exploited a buffer overflow in Microsoft's IIS web server (for which a patch had been available for a month).
# An infected machine would
#
# * Deface its home page
# * Launch attacks on other web servers (IIS or not)
# * Launch a DoS attack on a handful of websites, including www.whitehouse.gov
# * Installed a back door to deter disinfection
#
# Infected 250k systems in 9 hours.
#
# #### The Slammer worm
#
# Launched in 2003, performed DoS attacks.
# First example of a "Warhol worm"; a worm which can infect nearly all vulnerable machines in just 15 minutes.
# Exploited a buffer overflow in Microsoft's SQL Server (also having a patch available).
# A vulnerable machine could be infected with a single UDP packet!
#
# * This enabled the worm to spread extremely quickly
# * Exponential growth, double every _8.5 seconds_
# * 90% of vulnerable hosts infected in 10 minutes
# Dropped due to midterm conflicts. :(
|
CS 458.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Explicit Feedback Neural Recommender Systems
#
# Goals:
# - Understand recommender data
# - Build different models architectures using Keras
# - Retrieve Embeddings and visualize them
# - Add metadata information as input to the model
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
from zipfile import ZipFile
# Fetch and unpack the MovieLens 100k dataset. The cell is idempotent:
# each step is skipped if its output already exists on disk.
try:
    from urllib.request import urlretrieve
except ImportError:  # Python 2 compat
    from urllib import urlretrieve

ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = ML_100K_URL.rsplit('/', 1)[1]
ML_100K_FOLDER = 'ml-100k'

if not op.exists(ML_100K_FILENAME):
    print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
    urlretrieve(ML_100K_URL, ML_100K_FILENAME)

if not op.exists(ML_100K_FOLDER):
    print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
    # Use a context manager so the archive's file handle is closed even if
    # extraction fails (the original ZipFile(...) call leaked the handle).
    with ZipFile(ML_100K_FILENAME) as archive:
        archive.extractall('.')
# -
# ### Ratings file
#
# Each line contains a rated movie:
# - a user
# - an item
# - a rating from 1 to 5 stars
# +
import pandas as pd

# u.data is tab-separated with no header row: one
# (user_id, item_id, rating, timestamp) record per rating event.
raw_ratings = pd.read_csv(op.join(ML_100K_FOLDER, 'u.data'), sep='\t',
                          names=["user_id", "item_id", "rating", "timestamp"])
raw_ratings.head()
# -
# ### Item metadata file
#
# The item metadata file contains metadata like the name of the movie or the date it was released. The movies file contains columns indicating the movie's genres. Let's only load the first five columns of the file with `usecols`.
# u.item is pipe-separated and latin-1 encoded; usecols keeps only the first
# five columns (id, title, dates, IMDb URL) and drops the genre flags.
m_cols = ['item_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
items = pd.read_csv(op.join(ML_100K_FOLDER, 'u.item'), sep='|',
                    names=m_cols, usecols=range(5), encoding='latin-1')
items.head()
# Let's write a bit of Python preprocessing code to extract the release year as an integer value:
# +
def extract_year(release_date, missing_year=1920):
    """Parse the year out of a ``'DD-Mon-YYYY'`` release date string.

    Non-string entries (e.g. NaN for missing dates, which is a float and has
    no ``split`` method) and strings that do not have exactly three dashed
    components fall back to ``missing_year``, an explicit sentinel for
    "release date unknown" (default 1920, kept for backward compatibility).
    """
    if hasattr(release_date, 'split'):
        components = release_date.split('-')
        if len(components) == 3:
            return int(components[2])
    # Missing-value marker
    return missing_year
# Map every release date to an integer year and eyeball the distribution
# (the spike at 1920 is the missing-value marker, not real movies).
items['release_year'] = items['release_date'].map(extract_year)
items.hist('release_year', bins=50);
# -
# Enrich the raw ratings data with the collected items metadata:
all_ratings = pd.merge(items, raw_ratings)
all_ratings.head()
# ### Data preprocessing
#
# To understand well the distribution of the data, the following statistics are computed:
# - the number of users
# - the number of items
# - the rating distribution
# - the popularity of each movie
# Id ranges: users and items are numbered starting at 1, which is why the
# embedding tables below use input_dim = max_id + 1.
min_user_id = all_ratings['user_id'].min()
min_user_id

max_user_id = all_ratings['user_id'].max()
max_user_id

min_item_id = all_ratings['item_id'].min()
min_item_id

max_item_id = all_ratings['item_id'].max()
max_item_id

all_ratings['rating'].describe()
# Let's do a bit more pandas magic to compute the popularity of each movie (number of ratings):
# Popularity = number of ratings per movie; merge it back into both the
# item metadata and the ratings table.
popularity = all_ratings.groupby('item_id').size().reset_index(name='popularity')
items = pd.merge(popularity, items)
items.nlargest(10, 'popularity')

items["title"][181]  # which is wrong because the movie ID is not yet the index

indexed_items = items.set_index('item_id')
indexed_items["title"][181]

all_ratings = pd.merge(popularity, all_ratings)
all_ratings.describe()

all_ratings.head()
# Later in the analysis we will assume that this popularity does not come from the ratings themselves but from an external metadata, e.g. box office numbers in the month after the release in movie theaters.
#
# Let's split the enriched data in a train / test split to make it possible to do predictive modeling:
# +
from sklearn.model_selection import train_test_split

# 80/20 split of the rating events; fixed random_state for reproducibility.
ratings_train, ratings_test = train_test_split(
    all_ratings, test_size=0.2, random_state=0)

# Extract the model inputs/targets as plain numpy arrays.
user_id_train = np.array(ratings_train['user_id'])
item_id_train = np.array(ratings_train['item_id'])
rating_train = np.array(ratings_train['rating'])

user_id_test = np.array(ratings_test['user_id'])
item_id_test = np.array(ratings_test['item_id'])
rating_test = np.array(ratings_test['rating'])
# -
# # Explicit feedback: supervised ratings prediction
#
# For each pair of (user, item) try to predict the rating the user would give to the item.
#
# This is the classical setup for building recommender systems from offline data with explicit supervision signal.
# ## Predictive ratings as a regression problem
#
# The following code implements the following architecture:
#
# <img src="images/rec_archi_1.svg" style="width: 600px;" />
from tensorflow.keras.layers import Embedding, Flatten, Dense, Dropout
from tensorflow.keras.layers import Dot
from tensorflow.keras.models import Model
# +
# For each sample we input the integer identifiers
# of a single user and a single item
class RegressionModel(Model):
    """Matrix-factorization rating model: rating ~ dot(user_vec, item_vec).

    NOTE: the user embedding is created before the item embedding; later
    cells rely on this build order when unpacking ``model.get_weights()``
    (weights[0] = user table, weights[1] = item table).
    """

    def __init__(self, embedding_size, max_user_id, max_item_id):
        super().__init__()
        # One trainable lookup table per entity type; input_dim is
        # max_id + 1 because raw ids are used directly as row indices.
        self.user_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_user_id + 1,
                                        input_length=1,
                                        name='user_embedding')
        self.item_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_item_id + 1,
                                        input_length=1,
                                        name='item_embedding')
        # The following two layers don't have parameters.
        self.flatten = Flatten()
        self.dot = Dot(axes=1)

    def call(self, inputs):
        # inputs is a pair [user_ids, item_ids]; Embedding + Flatten turns
        # each id batch into a (batch, embedding_size) matrix.
        user_inputs = inputs[0]
        item_inputs = inputs[1]
        user_vecs = self.flatten(self.user_embedding(user_inputs))
        item_vecs = self.flatten(self.item_embedding(item_inputs))
        # Predicted rating is the (unbounded) dot product of the embeddings.
        y = self.dot([user_vecs, item_vecs])
        return y


model = RegressionModel(64, max_user_id, max_item_id)
# MAE loss: predictions are regressed directly on the 1-5 star ratings.
model.compile(optimizer="adam", loss='mae')
# -
max_item_id, max_user_id
user_id_train.shape, item_id_train.shape
# Useful for debugging the output shape of model
initial_train_preds = model.predict([user_id_train, item_id_train])
initial_train_preds.shape
# ### Model error
#
# Using `initial_train_preds`, compute the model errors:
# - mean absolute error
# - mean squared error
#
# Converting a pandas Series to numpy array is usually implicit, but you may use `rating_train.values` to do so explicitly. Be sure to monitor the shapes of each object you deal with by using `object.shape`.
rating_train.reshape(-1,1).shape, initial_train_preds.shape

# NOTE(review): MSE is only defined in the next cell, so this sanity call
# raises NameError on a fresh top-to-bottom run -- execute the cell below first.
MSE(np.ones(5), np.zeros(5))
# +
def MAE(y_true, y_pred):
    """Mean absolute error between two broadcastable arrays."""
    return np.abs(y_true - y_pred).mean()
def MSE(y_true, y_pred):
    """Mean squared error between two broadcastable arrays."""
    diff = y_true - y_pred
    return np.mean(diff * diff)
print("MSE:", MSE(initial_train_preds, rating_train.reshape(-1,1)),
"\nMAE:", MAE(initial_train_preds, rating_train.reshape(-1,1)),)
# +
# # %load solutions/compute_errors.py
# -
# ### Monitoring runs
#
# Keras enables to monitor various variables during training.
#
# `history.history` returned by the `model.fit` function is a dictionary
# containing the `'loss'` and validation loss `'val_loss'` after each epoch
# +
# %%time
# Train for 10 epochs; validation_split holds out 10% of the training set
# for the val_loss reported after each epoch, and batches are reshuffled
# every epoch.
history = model.fit([user_id_train, item_id_train], rating_train,
                    batch_size=64, epochs=10, validation_split=0.1,
                    shuffle=True)
# -
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylim(0, 2)
plt.legend(loc='best')
plt.title('Loss');
# **Questions**:
#
# - Why is the train loss higher than the first loss in the first few epochs?
# - Why is Keras not computing the train loss on the full training set at the end of each epoch as it does on the validation set?
#
#
# Now that the model is trained, the model MSE and MAE look nicer:
def plot_predictions(y_true, y_pred):
    """Scatter predicted vs. true ratings on a fixed 4x4-inch canvas.

    Both axes span [-1, 6] so repeated calls are visually comparable; the
    very low alpha makes point density readable despite heavy overlap.
    """
    plt.figure(figsize=(4, 4))
    plt.xlim(-1, 6)
    plt.ylim(-1, 6)
    plt.xlabel("True rating")
    plt.ylabel("Predicted rating")
    plt.scatter(y_true, y_pred, s=60, alpha=0.01)
# +
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
test_preds = model.predict([user_id_test, item_id_test])
print("Final test MSE: %0.3f" % mean_squared_error(test_preds, rating_test))
print("Final test MAE: %0.3f" % mean_absolute_error(test_preds, rating_test))
plot_predictions(rating_test, test_preds)
# -
train_preds = model.predict([user_id_train, item_id_train])
print("Final train MSE: %0.3f" % mean_squared_error(train_preds, rating_train))
print("Final train MAE: %0.3f" % mean_absolute_error(train_preds, rating_train))
plot_predictions(rating_train, train_preds)
# ### Model Embeddings
#
# - It is possible to retrieve the embeddings by simply using the Keras function `model.get_weights` which returns all the model learnable parameters.
# - The weights are returned the same order as they were build in the model
# - What is the total number of parameters?
# weights and shape
weights = model.get_weights()
[w.shape for w in weights]
# +
# Solution:
# model.summary()
# -
# get_weights() returns parameters in build order: the user embedding table
# was created first in __init__, then the item embedding table.
user_embeddings = weights[0]
item_embeddings = weights[1]

item_id = 181
print(f"Title for item_id={item_id}: {indexed_items['title'][item_id]}")
print(f"Embedding vector for item_id={item_id}")
print(item_embeddings[item_id])
print("shape:", item_embeddings[item_id].shape)
# ### Finding most similar items
#
# Finding k most similar items to a point in embedding space
#
# - Write in numpy a function to compute the cosine similarity between two points in embedding space.
# - Test it on the following cells to check the similarities between popular movies.
# - Bonus: try to generalize the function to compute the similarities between one movie and all the others and return the most related movies.
#
# Notes:
# - you may use `np.linalg.norm` to compute the norm of vector, and you may specify the `axis=`
# - the numpy function `np.argsort(...)` enables to compute the sorted indices of a vector
# - `items["name"][idxs]` returns the names of the items indexed by array idxs
# +
EPSILON = 1e-07  # to avoid division by 0.


def cosine(x, y):
    """Return the cosine similarity between vectors ``x`` and ``y``.

    The previous version normalized with in-place ``/=``, which silently
    mutated the caller's arrays (e.g. rows of ``item_embeddings``) and
    failed on integer arrays, and it added EPSILON to every component
    before squaring, biasing the norms. Here the inputs are left
    untouched and EPSILON only guards the denominator against a
    division by zero when either vector is all zeros.
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    norm_product = np.linalg.norm(x) * np.linalg.norm(y)
    return float(np.dot(x, y) / (norm_product + EPSILON))
# +
# # %load solutions/similarity.py
# +
def print_similarity(item_a, item_b, item_embeddings, titles):
    """Print both movie titles, then the cosine similarity of their embeddings."""
    for item in (item_a, item_b):
        print(titles[item])
    sim = cosine(item_embeddings[item_a], item_embeddings[item_b])
    print(f"Cosine similarity: {sim:.3}")
print_similarity(50, 181, item_embeddings, indexed_items["title"])
# -
print_similarity(181, 288, item_embeddings, indexed_items["title"])
print_similarity(181, 1, item_embeddings, indexed_items["title"])
print_similarity(288, 1, item_embeddings, indexed_items["title"])
print_similarity(181, 181, item_embeddings, indexed_items["title"])
# +
def cosine_similarities(item_id, item_embeddings):
    """Cosine similarity between one item's embedding and every row of
    ``item_embeddings``, returned as a 1-d array of length n_items."""
    query = item_embeddings[item_id]
    # Row-wise dot products of every embedding against the query vector.
    dots = item_embeddings.dot(query)
    # Product of norms for each row; EPSILON guards against an all-zero row.
    norms = np.linalg.norm(item_embeddings, axis=1) * np.linalg.norm(query)
    return dots / (norms + EPSILON)
similarities = cosine_similarities(181, item_embeddings)
similarities
# -
plt.hist(similarities, bins=30);
# +
def most_similar(item_id, item_embeddings, titles,
                 top_n=30):
    """Return (index, title, similarity) tuples for the ``top_n`` items most
    similar to ``item_id`` (the query item itself ranks first with
    similarity ~1)."""
    sims = cosine_similarities(item_id, item_embeddings)
    # argsort is ascending, so reverse it to put the largest
    # cosine similarities first, then keep the top_n entries.
    top_idxs = np.argsort(sims)[::-1][:top_n]
    return [(idx, titles[idx], sims[idx]) for idx in top_idxs]
most_similar(50, item_embeddings, indexed_items["title"], top_n=10)
# +
# items[items['title'].str.contains("Star Trek")]
# -
most_similar(227, item_embeddings, indexed_items["title"], top_n=10)
# The similarities do not always make sense: the number of ratings is low and the embedding does not automatically capture semantic relationships in that context. Better representations arise with higher number of ratings, and less overfitting in models or maybe better loss function, such as those based on implicit feedback.
# ### Visualizing embeddings using TSNE
#
# - we use scikit learn to visualize items embeddings
# - Try different perplexities, and visualize user embeddings as well
# - What can you conclude ?
# +
from sklearn.manifold import TSNE
# Project item embeddings to 2D with t-SNE for visualization.
item_tsne = TSNE(perplexity=30, ).fit_transform(item_embeddings)
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
plt.scatter(item_tsne[:, 0], item_tsne[:, 1]);
plt.xticks(()); plt.yticks(());
plt.show()
# -
# %pip install -q plotly
# +
import plotly.express as px
# Interactive scatter: hovering reveals the movie behind each point.
tsne_df = pd.DataFrame(item_tsne, columns=["tsne_1", "tsne_2"])
tsne_df["item_id"] = np.arange(item_tsne.shape[0])
tsne_df = tsne_df.merge(items.reset_index())
px.scatter(tsne_df, x="tsne_1", y="tsne_2",
           color="popularity",
           hover_data=["item_id", "title",
                       "release_year", "popularity"])
# -
# Alternatively with [Uniform Manifold Approximation and Projection](https://github.com/lmcinnes/umap):
# +
# # %pip install umap-learn
# +
import umap
# Same 2D visualization with UMAP, an alternative manifold-learning method.
item_umap = umap.UMAP().fit_transform(item_embeddings)
plt.figure(figsize=(10, 10))
plt.scatter(item_umap[:, 0], item_umap[:, 1]);
plt.xticks(()); plt.yticks(());
plt.show()
# -
# ## A Deep recommender model
#
# Using a similar framework as previously, the following deep model described in the course was built (with only two fully connected)
#
# <img src="images/rec_archi_2.svg" style="width: 600px;" />
#
# To build this model we will need a new kind of layer:
from tensorflow.keras.layers import Concatenate
#
# ### Exercise
#
# - The following code has **4 errors** that prevent it from working correctly. **Correct them and explain** why they are critical.
# +
class DeepRegressionModel(Model):
    """Two-tower deep regression recommender (corrected exercise solution).

    The four deliberate errors of the exercise are fixed here:
    1. dropout rate lowered from 0.99 to 0.3 (0.99 discards almost all signal),
    2. regression loss (MSE/MAE) instead of binary cross-entropy (set at compile time),
    3. output layer has a single unit,
    4. no activation on the output layer (unbounded regression target).
    """

    def __init__(self, embedding_size, max_user_id, max_item_id):
        super().__init__()
        self.user_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_user_id + 1,
                                        input_length=1,
                                        name='user_embedding')
        self.item_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_item_id + 1,
                                        input_length=1,
                                        name='item_embedding')
        # Parameter-free plumbing layers.
        self.flatten = Flatten()
        self.concat = Concatenate()
        self.dropout = Dropout(0.3)
        self.dense1 = Dense(64, activation="relu")
        self.dense2 = Dense(1)

    def call(self, inputs, training=False):
        users, movies = inputs[0], inputs[1]
        user_vecs = self.flatten(self.user_embedding(users))
        item_vecs = self.flatten(self.item_embedding(movies))
        # Dropout is only active in training mode.
        hidden = self.dropout(self.concat([user_vecs, item_vecs]),
                              training=training)
        hidden = self.dense1(hidden)
        return self.dense2(hidden)
# Build and compile the corrected model; MSE is an appropriate loss for a
# regression target.
model = DeepRegressionModel(64, max_user_id, max_item_id)
model.compile(optimizer='adam', loss='mse')
# model.compile(optimizer='adam', loss='binary_crossentropy') # Error two! we are doing a regression-->MSE or MAE as loss
# Untrained predictions, useful as a baseline sanity check.
initial_train_preds = model.predict([user_id_train, item_id_train])
# +
# # %load solutions/deep_explicit_feedback_recsys.py
# -
# %%time
history = model.fit([user_id_train, item_id_train], rating_train,
                    batch_size=64, epochs=10, validation_split=0.1,
                    shuffle=True)
# Train vs validation loss curves to eyeball over-/under-fitting.
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylim(0, 2)
plt.legend(loc='best')
plt.title('Loss');
train_preds = model.predict([user_id_train, item_id_train])
print("Final train MSE: %0.3f" % mean_squared_error(train_preds, rating_train))
print("Final train MAE: %0.3f" % mean_absolute_error(train_preds, rating_train))
test_preds = model.predict([user_id_test, item_id_test])
print("Final test MSE: %0.3f" % mean_squared_error(test_preds, rating_test))
print("Final test MAE: %0.3f" % mean_absolute_error(test_preds, rating_test))
# The performance of this model is not necessarily significantly better than the previous model but you can notice that the gap between train and test is lower, probably thanks to the use of dropout.
#
# Furthermore this model is more flexible in the sense that we can extend it to include metadata for hybrid recsys as we will see in the following.
# ### Home assignment:
# - Add another layer, compare train/test error.
# - Can you improve the test MAE?
# - Try adding more dropout and change layer sizes.
#
#
# Manual tuning of so many hyperparameters is tedious. In practice it's better to automate the design of the model using an hyperparameter search tool such as:
#
# - https://keras-team.github.io/keras-tuner/ (Keras specific)
# - https://optuna.org/ (any machine learning framework, Keras included)
from tensorflow.keras.optimizers import Adam
# +
class VeryDeepRegressionModel(Model):
    """Deeper regression recommender with independently sized user and
    item embeddings and three fully connected layers."""

    def __init__(self, embedding_size_user, embedding_size_item, max_user_id, max_item_id):
        super().__init__()
        self.user_embedding = Embedding(output_dim=embedding_size_user,
                                        input_dim=max_user_id + 1,
                                        input_length=1,
                                        name='user_embedding')
        self.item_embedding = Embedding(output_dim=embedding_size_item,
                                        input_dim=max_item_id + 1,
                                        input_length=1,
                                        name='item_embedding')
        # Parameter-free plumbing layers.
        self.flatten = Flatten()
        self.concat = Concatenate()
        self.dropout = Dropout(0.5)
        self.dense1 = Dense(64, activation="relu")
        self.dense2 = Dense(16, activation="relu")
        self.dense3 = Dense(1)

    def call(self, inputs, training=False):
        users, movies = inputs[0], inputs[1]
        user_vecs = self.flatten(self.user_embedding(users))
        item_vecs = self.flatten(self.item_embedding(movies))
        # Dropout only on the concatenated embeddings, training mode only.
        hidden = self.dropout(self.concat([user_vecs, item_vecs]),
                              training=training)
        hidden = self.dense1(hidden)
        hidden = self.dense2(hidden)
        return self.dense3(hidden)
model = VeryDeepRegressionModel(64, 64, max_user_id, max_item_id)
# Slightly larger learning rate than the Adam default (1e-3).
opt = Adam(learning_rate=3e-3)
model.compile(optimizer=opt, loss='mae', )
# model.compile(optimizer='adam', loss='binary_crossentropy') # Error two! we are doing a regression-->MSE or MAE as loss
initial_train_preds = model.predict([user_id_train, item_id_train])
# -
# %%time
history = model.fit([user_id_train, item_id_train], rating_train,
                    batch_size=64, epochs=30, validation_split=0.1,
                    shuffle=True, verbose=0,)
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylim(0, 2)
plt.legend(loc='best')
plt.title('Loss');
train_preds = model.predict([user_id_train, item_id_train])
print("Final train MSE: %0.3f" % mean_squared_error(train_preds, rating_train))
print("Final train MAE: %0.3f" % mean_absolute_error(train_preds, rating_train))
test_preds = model.predict([user_id_test, item_id_test])
print("Final test MSE: %0.3f" % mean_squared_error(test_preds, rating_test))
print("Final test MAE: %0.3f" % mean_absolute_error(test_preds, rating_test))
# ## Using item metadata in the model
#
# Using a similar framework as previously, we will build another deep model that can also leverage additional metadata. The resulting system is therefore an **Hybrid Recommender System** that does both **Collaborative Filtering** and **Content-based recommendations**.
#
# <img src="images/rec_archi_3.svg" style="width: 600px;" />
#
# +
from sklearn.preprocessing import QuantileTransformer
# Numeric metadata fed to the hybrid model; QuantileTransformer maps each
# column to a uniform [0, 1] distribution. Fit on train only so no test
# statistics leak into the features.
meta_columns = ['popularity', 'release_year']
scaler = QuantileTransformer()
item_meta_train = scaler.fit_transform(ratings_train[meta_columns])
item_meta_test = scaler.transform(ratings_test[meta_columns])
# +
class HybridModel(Model):
    """Hybrid recommender: collaborative-filtering embeddings plus item
    metadata features, combined by a small MLP regression head."""

    def __init__(self, embedding_size, max_user_id, max_item_id):
        super().__init__()
        self.user_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_user_id + 1,
                                        input_length=1,
                                        name='user_embedding')
        self.item_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_item_id + 1,
                                        input_length=1,
                                        name='item_embedding')
        # Parameter-free plumbing layers.
        self.flatten = Flatten()
        self.concat = Concatenate()
        self.dense1 = Dense(64, activation="relu")
        self.dropout = Dropout(0.3)
        self.dense2 = Dense(64, activation='relu')
        self.dense3 = Dense(1)

    def call(self, inputs, training=False):
        users, movies, meta = inputs[0], inputs[1], inputs[2]
        # Dropout is applied to each embedding and after each hidden layer,
        # training mode only.
        user_vecs = self.dropout(self.flatten(self.user_embedding(users)),
                                 training=training)
        item_vecs = self.dropout(self.flatten(self.item_embedding(movies)),
                                 training=training)
        features = self.concat([user_vecs, item_vecs, meta])
        hidden = self.dropout(self.dense1(features), training=training)
        hidden = self.dropout(self.dense2(hidden), training=training)
        return self.dense3(hidden)
# Train the hybrid model on ids + scaled metadata.
model = HybridModel(64, max_user_id, max_item_id)
model.compile(optimizer='adam', loss='mae')
initial_train_preds = model.predict([user_id_train,
                                     item_id_train,
                                     item_meta_train])
# -
# %%time
history = model.fit([user_id_train, item_id_train, item_meta_train],
                    rating_train,
                    batch_size=64, epochs=10, validation_split=0.1,
                    shuffle=True)
test_preds = model.predict([user_id_test, item_id_test, item_meta_test])
print("Final test MSE: %0.3f" % mean_squared_error(test_preds, rating_test))
print("Final test MAE: %0.3f" % mean_absolute_error(test_preds, rating_test))
# The additional metadata seems to improve the predictive power of the model a bit but this should be re-run several times to see the impact of the random initialization of the model.
#
#
# ### A recommendation function for a given user
#
# Once the model is trained, the system can be used to recommend a few items for a user, that he/she hasn't already seen:
# - we use the `model.predict` to compute the ratings a user would have given to all items
# - we build a reco function that sorts these items and exclude those the user has already seen
def recommend(user_id, top_n=10):
    """Return the top_n (title, predicted_rating) pairs for movies the
    given user has not rated yet.

    Relies on module-level state: all_ratings, indexed_items, items,
    meta_columns, scaler, model and max_item_id.
    """
    seen_mask = all_ratings["user_id"] == user_id
    seen_movies = set(all_ratings[seen_mask]["item_id"])
    # range() excludes its upper bound: go up to max_item_id + 1 so the
    # last catalog item is also scored (previously silently dropped).
    item_ids = [i for i in range(1, max_item_id + 1) if i not in seen_movies]
    print("User %d has seen %d movies, including:" % (user_id, len(seen_movies)))
    for title in all_ratings[seen_mask].nlargest(20, 'popularity')['title']:
        print("    ", title)
    print("Computing ratings for %d other movies:" % len(item_ids))
    item_ids = np.array(item_ids)
    user_ids = user_id * np.ones_like(item_ids)
    items_meta = scaler.transform(indexed_items[meta_columns].loc[item_ids])
    rating_preds = model.predict([user_ids, item_ids, items_meta])
    # np.argsort returns *positions* into rating_preds / item_ids, not item
    # ids; map each position back to its item id before the title lookup.
    # (The previous code indexed items with the raw position, which returns
    # wrong titles as soon as the user has already seen at least one movie.)
    # NOTE(review): assumes `items` is indexed by item_id, like
    # `indexed_items` used elsewhere -- confirm.
    best_positions = np.argsort(rating_preds[:, 0])[::-1][:top_n]
    return [(items["title"][item_ids[pos]], rating_preds[pos, 0])
            for pos in best_positions]
# Example: top-10 recommendations for user 5.
for title, pred_rating in recommend(5):
    print("    %0.1f: %s" % (pred_rating, title))
# ### Home assignment: Predicting ratings as a classification problem
#
# In this dataset, the ratings all belong to a finite set of possible values:
np.unique(rating_train)
# +
from tensorflow.keras.layers import Softmax
from tensorflow.keras.utils import to_categorical
class ClassificationModel(Model):
    """Hybrid recommender that treats rating prediction as a 5-way
    classification problem (one class per star value); outputs class
    probabilities through a softmax."""

    def __init__(self, embedding_size, max_user_id, max_item_id):
        super().__init__()
        self.user_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_user_id + 1,
                                        input_length=1,
                                        name='user_embedding')
        self.item_embedding = Embedding(output_dim=embedding_size,
                                        input_dim=max_item_id + 1,
                                        input_length=1,
                                        name='item_embedding')
        # Parameter-free plumbing layers.
        self.flatten = Flatten()
        self.concat = Concatenate()
        self.dense1 = Dense(64, activation="relu")
        self.dropout = Dropout(0.3)
        self.dense2 = Dense(64, activation='relu')
        self.dense3 = Dense(5)  # one logit per rating value 1..5
        self.softmax = Softmax()

    def call(self, inputs, training=False):
        users, movies, meta = inputs[0], inputs[1], inputs[2]
        user_vecs = self.dropout(self.flatten(self.user_embedding(users)),
                                 training=training)
        item_vecs = self.dropout(self.flatten(self.item_embedding(movies)),
                                 training=training)
        features = self.concat([user_vecs, item_vecs, meta])
        hidden = self.dropout(self.dense1(features), training=training)
        hidden = self.dropout(self.dense2(hidden), training=training)
        return self.softmax(self.dense3(hidden))
model = ClassificationModel(64, max_user_id, max_item_id)
model.compile(optimizer='adam', loss='categorical_crossentropy')
# One-hot encode the 1..5 ratings; to_categorical creates columns for
# classes 0..5, so drop the unused class-0 column with [:, 1:].
categorical_rating_train = to_categorical(rating_train)[:, 1:]
categorical_rating_test = to_categorical(rating_test)[:, 1:]
initial_train_preds = model.predict([user_id_train,
                                     item_id_train,
                                     item_meta_train])
# -
# %%time
history = model.fit([user_id_train, item_id_train, item_meta_train],
                    categorical_rating_train,
                    batch_size=64, epochs=10, validation_split=0.1,
                    shuffle=True)
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.ylim(0, 2)
plt.legend(loc='best')
plt.title('Loss');
# Convert class probabilities back to a 1..5 rating via argmax
# (+1 compensates for the dropped class-0 column).
cat_train_preds = model.predict([user_id_train, item_id_train, item_meta_train])
train_preds = np.argmax(cat_train_preds, axis=1)+1
print("Final train MSE: %0.3f" % mean_squared_error(train_preds, rating_train))
print("Final train MAE: %0.3f" % mean_absolute_error(train_preds, rating_train))
cat_test_preds = model.predict([user_id_test, item_id_test, item_meta_test])
test_preds = np.argmax(cat_test_preds, axis=1)+1
print("Final test MSE: %0.3f" % mean_squared_error(test_preds, rating_test))
print("Final test MAE: %0.3f" % mean_absolute_error(test_preds, rating_test))
def recommend_class(user_id, top_n=10):
    """Classification variant of recommend(): the predicted rating is the
    argmax class (1..5) of the model's softmax output.

    Relies on module-level state: all_ratings, indexed_items, items,
    meta_columns, scaler, model and max_item_id.
    """
    seen_mask = all_ratings["user_id"] == user_id
    seen_movies = set(all_ratings[seen_mask]["item_id"])
    # range() excludes its upper bound: include the last catalog item too.
    item_ids = [i for i in range(1, max_item_id + 1) if i not in seen_movies]
    print("User %d has seen %d movies, including:" % (user_id, len(seen_movies)))
    for title in all_ratings[seen_mask].nlargest(20, 'popularity')['title']:
        print("    ", title)
    print("Computing ratings for %d other movies:" % len(item_ids))
    item_ids = np.array(item_ids)
    user_ids = user_id * np.ones_like(item_ids)
    items_meta = scaler.transform(indexed_items[meta_columns].loc[item_ids])
    # argmax over the 5 class probabilities; +1 maps class 0..4 -> rating 1..5.
    rating_preds = np.argmax(model.predict([user_ids, item_ids, items_meta]), axis=1)+1
    # argsort yields *positions*, not item ids; map each position back to
    # its item id before looking up the title (the previous code indexed
    # `items` with the raw position, returning wrong titles whenever the
    # user had already seen at least one movie).
    best_positions = np.argsort(rating_preds)[::-1][:top_n]
    return [(items["title"][item_ids[pos]], rating_preds[pos])
            for pos in best_positions]
# Show top recommendations (with predicted star rating) for user 5.
for title, pred_rating in recommend_class(5):
    print("    %0.1f: %s" % (pred_rating, title))
# Maybe we can help the model by forcing it to predict those values by treating the problem as a multiclassification problem. The only required changes are:
#
# - setting the final layer to output class membership probabilities using a softmax activation with 5 outputs;
# - optimize the categorical cross-entropy classification loss instead of a regression loss such as MSE or MAE.
# +
# # %load solutions/classification.py
# -
|
labs/03_neural_recsys/Explicit_Feedback_Neural_Recommender_System.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook 4 - Control Flow
#
# We now have a substantial toolbox of the built-in data structures for organizing, analyzing, and processing data. We now shift our focus to control flow: that is, writing if-else statements, for loops, and functions. These help us write efficient and clear code.
# ## If-else statements
# Just like in Julia, Python has standard `if-else` statements. Let's see an example.
# Demonstration of a basic if statement. Indentation restored: the export
# had stripped the leading whitespace, which made this cell a SyntaxError
# even though the surrounding text says it works.
x = 3
if 2*x == 6:
    print('2*x equals 6')
    print('Hooray!')
# Some important points to note.
# * The `if` statement ends with a `:`
# * Python uses indentation to denote which lines are encapsulated in the `if` statement
#
# Let's see examples which won't work.
# NOTE(review): this cell is meant to demonstrate code that fails, but its
# original (intentionally wrong) indentation appears to have been stripped
# during export -- confirm against the source notebook before "fixing" it.
x = 3
if 2*x == 6:
print('2*x equals 6')
print('Hooray!')
# We can add an `else` statement to handle the case where the expression fails.
# if/else demonstration (indentation restored after export mangling).
x = 4
if 2*x == 6:
    print('2*x equals 6')
else:
    print('2*x does not equal 6')
# Finally, we can add a `elif` if we want an else-if expression.
# if/elif demonstration (indentation restored after export mangling).
# The first message also said "equals 5" while the condition tests == 6;
# fixed to match the condition.
x = 4
if 2*x == 6:
    print('2*x equals 6')
    print('Hooray!')
elif not (2*x > 8):
    print('2*x does not equal 6, but is not greater than 8')
# ## Loops
# We now discuss loops. We have already seen a type of loop, which is list and dictionary comprehensions! Essentially, a for-loop is just a list-comprehension, where the objective is to perform a general task at each iteration, as opposed to the creation of an item in a data structure.
# Basic for loop over range (indentation restored after export mangling).
for i in range(5):
    print(i, i**2)
# We could use a for loop to perform a list comprehension.
# The same (i, i**2) pairs built with an explicit loop instead of a
# comprehension (indentation restored after export mangling).
my_list = []
for i in range(5):
    my_list.append((i,i**2))
print(my_list)
# In order to monitor the progress for a loop and have an estimate of the remaining time, you can use the `tqdm` package
# #!pip install tqdm # run only if it is not installed yet
from tqdm import tqdm # imports the tqdm function from the tqdm package
# NOTE(review): indentation of the lines below appears to have been lost
# during export; `a += i` (and possibly `print(i)`) were presumably inside
# the loop body. The loop also iterates 100 million times -- confirm
# against the original notebook before running.
a = 0
for i in tqdm(range(100000000)):
a += i
print(i)
# A similar loop is a `while` loop, which runs until the expression is false.
# Find the smallest value of n for which n^n >= 1000000
# (indentation restored after export mangling).
n = 1
while n**n < 1000000:
    n += 1
print(n)
# We could have used a for loop as well, but used `break` to exit once we found the value.
# Find the smallest value of n for which n^n >= 1000000
# -- same search with a for loop and break
# (indentation restored after export mangling).
for i in range(1,100):
    if i**i >= 1000000:
        break
print(i)
# ## Functions
# A function in Python takes in zero or more arguments, and outputs a single argument. Let's see some examples.
def addition(x,y):
    """Return x + y; works for any types supporting `+` (numbers, strings, ...)."""
    return x + y
    # return(x + y) # is also correct
# Notice that a function is defined with a `def`. This particular function takes two arguments, `x` and `y`, and returns their sum. Interestingly, the types of `x` and `y` do not need to be specified.
# addition() is duck-typed: it works on ints and on strings alike.
print(addition(2,3))
print(addition('Hello', ' world!'))
# A function can only return one variable. However, we can easily combine multiple variables into one via a tuple!
def pairwise_addition(x,y,z):
    """Return the three pairwise sums (x+y, x+z, y+z) as a tuple."""
    return x+y,x+z,y+z
# Tuple unpacking splits the returned triple into three names.
x,y,z = pairwise_addition(1,2,3)
print(x, y, z)
# ## Exercise
#
# 1) The Fibonacci sequence is the set of numbers $a_0,a_1,\ldots$ that satisfy the following:
#
# $\begin{align*}
# a_i = \begin{cases}
# 1, & \text{if } i=0,1 \\
# a_{i-2} + a_{i-1}, &\text{otherwise}.
# \end{cases}
# \end{align*}$
#
# Write a function `Fibonacci(n)` that produces a list of all Fibonacci numbers that do not exceed $n$, where $n$ is the input.
#
# 2) Let's define the $k$-acci sequence to be the set of numbers $a_0,a_1,\ldots$ that satisfy the following:
#
# $\begin{align*}
# a_i = \begin{cases}
# 1, & \text{if } i=0,\ldots,k-1 \\
# \sum_{j=1}^k a_{i-j}, &\text{otherwise}.
# \end{cases}
# \end{align*}$
#
# Write a function `k_acci(n,k)` that produces a list of all k-acci numbers that do not exceed $n$.
#
#
def Fibonacci(n):
    """Return the list of Fibonacci numbers (1, 1, 2, 3, 5, ...) that do
    not exceed n. Returns [] for n < 1 (the previous version wrongly
    returned [1, 1] even when both seeds exceeded n)."""
    if n < 1:
        return []
    fibs = [1, 1]
    while True:
        nxt = fibs[-1] + fibs[-2]
        if nxt > n:
            break
        fibs.append(nxt)
    return fibs
def k_acci(n,k):
    """Return the list of k-acci numbers that do not exceed n.

    The sequence starts with k ones; each later term is the sum of the
    previous k terms (k=2 gives the Fibonacci sequence).

    Bug fixed: the previous version summed l[-1*i] for i in range(k), but
    -1*0 == 0 indexes the *first* element, so every term wrongly included
    seq[0] instead of the k-th previous term (e.g. k_acci(n, 2) did not
    reproduce the Fibonacci numbers). Summing the slice seq[-k:] takes the
    intended trailing window. Also returns [] for n < 1.
    """
    if n < 1:
        return []
    seq = [1] * k
    while True:
        nxt = sum(seq[-k:])
        if nxt > n:
            break
        seq.append(nxt)
    return seq
# Smoke tests: Fibonacci and tribonacci numbers up to 10.
Fibonacci(10)
k_acci(10,3)
|
intro_to_python/4_Control_flow_Complete.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Putting the "Re" in Reformer: Ungraded Lab
# This ungraded lab will explore Reversible Residual Networks. You will use these networks in this week's assignment that utilizes the Reformer model. It is based on the Transformer model you already know, but with two unique features.
# * Locality Sensitive Hashing (LSH) Attention to reduce the compute cost of the dot product attention and
# * Reversible Residual Networks (RevNets) organization to reduce the storage requirements when doing backpropagation in training.
#
# In this ungraded lab we'll start with a quick review of Residual Networks and their implementation in Trax. Then we will discuss the Revnet architecture and its use in Reformer.
# ## Outline
# - [Part 1: Residual Networks](#1)
# - [1.1 Branch](#1.1)
# - [1.2 Residual Model](#1.2)
# - [Part 2: Reversible Residual Networks](#2)
# - [2.1 Trax Reversible Layers](#2.1)
# - [2.2 Residual Model](#2.2)
#
#
#
# %%
import trax
from trax import layers as tl # core building block
import numpy as np # regular ol' numpy
from trax.models.reformer.reformer import (
ReversibleHalfResidualV2 as ReversibleHalfResidual,
) # unique spot
from trax import fastmath # uses jax, offers numpy on steroids
from trax import shapes # data signatures: dimensionality and type
from trax.fastmath import numpy as jnp # For use in defining new layer types.
from trax.shapes import ShapeDtype
from trax.shapes import signature
# %% [markdown]
# ## Part 1.0 Residual Networks
# [Deep Residual Networks ](https://arxiv.org/abs/1512.03385) (Resnets) were introduced to improve convergence in deep networks. Residual Networks introduce a shortcut connection around one or more layers in a deep network as shown in the diagram below from the original paper.
#
# <center><img src = "Revnet7.PNG" height="250" width="250"></center>
# <center><b>Figure 1: Residual Network diagram from original paper</b></center>
#
# The [Trax documentation](https://trax-ml.readthedocs.io/en/latest/notebooks/layers_intro.html#2.-Inputs-and-Outputs) describes an implementation of Resnets using `branch`. We'll explore that here by implementing a simple resnet built from simple function based layers. Specifically, we'll build a 4 layer network based on two functions, 'F' and 'G'.
#
# <img src = "Revnet8.PNG" height="200" width="1400">
# <center><b>Figure 2: 4 stage Residual network</b></center>
# Don't worry about the lengthy equations. Those are simply there to be referenced later in the notebook.
# %% [markdown]
# <a name="1.1"></a>
# ### Part 1.1 Branch
# Trax `branch` figures prominently in the residual network layer so we will first examine it. You can see from the figure above that we will need a function that will copy an input and send it down multiple paths. This is accomplished with a [branch layer](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#module-trax.layers.combinators), one of the Trax 'combinators'. Branch is a combinator that applies a list of layers in parallel to copies of inputs. Lets try it out! First we will need some layers to play with. Let's build some from functions.
# %% tags=[]
# simple function taking one input and one output
bl_add1 = tl.Fn("add1", lambda x0: (x0 + 1), n_out=1)
bl_add2 = tl.Fn("add2", lambda x0: (x0 + 2), n_out=1)
bl_add3 = tl.Fn("add3", lambda x0: (x0 + 3), n_out=1)
# try them out
x = np.array([1])
print(bl_add1(x), bl_add2(x), bl_add3(x))
# some information about our new layers
print(
"name:",
bl_add1.name,
"number of inputs:",
bl_add1.n_in,
"number of outputs:",
bl_add1.n_out,
)
# %%
# Branch applies all three layers in parallel to copies of the input.
bl_3add1s = tl.Branch(bl_add1, bl_add2, bl_add3)
bl_3add1s
# %% [markdown]
# Trax uses the concept of a 'stack' to transfer data between layers.
# For Branch, for each of its layer arguments, it copies the `n_in` inputs from the stack and provides them to the layer, tracking the max_n_in, or the largest n_in required. It then pops the max_n_in elements from the stack.
# <img src = "branch1.PNG" height="260" width="600">
# <center><b>Figure 3: One in, one out Branch</b></center>
# On output, each layer, in succession pushes its results onto the stack. Note that the push/pull operations impact the top of the stack. Elements that are not part of the operation (n, and m in the diagram) remain intact.
# %%
# n_in = 1, Each bl_addx pushes n_out = 1 elements onto the stack
bl_3add1s(x)
# %%
# n = np.array([10]); m = np.array([20]) # n, m will remain on the stack
n = "n"
m = "m" # n, m will remain on the stack
bl_3add1s([x, n, m])
# %% [markdown]
# Each layer in the input list copies as many inputs from the stack as it needs, and their outputs are successively combined on stack. Put another way, each element of the branch can have differing numbers of inputs and outputs. Let's try a more complex example.
# %%
bl_addab = tl.Fn(
"addab", lambda x0, x1: (x0 + x1), n_out=1
) # Trax figures out how many inputs there are
bl_rep3x = tl.Fn(
"add2x", lambda x0: (x0, x0, x0), n_out=3
) # but you have to tell it how many outputs there are
bl_3ops = tl.Branch(bl_add1, bl_addab, bl_rep3x)
# %% [markdown]
# In this case, the number of inputs being copied from the stack varies with the layer
# <img src = "branch2.PNG" height="260" width="600">
# <center><b>Figure 4: variable in, variable out Branch</b></center>
# The stack when the operation is finished is 5 entries reflecting the total from each layer.
# %%
# Before Running this cell, what is the output you are expecting?
y = np.array([3])
bl_3ops([x, y, n, m])
# %% [markdown]
# Branch has a special feature to support Residual Network. If an argument is 'None', it will pull the top of stack and push it (at its location in the sequence) onto the output stack
# <img src = "branch3.PNG" height="260" width="600">
# <center><b>Figure 5: Branch for Residual</b></center>
# %% tags=[]
bl_2ops = tl.Branch(bl_add1, None)
bl_2ops([x, n, m])
# %% [markdown]
# <a name="1.2"></a>
# ### Part 1.2 Residual Model
# OK, your turn. Write a function 'MyResidual', that uses `tl.Branch` and `tl.Add` to build a residual layer. If you are curious about the Trax implementation, you can see the code [here](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py).
# %%
# Exercise stub (intentionally unfinished): the residual combinator should
# branch the input into [layer, identity] and add the two results, e.g.
# tl.Branch(layer, None) followed by tl.Add().
# NOTE(review): leading indentation in this cell appears to have been
# stripped during export; restore it before running.
def MyResidual(layer):
return tl.Serial(
### START CODE HERE ###
# tl.----,
# tl.----,
### END CODE HERE ###
)
# %%
# Lets Try it
mr = MyResidual(bl_add1)
x = np.array([1])
mr([x, n, m])
# %% [markdown]
# **Expected Result**
# (array([3]), 'n', 'm')
# %% [markdown]
# Great! Now, let's build the 4 layer residual Network in Figure 2. You can use `MyResidual`, or if you prefer, the tl.Residual in Trax, or a combination!
# %%
# F doubles its input, G multiplies by 10 -- building blocks for Figure 2.
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
x1 = np.array([1])
# %%
# Exercise stub (intentionally unfinished): chain four residual stages.
resfg = tl.Serial(
### START CODE HERE ###
# None, #Fl # x + F(x)
# None, #Gl # x + F(x) + G(x + F(x)) etc
# None, #Fl
# None, #Gl
### END CODE HERE ###
)
# %%
# Lets try it
resfg([x1, n, m])
# %% [markdown]
# **Expected Results**
# (array([1089]), 'n', 'm')
# %% [markdown]
# <a name="2"></a>
# ## Part 2.0 Reversible Residual Networks
# The Reformer utilized RevNets to reduce the storage requirements for performing backpropagation.
# <img src = "Reversible2.PNG" height="260" width="600">
# <center><b>Figure 6: Reversible Residual Networks </b></center>
# The standard approach on the left above requires one to store the outputs of each stage for use during backprop. By using the organization to the right, one need only store the outputs of the last stage, y1, y2 in the diagram. Using those values and running the algorithm in reverse, one can reproduce the values required for backprop. This trades additional computation for memory space which is at a premium with the current generation of GPU's/TPU's.
#
# One thing to note is that the forward functions produced by two networks are similar, but they are not equivalent. Note for example the asymmetry in the output equations after two stages of operation.
# <img src = "Revnet1.PNG" height="340" width="1100">
# <center><b>Figure 7: 'Normal' Residual network (Top) vs REversible Residual Network </b></center>
#
# ### Part 2.1 Trax Reversible Layers
#
# Let's take a look at how this is used in the Reformer.
# %%
# Instantiate a small ReformerLM just to inspect its layer structure.
refm = trax.models.reformer.ReformerLM(
vocab_size=33000, n_layers=2, mode="train" # Add more options.
)
refm
# %% [markdown]
# Eliminating some of the detail, we can see the structure of the network.
# <img src = "Revnet2.PNG" height="300" width="350">
# <center><b>Figure 8: Key Structure of Reformer Reversible Network Layers in Trax </b></center>
#
# We'll review the Trax layers used to implement the Reversible section of the Reformer. First we can note that not all of the reformer is reversible. Only the section in the ReversibleSerial layer is reversible. In a large Reformer model, that section is repeated many times making up the majority of the model.
# <img src = "Revnet3.PNG" height="650" width="1600">
# <center><b>Figure 9: Functional Diagram of Trax elements in Reformer </b></center>
# %% [markdown]
# The implementation starts by duplicating the input to allow the two paths that are part of the reversible residual organization with [Dup](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py#L666). Note that this is accomplished by copying the top of stack and pushing two copies of it onto the stack. This then feeds into the ReversibleHalfResidual layer which we'll review in more detail below. This is followed by [ReversibleSwap](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L83). As the name implies, this performs a swap, in this case, the two topmost entries in the stack. This pattern is repeated until we reach the end of the ReversibleSerial section. At that point, the topmost 2 entries of the stack represent the two paths through the network. These are concatenated and pushed onto the stack. The result is an entry that is twice the size of the non-reversible version.
#
# Let's look more closely at the [ReversibleHalfResidual](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L154). This layer is responsible for executing the layer or layers provided as arguments and adding the output of those layers, the 'residual', to the top of the stack. Below is the 'forward' routine which implements this.
# <img src = "Revnet4.PNG" height="650" width="1600">
# <center><b>Figure 10: ReversibleHalfResidual code and diagram </b></center>
#
# Unlike the previous residual function, the value that is added is from the second path rather than the input to the set of sublayers in this layer. Note that the Layers called by the ReversibleHalfResidual forward function are not modified to support reverse functionality. This layer provides them a 'normal' view of the stack and takes care of reverse operation.
#
# Let's try out some of these layers! We'll start with the ones that just operate on the stack, Dup() and Swap().
# %%
x1 = np.array([1])
x2 = np.array([5])
# Dup() duplicates the Top of Stack and returns the stack
dl = tl.Dup()
dl(x1)
# %%
# ReversibleSwap() swaps the top two elements of the stack
sl = tl.ReversibleSwap()
sl([x1, x2])
# %% [markdown]
# You are no doubt wondering "How is ReversibleSwap different from Swap?". Good question! Lets look:
# <img src = "Revnet5.PNG" height="389" width="1000">
# <center><b>Figure 11: Two versions of Swap() </b></center>
# The ReverseXYZ functions include a "reverse" complement to their "forward" function that provides the functionality to run in reverse when doing backpropagation. It can also be run in reverse by simply calling 'reverse'.
# %%
# Demonstrate reverse swap
print(x1, x2, sl.reverse([x1, x2]))
# %% [markdown]
# Let's try ReversibleHalfResidual, First we'll need some layers..
# %%
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
# %% [markdown]
# Just a note about ReversibleHalfResidual. As this is written, it resides in the Reformer model and is a layer. It is invoked a bit differently than other layers. Rather than tl.XYZ, it is just ReversibleHalfResidual(layers..) as shown below. This may change in the future.
# %%
half_res_F = ReversibleHalfResidual(Fl)
print(type(half_res_F), "\n", half_res_F)
# %%
half_res_F([x1, x1])  # this is going to produce an error - why?
# %%
# we have to initialize the ReversibleHalfResidual layer to let it know what the input is going to look like
half_res_F.init(shapes.signature([x1, x1]))
half_res_F([x1, x1])
# %% [markdown]
# Notice the output: (DeviceArray([3], dtype=int32), array([1])). The first value, (DeviceArray([3], dtype=int32) is the output of the "Fl" layer and has been converted to a 'Jax' DeviceArray. The second array([1]) is just passed through (recall the diagram of ReversibleHalfResidual above).
# %% [markdown]
# The final layer we need is the ReversibleSerial Layer. This is the reversible equivalent of the Serial layer and is used in the same manner to build a sequence of layers.
# %% [markdown]
# <a name="2.2"></a>
# ### Part 2.2 Build a reversible model
# We now have all the layers we need to build the model shown below. Let's build it in two parts. First we'll build 'blk' and then a list of blk's. And then 'mod'.
# <center><img src = "Revnet6.PNG" height="800" width="1600"> </center>
# <center><b>Figure 12: Reversible Model we will build using Trax components </b></center>
# %%
# Exercise stubs (intentionally unfinished): fill in the four reversible
# layers of one block, then assemble the full model.
blk = [ # a list of the 4 layers shown above
### START CODE HERE ###
None,
None,
None,
None,
]
blks = [None, None]
### END CODE HERE ###
# %%
mod = tl.Serial(
### START CODE HERE ###
None,
None,
None,
### END CODE HERE ###
)
mod
# %% [markdown]
# **Expected Output**
# ```
# Serial[
# Dup_out2
# ReversibleSerial_in2_out2[
# ReversibleHalfResidualV2_in2_out2[
# Serial[
# F
# ]
# ]
# ReversibleSwap_in2_out2
# ReversibleHalfResidualV2_in2_out2[
# Serial[
# G
# ]
# ]
# ReversibleSwap_in2_out2
# ReversibleHalfResidualV2_in2_out2[
# Serial[
# F
# ]
# ]
# ReversibleSwap_in2_out2
# ReversibleHalfResidualV2_in2_out2[
# Serial[
# G
# ]
# ]
# ReversibleSwap_in2_out2
# ]
# Concatenate_in2
# ]
# ```
# %%
# Initialize with the input signature, then run the reversible model.
mod.init(shapes.signature(x1))
out = mod(x1)
out
# %% [markdown]
# **Expected Result**
# DeviceArray([ 65, 681], dtype=int32)
# %% [markdown]
# OK, now you have had a chance to try all the 'Reversible' functions in Trax. On to the Assignment!
|
utf-8''C4_W4_Ungraded_Lab_Revnet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''base'': conda)'
# name: python3
# ---
# +
import hana_ml
from hana_ml import dataframe
from hana_ml.algorithms.pal.utility import DataSets, Settings
# Read SAP HANA connection settings (host, port, credentials) from the
# local config file and open a connection.
url, port, user, pwd = Settings.load_config("../../config/e2edata.ini")
connection_context = dataframe.ConnectionContext(url, port, user, pwd)
# Load the diabetes demo dataset (full / train / test splits plus one
# unused handle) through the connection.
full_set, train_set, test_set, _ = DataSets.load_diabetes_data(connection_context)
# -
data = full_set
data.head(3).collect()
from hana_ml.algorithms.pal.auto_ml import Preprocessing
# Exercise each PAL preprocessing algorithm on the BMI column;
# .collect() pulls the result set into the client.
result = Preprocessing(name="FeatureNormalizer").fit_transform(data=data, key="ID", features=["BMI"])
result.collect()
result = Preprocessing(name="KBinsDiscretizer").fit_transform(data=data, key="ID", features=["BMI"])
result.collect()
result = Preprocessing(name="Imputer").fit_transform(data=data, key="ID", features=["BMI"])
result.collect()
result = Preprocessing(name="Discretize").fit_transform(data=data, key="ID", features=["BMI"])
result.collect()
# Dimensionality reduction over three numeric features.
result = Preprocessing(name="MDS").fit_transform(data=data, key="ID", features=["PEDIGREE", "BMI", "PREGNANCIES"])
result.collect()
result = Preprocessing(name="PCA").fit_transform(data=data, key="ID", features=["PEDIGREE", "BMI", "PREGNANCIES"])
result.collect()
# Feature selection with the "CSO" method, using BMI as the label.
result = Preprocessing(name="FeatureSelection", fs_method="CSO").fit_transform(data=data, key="ID", label="BMI")
result.collect()
|
Python-API/pal/notebooks/Preprocessing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py38 mmc-sgp
# language: python
# name: mmc-sgp
# ---
# +
import pandas as pd
import glob
f_dir = '/Users/leec813/OneDrive - PNNL/Documents/mmc/sgp/data/0805_0901/ts/0805-edmf/'
run_hours = 12.5
time_step_sec = 9
# Expected number of time-series records for a complete run.
ideal_file_num = run_hours*60*60/time_step_sec
# WORKS
#for file in glob.glob(f_dir+'*d02*')[:1]:
file = f_dir+'c1.d02.PH'
new_file = file+'_new'
print(file)
# Copy `file` to `new_file`, dropping duplicate lines (first occurrence
# wins). Context managers replace the original bare open()/close():
# the read handle was previously opened inside the `for` statement and
# never closed (resource leak).
lines_seen = set()  # holds lines already seen
with open(file, 'r') as infile, open(new_file, 'w') as outfile:
    for line in infile:
        if line not in lines_seen:  # not a duplicate
            outfile.write(line)
            lines_seen.add(line)
# Sanity check: the deduplicated file should hold exactly one record per
# model time step.
with open(new_file, 'r') as f:
    lines = f.readlines()
if len(lines) != ideal_file_num:
    print('--- WRONG line numbers ---')
    print('file has', str(len(lines)))
    print('should be', str(ideal_file_num))
# -
print(new_file)
# Load the cleaned file; single-space delimiter, skip the header line.
# NOTE(review): assumes fields are separated by exactly one space — confirm.
df = pd.read_csv(new_file, delimiter=' ', skiprows=1, header=None)
df
# NOTE(review): `test_2` is never defined anywhere in this notebook — this
# cell raises NameError as written; it looks like a leftover debug cell.
lines_seen = set() # holds lines already seen
outfile = open(test_2, 'w')
for line in open(file, 'r'):
    if line not in lines_seen: # not a duplicate
        outfile.write(line)
        lines_seen.add(line)
outfile.close()
df = pd.read_csv(test_2, delimiter=' ', skiprows=1, header=None)
df
# Shell check/fix: print the path, then write a copy of $file with line
# 253963 deleted into $test_f.
# !echo "$file"
# !sed '253963d' < "$file" > "$test_f"
# Compare line counts before/after the sed fix.
# NOTE(review): `test_f` is referenced by the shell magic and by the Python
# code below but is never assigned in Python in this section — confirm it
# is defined in an earlier cell before running.
with open(file, "r") as f:
    lines = f.readlines()
len(lines)
with open(test_f, "r") as f:
    lines = f.readlines()
len(lines)
# Rewrite `file` in place, dropping lines equal to the sentinel string.
# NOTE(review): "nickname_to_delete" looks like a placeholder from a
# copied snippet — replace with the actual line to remove.
with open(file, "r") as f:
    lines = f.readlines()
with open(file, "w") as f:
    for line in lines:
        if line.strip("\n") != "nickname_to_delete":
            f.write(line)
df.columns
df.head()
# +
# Merge two text files: write file1's contents followed by file2's into
# file3, with file2 starting on a new line.
data = data2 = ""
# Read the first input file.
with open('file1.txt') as fp:
    data = fp.read()
# Read the second input file.
with open('file2.txt') as fp:
    data2 = fp.read()
# Append file2's text after a newline separator.
data += "\n" + data2
# Write the merged result.
with open('file3.txt', 'w') as fp:
    fp.write(data)
|
sgp/we42120/e01_test-debug-tsfiles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Factors influencing client choice
# ## Context
# This analysis was done for a company which provides online and mobile platform that matches freelance labor with local demand.
#
# ## The problem
# Determine the factors that impact a client choice to select a freelancer.
#
# ## Approach
# Predictive model using supervised machine learning algorithm (Random Forest).
#
# ### Data
# Sample data, simulating a real-world data set. The dataset contains information about "recommendations". A "recommendation" is the group of Taskers from which the Client can choose one to book.
#
# Column description:
# * recommendation_id: unique identifier for this recommendation, or set of freelancers shown
# * timestamp: when this recommendation was shown to the client
# * tasker_id: unique identifier for the freelancer
# * position: the position of the freelancer in the recommendation set, 1 - first, 2 - second, etc.
# * hourly_rate: the hourly rate for the freelancer when they were shown
# * num_completed_tasks: the number of tasks the freelancer had completed in that category, when they were shown
# * hired: was the freelancer hired or not? Only 1 tasker out of a set of recommendations can be hired
# * category: the category of work the client needs help with
# +
# Importing packages
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from IPython.display import Image
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
# Randomized search on hyper parameters
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
import sklearn.metrics as metrics
from sklearn.metrics import accuracy_score, roc_curve, classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score, KFold
from sklearn.utils import resample
# -
# ### Importing the data
# Load the sample recommendations data; parse `created_at` as datetimes.
# NOTE(review): the column description above calls this field "timestamp" —
# the CSV evidently names it `created_at`; confirm against the file.
df0 = pd.read_csv('sample_data.csv',
                  parse_dates=['created_at'])
df0.head()
# # Exploratory Data Analysis
print(df0.info())
# The data ranges from 2017-09-01 to 2017-09-30
df0.created_at.apply(['min','max'])
# ### Number of recommendation sets in this data sample
df0.recommendation_id.nunique()
# * There are 2,100 recommendation sets in this dataset.
# ### Trends over time
# The chart below gives an overview of the daily recommendations shown and bookings during the period analyzed.
# +
# Daily counts: unique recommendation sets shown and total hires.
daily = df0.groupby(pd.Grouper(key='created_at',freq='D'))\
    ['recommendation_id','hired'].agg({'recommendation_id': pd.Series.nunique,
                                       'hired': 'sum'})
#print(daily_recom.head())
daily.plot(figsize=(12,4))
plt.title('Daily recommendations and bookings \n Period: 2017-09-01 to 2017-09-30',
          size=16)
plt.ylabel('Number');
# -
# The chart below shows the weekly count of recommendations and bookings during the period analyzed. It looks like after the first week, the number of recommendations and bookings stays constant.
# +
# Weekly counts: same aggregation at weekly frequency.
weekly = df0.groupby(pd.Grouper(key='created_at',freq='W'))\
    ['recommendation_id','hired'].agg({'recommendation_id': pd.Series.nunique,
                                       'hired': 'sum'})
#print(daily_recom.head())
weekly.plot(figsize=(12,4))
plt.title('Weekly recommendations and bookings \n Period: 2017-09-01 to 2017-09-30',
          size=16)
plt.ylabel('Number');
# -
# * Note that when aggregating over week, the time dependency disappeared.
# ### Conversion rate per category
# Considering the formula:
# $$\text{Conversion_Rate}=\frac{\text{Freelancers_hired}}{\text{Total_recommendations_shown}}$$<br>
#
# The conversion rate per category is calculated for the period analyzed.
# Unique recommendation sets and total hires per category.
recom_by_cat = df0.groupby('category').recommendation_id.nunique()
hires_by_cat = df0.groupby('category').hired.sum()
# Conversion rate (%) = hires / recommendation sets shown.
conv_rate_by_cat = hires_by_cat*100/recom_by_cat
print("\n * Recommendations by category: "+str(recom_by_cat) +
      "\n\n * Hires by category: " + str(hires_by_cat)+
      "\n\n * Conversion rate by category: " + str(round(conv_rate_by_cat,1)))
# ### Each recommendation set shows from 1 to 15 freelancers. Below are calculated:
# - average number of freelancers shown
# - median number of freelancers shown
# Number of taskers displayed in each recommendation set.
taskers_per_rec = df0.groupby(['recommendation_id'])['tasker_id'].count()
# Average and median of number of taskers shown per recommendation
taskers_per_rec.agg({'Mean': np.mean, 'Median': np.median}).round(1)
# * On average, each recommendation shows 14 freelancers.<br>
# * The median number of freelancers shown per recommendation is 15.<br>
taskers_per_rec.hist(figsize=(9,4), bins=15);
plt.title('Frequency of taskers shown per recommendation', size=16)
plt.ylabel('# of recommendations');
# * The chart below displays the distribution of number of freelancers shown. In this dataset, few recommendations showed less than 15 freelancers. Almost 2,000 users were shown 15 freelancers per recommendations during the analyzed period.
#
# ### Distribution of booking rate by position for each category
# Booking rate: number of hires divided by number of tasker displayed
# Booking rate per (category, position): mean of the 0/1 `hired` flag.
booking_rate = df0.groupby( ['category', 'position'] )['hired']\
    .mean().round(2)
booking_rate_df = pd.DataFrame({'booking_rate' : booking_rate}).reset_index()
booking_rate_df[['booking_rate','position','category']]
# The bar chart below shows that for all three categories, taskers that are displayed in the first position have the highest booking rate.
plt.figure(figsize=(9, 14))
booking_rate.plot('barh', color='grey')
plt.title('Booking rate by position for each category', size=16)
plt.xlabel('rate');
# The table below presents the same information in a format that simplifies comparison across categories.
# Presenting the information in a format that allows to compare categories
booking_rate.unstack(level=1).T
# ### For each category, average position of the freelancer who is hired
# Average position (by category) of freelancers who were hired.
df0[df0['hired'] == 1].groupby(['category'])['position'].mean().round(1)
# * On average, freelancers that are hired are displayed in the positions shown above.<br>
# * It is worth mentioning that the median position for hired freelancers is lower than the average position, which indicates that the data is right skewed. See results below.
# Fix: pass a LIST (not a set) of aggregations — a set gives the output
# columns an arbitrary, nondeterministic order.
df0[df0['hired'] == 1].groupby(['category'])['position'].agg(['mean', 'median']).round(1)
# ### For each category, average hourly rate and average number of completed tasks for the freelancers who are hired
df0[df0['hired'] == 1].groupby(['category']).agg({
    'hourly_rate': ['mean', 'median'],
    'num_completed_tasks': ['mean', 'median']
}).round(2)
# On average, hired freelancers in the:
# * furniture assembly category charge 38.70 UDS per hour (the lowest rate of all three categories)
# * mounting category charge 50.15 USD per hour
# * moving help charge 63.01 USD per hour (the highest rate of all three categories). The median hourly rate is only 49 USD.
#
# On average, freelancers hired have completed more than 250 tasks. However, the median value is significantly lower.
# ### For each category, average hourly rate and average number of completed tasks for the freelancers who are not hired
# Same rate/completed-task summary for freelancers who were NOT hired,
# for comparison with the hired group above.
df0[df0['hired'] ==0].groupby(['category']).agg({
    'hourly_rate':['mean', 'median'],
    'num_completed_tasks':['mean','median']
}).round(2)
# * On average, freelancers that are not hired have similar hourly rate than hired ones, with the exception of moving help (this freelancers charge 83.74 USD per hour => ~ 21 USD per hour more, on average, than hired freelancers).
#
# * Freelancers that are not hired have completed fewer tasks, on average, than the freelancers that are hired.
# # What factors do impact a client choice to select a freelancer?
#
# This is a binary classification problem, where 'hired' is the target variable (1 when the freelancer is hired and 0 when not).<br>
# The features used for the predictive model are position, hourly_rate, num_completed_tasks, and category.
#
# As seen at the beginning of this notebook, time dependency disappears when aggregating data over week, however, there will not be enough data points after averaging over week. Therefore, For simplicity, as a minimum viable product model, the timestamp is not considered as a feature in the machine learning modelling. <br>
#
# ### Building the new dataframe for the analysis
# Keep only the model features plus the target column.
col = ['position', 'hourly_rate', 'num_completed_tasks', 'category', 'hired']
data = df0[col]
data.shape
# Checking how many observations are in each class of the target variable 'hired'.
# +
result_count = data.groupby('hired')['category'].count()
print(result_count)
plt.title('\n Distribution of the target variable "hired"', size=16)
result_count.plot(kind='bar')
plt.xlabel('# taskers');
# -
# * There are more than 28,000 observations of taskers that are not hired (0) and only 1,705 observations of hired taskers. Therefore the data are unbalanced. This represents a problem for training the model, which will be addressed in more detail later.
# ### Distribution of variables
# The boxplots below shows the distribution of the explanatory variables.
# +
# Per-feature distributions split by the target class.
data.boxplot(column='position', by='hired')
plt.title("Boxplot of position - grouped by hired")
plt.suptitle("");
data.boxplot(column='hourly_rate', by='hired')
plt.title("Boxplot of hourly_rate - grouped by hired")
plt.suptitle("");
data.boxplot(column='num_completed_tasks', by='hired')
plt.title("Boxplot of num_completed_tasks - grouped by hired")
plt.suptitle("");
# -
# * For not hired taskers, the median position is 8, whereas the median position for hired taskers is 2. This feature is a potential important factor for the predictive model.
#
# * On the other hand, there is not a clear difference in the hourly rate charged by hired and not hired taskers (as it was pointed out in question 6).
# Pairwise scatterplots and per-feature distributions, colored by class.
sns.pairplot(data, hue="hired");
# * The plot above shows the relationship between variables (scatterplots) and the distribution of features. For both classes (hired and non hired) the distribution of _'hourly rate'_ and _'number of completed tasks'_ are right skewed. This is the case for _'position'_ of taskers that are hired but the distribution of taskers that are not hired is very close to be uniform.
#
# ## Correlation Matrix
corr = data.corr() # .corr is used to find correlation
f,ax = plt.subplots(figsize=(10, 4))
sns.heatmap(corr, cbar = True, square = True, annot = True, fmt= '.1f',
            xticklabels= True, yticklabels= True
            ,cmap="coolwarm", linewidths=.5, ax=ax);
# * The heatmap shows that the correlations between explanatory variables is very low. That is, each of them has a different effect on the target variable. Therefore, there are not multicollinearity issues.
#
# ## Building a predictive model
#
# The dataset has a categorical feature, 'category', which needs to be codified in order to be understood by the algorithm.
# +
# Split into feature matrix X and target vector y.
X = data.drop('hired', axis =1, inplace=False) # features
y = data.hired # target
X.head(2)
# -
# Each category was converted to a number using LabelEncoder:
# * 0 ==> Furniture Assembly
# * 1 ==> Moving Help
# * 2 ==> Mounting
# +
# Encode the string `category` column as integer codes.
labelencoder = LabelEncoder()
X.category = labelencoder.fit_transform(X.category)
X.head()
# -
# But now the categories have an ordered relationship which is not true in reality. OneHotEncoder converts those features to vectors, as shown below.
# +
# Separating the labels
# NOTE(review): `categorical_features` was deprecated in scikit-learn 0.20
# and removed in 0.22 — on modern sklearn, one-hot encode column index 3
# via ColumnTransformer (or pd.get_dummies) instead.
ohe = OneHotEncoder(categorical_features = [3])
X_onehot = ohe.fit_transform(X).toarray()
X_onehot.shape # array
# -
# The result is an array that was converted into a dataframe.
# Encoded category columns come first, then the untouched numeric features.
columns = ['category_1','category_2', 'category_3', 'position',
           'hourly_rate', 'num_completed_tasks']
df_onehot=pd.DataFrame(X_onehot, columns=columns)
df_onehot.head()
df_onehot.shape
y.shape
# ### Splitting the data into train and test
#
# 75% of the data was used to train the model and the other 25% was used to test it.
X_train, X_test, y_train, y_test = train_test_split(df_onehot,y, test_size=0.25,
                                                    random_state=0)
X_train.shape
# ## Random Forest Classifier (unbalanced data)
#
# The best hyperparameters for the model were found using RandomizedSearchCV.
# +
# Create the random grid
# Hyperparameter search space for the Random Forest.
param_dist = {'n_estimators': [50,100,150,200,250],
              "max_features": [1,2,3,4,5,6],
              'max_depth': [1,2,3,4,5,6,7,8],
              "criterion": ["gini", "entropy"]}
rf = RandomForestClassifier()
rf_cv = RandomizedSearchCV(rf, param_distributions = param_dist,
                           cv = 5, random_state=0, n_jobs = -1)
rf_cv.fit(X_train, y_train)
print("Tuned Random Forest Parameters: %s" % (rf_cv.best_params_))
# -
# The algorithm was instantiated (using the above parameters), the model was trained (fit) using the training dataset, and finally, predictions were made using the test data.
# Final model with the tuned hyperparameters; class_weight="balanced"
# reweights classes to compensate for the target imbalance during training.
Ran = RandomForestClassifier(criterion= 'gini', max_depth= 6,
                             max_features= 5, n_estimators= 50,
                             class_weight="balanced",
                             random_state=0)
Ran.fit(X_train, y_train)
y_pred = Ran.predict(X_test)
# ### Model Evaluation
#
# The metrics used to evaluate performance of the model are: accuracy, precision, recall, f1-score, and ROC (AUC). A confusion matrix was used to visualize predictions vs actual values.<br>
#
# ![][Confusion Matrix]
#
# [Confusion Matrix]:https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSDNliWqUcn9s7oL6L76dcJx3G0UWZFfVAryQ4zAlbs0y1bIobj "Confusion Matrix"
#
# $$\text{Accuracy}=\frac{\text{TP + TN}}{\text{TP + FP + TN + FN}}$$<br>
# $$\text{Precision}=\frac{\text{TP}}{\text{TP + FP}}$$<br>
# $$\text{Recall}=\frac{\text{TP}}{\text{TP + FN}}$$<br>
# $$\text{F1-score}=\frac{\text{2 TP}}{\text{2TP + FP + FN}}$$<br>
#
# 5-fold cross-validation was also used to obtain a less biased model (avoiding overfitting the data). The five values of accuracy were averaged and the errors are estimated by calculating the standard deviation, see below.
# +
# Evaluate on the held-out test set and with 5-fold CV on the train set.
print('Accuracy:', metrics.accuracy_score(y_pred,y_test))
## 5-fold cross-validation
cv_scores =cross_val_score(Ran, X_train, y_train, cv=5)
# Print the 5-fold cross-validation scores
print()
print(classification_report(y_test, y_pred))
print()
print("Average 5-Fold CV Score: {}".format(round(np.mean(cv_scores),4)),
      ", Standard deviation: {}".format(round(np.std(cv_scores),4)))
plt.figure(figsize=(4,3))
ConfMatrix = confusion_matrix(y_test,Ran.predict(X_test))
sns.heatmap(ConfMatrix,annot=True, cmap="Blues" ,fmt='g',
            xticklabels = ['Not hired', 'Hired'],
            yticklabels = ['Not hired', 'Hired'])
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title("Confusion Matrix - Random Forest");
# -
# * The average accuracy using 5-Fold cross-validation is 0.779 (78%), with standard deviation of 0.0085. This is very similar to the accuracy obtained by the test set (0.78).
#
# * Precision is very high (0.98) for the class were freelancers are not hired. However, for taskers that are hired, the precision is extremely low with a value of 0.15. This translates into a low f1 score of 0.24 for the hired class.
# ### Feature selection using unbalanced data
# +
# Feature selection using unbalanced data
#RandomForest
# Impurity-based feature importances from the fitted forest.
impor_Forest = Ran.feature_importances_
#indices = np.argsort(importances)[::-1]
indices_1 = np.argsort(impor_Forest)[::-1]  # (computed but unused below)
featimp_1 = pd.Series(impor_Forest, index=X_train.columns).sort_values(ascending=False)
Table_impor = pd.DataFrame({'Random-Forest': featimp_1}).sort_values('Random-Forest',
                                                                    ascending=True)
print(Table_impor)
Table_impor.plot(kind='barh', figsize = (7, 4), legend=None)
plt.title('Feature importance', size=20);
# -
# ### Results using unbalanced dataset:
# * The most important factors that impact a client's choice to select a freelancer are: position, number of completed tasks, and hourly rate.
#
# * Due to the fact that the target variable is unbalanced, the algorithm does not have enough records to train the model for the case when a freelancer is hired (hired = 1), resulting in a low precision for these cases (0.15 - high number of false positives).
#
# * A low precision (positive class) means that the model will have a high number of false positives (FP), i.e., freelancers that are not booked but the model predicts as hired.
#
# * On the other hand, having a low recall means that the number of false negative (FN) would be high, i.e., freelancers that are booked are predicted as not hired.
#
# * Because both scenarios are not favorable for our use case, a good metric is the f1-score (harmonic mean of precision and recall).
# ## Balancing the data and Random Forest Classifier
#
# In order to increase the precision of this model, both target classes need to be balanced, so the algorithm has enough example from hired and not hired to train the model. In order to balance the dataset the minority class was up-sampled.
#
# The steps for up-sampling are:
#
# 1. Separate observations from each class into different DataFrames.
# 2. Resample the minority class with replacement, setting the number of samples to match the majority class.
# 3. Combine the up-sampled minority class DataFrame with the original majority class DataFrame.
# Creating a dataframe with features and target
df_enc = pd.concat([X_train, y_train], axis=1)
df_enc.shape
df_enc.head()
# checking the amount of hired and non-hired cases
df_enc.hired.value_counts().plot(kind='bar');
# +
# separate minority and majority classes
not_hired = df_enc[df_enc.hired == 0]
hired = df_enc[df_enc.hired == 1]
# upsample minority
hired_upsampled = resample(hired,
                           replace=True, # sample with replacement
                           n_samples=len(not_hired), # match number in majority class
                           random_state=0) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([not_hired, hired_upsampled])
# -
upsampled.shape
upsampled.hired.value_counts().plot(kind='bar');
# Rebuild the feature matrix / target vector from the balanced frame.
X_upsampled = upsampled[columns]
X_upsampled.shape
y_upsampled = upsampled.hired
y_upsampled.shape
# NOTE(review): the upsampling above happens BEFORE this re-split, so
# duplicated minority rows can land in both train and test — test metrics
# are optimistic. Prefer upsampling only the training fold.
X_train, X_test, y_train, y_test = train_test_split(X_upsampled, y_upsampled,
                                                    test_size=0.25,
                                                    random_state=0)
print(X_train.shape)
print(y_train.shape);
# +
# Create the random grid
# Re-tune hyperparameters, now on the balanced (up-sampled) training data;
# same search space as the first search above.
param_dist = {'n_estimators': [50,100,150,200,250],
              "max_features": [1,2,3,4,5,6],
              'max_depth': [1,2,3,4,5,6,7,8],
              "criterion": ["gini", "entropy"]}
rf = RandomForestClassifier()
rf_cv = RandomizedSearchCV(rf, param_distributions = param_dist,
                           cv = 5, random_state=0, n_jobs = -1)
rf_cv.fit(X_train, y_train)
print("Tuned Random Forest Parameters: %s" % (rf_cv.best_params_))
# +
# Final model on the balanced data (no class_weight needed here).
Ran = RandomForestClassifier(criterion= 'entropy', max_depth= 7,
                             max_features= 2, n_estimators= 250,
                             random_state=0)
Ran.fit(X_train, y_train)
y_pred = Ran.predict(X_test)
print('Accuracy:', metrics.accuracy_score(y_pred,y_test))
## 5-fold cross-validation
cv_scores =cross_val_score(Ran, X_train, y_train, cv=5)
# Print the 5-fold cross-validation scores
print()
print(classification_report(y_test, y_pred))
print()
print("Average 5-Fold CV Score: {}".format(round(np.mean(cv_scores),4)),
      ", Standard deviation: {}".format(round(np.std(cv_scores),4)))
plt.figure(figsize=(4,3))
ConfMatrix = confusion_matrix(y_test,Ran.predict(X_test))
sns.heatmap(ConfMatrix,annot=True, cmap="Blues" ,fmt='g',
            xticklabels = ['Not hired', 'Hired'],
            yticklabels = ['Not hired', 'Hired'])
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title("Confusion Matrix - Random Forest (up-sampled data)");
# -
# * By upsampling the data, the f1-score of the 'hired' class increased from 0.24 to 0.75. This result was expected since the algorithm had more samples to be trained and thus, it is able to better generalize and make predictions using unseen data.
#
# ### Receiver Operating Characteristic (ROC)
# The performance of the binary classifier is visualized using the ROC graph. For all possible threshold of probabilities, the false positive and true positive rates are calculated.
#
# ### Area Under the Curve (AUC)
# It measures performance across all classification thresholds. An AUC of 1 means the model can perfectly separate classes. An AUC of 0.5 means the model has no separation power and it is equivalent to a random chance.
#
# Below, there is an schema showing the distributions of probabilities of a model and its corresponding ROC curve.
# ![][Distribution of Probabilities]
#
# [Distribution of Probabilities]:https://miro.medium.com/max/507/1*yF8hvKR9eNfqqej2JnVKzg.png "Distribution of Probabilities"
#
# ![][ROC]
#
# [ROC]:https://miro.medium.com/max/365/1*-tPXUvvNIZDbqXP0qqYNuQ.png "ROC"
# +
# ROC
# Predicted probability of the positive ('hired') class.
y_pred_proba_RF = Ran.predict_proba(X_test)[::,1]
fpr1, tpr1, _ = metrics.roc_curve(y_test, y_pred_proba_RF)
auc1 = metrics.roc_auc_score(y_test, y_pred_proba_RF)
plt.figure(figsize=(7,5))
plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal (AUC = 0.5)
plt.plot(fpr1,tpr1,label="Random Forest, auc="+str(round(auc1,2)))
plt.legend(loc=4, title='Models', facecolor='white')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate (Recall, Sensititvity, hit rate)')
plt.title('ROC', size=15)
#plt.box(False)
plt.savefig('ImageName', format='png', dpi=200, transparent=True);
# -
# * The AUC obtained for this model is 0.85, which means that there is 85% chance that the model will be able to classify correctly hired from non hired freelancers.
#
# ### Feature importance using balanced dataset
# +
# Feature importances from the model trained on the BALANCED (up-sampled)
# data — the original comment here incorrectly said "umbalanced".
#RandomForest
impor_Forest = Ran.feature_importances_
#indices = np.argsort(importances)[::-1]
indices_1 = np.argsort(impor_Forest)[::-1]  # (computed but unused below)
featimp_1 = pd.Series(impor_Forest, index=X_train.columns).sort_values(ascending=False)
Table_impor = pd.DataFrame({'Random-Forest': featimp_1}).sort_values('Random-Forest',
                                                                    ascending=True)
print(Table_impor)
Table_impor.plot(kind='barh', figsize = (7, 4), legend=None)
plt.title('Feature importance', size=20);
# -
# ## Conclusions
#
# The factors that impact the most a client choice to select a freelancer are:
# > POSITION (the most important one) <br>
# > NUMBER COMPLETED TASKS <br>
# > HOURLY RATE <br>
#
# - Feature importance is the same for the models built with unbalanced and balanced data, which indicates that the feature importance obtained by this model is robust.<br>
# - By balancing the classes of the data set, the precision of the positive class was increased from 0.15 to 0.77, while the model maintains a good recall of about 0.76.<br>
# - Accuracy obtained by the test set and using 5-fold cross-validation are around the same value, 0.76.
|
Factors_Influencing_Client_Choice.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# encoding: utf-8
# encoding: iso-8859-1
# encoding: win-1252
import json
import nltk
from nltk.tokenize import TweetTokenizer
from IPython.display import clear_output, display
from nltk.corpus import stopwords
import re
import math
import operator
# Tweet-aware tokenizer (not used in this visible section).
tknzr = TweetTokenizer()
# Load the term -> postings inverted index built elsewhere in the project.
with open('../inverted_index/inverted_index.json') as f:
    inverted_index = json.load(f)
stopwords_en = set(stopwords.words('english'))
stopwords_pt = nltk.corpus.stopwords.words('portuguese')
# Punctuation plus very frequent domain words (countries, "filme", ...)
# to be excluded from the term dictionaries below.
especial_c = ["-",",",";","'",")","(",":", ".","’","–","","...",
              "eua","estados","unidos","filme","reino","brasil","franca",
              "espanha","unido","japao","alemanha","italia","suecia","canada"]
# -
# ### Dicionario de titulo, autor e elenco
# +
# Term -> postings dictionaries for each index field
# (titulo = title, direcao = director, elenco = cast).
titulo = {}
direcao = {}
elenco = {}
def converter_string(t):
    """Normalize an index token: lowercase it, repair common UTF-8-read-as-
    Latin-1 mojibake sequences, then strip non-word characters and digits."""
    # (bad sequence, intended accented character) — applied in this order.
    mojibake_fixes = (
        ("ã©", "é"),
        ("ã§", "ç"),
        ("ãº", "ú"),
        ("ã¡", "á"),
        ("ã£", "ã"),
        ("ãª", "ê"),
        ("ã³", "ó"),
        ("ãµ", "õ"),
        ("ã´", "ô"),
        ("ã\xad", "í"),
    )
    normalized = t.lower()
    for bad, good in mojibake_fixes:
        normalized = normalized.replace(bad, good)
    # Drop anything that is not a word character, then drop digits.
    normalized = re.sub('\W+', '', normalized)
    return ''.join(ch for ch in normalized if not ch.isdigit())
nome = ""  # NOTE(review): assigned but never used in this section
# Spot-check that the index contains the expected "field.term" key format.
inverted_index['titulo.pele']
# Split index entries into per-field dictionaries, dropping English and
# Portuguese stopwords and the special/common words listed above.
# NOTE(review): element [1] of each index entry is taken as the postings —
# confirm the index schema.
for obj in inverted_index:
    if("titulo." in obj):
        t = obj
        t = t.replace("titulo.","")
        t = converter_string(t)
        if (t not in stopwords_en) and (t not in stopwords_pt) and (t not in especial_c):
            titulo[t] = inverted_index[obj][1]
    if("direcao." in obj):
        t = obj
        t = t.replace("direcao.","")
        t = converter_string(t)
        if (t not in stopwords_en) and (t not in stopwords_pt) and (t not in especial_c):
            direcao[t] = inverted_index[obj][1]
    if("elenco." in obj):
        t = obj
        t = t.replace("elenco.","")
        t = converter_string(t)
        if (t not in stopwords_en) and (t not in stopwords_pt) and (t not in especial_c):
            elenco[t] = inverted_index[obj][1]
print(len(elenco))
print(len(titulo))
print(len(direcao))
# -
# ### Dicionario com as probabilidades de cada palavra
# +
# P(term): fraction of the corpus whose documents contain the term.
qtd_documents = 3892  # total number of indexed documents (corpus size)
prob_titulo = {key: len(valor) / qtd_documents for key, valor in titulo.items()}
prob_direcao = {key: len(valor) / qtd_documents for key, valor in direcao.items()}
prob_elenco = {key: len(valor) / qtd_documents for key, valor in elenco.items()}
# NOTE(review): the original also computed qtd_direcao = len(direcao) and
# qtd_elenco = len(elenco) but never used them — every probability is
# normalized by qtd_documents. Confirm that was the intent; the unused
# locals have been removed.
#prob_direcao
# -
# #### Carrega o Cython
# %load_ext Cython
# + language="cython"
# from __main__ import *
#
#
# def mutual_info(dicionario, prob_list):
# retorno = {}
# independente = 1
# cont = 0
# keys = list(dicionario.keys())
# tamanho = len(keys)
# for i in range(tamanho):
# cont = cont+1
# clear_output(wait=True)
# print("porcentagem: " + str(((cont/tamanho)*100)))
# key_a = keys[i]
# val_a = dicionario[key_a]
# for j in range(i+1,tamanho):
# key_b = keys[j]
# val_b = dicionario[key_b]
# for k in range(j+1,tamanho):
# key_c = keys[k]
# val_c = dicionario[key_c]
# for a in val_a:
# for b in val_b:
# if b == a:
# for c in val_c:
# if c == a:
# t = (key_a, key_b, key_c)
# #print(t)
# if t in retorno:
# retorno[t] = retorno[t] +1
# else:
# retorno[t] = 1
# if c > a:
# break
# if b > a:
# break
# if k>1000:
# break
# for n in retorno:
# pn = retorno[n]/tamanho
# retorno[n] = math.log2(pn/(prob_list[n[0]]*prob_list[n[1]]*prob_list[n[2]]))
#
# return retorno
#
#
#
# -
# ### Mutual information direção
# +
#direcao_mi = mutual_info(direcao, prob_direcao)
# sorted_direcao = sorted(direcao_mi.items(), key=operator.itemgetter(1), reverse=True)
# sorted_direcao
# -
# ### Mutual information titulo
#
# +
#titulo_mi = mutual_info(titulo, prob_titulo)
# sorted_titulo= sorted(titulo_mi.items(), key=operator.itemgetter(1), reverse=True)
# sorted_titulo
# sorted_titulo
# -
# ### Mutual information elenco
#
# NOTE(review): `mutual_info` is defined only inside the commented-out
# Cython cell above — running this as plain Python raises NameError unless
# that cell has been executed first.
elenco_mi = mutual_info(elenco, prob_elenco)
# Rank term triples by their mutual-information score, highest first.
sorted_elenco= sorted(elenco_mi.items(), key=operator.itemgetter(1), reverse=True)
sorted_elenco
|
interface/.ipynb_checkpoints/mutual_information-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''ml'': conda)'
# language: python
# name: python37664bitmlconda6d43f5d01a134e84acc727f43d072b5b
# ---
# +
from tqdm.notebook import tqdm
import math
import gym
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from collections import deque
from active_rl.networks.dqn_atari import ENS_DQN_Large
from active_rl.utils.memory import ReplayMemory
from active_rl.utils.optimization import standard_optimization_ensemble
from active_rl.environments.atari_wrappers import make_atari, wrap_deepmind
from active_rl.utils.atari_utils import fp, evaluate
from active_rl.utils.action_selection import ActiveActionSelector
from active_rl.utils.acquisition_functions import ens_BALD
# -
env_name = 'Breakout'
env_raw = make_atari('{}NoFrameskip-v4'.format(env_name))
# Standard DeepMind Atari preprocessing; frame stacking is done manually below
# with a deque, so it is disabled here.
env = wrap_deepmind(env_raw, frame_stack=False, episode_life=True, clip_rewards=True)
# fp() turns the raw observation into a (channels, height, width) frame.
# (Fixed: original had a redundant chained assignment `c,h,w = c,h,w = ...`.)
c, h, w = fp(env.reset()).shape
n_actions = env.action_space.n
# +
# Training hyper-parameters (standard Rainbow/DQN-style settings for Atari).
BATCH_SIZE = 64
LR = 0.0000625
GAMMA = 0.99
# Epsilon-greedy schedule: linear decay from EPS_START to EPS_END over EPS_DECAY steps.
EPS_START = 1.
EPS_END = 0.05
EPS_DECAY = 1000000
NUM_STEPS = 20000000
# Optimize the policy every POLICY_UPDATE env steps; sync the target net every TARGET_UPDATE.
POLICY_UPDATE = 4
TARGET_UPDATE = 4000
EVALUATE_FREQ = 10000
MEMORY_CAPACITY = 100000
INITIAL_STEPS=1000
NAME = 'AMN_ens_ac_BALD_eps_5'
# -
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # if gpu is to be used
# Student (policy/target) ensembles plus a pre-trained expert the AMN distills from.
policy_net = ENS_DQN_Large(n_actions).to(device)
target_net = ENS_DQN_Large(n_actions).to(device)
expert_net = torch.load("models/dqn_expert_breakout_model").to(device)
policy_net.apply(policy_net.init_weights)
target_net.load_state_dict(policy_net.state_dict())
expert_net.eval()
optimizer = optim.Adam(policy_net.parameters(), lr=LR, eps=1.5e-4)
# Replay memory stores 5 stacked frames per transition (state + next frame).
memory = ReplayMemory(MEMORY_CAPACITY, [5,h,w], n_actions, device)
# Active action selection using ensemble-BALD as the acquisition function.
action_selector = ActiveActionSelector(EPS_START, EPS_END, policy_net, expert_net, EPS_DECAY, n_actions, ens_BALD, device)
steps_done = 0
writer = SummaryWriter(f'runs/{NAME}')
# Sliding window of the 5 most recent preprocessed frames.
q = deque(maxlen=5)
done=True
episode_len = 0
num_labels = 0
progressive = tqdm(range(NUM_STEPS), total=NUM_STEPS, ncols=400, leave=False, unit='b')
# Main interaction/optimization loop: act, store, and periodically optimize,
# sync the target network, and evaluate.
for step in progressive:
    if done:
        # New episode: FIRE to start the ball, then a few no-ops to fill the frame queue.
        env.reset()
        sum_reward = 0
        episode_len = 0
        img, _, _, _ = env.step(1) # BREAKOUT specific !!!
        for i in range(10): # no-op
            n_frame, _, _, _ = env.step(0)
            n_frame = fp(n_frame)
            q.append(n_frame)
    # Select and perform an action
    # State is the 4 most recent frames (last 4 of the 5-frame window).
    state = torch.cat(list(q))[1:].unsqueeze(0)
    action, eps = action_selector.select_action(state)
    n_frame, reward, done, info = env.step(action)
    n_frame = fp(n_frame)
    # 5 frame as memory
    q.append(n_frame)
    memory.push(torch.cat(list(q)).unsqueeze(0), action, reward, done) # here the n_frame means next frame from the previous time step
    episode_len += 1
    # Perform one step of the optimization (on the target network)
    if step % POLICY_UPDATE == 0 and step > INITIAL_STEPS:
        loss = standard_optimization_ensemble(policy_net, target_net, optimizer, memory, batch_size=BATCH_SIZE)
        if loss is not None:
            writer.add_scalar('Performance/loss', loss, step)
    # Update the target network, copying all weights and biases in DQN
    if step % TARGET_UPDATE == 0 and step > INITIAL_STEPS:
        target_net.load_state_dict(policy_net.state_dict())
    if step % EVALUATE_FREQ == 0 and step > INITIAL_STEPS:
        evaluated_reward = evaluate(step, policy_net, device, env_raw, n_actions, eps=0.05, num_episode=15)
        writer.add_scalar('Performance/reward', evaluated_reward, step)
|
experiments/breakout/AMN_ens_ac_BALD.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Similar to change counter notebook, but explicitly iterates, rather than using Dendropy iteration. This does not make a difference to the final outcome.
import dendropy
import pandas as pd
# Load the parity data (species -> 0/1 state) and the MLE tree (Python 2 / old
# pandas & dendropy APIs: `header=False`, `.ix`, `TaxonSet` are all legacy).
data = pd.read_csv('../Data/PyronParityData.csv', index_col=0, header=False)
taxa = dendropy.TaxonSet()
mle = dendropy.Tree.get_from_path('../2598364_0', 'newick', taxon_set=taxa, preserve_underscores=True,extract_comment_metadata=True)
# Any leaf without a label gets the observed 0/1 parity state from the CSV,
# keyed by the taxon's string form.
for idx, nd in enumerate(mle.leaf_iter()):
    if nd.label is None:
        lookup = '{}'.format(nd.taxon)
        nd.label = int(data.ix[lookup])
    else:
        pass
# +
origins = [] #changes to viviparity
reversions = [] #reversions to oviparity
total = [] #should equal 3951
childs = []
# Walk the tree post-order; node labels are probabilities of viviparity.
# A node counts as a reversion when it looks oviparous (label 0, or in (.5, 1)
# -- NOTE(review): that second bound looks odd for "oviparous"; confirm the
# intended thresholds) while its parent is strongly viviparous (< 0.05 is the
# parent's probability of oviparity? verify direction of the encoding).
for index, node in enumerate(mle.postorder_node_iter()):
    if node.parent_node is None:
        pass
    if float(node.label) == 0 or 1 > float(node.label) > .5:
        total.append(node)
        if node.parent_node is None:
            pass
        elif float(node.parent_node.label) < 0.05:
            reversions.append(node)
            foci = node.parent_node
            if foci.parent_node is None:
                print 'root'
            # Also check the grandparent through the two branches below.
            elif float(node.parent_node.label) > .5:
                new_foci = node.parent_node
                if new_foci.parent_node is None:
                    pass
                elif float(new_foci.parent_node.label) < 0.05:
                    reversions.append(new_foci)
        elif float(node.parent_node.label) > .05:
            new_foci = node.parent_node
            if new_foci.parent_node is None:
                pass
            elif float(new_foci.parent_node.label) < 0.05:
                reversions.append(new_foci)
# Deduplicate: the same ancestral node can be reached from several children.
print len(set(reversions)), 'reversions'
print set(reversions)
# +
origins = [] #changes to viviparity
for index, node in enumerate(mle.postorder_node_iter()):
if node.parent_node is None:
pass
if float(node.label) == 1 or 0 < float(node.label) < .05:
total.append(node)
if node.parent_node is None:
pass
elif float(node.parent_node.label) > 0.95:
origins.append(node)
foci = node.parent_node
if foci.parent_node is None:
print 'root'
elif float(node.parent_node.label) > .5:
new_foci = node.parent_node
if new_foci.parent_node is None:
pass
elif float(new_foci.parent_node.label) > 0.95:
origins.append(new_foci)
elif float(node.parent_node.label) > .95:
new_foci = node.parent_node
if new_foci.parent_node is None:
pass
elif float(new_foci.parent_node.label) > 0.95:
origins.append(new_foci)
print len(set(origins)), 'reversions'
print set(origins)
# -
print len(childs)
|
ExploratoryNotebooks/PostOrder.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xlnet
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import model_utils
# +
import sentencepiece as spm
from prepro_utils import preprocess_text, encode_ids
# Load the SentencePiece model used to turn text into subword ids.
sp_model = spm.SentencePieceProcessor()
sp_model.Load('sp10m.cased.v9.model')
# +
# # !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/subjectivity/subjectivity-negative-bm.txt
# # !wget https://raw.githubusercontent.com/huseinzol05/Malaya-Dataset/master/subjectivity/subjectivity-positive-bm.txt
# +
# Build the corpus: negative subjectivity sentences (label 0) followed by
# positive ones (label 1); one sentence per line.
with open('subjectivity-negative-bm.txt','r') as fopen:
    texts = fopen.read().split('\n')
    labels = [0] * len(texts)
with open('subjectivity-positive-bm.txt','r') as fopen:
    positive_texts = fopen.read().split('\n')
    labels += [1] * len(positive_texts)
texts += positive_texts
assert len(labels) == len(texts)
# +
from prepro_utils import preprocess_text, encode_ids
def tokenize_fn(text):
    "Normalize `text` (case preserved) and encode it to sentencepiece ids."
    cleaned = preprocess_text(text, lower=False)
    return encode_ids(sp_model, cleaned)
# +
# Segment ids used by XLNet inputs (A/B sentence, cls, sep, padding).
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
# Vocabulary ids reserved for the special symbols in the sentencepiece model.
special_symbols = {
    "<unk>" : 0,
    "<s>" : 1,
    "</s>" : 2,
    "<cls>" : 3,
    "<sep>" : 4,
    "<pad>" : 5,
    "<mask>" : 6,
    "<eod>" : 7,
    "<eop>" : 8,
}
VOCAB_SIZE = 32000
UNK_ID = special_symbols["<unk>"]
CLS_ID = special_symbols["<cls>"]
SEP_ID = special_symbols["<sep>"]
MASK_ID = special_symbols["<mask>"]
EOD_ID = special_symbols["<eod>"]
def XY(left_train):
    "Tokenize each text and build parallel lists of (token ids, segment ids, input masks)."
    X, segments, masks = [], [], []
    for text in tqdm(left_train):
        ids = tokenize_fn(text)
        seg = [SEG_ID_A] * len(ids)
        # XLNet convention: the sequence ends with <sep> then <cls>.
        ids += [SEP_ID, CLS_ID]
        seg += [SEG_ID_A, SEG_ID_CLS]
        # Mask value 0 marks real (non-padding) positions.
        X.append(ids)
        segments.append(seg)
        masks.append([0] * len(ids))
    return X, segments, masks
# -
X, segments, masks = XY(texts)
# +
# Model run configuration (training mode: dropout enabled).
kwargs = dict(
    is_training=True,
    use_tpu=False,
    use_bfloat16=False,
    dropout=0.1,
    dropatt=0.1,
    init='normal',
    init_range=0.1,
    init_std=0.05,
    clamp_len=-1)
xlnet_parameters = xlnet.RunConfig(**kwargs)
xlnet_config = xlnet.XLNetConfig(json_path='alxlnet-base/config.json')
# +
# Optimizer schedule: polynomial decay with warmup over the first 10% of steps.
epoch = 10
batch_size = 60
warmup_proportion = 0.1
num_train_steps = int(len(X) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
print(num_train_steps, num_warmup_steps)
learning_rate = 2e-5
training_parameters = dict(
    decay_method = 'poly',
    train_steps = num_train_steps,
    learning_rate = learning_rate,
    warmup_steps = num_warmup_steps,
    min_lr_ratio = 0.0,
    weight_decay = 0.00,
    adam_epsilon = 1e-8,
    num_core_per_host = 1,
    lr_layer_decay_rate = 1,
    use_tpu=False,
    use_bfloat16=False,
    dropout=0.0,
    dropatt=0.0,
    init='normal',
    init_range=0.1,
    init_std=0.05,
    clip = 1.0,
    clamp_len=-1,)
# +
class Parameter:
    "Attribute bag exposing the optimizer-schedule settings consumed by `model_utils.get_train_op`."

    def __init__(self, decay_method, warmup_steps, weight_decay, adam_epsilon,
                 num_core_per_host, lr_layer_decay_rate, use_tpu, learning_rate, train_steps,
                 min_lr_ratio, clip, **kwargs):
        # Extra keyword arguments (dropout, init, ...) are accepted and dropped.
        named = dict(decay_method=decay_method, warmup_steps=warmup_steps,
                     weight_decay=weight_decay, adam_epsilon=adam_epsilon,
                     num_core_per_host=num_core_per_host,
                     lr_layer_decay_rate=lr_layer_decay_rate, use_tpu=use_tpu,
                     learning_rate=learning_rate, train_steps=train_steps,
                     min_lr_ratio=min_lr_ratio, clip=clip)
        for attr, value in named.items():
            setattr(self, attr, value)
# Re-bind the settings dict as a Parameter object (the dict itself is discarded).
training_parameters = Parameter(**training_parameters)
# -
class Model:
    """Binary sentence classifier on top of an (AL)XLNet encoder (TF1 graph mode)."""
    def __init__(
        self,
        dimension_output,
        learning_rate = 2e-5,
    ):
        # Placeholders: token ids, segment ids, attention masks (batch-major), labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.segment_ids = tf.placeholder(tf.int32, [None, None])
        self.input_masks = tf.placeholder(tf.float32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        # XLNet expects time-major inputs, hence the transposes in and out.
        xlnet_model = xlnet.XLNetModel(
            xlnet_config=xlnet_config,
            run_config=xlnet_parameters,
            input_ids=tf.transpose(self.X, [1, 0]),
            seg_ids=tf.transpose(self.segment_ids, [1, 0]),
            input_mask=tf.transpose(self.input_masks, [1, 0]))
        output_layer = xlnet_model.get_sequence_output()
        output_layer = tf.transpose(output_layer, [1, 0, 2])
        self.logits_seq = tf.layers.dense(output_layer, dimension_output)
        self.logits_seq = tf.identity(self.logits_seq, name = 'logits_seq')
        # Uses position 0's logits as the sentence logits.
        # NOTE(review): `XY` appends <cls> at the END of each sequence — confirm
        # that classifying from position 0 (not the <cls> position) is intended.
        self.logits = self.logits_seq[:, 0]
        self.logits = tf.identity(self.logits, name = 'logits')
        self.cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits = self.logits, labels = self.Y
            )
        )
        # Optimizer + LR schedule come from the Parameter object defined above.
        self.optimizer, self.learning_rate, _ = model_utils.get_train_op(training_parameters, self.cost)
        correct_pred = tf.equal(
            tf.argmax(self.logits, 1, output_type = tf.int32), self.Y
        )
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# +
# Build the graph and initialize all variables in an interactive session.
dimension_output = 2
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
    dimension_output,
    learning_rate
)
sess.run(tf.global_variables_initializer())
# +
import collections
import re
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Returns `(assignment_map, initialized_variable_names)` where
    `assignment_map` maps each checkpoint variable name to the graph variable
    of the same name, and `initialized_variable_names` marks both `name` and
    `name:0` for every matched variable.
    """
    initialized_variable_names = {}

    # Map each trainable variable's bare name (":0"-style suffix stripped) to it.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match('^(.*):\\d+$', name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    # Keep only checkpoint variables that also exist in the current graph.
    # (Fixed: removed a dead `assignment_map = {}` that was immediately
    # overwritten by the OrderedDict below.)
    assignment_map = collections.OrderedDict()
    for name, _ in tf.train.list_variables(init_checkpoint):
        if name not in name_to_variable:
            continue
        assignment_map[name] = name_to_variable[name]
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ':0'] = 1
    return (assignment_map, initialized_variable_names)
# -
# Restore pretrained ALXLNet weights into the freshly built graph.
tvars = tf.trainable_variables()
checkpoint = 'alxlnet-base/model.ckpt'
assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(tvars,
                                                                                checkpoint)
saver = tf.train.Saver(var_list = assignment_map)
saver.restore(sess, checkpoint)
# +
from sklearn.model_selection import train_test_split
train_X, test_X, train_masks, test_masks, train_segments, test_segments, train_Y, test_Y = train_test_split(
    X, segments, masks, labels, test_size = 0.2
)
# +
# Sanity check: run one tiny batch through accuracy/cost before training.
# Padding values: segment id 4 = SEG_ID_PAD, mask 1 = padding position.
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
i = 0
index = 4
batch_x = train_X[i : index]
batch_y = train_Y[i : index]
batch_masks = train_masks[i : index]
batch_segments = train_segments[i : index]
batch_x = pad_sequences(batch_x, padding='post')
batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
sess.run(
    [model.accuracy, model.cost],
    feed_dict = {
        model.X: batch_x,
        model.Y: batch_y,
        model.segment_ids: batch_segments,
        model.input_masks: batch_masks,
    },
)
# +
from tqdm import tqdm
import time
# Early stopping: stop after EARLY_STOPPING consecutive epochs without a
# validation-accuracy improvement.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 1, 0, 0, 0
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n' % (EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = [], [], [], []
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    # Training pass: pad each minibatch to its own max length and optimize.
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        batch_x = train_X[i : index]
        batch_y = train_Y[i : index]
        batch_masks = train_masks[i : index]
        batch_segments = train_segments[i : index]
        batch_x = pad_sequences(batch_x, padding='post')
        batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
        batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
        acc, cost, _ = sess.run(
            [model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.X: batch_x,
                model.Y: batch_y,
                model.segment_ids: batch_segments,
                model.input_masks: batch_masks,
            },
        )
        train_loss.append(cost)
        train_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    # Validation pass: same batching, no optimizer step.
    pbar = tqdm(range(0, len(test_X), batch_size), desc = 'test minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = test_X[i : index]
        batch_y = test_Y[i : index]
        batch_masks = test_masks[i : index]
        batch_segments = test_segments[i : index]
        batch_x = pad_sequences(batch_x, padding='post')
        batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
        batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
        acc, cost = sess.run(
            [model.accuracy, model.cost],
            feed_dict = {
                model.X: batch_x,
                model.Y: batch_y,
                model.segment_ids: batch_segments,
                model.input_masks: batch_masks,
            },
        )
        test_loss.append(cost)
        test_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    train_loss = np.mean(train_loss)
    train_acc = np.mean(train_acc)
    test_loss = np.mean(test_loss)
    test_acc = np.mean(test_acc)
    # Track the best validation accuracy; reset the patience counter on improvement.
    if test_acc > CURRENT_ACC:
        print(
            'epoch: %d, pass acc: %f, current acc: %f'
            % (EPOCH, CURRENT_ACC, test_acc)
        )
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time() - lasttime)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (EPOCH, train_loss, train_acc, test_loss, test_acc)
    )
    EPOCH += 1
# -
# Save the fine-tuned weights.
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'alxlnet-base-subjectivity/model.ckpt')
# +
# Rebuild the graph in inference mode (dropout disabled) and reload the weights.
kwargs = dict(
    is_training=False,
    use_tpu=False,
    use_bfloat16=False,
    dropout=0.0,
    dropatt=0.0,
    init='normal',
    init_range=0.1,
    init_std=0.05,
    clamp_len=-1)
xlnet_parameters = xlnet.RunConfig(**kwargs)
xlnet_config = xlnet.XLNetConfig(json_path='alxlnet-base/config.json')
# +
dimension_output = 2
learning_rate = 2e-5
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
    dimension_output,
    learning_rate
)
sess.run(tf.global_variables_initializer())
# -
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(sess, 'alxlnet-base-subjectivity/model.ckpt')
# +
# Final evaluation: collect argmax predictions over the held-out set.
real_Y, predict_Y = [], []
pbar = tqdm(range(0, len(test_X), batch_size), desc = 'test minibatch loop')
for i in pbar:
    index = min(i + batch_size, len(test_X))
    batch_x = test_X[i : index]
    batch_y = test_Y[i : index]
    batch_masks = test_masks[i : index]
    batch_segments = test_segments[i : index]
    batch_x = pad_sequences(batch_x, padding='post')
    batch_segments = pad_sequences(batch_segments, padding='post', value = 4)
    batch_masks = pad_sequences(batch_masks, padding='post', value = 1)
    predict_Y += np.argmax(sess.run(model.logits,
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
                model.segment_ids: batch_segments,
                model.input_masks: batch_masks
            },
    ), 1, ).tolist()
    real_Y += batch_y
# +
from sklearn import metrics
print(
    metrics.classification_report(
        real_Y, predict_Y, target_names = ['negative', 'positive'],digits=5
    )
)
# -
# Collect the node names to keep in the frozen graph: variables, placeholders
# and the logits outputs, excluding optimizer state (Adam/beta/global_step).
strings = ','.join(
    [
        n.name
        for n in tf.get_default_graph().as_graph_def().node
        if ('Variable' in n.op
        or 'Placeholder' in n.name
        or 'logits' in n.name
        or 'alphas' in n.name
        or 'self/Softmax' in n.name)
        and 'Adam' not in n.name
        and 'beta' not in n.name
        and 'global_step' not in n.name
    ]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in `model_dir` into `frozen_model.pb`.

    Loads the checkpoint's meta-graph in a fresh session, converts all
    variables reachable from `output_node_names` (comma-separated) into
    constants, and writes the resulting GraphDef next to the checkpoint.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('alxlnet-base-subjectivity', strings)
# +
# Upload the frozen model to S3.
import boto3
bucketName = 'huseinhouse-storage'
Key = 'alxlnet-base-subjectivity/frozen_model.pb'
outPutname = "v34/subjective/alxlnet-base-subjective.pb"
s3 = boto3.client('s3')
s3.upload_file(Key,bucketName,outPutname)
# -
|
session/subjectivity/alxlnet-base.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
from local.imports import *
from local.test import *
from local.core import *
from local.data.core import *
from local.data.external import *
from local.notebook.showdoc import show_doc
# +
#default_exp text.core
#default_cls_lvl 3
# -
# # Text core
#
# > Basic function to preprocess text before assembling it in a `DataBunch`.
#export
import concurrent.futures
from concurrent.futures import as_completed
from multiprocessing import Process, Queue
import spacy,html
from spacy.symbols import ORTH
# ## Multiprocessing
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
    "Same as the stdlib executor, but `max_workers=0` runs `map` inline without spawning processes."
    def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
        self.no_workers = max_workers==0
        if self.no_workers: max_workers=1
        # Fixed: `initargs` was previously forwarded as `initargs=initializer`,
        # which broke worker startup whenever an initializer was supplied.
        super().__init__(max_workers, mp_context, initializer=initializer, initargs=initargs)
    def map(self, f, items):
        "Apply `f` to each item; returns a plain list when running inline (`max_workers=0`)."
        return [f(o) for o in items] if self.no_workers else super().map(f, items)
#export
def parallel(func, items, n_workers=defaults.cpus):
    "Applies `func` in parallel to `items`, using `n_workers`"
    with ProcessPoolExecutor(max_workers=n_workers) as ex:
        mapped = ex.map(func, items)
        # Drain through a progress bar so the caller sees completion status.
        return list(progress_bar(mapped, total=len(items), leave=False))
# +
# Sanity checks for `parallel`: `add_one` sleeps a few random ms so workers
# actually interleave; results must still come back in input order.
def add_one(x):
    time.sleep(random.random()/100)
    return x+1
test_eq(parallel(add_one, range(100)), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=1), range(1,101))
test_eq(parallel(add_one, range(100), n_workers=0), range(1,101))
# -
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, as_gen=False, **kwargs):
    "Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
    # Results come back through a single queue as (original index, value) pairs.
    queue = Queue()
    batches = np.array_split(items, n_workers)
    # Start index of each batch inside `items`.
    # NOTE(review): relies on `0 + L(...)` prepending 0 via `L.__radd__` — confirm
    # against local.core's `L` semantics.
    idx = np.cumsum(0 + L(batches).mapped(len))
    def _f(batch, start_idx):
        f = cls(**kwargs)
        for i,b in enumerate(f(batch)): queue.put((start_idx+i,b))
    processes = [Process(target=_f, args=o) for o in zip(batches,idx)]
    for p in processes: p.start()
    # Exactly one queue read per input item; progress_bar only tracks the count.
    res = (queue.get() for _ in progress_bar(items, leave=False))
    # Sorting by index restores input order when a concrete list is requested.
    try: return res if as_gen else [o[1] for o in sorted(res)]
    finally:
        for p in processes: p.join()
# `cls` is any class with `__call__`. It will be passed `args` and `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split in `n_workers` batches and one is sent to each `cls`. The function then returns a list of all the results, matching the order of `items` (if not `as_gen`) or a generator of tuples of item indices and results (if `as_gen`).
# +
# Demo/test for `parallel_gen`: a batch callable with random per-item delays;
# the results must still line up with the inputs.
class SleepyBatchFunc:
    def __init__(self): self.a=1
    def __call__(self, batch):
        for k in batch:
            time.sleep(random.random()/10)
            yield k+self.a
x = np.linspace(0,0.99,100)
res = parallel_gen(SleepyBatchFunc, x, n_workers=2)
test_eq(res, x+1)
# -
# ## Preprocessing rules
# The following are rules applied to texts before or after it's tokenized.
#export
#special tokens
# Placeholder tokens inserted by the preprocessing rules below (unknown, padding,
# begin/end of sentence, field marker, char/word repetition, all-caps, capitalized).
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
#export
_all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
# +
#export
_re_spec = re.compile(r'([/#\\])')

def spec_add_spaces(t):
    "Add spaces around / and #"
    return _re_spec.sub(lambda m: f' {m.group(1)} ', t)
# -
test_eq(spec_add_spaces('#fastai'), ' # fastai')
test_eq(spec_add_spaces('/fastai'), ' / fastai')
test_eq(spec_add_spaces('\\fastai'), ' \\ fastai')
# +
#export
_re_space = re.compile(' {2,}')

def rm_useless_spaces(t):
    "Remove multiple spaces"
    # Collapse every run of two or more spaces to a single space.
    collapsed = _re_space.sub(' ', t)
    return collapsed
# -
test_eq(rm_useless_spaces('a b c'), 'a b c')
# +
#export
_re_rep = re.compile(r'(\S)(\1{2,})')

def replace_rep(t):
    "Replace repetitions at the character level: cccc -- TK_REP 4 c"
    def _sub(match):
        char, extra = match.groups()
        count = len(extra) + 1
        return f' {TK_REP} {count} {char} '
    return _re_rep.sub(_sub, t)
# -
# It starts replacing at 3 repetitions of the same character or more.
test_eq(replace_rep('aa'), 'aa')
test_eq(replace_rep('aaaa'), f' {TK_REP} 4 a ')
#export
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
#hide
"""
Matches any word repeated at least four times with spaces between them
(?:\s|^) Non-catching group with either a whitespace character or the beginning of text
(\w+) Catching group of any alphanumeric character
\s+ One or more whitespace
((?:\1\s+)+) Catching group of a repetition of one or more times \1 followed by one or more whitespace
\1 Occurence of \1
(\s|\W|$) Catching group of last whitespace, non alphanumeric character or end of text
""";
#export
def replace_wrep(t):
    "Replace word repetitions: word word word word -- TK_WREP 4 word"
    def _sub(match):
        word, reps, trailer = match.groups()
        count = len(reps.split()) + 2
        return f' {TK_WREP} {count} {word} {trailer}'
    return _re_wrep.sub(_sub, t)
# It starts replacing at 3 repetitions of the same word or more.
test_eq(replace_wrep('ah ah'), 'ah ah')
test_eq(replace_wrep('ah ah ah'), f' {TK_WREP} 3 ah ')
test_eq(replace_wrep('ah ah ah ah'), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah '), f' {TK_WREP} 4 ah ')
test_eq(replace_wrep('ah ah ah ah.'), f' {TK_WREP} 4 ah .')
test_eq(replace_wrep('ah ah ahi'), f'ah ah ahi')
#export
def fix_html(x):
    "Various messy things we've seen in documents"
    # Apply the literal substitutions in the same order as the original chain,
    # then decode any remaining HTML entities.
    for old, new in (('#39;', "'"), ('amp;', '&'), ('#146;', "'"), ('nbsp;', ' '),
                     ('#36;', '$'), ('\\n', "\n"), ('quot;', "'"), ('<br />', "\n"),
                     ('\\"', '"'), ('<unk>', UNK), (' @.@ ', '.'), (' @-@ ', '-'),
                     ('...', ' …')):
        x = x.replace(old, new)
    return html.unescape(x)
test_eq(fix_html('#39;bli#146;'), "'bli'")
test_eq(fix_html('Sarah amp; Duck...'), 'Sarah & Duck …')
test_eq(fix_html('a nbsp; #36;'), 'a $')
test_eq(fix_html('\\" <unk>'), f'" {UNK}')
test_eq(fix_html('quot; @.@ @-@ '), "' .-")
test_eq(fix_html('<br />text\\n'), '\ntext\n')
#export
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
#hide
"""
Catches any word in all caps, even with ' or - inside
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z]+ Catching group with one capitalized letter or more...
[^a-z\s]*) ...followed by anything that's non lowercase or whitespace
(?=(\s|$)) Look ahead for a space of end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_all_caps(t):
    "Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
    def _sub(match):
        lead, word = match.groups()[0], match.groups()[1]
        # Single capital letters (e.g. "I") are lowered without a marker.
        marker = f'{TK_UP} ' if len(word) > 1 else ''
        return f"{lead}{marker}{word.lower()}"
    return _re_all_caps.sub(_sub, t)
test_eq(replace_all_caps("I'M SHOUTING"), f"{TK_UP} i'm {TK_UP} shouting")
test_eq(replace_all_caps("I'm speaking normally"), "I'm speaking normally")
test_eq(replace_all_caps("I am speaking normally"), "i am speaking normally")
#export
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
#hide
"""
Catches any capitalized word
(\s|^) Catching group with either a whitespace or the beginning of text
([A-Z] Catching group with exactly one capitalized letter...
[^A-Z\s]*) ...followed by anything that's not uppercase or whitespace
(?=(\s|$)) Look ahead for a space of end of text
The look ahead is there to not move the pointer ahead of the next space in case we have consecutive words in all caps.
""";
#export
def replace_maj(t):
    "Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
    def _sub(match):
        lead, word = match.groups()[0], match.groups()[1]
        # Single capital letters (e.g. "I") are lowered without a marker.
        marker = f'{TK_MAJ} ' if len(word) > 1 else ''
        return f"{lead}{marker}{word.lower()}"
    return _re_maj.sub(_sub, t)
test_eq(replace_maj("<NAME>"), f'{TK_MAJ} jeremy {TK_MAJ} howard')
test_eq(replace_maj("I don't think there is any maj here"), ("i don't think there is any maj here"),)
#export
def lowercase(t, add_bos=True, add_eos=False):
    "Converts `t` to lowercase"
    # Optionally wrap the lowered, stripped text with begin/end-of-sentence tokens.
    prefix = f'{BOS} ' if add_bos else ''
    suffix = f' {EOS}' if add_eos else ''
    return prefix + t.lower().strip() + suffix
#export
def replace_space(t):
    "Replace embedded spaces in a token with unicode line char to allow for split/join"
    translated = t.replace(' ', '▁')
    return translated
#export
# Default special tokens and the standard rule pipelines: `text_proc_rules` run
# on raw text before tokenization, `text_postproc_rules` on each token after.
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
                            replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
# ## Tokenizing
# A tokenizer is a class that must implement a `pipe` method. This `pipe` method receives a generator of texts and must return a generator with their tokenized versions. Here is the most basic example:
#export
class BaseTokenizer():
    "Basic tokenizer that just splits on spaces"
    def __init__(self, split_char=' ', **kwargs):
        self.split_char = split_char
    def pipe(self, items):
        "Lazily yield each text of `items` split on `split_char`."
        for text in items:
            yield text.split(self.split_char)
tok = BaseTokenizer()
for t in tok.pipe(["This is a text"]): test_eq(t, ["This", "is", "a", "text"])
tok = BaseTokenizer('x')
for t in tok.pipe(["This is a text"]): test_eq(t, ["This is a te", "t"])
#export
class SpacyTokenizer():
    "Spacy tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, batch_size=5000):
        special_toks = ifnone(special_toks, defaults.text_spec_tok)
        # Blank pipeline: tokenization only, no tagging/parsing/NER.
        self.nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
        # Register each special token so spacy emits it as a single token.
        for w in special_toks: self.nlp.tokenizer.add_special_case(w, [{ORTH: w}])
        self.batch_size=batch_size
    def pipe(self, items):
        # Stream texts through spacy in batches; yield lists of token strings.
        for doc in self.nlp.pipe(items, batch_size=self.batch_size):
            yield [d.text for d in doc]
tok = SpacyTokenizer()
for t in tok.pipe(["This isn't the easiest text."]):
test_eq(t, ["This", "is", "n't", "the", "easiest", "text", "."])
#export
def apply_rules(items, rules):
    "Returns a generator that apply `rules` to `items`"
    # Compose once, then lazily apply to each item.
    composed = compose(*rules)
    return (composed(item) for item in items)
for t in apply_rules(["This is a text"], [replace_maj]): test_eq(t, f"{TK_MAJ} this is a text")
#export
class TokenizeBatch:
    "A wrapper around `tok_func` to apply `rules` and tokenize in parallel"
    def __init__(self, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs ):
        self.rules = L(ifnone(rules, defaults.text_proc_rules))
        # Post-rules run on every produced token (e.g. replace_space).
        self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
        self.tok = tok_func(**tok_kwargs)
    def __call__(self, batch):
        # Pre-rules on the raw texts, tokenize, then map post-rules over the tokens.
        for o in self.tok.pipe(apply_rules(batch, self.rules)): yield L(o).mapped(self.post_f)
f = TokenizeBatch()
test_eq(f(["This isn't a problem"]), [[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem']])
f = TokenizeBatch(BaseTokenizer, rules=[], split_char="'")
test_eq(f(["This isn't a problem"]), [['This▁isn', 't▁a▁problem']])
# The main function that will be called during one of the processes handling tokenization. It will create an instance of a tokenizer with `tok_func` and `tok_kwargs` at init, then iterate through the `batch` of texts, apply them `rules` and tokenize them.
texts = ["this is a text", "this is another text"]
# Here the single "rule" is `texts.__getitem__`, so the batch of indices [0, 1]
# is mapped to the texts themselves before tokenization.
tok = TokenizeBatch(BaseTokenizer, texts.__getitem__)
test_eq([t for t in tok([0,1])],[['this', 'is', 'a', 'text'], ['this', 'is', 'another', 'text']])
#export
def tokenize1(text, tok_func=SpacyTokenizer, rules=None, post_rules=None, **tok_kwargs):
    "Tokenize one `text` with an instance of `tok_func` and some `rules`"
    tokenizer = TokenizeBatch(tok_func, rules, post_rules, **tok_kwargs)
    return next(iter(tokenizer([text])))
test_eq(tokenize1("This isn't a problem"),
[BOS, TK_MAJ, 'this', 'is', "n't", 'a', 'problem'])
test_eq(tokenize1("This isn't a problem", BaseTokenizer, rules=[], split_char="'"),
['This▁isn', 't▁a▁problem'])
#export
def parallel_tokenize(items, tok_func, rules, as_gen=False, n_workers=defaults.cpus, **tok_kwargs):
    "Calls a potential setup on `tok_func` before launching `TokenizeBatch` in parallel"
    # A tokenizer's optional `setup` may rewrite the kwargs (e.g. after training a vocab).
    if hasattr(tok_func, 'setup'): tok_kwargs = tok_func(**tok_kwargs).setup(items, rules)
    return parallel_gen(TokenizeBatch, items, as_gen=as_gen, tok_func=tok_func,
                        rules=rules, n_workers=n_workers, **tok_kwargs)
# ### Tokenize texts in files
# Preprocessing function for texts in filenames. Tokenized texts will be saved in a similar fashion in a directory suffixed with `_tok` in the parent folder of `path` (override with `output_dir`).
#export
@patch
def read(self:Path):
    "Read the content of `fname`"
    # Text-mode read of the whole file; `@patch` attaches this as `Path.read`.
    with self.open() as f: return f.read()
#export
@patch
def write(self:Path, txt):
    "Write `txt` to `self`, creating directories as needed"
    # Parent directories are created first so writes into fresh trees succeed.
    self.parent.mkdir(parents=True,exist_ok=True)
    with self.open('w') as f: f.write(txt)
#export
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, n_workers=defaults.cpus,
                    rules=None, tok_func=SpacyTokenizer, **tok_kwargs):
    "Tokenize text files in `path` in parallel using `n_workers`"
    path,extensions = Path(path),ifnone(extensions, ['.txt'])
    fnames = get_files(path, extensions=extensions, recurse=True, folders=folders)
    output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
    # Prepend `Path.read` so each filename is loaded before the text rules run.
    # NOTE(review): relies on `L.__radd__` to prepend — confirm against local.core.
    rules = Path.read + L(ifnone(rules, defaults.text_proc_rules.copy()))
    counter = Counter()
    for i,tok in parallel_tokenize(fnames, tok_func, rules, as_gen=True, n_workers=n_workers, **tok_kwargs):
        out = output_dir/fnames[i].relative_to(path)
        # Save the joined tokens, a `.len` sidecar with the token count, and
        # accumulate the corpus-wide word counts.
        out.write(' '.join(tok))
        out.with_suffix('.len').write(str(len(tok)))
        counter.update(tok)
    pickle.dump(counter, open(output_dir/'counter.pkl','wb'))
# The result will be in `output_dir` (defaults to a folder in the same parent directory as `path`, with `_tok` added to `path.name`) with the same structure as in `path`. Tokenized texts for a given file will be in the file having the same name in `output_dir`. Additionally, a file with a .len suffix contains the number of tokens and the count of all words is stored in `output_dir/counter.pkl`.
#
# `extensions` will default to `['.txt']` and all text files in `path` are treated unless you specify a list of folders in `include`. `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer.
# +
# TODO: test include option
# End-to-end check for `tokenize_folder`: build a small tmp tree, tokenize it,
# and verify the mirrored `_tok` tree (tokens, `.len` sidecars), then clean up.
path = Path('tmp')
os.makedirs(path, exist_ok=True)
for d in ['a', 'b', 'c']:
    os.makedirs(path/d, exist_ok=True)
    for i in range(5):
        with open(path/d/f'text{i}.txt', 'w') as f: f.write(f"This is an example of text {d} {i}")
tokenize_folder(path)
outp = Path('tmp_tok')
assert outp.is_dir()
for d in ['a', 'b', 'c']:
    p = outp/d
    assert p.is_dir()
    for i in range(5):
        assert (p/f'text{i}.txt').is_file()
        assert (p/f'text{i}.len').is_file()
        test_eq((p/f'text{i}.txt').read(), ' '.join([
            BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', d, str(i) ]))
        test_eq((p/f'text{i}.len').read(), '10')
shutil.rmtree(path)
shutil.rmtree(outp)
# -
# ### Tokenize texts in a dataframe
#export
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
#export
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
                tok_func=SpacyTokenizer, **tok_kwargs):
    "Tokenize texts in `df[text_cols]` in parallel using `n_workers`"
    # Normalize `text_cols` so a single column name works like a list of names.
    text_cols = L(text_cols)
    # Field markers default to on only when several text columns are joined.
    mark_fields = ifnone(mark_fields, len(text_cols) > 1)
    rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
    texts = _join_texts(df[text_cols], mark_fields=mark_fields)
    outputs = L(parallel_tokenize(texts, tok_func, rules, n_workers=n_workers, **tok_kwargs))
    lengths = outputs.mapped(len)
    # Corpus-wide word counts, returned so a vocabulary can be built cheaply later.
    counter = Counter()
    for o in outputs: counter.update(o)
    # Keep the non-text columns untouched; add the token lists and their lengths.
    other_cols = [c for c in df.columns if c not in text_cols]
    res = df[other_cols].copy()
    res['text'],res['text_lengths'] = outputs,lengths
    return res,counter
# This function returns a new dataframe with the same non-text columns, a column named text that contains the tokenized texts and a column named text_lengths that contains their respective length. It also returns a counter of all words seen, to quickly build a vocabulary afterward.
#
# `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text')
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i) ])
test_eq(out['text_lengths'][i], 9)
# hide
#With two columns of text, mark_fields defaults to True
df['text1'] = df['text'].values
out,cnt = tokenize_df(df, text_cols=['text', 'text1'])
test_eq(list(out.columns), ['label', 'text', 'text_lengths'])
test_eq(out['label'].values, df['label'].values)
for i in range(len(df)):
test_eq(out['text'][i], [
BOS, FLD, '1', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i),
FLD, '2', TK_MAJ, 'this', 'is', 'an', 'example', 'of', 'text', str(i)
])
test_eq(out['text_lengths'][i], 21)
#export
#TODO: test + rework
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
                 tok_func=SpacyTokenizer, header='infer', chunksize=None, **tok_kwargs):
    """Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`.

    Writes the tokenized dataframe to `outname` (defaults to `<fname>_tok.csv`
    next to the input) and pickles the word `Counter` to `counter.pkl` in the
    same directory. With `chunksize`, the csv is processed and appended chunk
    by chunk to bound memory usage.
    """
    # Accept plain strings as well as Path objects.
    fname = Path(fname)
    df = pd.read_csv(fname, header=header, chunksize=chunksize)
    outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
    # Fixed: previous version referenced undefined `pre_rules`/`post_rules`
    # (the API takes a single `rules` argument) and called undefined `tok_df`.
    kwargs = dict(n_workers=n_workers, rules=rules,
                  mark_fields=mark_fields, tok_func=tok_func, **tok_kwargs)
    if chunksize is None:
        out,cnt = tokenize_df(df, text_cols, **kwargs)
        out.to_csv(outname, header=header, index=False)
    else:
        cnt = Counter()
        for i,dfp in enumerate(df):
            out,c = tokenize_df(dfp, text_cols, **kwargs)
            # First chunk creates the file (with header); later chunks append.
            out.to_csv(outname, header=header if i==0 else None, index=False, mode='w' if i==0 else 'a')
            cnt.update(c)
    pickle.dump(cnt, open(outname.parent/'counter.pkl', 'wb'))
# The result will be written in a new csv file in `outname` (defaults to the same as `fname` with the suffix `_tok.csv`) and will have the same header as the original file, the same non-text columns, a text and a text_lengths column as described in `tokenize_df`.
#
# `tok_func` is instantiated in each process with `tok_kwargs`, and `rules` (that defaults to `defaults.text_proc_rules`) are applied to each text before going in the tokenizer. If `mark_fields` isn't specified, it defaults to `False` when there is a single text column, `True` when there are several. In that case, the texts in each of those columns are joined with `FLD` markers followed by the number of the field.
#
# The csv file is opened with `header` and optionally with blocks of `chunksize` at a time. If this argument is passed, each chunk is processed independently and saved in the output file to save memory usage.
# ## Sentencepiece
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
#export
#export
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
    "SentencePiece tokenizer for `lang`"
    def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
                 model_type='unigram', char_coverage=None, cache_dir='tmp'):
        try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
        except ImportError:
            raise Exception('sentencepiece module is missing: run `pip install sentencepiece`')
        self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
        self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
        # European languages use small alphabets, so near-total character
        # coverage is safe; other scripts get a lower default.
        self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
        self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
        # With a pretrained model, load it now; otherwise `setup` trains one lazily.
        if sp_model is None: self.tok = None
        else:
            self.tok = SentencePieceProcessor()
            self.tok.Load(str(sp_model))
        os.makedirs(self.cache_dir, exist_ok=True)
    def _get_vocab_sz(self, raw_text_path):
        # Heuristic vocab size: a quarter of the distinct whitespace-split
        # tokens, rounded up to a multiple of 8 and capped at `max_vocab_sz`.
        cnt = Counter()
        with open(raw_text_path, 'r') as f:
            for line in f.readlines():
                cnt.update(line.split())
        if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
        res = len(cnt)//4
        while res%8 != 0: res+=1
        return res
    def train(self, raw_text_path):
        "Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
        from sentencepiece import SentencePieceTrainer
        vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
        # SentencePiece prefixes word-initial pieces with '\u2581'; mark the
        # special tokens the same way so they survive round-trips.
        spec_tokens = ['\u2581'+s for s in self.special_toks]
        SentencePieceTrainer.Train(" ".join([
            f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
            f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
            f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1",
            f"--user_defined_symbols={','.join(spec_tokens)}"]))
        # The raw text dump is only needed during training; clean it up.
        raw_text_path.unlink()
        return self.cache_dir/'spm.model'
    def setup(self, items, rules):
        # Train (or reuse) the sp model on `items` after applying `rules`;
        # returns the kwargs needed to recreate this tokenizer in workers.
        if self.tok is not None: return {'sp_model': self.sp_model}
        raw_text_path = self.cache_dir/'texts.out'
        with open(raw_text_path, 'w') as f:
            for t in progress_bar(apply_rules(items, rules), total=len(items), leave=False):
                f.write(f'{t}\n')
        return {'sp_model': self.train(raw_text_path)}
    def pipe(self, items):
        # Yield one list of sentencepiece pieces per input text.
        for t in items: yield self.tok.EncodeAsPieces(t)
texts = [f"This is an example of text {i}" for i in range(10)]
df = pd.DataFrame({'text': texts, 'label': list(range(10))}, columns=['text', 'label'])
out,cnt = tokenize_df(df, text_cols='text', tok_func=SentencePieceTokenizer, vocab_sz=34)
out
# ## Export -
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
|
dev/30_text_core.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# +
#Note, Cartopy is a python package which will install through Conda:
#using Conda; Conda.add("Cartopy")
using PyPlot, PyCall, CSV, DataFrames, Dates
#See example of Cartopy pie charts in Python here:
#https://stackoverflow.com/questions/45266955/adding-pie-chart-at-given-coordinates-to-cartopy-projection
#The following adapts these for use in Julia:
# Draw `data` as a small pie chart inset on the map axes `ax`, centred at the
# data coordinates (ilon, ilat); `width` sets both dimensions of the inset.
function plot_pie_inset(data,ilon,ilat,ax,width)
    # loc=10 centres the inset within its bounding box, which is anchored at
    # the (ilon, ilat) point in data coordinates.
    ax_sub = inset_axes(ax, width=width, height=width, loc=10,
                        bbox_to_anchor=(ilon, ilat),
                        bbox_transform=ax.transData,
                        borderpad=0)
    # `color_list` is a notebook-level global shared by every pie.
    wedges,texts= ax_sub.pie(data, colors = color_list, normalize=true)
    ax_sub.set_aspect("equal")
end
# Add a pie chart of `freqs` at `loc = (lat, lon)` on the global axes `ax`.
function add_pie(freqs,loc; siz = 0.3)
    lat,lon = loc
    # NOTE(review): source and target CRS are both PlateCarree, so this
    # transform is an identity — presumably kept so the projection can be
    # swapped later; confirm before changing.
    lonr,latr = ccrs.PlateCarree().transform_point(lon,lat, ccrs.PlateCarree())
    plot_pie_inset(freqs,lonr,latr,ax,siz)
end
inset_locator = pyimport("mpl_toolkits.axes_grid1.inset_locator")
inset_axes = inset_locator.inset_axes
sum2one(v) = v/sum(v);
# +
df = CSV.read("lat_and_lon.csv",DataFrame);
name_loc_dict = Dict(zip(df.Country, collect(zip(df.Latitude,df.Longitude))));
estimate_df = CSV.read("CountryEstimates.csv",DataFrame);
#Manually adjusting which get plotted, for aesthetic/overlap reasons, or unreliable estimates/data.
blacklist = ["Ghana", "Turkey","Nigeria", "Rwanda"]
whitelist = ["Uganda"]
bl = union(estimate_df.Country[(estimate_df.NeffBefore .< 10.0) .| (estimate_df.NeffAfter .< 5.0)])
blacklist = union(vcat(blacklist,bl));
blacklist = [b for b in blacklist if !(b in whitelist)]
clean_df = estimate_df[[!(c in blacklist) for c in estimate_df.Country],:];
#For easily figuring out which colors you want to plot each lineage as:
variant_labels = names(estimate_df)[5:end];
color_list = ["#f032e6", "pink", "#911eb4", "#46f0f0", "#ffe119", "black","grey","#f58231","#e6194b","#3cb44b","blue"];
figure(figsize = (5,1))
leg_ordering = [11,10,9,5,8,7,6,4,3,2,1]
scatter(1:length(color_list),ones(length(color_list)),c = color_list[leg_ordering])
xticks(1:length(color_list),variant_labels[leg_ordering], rotation = 90);
# +
#Note: this makes strong assumptions about the structure of the exported estimates.
#Will probably only work on data exported by the other notebook in this repo.
ccrs = pyimport("cartopy.crs")
cfeat = pyimport("cartopy.feature")
#The loop below isn't happy unless this is global.
ax = subplot(projection=ccrs.PlateCarree());
for shf in 1:3 #Loop through each of the dates at which frequencies have been estimated.
date = clean_df.Day[shf]
freq_vecs = Array{Float64}[]
locs = Tuple{Float64, Float64}[]
for i in shf:3:size(clean_df)[1]
push!(freq_vecs,Array{Float64}(clean_df[i,5:end]))
push!(locs,name_loc_dict[clean_df.Country[i]])
end
#Plot the world
fig = figure(figsize = (15,8))
ax = subplot(projection=ccrs.PlateCarree())
ax.add_feature(cfeat.OCEAN, alpha = 0.2)
ax.add_feature(cfeat.BORDERS, linestyle="-", alpha=0.1)
ax.set_extent([-60, 180, -40, 90])
#Europe box
lonl, lonh = -13, 40
latl,lath = 32, 68
for i in 1:length(locs)
si = 0.35
#Smaller sizes inside the Europe box
if locs[i][1] > latl && locs[i][1] < lath && locs[i][2] > lonl && locs[i][2] < lonh
si = 0.15
end
add_pie(sum2one(freq_vecs[i]),locs[i], siz = si)
end
for i in 1:length(color_list)
ax.scatter([],[],s = 100.0, label = variant_labels[leg_ordering][i], color = color_list[leg_ordering][i])
end
ax.annotate(string(date),(0.865,0.84),xycoords = "figure fraction", size = "x-large")
ax.legend(loc = "lower left")
tight_layout()
savefig("Maps/world$(date).svg")
savefig("Maps/world$(date).pdf")
#Now plot Europe
fig = figure(figsize = (5,5))
ax = subplot(projection=ccrs.PlateCarree())
ax.add_feature(cfeat.OCEAN, alpha = 0.2)
ax.add_feature(cfeat.BORDERS, linestyle="-", alpha=0.1)
lonl, lonh = -11, 28
latl,lath = 34, 65
ax.set_extent([lonl, lonh, latl,lath])
for i in 1:length(locs)
if locs[i][1] > latl && locs[i][1] < lath && locs[i][2] > lonl && locs[i][2] < lonh
add_pie(sum2one(freq_vecs[i]),locs[i],siz = 0.3)
end
end
savefig("Maps/europe$(date).svg")
savefig("Maps/europe$(date).pdf")
end
|
VariantFrequenciesToMaps.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy
import toyplot
numpy.random.seed(1234)
# -
# Generate 10 sets of samples, each with different counts and distributions
data = []
for i in numpy.arange(10):
    # Random mean/scale in [0, 1) and a random sample count for each series.
    mean = numpy.random.uniform()
    scale = numpy.random.uniform()
    size = numpy.random.randint(10, 20)
    data.append(numpy.random.normal(mean, scale, size=size))
canvas = toyplot.Canvas()
axes = canvas.cartesian()
for index, series in enumerate(data):
    # Horizontal-dash markers stacked at x=index give a strip-plot style
    # alternative to a violin plot for each series.
    axes.scatterplot(numpy.repeat(index, len(series)), series, marker="-", size=15, mstyle={"stroke-width":2})
|
sandbox/violin-plot-alternative.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: myenv
# language: python
# name: myenv
# ---
# # NLP Transformers GUI
# This notebook uses the pipelines introduced in the *NLP_Transformers_Pipelines* notebook and encapsulates them into [Gradio](https://www.gradio.app/) Interfaces that provides Graphic User Interfaces (GUI) to easily explore its capabilities.
import gradio as gr
from transformers import pipeline
# ## Load of Pipelines
#
# Please, take in mind that the pipelines above contain large pretrained language models to download (+250 MB). Feel free to use whatever of them you might find useful.
sentiment_analysis_pipeline = pipeline('sentiment-analysis')
question_answering_pipeline = pipeline('question-answering')
summarization_pipeline = pipeline('summarization')
# ## Sentiment Analysis
# +
def sentiment_analysis(text):
    "Run the sentiment pipeline on `text`; return the predicted label and its confidence score."
    result = sentiment_analysis_pipeline(text)[0]
    return result['label'], result['score']
gr.Interface(fn=sentiment_analysis,
inputs=["textbox"],
outputs=[gr.outputs.Textbox(label='sentiment'),
gr.outputs.Textbox(label='score')]).launch()
# -
# ## Question Answering
# +
def question_answering(context, question):
    "Answer `question` from `context`; return the answer text, its character span, and the model score."
    result = question_answering_pipeline(question=question, context=context)
    span = f"From {result['start']} to {result['end']}"
    return result['answer'], span, result['score']
gr.Interface(fn=question_answering,
inputs=["textbox", "text"],
outputs=[gr.outputs.Textbox(label='answer'),
gr.outputs.Textbox(label='text location'),
gr.outputs.Textbox(label='score')]).launch()
# -
# ## Text Summarization
# +
def text_summarization(text, max_length=130, min_length=30):
    "Summarize `text`, constraining the summary length to [min_length, max_length] tokens."
    # Pass the length bounds as keyword arguments: the pipeline call takes
    # only the input text(s) positionally, so the previous positional call
    # did not forward them as generation parameters.
    response = summarization_pipeline(text, max_length=max_length, min_length=min_length)
    summary = response[0]['summary_text']
    return summary
gr.Interface(fn=text_summarization,
inputs=gr.inputs.Textbox(lines=20, label='text'),
outputs=gr.outputs.Textbox(label='summary')).launch()
# -
|
NLP_App.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Update sklearn to prevent version mismatches
# !pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# !pip install joblib
import pandas as pd
# # Read the CSV and Perform Basic Data Cleaning
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# # Select your features (columns)
# Set features. This will also be used as your x values.
X = df[["koi_impact","koi_time0bk","koi_period","koi_slogg","koi_depth","koi_prad","koi_insol","koi_model_snr","koi_tce_plnt_num","koi_steff"]]
y = df["koi_disposition"]
# # Create a Train Test Split
#
# Use `koi_disposition` for the y values
# +
# split data into training and testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# -
X_train.head()
# +
# prepare second model
X2 = df.drop(columns=["koi_disposition"])
# -
X2_train, X2_test, y_train, y_test = train_test_split(X2, y, random_state=23)
X2_train.head()
# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection
# +
# Scale your data
from sklearn.preprocessing import StandardScaler
X_scaler = StandardScaler().fit(X_train)
# -
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
X.shape
y.shape
X2_scaler = StandardScaler().fit(X2_train)
X2_train_scaled = X2_scaler.transform(X2_train)
X2_test_scaled = X2_scaler.transform(X2_test)
X2.shape
# # Train the Model
#
#
# Create the Model
from sklearn.svm import SVC
model = SVC(kernel='rbf')
model.fit(X_train_scaled,y_train)
# model.fit(X,y)
print(f"Training Data Score: {model.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {model.score(X_test_scaled, y_test)}")
# create the second SVC model, try different kernels
model2 = SVC(kernel='linear')
# model2 = SVC(kernel='rbf')
model2.fit(X2_train_scaled,y_train)
print(f"Training Data Score: {model2.score(X2_train_scaled, y_train)}")
print(f"Testing Data Score: {model2.score(X2_test_scaled, y_test)}")
# +
model3 = SVC(kernel='rbf')
model3.fit(X2_train_scaled,y_train)
# -
print(f"Training Data Score: {model3.score(X2_train_scaled, y_train)}")
print(f"Testing Data Score: {model3.score(X2_test_scaled, y_test)}")
model4 = SVC(kernel='poly')
model4.fit(X2_train_scaled,y_train)
print(f"Training Data Score: {model4.score(X2_train_scaled, y_train)}")
print(f"Testing Data Score: {model4.score(X2_test_scaled, y_test)}")
# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [1, 5, 10, 50],
'gamma': [0.0001, 0.0005, 0.001, 0.005]}
grid = GridSearchCV(model, param_grid, verbose=3)
grid.fit(X_train_scaled,y_train)
print(grid.best_params_)
print(grid.best_score_)
# train X2 with gridsearch
grid.fit(X2_train_scaled,y_train)
print(grid.best_params_)
print(grid.best_score_)
# model2
param_grid = {'C': [1, 5, 10, 50],
'gamma': [0.0001, 0.0005, 0.001, 0.005]}
grid = GridSearchCV(model2, param_grid, verbose=3)
grid.fit(X2_train_scaled,y_train)
print(grid.best_params_)
print(grid.best_score_)
# further refine model2
param_grid = {'C': [3, 5, 7],
'gamma': [0.0001, 0.0002, 0.00009]}
grid = GridSearchCV(model2, param_grid, verbose=3)
grid.fit(X2_train_scaled,y_train)
print(grid.best_params_)
print(grid.best_score_)
# further refine model2
param_grid = {'C': [0.1, 1, 2],
'gamma': [0.0001, 0.0002, 0.00009]}
grid = GridSearchCV(model2, param_grid, verbose=3)
grid.fit(X2_train_scaled,y_train)
print(grid.best_params_)
print(grid.best_score_)
# Make predictions with the hypertuned model
predictions = grid.predict(X2_test_scaled)
# Calculate classification report
from sklearn.metrics import classification_report
# `target_names` must follow the classifier's class order, which sklearn
# sorts alphabetically: CANDIDATE, CONFIRMED, FALSE POSITIVE. The previous
# ordering swapped the labels of the last two report rows.
print(classification_report(y_test, predictions,
                            target_names=["CANDIDATE", "CONFIRMED", "FALSE POSITIVE"]))
# next try KNN classifier
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
# +
train_scores = []
test_scores = []
for k in range(1, 30, 2):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X2_train_scaled, y_train)
train_score = knn.score(X2_train_scaled, y_train)
test_score = knn.score(X2_test_scaled, y_test)
train_scores.append(train_score)
test_scores.append(test_score)
print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}")
plt.plot(range(1, 30, 2), train_scores, marker='o')
plt.plot(range(1, 30, 2), test_scores, marker="x")
plt.xlabel("k neighbors")
plt.ylabel("Testing accuracy Score")
plt.show()
# -
# Evaluate the chosen k values on the *scaled* features: the tuning loop above
# trained on X2_train_scaled, so fitting/scoring on the raw X2_train/X2_test
# here (as before) would understate the selected models.
knn = KNeighborsClassifier(n_neighbors=17)
knn.fit(X2_train_scaled, y_train)
print('k=17 Test Acc: %.3f' % knn.score(X2_test_scaled, y_test))
knn = KNeighborsClassifier(n_neighbors=23)
knn.fit(X2_train_scaled, y_train)
print('k=23 Test Acc: %.3f' % knn.score(X2_test_scaled, y_test))
# # Save the Model
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'olayemi.sav'
# Persist the tuned estimator — the previous code dumped the feature matrix
# X2 by mistake. `grid` holds the best hyper-parameter model found above.
joblib.dump(grid, filename)
|
exo_exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Algo - Calculs de surface et autres calculs
#
# C'est l'histoire d'une boucle, puis d'une autre, puis enfin d'un couple de boucles, voire d'un triplé.
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# ## Enoncé
# ### Exercice 1 : calcul de la surface d'un cercle
#
# On cherche à écrire une fonction qui calcule la surface d'un cercle de rayon *r*.
def surface_cerle(r):
# ...
return 0.
# #### 1.1 En utilisant la constante pi
# #### 1.2 Sans utiliser pi ni aucune autre fonction
#
# Donc juste des additions, des multiplications, des divisions. On a le droit aux boucles aussi.
# ### Exercice 2 : tri aléatoire
#
# On implémente le tri suivant (est-ce vraiment un tri d'ailleurs ?) :
#
# * Dans un tableau *T*, on tire deux élements aléatoires *i < j*, si *T[i] > T[j]*, on les permute.
# * On s'arrête après *n* tirages sans permutations.
# ### Exercice 3 : petits calculs parfaits pour une machine
#
# On suppose que le tableau précédent est de taille *n=10*, l'algorithme précédent s'arrête après *n* tirages sans permutations. Comment choisir *n* de telle sorte que le tableau finisse trié dans 90% des cas...
# ## Réponses
# ### 1.1. calcul de la surface d'un cercle avec pi
# +
from math import pi
def surface_cercle(r):
    """Aire d'un disque de rayon *r*, soit r**2 * pi."""
    aire = r ** 2 * pi
    return aire
surface_cercle(5)
# -
# ### 1.2. calcul de la surface d'un cercle sans pi ou autre fonction
#
# Une approche possible est probabiliste : on construit un estimateur de $\pi$ en tirant aléatoirement des points dans un carré de côté 1. Si le point $P_i$ tombe dans le quart de cercle inscrit dans le carré, on compte 1, sinon on compte 0. Donc:
#
# $$\frac{1}{n} \sum_{i=1}^n \mathbb{1}_{\Vert P_i \Vert^2 \leqslant 1} \rightarrow \frac{\pi}{4}$$
#
# Ce ratio converge vers la probabilité pour le point $P_i$ de tomber dans le quart de cercle, qui est égale au ratio des deux aires : $\frac{\pi r^2}{r^2}$ avec $ r=1$.
# +
import numpy
def estimation_pi(n=10000):
    """Estime pi par Monte-Carlo avec `n` points uniformes dans [0,1]^2.

    La fraction des points tombant dans le quart de disque unité converge
    vers pi/4 ; on renvoie donc quatre fois cette fraction.
    """
    # Bug corrigé : l'ancienne version tirait toujours 1000 points et
    # ignorait le paramètre `n`.
    rnd = numpy.random.rand(n, 2)
    norme = rnd[:, 0] ** 2 + rnd[:, 1] ** 2
    dedans = norme <= 1
    dedans_entier = dedans.astype(numpy.int64)
    return dedans_entier.sum() / dedans.shape[0] * 4
pi = estimation_pi()
pi
# +
def surface_cercle_pi(r, pi):
    """Aire d'un disque de rayon *r*, la valeur de `pi` étant passée en argument."""
    surface = r ** 2 * pi
    return surface
surface_cercle_pi(5, pi)
# -
# ### 2. tri aléatoire
#
#
# +
def tri_alea(T, n=1000):
    """Tri aléatoire : tire `n` fois deux indices et permute T[i], T[j]
    si i < j et T[i] > T[j]. Renvoie une copie, sans modifier `T`."""
    T = T.copy()
    for i in range(0, n):
        # NB : la variable de boucle `i` est volontairement écrasée ici ;
        # si le tirage donne i >= j, rien ne se passe pour cette itération.
        i, j = numpy.random.randint(0, len(T), 2)
        if i < j and T[i] > T[j]:
            T[i], T[j] = T[j], T[i]
    return T
tableau = [1, 3, 4, 5, 3, 2, 7, 11, 10, 9, 8, 0]
tri_alea(tableau)
# -
# Et si *i > j*, on ne fait rien et c'est bien dommage.
# +
def tri_alea2(T, n=1000):
    """Variante : tire directement i dans [0, len-2] puis j dans [i+1, len-1],
    de sorte que i < j à chaque tirage. Renvoie une copie de `T`."""
    T = T.copy()
    for i in range(0, n):
        # La variable de boucle `i` est écrasée ; seul le nombre
        # d'itérations compte.
        i = numpy.random.randint(0, len(T) - 1)
        j = numpy.random.randint(i + 1, len(T))
        if T[i] > T[j]:
            T[i], T[j] = T[j], T[i]
    return T
tableau = [1, 3, 4, 5, 3, 2, 7, 11, 10, 9, 8, 0]
tri_alea2(tableau)
# -
# Le résultat n'est pas forcément meilleur mais il est plus rapide à obtenir puisqu'on fait un test en moins.
# Et si on s'arrête quand cinq permutations aléatoires de suite ne mènent à aucune permutation dans le tableau.
# +
def tri_alea3(T, c=100):
    """Comme `tri_alea2`, mais s'arrête après `c` tirages consécutifs
    n'ayant produit aucune permutation. Renvoie une copie de `T`."""
    T = T.copy()
    compteur = 0
    while compteur < c:
        i = numpy.random.randint(0, len(T) - 1)
        j = numpy.random.randint(i + 1, len(T))
        if T[i] > T[j]:
            T[i], T[j] = T[j], T[i]
            # Une permutation a eu lieu : le compteur d'échecs repart de zéro.
            compteur = 0
        else:
            compteur += 1
    return T
tableau = [1, 3, 4, 5, 3, 2, 7, 11, 10, 9, 8, 0]
tri_alea3(tableau)
# -
# ### 3. petits calculs parfaits pour une machine
def est_trie(T):
    """Renvoie True si le tableau `T` est trié par ordre croissant (au sens large)."""
    return all(T[k - 1] <= T[k] for k in range(1, len(T)))
# +
def eval_c(n, c, N=100):
    """Estime la probabilité que `tri_alea3` trie un tableau aléatoire de
    taille `n` avec le paramètre d'arrêt `c`, moyennée sur `N` essais."""
    succes = 0
    for essai in range(N):
        T = numpy.random.randint(0, 20, n)
        T2 = tri_alea3(T, c=c)
        if est_trie(T2):
            succes += 1
    return succes * 1. / N
eval_c(10, 100)
# +
from tqdm import tqdm # pour afficher une barre de défilement
cs = []
ecs = []
for c in tqdm(range(1, 251, 25)):
cs.append(c)
ecs.append(eval_c(10, c=c))
ecs[-5:]
# -
# %matplotlib inline
import matplotlib.pyplot as plt
plt.plot(cs, ecs)
plt.plot([0, max(cs)], [0.9, 0.9], '--');
# La réponse se situe aux alentours de 150, on ne peut pas dire précisément car tout est aléatoire, on peut seulement estimer la distribution de ce résultat qui est aussi une variable aléatoire. Cette réponse dépend de la taille du tableau à tirer.
|
_doc/notebooks/td1a_home/2020_surface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
# +
print(len(flags))
print(flags[40])
# +
import matplotlib.pyplot as plt
import numpy as np
nemo = cv2.imread("C:\\Users\\<NAME>\\Desktop\\pie.jpg")
plt.imshow(nemo)
plt.show()
# +
nemo = cv2.cvtColor(nemo, cv2.COLOR_BGR2RGB)
plt.imshow(nemo)
plt.show()
hsv_nemo = cv2.cvtColor(nemo, cv2.COLOR_RGB2HSV)
# +
light_orange = (1, 190, 200)
dark_orange = (18, 255, 255)
# +
# FINDING ORANGE COLOUR IN AN IMAGE
mask = cv2.inRange(hsv_nemo, light_orange, dark_orange)
result = cv2.bitwise_and(nemo, nemo, mask=mask)
plt.subplot(1, 2, 1)
plt.imshow(mask, cmap="gray")
plt.subplot(1, 2, 2)
plt.imshow(result)
plt.show()
# -
|
color_detect_openCV.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
print(np)
x = np.ones((6,6,3))
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn
plt.imshow(x)
plt.show()
fig = plt.figure()
# +
import matplotlib.animation as animation
from IPython.display import HTML
fig = plt.figure()
def f(x, y):
    "Scalar field sin(x) + cos(y), rendered as the animated image."
    return np.cos(y) + np.sin(x)
x = np.linspace(0, 2 * np.pi, 120)
y = np.linspace(0, 2 * np.pi, 100).reshape(-1, 1)
im = plt.imshow(f(x, y), animated=True)
def updatefig(*args):
    "FuncAnimation callback: advance the global phase arrays and redraw the image."
    global x, y
    # Different increments for x and y make the pattern drift diagonally.
    x += np.pi / 15.
    y += np.pi / 20.
    im.set_array(f(x, y))
    # Trailing comma: with blit=True, FuncAnimation expects an iterable of artists.
    return im,
anim = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
HTML(anim.to_html5_video())
# -
|
notebooks/ipynb_viz_experiments.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 2
# ## Introduction
# This lab introduces slope fields and a numerical differential equation solver. The solver is an improved version of Euler’s Method, which we will implement ourselves in future labs. Using these techniques involves a number of commands.
#
# ### Slope fields
# Plot the slope field for the differential equation
# \begin{align*}
# \frac{\mathrm{d}y}{\mathrm{d}x} = x - y
# \end{align*}
# for $-1<x<5$ and $-2<y<4$.
#
# This week, in addition to Seaborn, NumPy, and pandas, we will need Matplotlib and SciPy.
#
# Matplotlib was the original popular Python plotting package. We need Matplotlib because Seaborn does not implement quiver plots. Fortunately, because Seaborn is built on top of Matplotlib, they play nicely together.
#
# SciPy is NumPy's bigger sibling. We need SciPy to integrate the differential equations.
# +
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from numpy import meshgrid, linspace, sqrt
from numpy.testing import assert_almost_equal
from scipy.integrate import odeint
# -
# Now plot the slope field. A slope field is a special type of _quiver_ plot. We create NumPy arrays that say where to plot the line segments (`x` and `y`) and arrays to point them in the right direction (`1/L` and `S/L`).
#
# Matplotlib is more hands-on than Seaborn, so you need extra steps like the `subplot` command to create the figure and axes in the first place, `set_title` to set the title of the plot, `plot.axis` command to set the aspect ratio of the plot, and various options within `quiver` to make it look good.
#
# When we write `figsize=(5, 5)` in the inputs to `subplots`, are we creating a variable called `figsize`, or doing something else?
x, y = meshgrid(linspace(-1, 5, 25), linspace(-2, 4, 25))
S = x - y
L = sqrt(1 + S**2)
fig, ax = plt.subplots(figsize=(5, 5))
q = ax.quiver(x, y, 1/L, S/L, scale=25, headwidth=0, headlength=0, color='blue')
ax.set_title('Slopefield for dy/dx = x - y')
plt.axis('equal');
# `1/L` and `S/L` in the `quiver` command set the $x$ and $y$ lengths (components) of the line segment at each point in the grid.
#
# Note that NumPy operates element-wise by default, so `x - y` creates an array of differences, and `S/L` creates an array of quotients. For `1/L`, NumPy does something special called _broadcasting_. It assumes that you meant "divide an array of ones by the elements of `L`".
#
# The slope of the line segment is then $(S/L)/(1/L) = S$, and the length is
# \begin{align*}
# \sqrt{\left(\frac{1}{L}\right)^2 + \left(\frac{S}{L}\right)^2} &= \sqrt{\frac{1+S^2}{L^2}}\\
# &= 1.
# \end{align*}
# ### Numerical/graphical solution of an initial-value problem
# Plot the (approximate) solution to the initial-value problem
# \begin{align*}
# \frac{\mathrm{d}y}{\mathrm{d}x} = x - y\qquad y(-1)=0
# \end{align*}
# for $-1 < x <5$. Find $y(5)$.
#
# Here we use a numerical DE solver `scipy.integrate.odeint`, which we imported as `odeint`. To use `odeint`, we need to define the differential equation in a Python function and then feed it to `odeint`.
#
# First define the function. Remember that in Python, [white space is important](https://xkcd.com/353/). That is, you have to indent the contents of your function or Python will complain. Most of the time your Jupyter Notebook will figure out your intentions and auto-indent.
def diff_eq(y, x):
    "Right-hand side of dy/dx = x - y, with the (y, x) argument order `odeint` expects."
    return -y + x
# - The `def` keyword tells Python you would like to define a function.
# - In this case the function is called `diff_eq` and takes arguments `y` and `x`.
# - The `return` statement tells Python what you would like to return.
# - When you stop indenting, the function is over.
#
# Note that `odeint` expects the function (`diff_eq` here) to take (at least) two arguments, where the first (`y` here) is the dependent variable and the second (`x` here) is the independent variable. `odeint` needs the function to take both of those arguments (at least), even if these variables are not used in the function (for instance if they are not used in the DE).
#
# Now ask `odeint` to generate a solution to our DE.
x = linspace(-1, 5, 61)
y = odeint(diff_eq, 0, x)[:, 0]
# - `linspace` creates an array of (`61`, in this case) equally-spaced elements.
# - `odeint` calculates `y` for each value of `x`.
# - In Python, functions are variables like any other. In this case we pass `diff_eq` as an argument to `odeint`.
# - The second argument to `odeint` (`0` here) is the initial value of $y$. It must correspond to the first value of `x`.
# - `odeint` returns a 2D array with 61 rows and 1 column. We need a 1D array for plotting, so we extract the first column using `[:, 0]`.
#
# The following will plot `x` and `y` in a line plot, just like last week.
data = pd.DataFrame({'x': x, 'y': y})
sns.lineplot(data=data, x='x', y='y');
# Finally, to calculate $y(5)$, we realise that the values calculated by `odeint` are stored in the array `y`. So display `y`.
y
# Here we just want the last value. We can grab the last element of the array with `y[-1]`. (`y[-2]` gives the second last element.)
y[-1]
# `x[-1]` is the last element of `x`. Check it too.
x[-1]
# Now we will plot multiple (approximate) solutions on the same graph. The procedure is similar, but now we need an additional `DataFrame.melt` step, to get the data into the shape that Seaborn would like it.
#
# Technically
# - `melt` is required because Seaborn likes _long_ format data, and the DataFrame we have created is in _wide_ format.
# - `id_vars` says that `x` is the independent (mathematical) variable
# - `value_name` says that `y` is the (common) dependent (mathematical) variable
# - `var_name` is the label that will eventually appear in the plot key
# - telling Seaborn to vary the hue (colour) by `initial value` results in multiple lines on the same plot
x = linspace(-1, 5, 61)
data = {'x': x,
'y(-1) = 0': odeint(diff_eq, 0, x)[:, 0],
'y(-1) = 2': odeint(diff_eq, 2, x)[:, 0],
'y(-1) = -2': odeint(diff_eq, -2, x)[:, 0]}
data = pd.DataFrame(data)
data = data.melt(id_vars=['x'], value_name='y', var_name='initial value')
sns.lineplot(data=data, x='x', y='y', hue='initial value');
# Now let’s put the slope field and the numerical solutions together. Copy and paste the code from above where we created the quiver plot into the cell below, then copy and paste the code from above where we created the line plots below it (in the same cell).
#
# If you have done it properly, the result should look something like this:
#
# 
#
# (Changing the colour of the slopefield makes the blue solution line pop.)
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-17e44717e17ca409", "locked": true, "schema_version": 1, "solution": false}
# ## Exercises
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-def8f5ac90289a79", "locked": true, "schema_version": 1, "solution": false}
# ### Slope field and DE solution plot
#
# Plot on one figure the slopefield for the DE
# \begin{align*}
# \frac{\mathrm{d} y}{\mathrm{d} x} = 2.5y (1 − y),
# \end{align*}
# and the solutions to the initial value problems $y(0) = 0.2$, $y(0) = 0.5$ and $y(0) = 0.8$.
#
# Start by writing down a new definition for `diff_eq` below. Do not change the function's name or inputs.
# + nbgrader={"grade": false, "grade_id": "cell-05cc4f7824ab2d84", "locked": false, "schema_version": 1, "solution": true}
def diff_eq(y, x):
    """Right-hand side of the logistic DE dy/dx = 2.5*y*(1 - y).

    Parameters
    ----------
    y : float
        Current value of the dependent variable.
    x : float
        Independent variable.  Unused: the DE is autonomous, but ``odeint``
        requires the signature ``f(y, x)``.

    Returns
    -------
    float
        The slope dy/dx at (x, y).
    """
    # Equilibria at y = 0 and y = 1, matching the grader cell below.
    return 2.5 * y * (1 - y)
# -
# If you have implemented `diff_eq` correctly, the following should print "nice job".
# + nbgrader={"grade": true, "grade_id": "cell-0a0fa9099e30078d", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Grader cell: diff_eq must be autonomous (same slope regardless of x) and
# vanish at the equilibrium y = 1.
assert_almost_equal(diff_eq(0.4, 0), 0.6)   # 2.5 * 0.4 * (1 - 0.4) == 0.6
assert_almost_equal(diff_eq(0.4, 10), 0.6)  # x is ignored (autonomous DE)
assert_almost_equal(diff_eq(1, 0), 0)       # y = 1 is an equilibrium
print("nice job")
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-4e81a7c558ed87e4", "locked": true, "schema_version": 1, "solution": false}
# Now create your graph. Note that you will have to redefine `S` (from above). You can do that using your new definition for `diff_eq` or by writing out the RHS of the equation again.
#
# You will also have to change your definition of the meshgrid for the slopefield and the domain and initial values in the `odeint` commands. You want about 21 steps in the x and y ranges in meshgrid.
#
# Create the plot for the region $0 < x < 1$ and $0 < y < 1$.
# + nbgrader={"grade": true, "grade_id": "cell-8945b9507fff370f", "locked": false, "points": 2, "schema_version": 1, "solution": true}
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-0050a7948893bc7b", "locked": true, "schema_version": 1, "solution": false}
# ### Solution at a point
# What is $y(1)$ if $y(0)=0.8$?
# + nbgrader={"grade": false, "grade_id": "cell-962d55b6bbeb85ad", "locked": false, "schema_version": 1, "solution": true}
|
notebooks/lab-02.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1. 단어 빈도수 얻기
# +
from collections import Counter
from string import punctuation
from text import data
def count_word_freq(data):
    """Lower-case *data*, strip all punctuation, and count word occurrences.

    Prints the resulting Counter and returns it.
    """
    # Delete every punctuation character in a single C-level pass, then
    # tokenise on whitespace; counting is delegated to collections.Counter.
    cleaned = data.lower().translate(str.maketrans('', '', punctuation))
    counter = Counter(cleaned.split())
    print(counter)
    return counter
if __name__ == "__main__" :
count_word_freq(data)
# -
# ### 2. 워드클라우드 객체 생성 및 그리기
# * 배경색이 흰색인 WordCloud 객체를 생성
# * 단어들의 횟수를 기반으로 워드클라우드를 생성
# * 생성한 워드클라우드를 그림 파일로 저장
# +
from wordcloud import WordCloud
# from count import count_word_freq
from text import data
def create_word_cloud(data):
    """Build a white-background word cloud from *data* and save it to cloud.png."""
    frequencies = count_word_freq(data)
    # fit_words consumes a {word: count} mapping; a Counter satisfies that.
    cloud = WordCloud(background_color='white')
    cloud.fit_words(frequencies)
    cloud.to_file('cloud.png')
if __name__ == "__main__" :
create_word_cloud(data)
# -
# 생성한 이미지를 주피터 노트북상에서 출력
from IPython.display import Image
Image(filename='cloud.png')
|
practice/word_counter_cloud.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Machine Translation English-German Example Using SageMaker Seq2Seq
#
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Download dataset and preprocess](#Download-dataset-and-preprocess)
# 3. [Training the Machine Translation model](#Training-the-Machine-Translation-model)
# 4. [Inference](#Inference)
# ## Introduction
#
# Welcome to our Machine Translation end-to-end example! In this demo, we will train a English-German translation model and will test the predictions on a few examples.
#
# SageMaker Seq2Seq algorithm is built on top of [Sockeye](https://github.com/awslabs/sockeye), a sequence-to-sequence framework for Neural Machine Translation based on MXNet. SageMaker Seq2Seq implements state-of-the-art encoder-decoder architectures which can also be used for tasks like Abstractive Summarization in addition to Machine Translation.
#
# To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
# ## Setup
#
# Let's start by specifying:
# - The S3 bucket and prefix that you want to use for training and model data. **This should be within the same region as the Notebook Instance, training, and hosting.**
# - The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp in the cell below with a the appropriate full IAM role arn string(s).
# + isConfigCell=true tags=["parameters"]
# S3 bucket and prefix
bucket = '<your_s3_bucket_name_here>'
prefix = 'sagemaker/DEMO-seq2seq'
# +
import boto3
import re
from sagemaker import get_execution_role
role = get_execution_role()
# -
# Next, we'll import the Python libraries we'll need for the remainder of the exercise.
# +
from time import gmtime, strftime
import time
import numpy as np
import os
import json
# For plotting attention matrix later on
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
# -
# ## Download dataset and preprocess
# In this notebook, we will train a English to German translation model on a dataset from the
# [Conference on Machine Translation (WMT) 2017](http://www.statmt.org/wmt17/).
# + language="bash"
# wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.de.gz & \
# wget http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/corpus.tc.en.gz & wait
# gunzip corpus.tc.de.gz & \
# gunzip corpus.tc.en.gz & wait
# mkdir validation
# curl http://data.statmt.org/wmt17/translation-task/preprocessed/de-en/dev.tgz | tar xvzf - -C validation
# -
# Please note that it is a common practise to split words into subwords using Byte Pair Encoding (BPE). Please refer to [this](https://github.com/awslabs/sockeye/tree/master/tutorials/wmt) tutorial if you are interested in performing BPE.
# Since training on the whole dataset might take several hours/days, for this demo, let us train on the **first 10,000 lines only**. Don't run the next cell if you want to train on the complete dataset.
# !head -n 10000 corpus.tc.en > corpus.tc.en.small
# !head -n 10000 corpus.tc.de > corpus.tc.de.small
# Now, let's use the preprocessing script `create_vocab_proto.py` (provided with this notebook) to create vocabulary mappings (strings to integers) and convert these files to x-recordio-protobuf as required for training by SageMaker Seq2Seq.
# Uncomment the cell below and run it to check the arguments this script expects.
# + language="bash"
# # python3 create_vocab_proto.py -h
# -
# The cell below does the preprocessing. If you are using the complete dataset, the script might take around 10-15 min on an m4.xlarge notebook instance. Remove ".small" from the file names for training on full datasets.
# %%time
# %%bash
python3 create_vocab_proto.py \
--train-source corpus.tc.en.small \
--train-target corpus.tc.de.small \
--val-source validation/newstest2014.tc.en \
--val-target validation/newstest2014.tc.de
# The script will output 4 files, namely:
# - train.rec : Contains source and target sentences for training in protobuf format
# - val.rec : Contains source and target sentences for validation in protobuf format
# - vocab.src.json : Vocabulary mapping (string to int) for source language (English in this example)
# - vocab.trg.json : Vocabulary mapping (string to int) for target language (German in this example)
#
# Let's upload the pre-processed dataset and vocabularies to S3
# +
def upload_to_s3(bucket, prefix, channel, file):
    """Upload a local file to s3://<bucket>/<prefix>/<channel>/<file>.

    Parameters
    ----------
    bucket : str
        Destination S3 bucket name.
    prefix : str
        Key prefix under the bucket.
    channel : str
        Channel sub-folder (e.g. 'train', 'validation', 'vocab').
    file : str
        Path of the local file; also used as the final key segment.
    """
    s3 = boto3.resource('s3')
    key = prefix + "/" + channel + '/' + file
    # 'with' guarantees the file handle is closed even if the upload raises;
    # the original opened the file and never closed it.
    with open(file, "rb") as data:
        s3.Bucket(bucket).put_object(Key=key, Body=data)
upload_to_s3(bucket, prefix, 'train', 'train.rec')
upload_to_s3(bucket, prefix, 'validation', 'val.rec')
upload_to_s3(bucket, prefix, 'vocab', 'vocab.src.json')
upload_to_s3(bucket, prefix, 'vocab', 'vocab.trg.json')
# -
region_name = boto3.Session().region_name
# +
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(region_name, 'seq2seq')
print('Using SageMaker Seq2Seq container: {} ({})'.format(container, region_name))
# -
# ## Training the Machine Translation model
# +
job_name = 'DEMO-seq2seq-en-de-' + strftime("%Y-%m-%d-%H", gmtime())
print("Training job", job_name)
create_training_params = \
{
"AlgorithmSpecification": {
"TrainingImage": container,
"TrainingInputMode": "File"
},
"RoleArn": role,
"OutputDataConfig": {
"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)
},
"ResourceConfig": {
# Seq2Seq does not support multiple machines. Currently, it only supports single machine, multiple GPUs
"InstanceCount": 1,
"InstanceType": "ml.p2.xlarge", # We suggest one of ["ml.p2.16xlarge", "ml.p2.8xlarge", "ml.p2.xlarge"]
"VolumeSizeInGB": 50
},
"TrainingJobName": job_name,
"HyperParameters": {
# Please refer to the documentation for complete list of parameters
"max_seq_len_source": "60",
"max_seq_len_target": "60",
"optimized_metric": "bleu",
"batch_size": "64", # Please use a larger batch size (256 or 512) if using ml.p2.8xlarge or ml.p2.16xlarge
"checkpoint_frequency_num_batches": "1000",
"rnn_num_hidden": "512",
"num_layers_encoder": "1",
"num_layers_decoder": "1",
"num_embed_source": "512",
"num_embed_target": "512",
"checkpoint_threshold": "3",
"max_num_batches": "2100"
# Training will stop after 2100 iterations/batches.
# This is just for demo purposes. Remove the above parameter if you want a better model.
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 48 * 3600
},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/train/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
},
{
"ChannelName": "vocab",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/vocab/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
},
{
"ChannelName": "validation",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/{}/validation/".format(bucket, prefix),
"S3DataDistributionType": "FullyReplicated"
}
},
}
]
}
sagemaker_client = boto3.Session().client(service_name='sagemaker')
sagemaker_client.create_training_job(**create_training_params)
status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)
# -
status = sagemaker_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(status)
# if the job failed, determine why
if status == 'Failed':
message = sagemaker_client.describe_training_job(TrainingJobName=job_name)['FailureReason']
print('Training failed with the following error: {}'.format(message))
raise Exception('Training job failed')
# > Now wait for the training job to complete and proceed to the next step after you see model artifacts in your S3 bucket.
# You can jump to [Use a pretrained model](#Use-a-pretrained-model) as training might take some time.
# ## Inference
#
# A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means translating sentence(s) from English to German.
# This section involves several steps,
# - Create model - Create a model using the artifact (model.tar.gz) produced by training
# - Create Endpoint Configuration - Create a configuration defining an endpoint, using the above model
# - Create Endpoint - Use the configuration to create an inference endpoint.
# - Perform Inference - Perform inference on some input data using the endpoint.
#
# ### Create model
# We now create a SageMaker Model from the training output. Using the model, we can then create an Endpoint Configuration.
use_pretrained_model = False
# ### Use a pretrained model
# #### Please uncomment and run the cell below if you want to use a pretrained model, as training might take several hours/days to complete.
# +
#use_pretrained_model = True
#model_name = "DEMO-pretrained-en-de-model"
# #!curl https://s3-us-west-2.amazonaws.com/seq2seq-data/model.tar.gz > model.tar.gz
# #!curl https://s3-us-west-2.amazonaws.com/seq2seq-data/vocab.src.json > vocab.src.json
# #!curl https://s3-us-west-2.amazonaws.com/seq2seq-data/vocab.trg.json > vocab.trg.json
#upload_to_s3(bucket, prefix, 'pretrained_model', 'model.tar.gz')
#model_data = "s3://{}/{}/pretrained_model/model.tar.gz".format(bucket, prefix)
# +
# %%time
sage = boto3.client('sagemaker')
if not use_pretrained_model:
info = sage.describe_training_job(TrainingJobName=job_name)
model_name=job_name
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_name)
print(model_data)
primary_container = {
'Image': container,
'ModelDataUrl': model_data
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
# -
# ### Create endpoint configuration
# Use the model to create an endpoint configuration. The endpoint configuration also contains information about the type and number of EC2 instances to use when hosting the model.
#
# Since SageMaker Seq2Seq is based on Neural Nets, we could use an ml.p2.xlarge (GPU) instance, but for this example we will use a free tier eligible ml.m4.xlarge.
# +
from time import gmtime, strftime
endpoint_config_name = 'DEMO-Seq2SeqEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
create_endpoint_config_response = sage.create_endpoint_config(
EndpointConfigName = endpoint_config_name,
ProductionVariants=[{
'InstanceType':'ml.m4.xlarge',
'InitialInstanceCount':1,
'ModelName':model_name,
'VariantName':'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# -
# ### Create endpoint
# Lastly, we create the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 10-15 minutes to complete.
# +
# %%time
import time
endpoint_name = 'DEMO-Seq2SeqEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = sage.create_endpoint(
EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])
resp = sage.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
print("Status: " + status)
# wait until the status has changed
sage.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
# print the status of the endpoint
endpoint_response = sage.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))
if status != 'InService':
raise Exception('Endpoint creation failed.')
# -
# If you see the message,
# > Endpoint creation ended with EndpointStatus = InService
#
# then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.
#
# We will finally create a runtime object from which we can invoke the endpoint.
runtime = boto3.client(service_name='runtime.sagemaker')
# # Perform Inference
# ### Using JSON format for inference (Suggested for a single or small number of data instances)
# #### Note that you don't have to convert string to text using the vocabulary mapping for inference using JSON mode
# +
sentences = ["you are so good !",
"can you drive a car ?",
"i want to watch a movie ."
]
payload = {"instances" : []}
for sent in sentences:
payload["instances"].append({"data" : sent})
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload))
response = response["Body"].read().decode("utf-8")
response = json.loads(response)
print(response)
# -
# ### Retrieving the Attention Matrix
# Passing `"attention_matrix":"true"` in `configuration` of the data instance will return the attention matrix.
# +
sentence = 'can you drive a car ?'
payload = {"instances" : [{
"data" : sentence,
"configuration" : {"attention_matrix":"true"}
}
]}
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload))
response = response["Body"].read().decode("utf-8")
response = json.loads(response)['predictions'][0]
source = sentence
target = response["target"]
attention_matrix = np.array(response["matrix"])
print("Source: %s \nTarget: %s" % (source, target))
# -
# Define a function for plotting the attention matrix
def plot_matrix(attention_matrix, target, source):
    """Render the attention matrix as a grey-scale heat map.

    Rows of *attention_matrix* correspond to target tokens and columns to
    source tokens; the matrix is transposed so source runs down the y-axis.
    """
    src_tokens = source.split()
    tgt_tokens = target.split()
    # One attention row per decoded target token.
    assert attention_matrix.shape[0] == len(tgt_tokens)
    plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
    plt.xlabel("target")
    plt.ylabel("source")
    axes = plt.gca()
    axes.set_xticks(list(range(len(tgt_tokens))))
    axes.set_yticks(list(range(len(src_tokens))))
    axes.set_xticklabels(tgt_tokens)
    axes.set_yticklabels(src_tokens)
    plt.tight_layout()
plot_matrix(attention_matrix, target, source)
# ### Using Protobuf format for inference (Suggested for efficient bulk inference)
# Reading the vocabulary mappings as this mode of inference accepts list of integers and returns list of integers.
# +
import io
import tempfile
from record_pb2 import Record
from create_vocab_proto import vocab_from_json, reverse_vocab, write_recordio, list_to_record_bytes, read_next
source = vocab_from_json("vocab.src.json")
target = vocab_from_json("vocab.trg.json")
source_rev = reverse_vocab(source)
target_rev = reverse_vocab(target)
# -
sentences = ["this is so cool",
"i am having dinner .",
"i am sitting in an aeroplane .",
"come let us go for a long drive ."]
# Converting the string to integers, followed by protobuf encoding:
# Convert strings to integers using source vocab mapping. Out-of-vocabulary strings are mapped to 1 - the mapping for <unk>
sentences = [[source.get(token, 1) for token in sentence.split()] for sentence in sentences]
f = io.BytesIO()
for sentence in sentences:
record = list_to_record_bytes(sentence, [])
write_recordio(f, record)
# +
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
ContentType='application/x-recordio-protobuf',
Body=f.getvalue())
response = response["Body"].read()
# -
# Now, parse the protobuf response and convert list of integers back to strings
def _parse_proto_response(received_bytes):
    """Decode a recordio-protobuf inference response body.

    Parameters
    ----------
    received_bytes : bytes
        Raw body returned by the endpoint in x-recordio-protobuf format.

    Returns
    -------
    list of list of int
        One list of target-vocabulary token ids per translated sentence.
    """
    # Parse directly from an in-memory buffer instead of round-tripping the
    # bytes through a NamedTemporaryFile on disk: read_next only needs a
    # readable file-like object, and this leaves nothing to clean up.
    stream = io.BytesIO(received_bytes)
    target_sentences = []
    while True:
        next_record = read_next(stream)
        if not next_record:
            break
        rec = Record()
        rec.ParseFromString(next_record)
        target_sentences.append(list(rec.features["target"].int32_tensor.values))
    return target_sentences
targets = _parse_proto_response(response)
resp = [" ".join([target_rev.get(token, "<unk>") for token in sentence]) for
sentence in targets]
print(resp)
# # Stop / Close the Endpoint (Optional)
#
# Finally, we should delete the endpoint before we close the notebook.
sage.delete_endpoint(EndpointName=endpoint_name)
|
introduction_to_amazon_algorithms/seq2seq_translation_en-de/SageMaker-Seq2Seq-Translation-English-German.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/motkeg/Msc-Project/blob/master/fashion_mnist_gan_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="88fo6PERGgyH" colab_type="text"
# # Fasion_MNIST GAN (Generative Adversarial Networks)
# + [markdown] id="Icv-0pv3vHGT" colab_type="text"
# ### import and define some variables
# + id="nyE-C8fuBAYM" colab_type="code" colab={}
import os
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (Input, Dense, Reshape, Flatten, Dropout,
BatchNormalization, Activation, ZeroPadding2D,
LeakyReLU, UpSampling2D, Conv2D)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import initializers
tf.keras.backend.set_image_data_format('channels_first')
JOB_DIR = "./weights/gan_v2"
USE_TPU = False
EPOCHS = 10
BATCH= 128
SAVE = 100
USE_TPU = False
# Deterministic output.
# Tired of seeing the same results every time? Remove the line below.
np.random.seed(1000)
if not os.path.exists("./samples/fashion_mnist_v2"):
os.makedirs("./samples/fashion_mnist_v2")
if not os.path.exists(JOB_DIR):
os.makedirs(JOB_DIR)
# + [markdown] id="Q7l363p0vTxt" colab_type="text"
# ### define helpful methods
# + id="WxOMMlJiBM4L" colab_type="code" colab={}
import matplotlib.pyplot as plt
# Plot the loss from each epoch
def plot_loss(epoch, d_Losses, g_Losses):
    """Save discriminator and generator loss curves to dcgan_loss_epoch_<epoch>.png.

    Parameters
    ----------
    epoch : int
        Epoch number, used only in the output file name.
    d_Losses, g_Losses : list of float
        Per-epoch discriminator / generator losses.
    """
    plt.figure(figsize=(10, 8))
    plt.plot(d_Losses, label='Discriminitive loss')
    # BUG FIX: the original plotted d_Losses for both curves, so the
    # generator loss never appeared on the chart.
    plt.plot(g_Losses, label='Generative loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('dcgan_loss_epoch_%d.png' % epoch)
# Create a wall of generated images
def plot_generated_images(imgs, epoch, examples=25, dim=(5,5), figsize=(5, 5)):
    """Save a dim[0] x dim[1] grid of generated images for this epoch.

    Parameters
    ----------
    imgs : array-like, assumed shape (N, 1, H, W) (channels-first) -- TODO confirm.
        Generated images; channel 0 of each is drawn in inverted grey-scale.
    epoch : int
        Epoch number, used in the output file name.
    examples : int
        Unused; kept for interface compatibility with existing callers.
    dim, figsize : tuple
        Grid layout and figure size.
    """
    # FIX: the original drew `examples` random noise vectors here but never
    # used them; the dead draw (which also advanced the global NumPy RNG
    # state as a side effect) has been removed.
    plt.figure(figsize=figsize)
    for i in range(imgs.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(imgs[i, 0], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    name = "fashion_mnist_v2_{}.png".format(epoch)
    plt.savefig('./samples/fashion_mnist_v2/' + name)
# + [markdown] id="W-eqR-Ynvbyz" colab_type="text"
# ### define the GAN class
# the __call__() method is the 'action' of the model — calling an instance runs the training loop
# + id="pqFyzNX4GgIh" colab_type="code" colab={}
class DCGAN_V2():
    """DCGAN for Fashion-MNIST: a convolutional generator/discriminator pair
    trained adversarially.  Calling an instance runs the training loop.

    NOTE(review): written against TF1-era Keras APIs (tf.variable_scope,
    Adam(lr=...), TensorBoard(batch_size=..., write_grads=...)); confirm the
    pinned TensorFlow version before running on TF2.
    """
    def __init__(self):
        # The results are a little better when the dimensionality of the random vector is only 10.
        # The dimensionality has been left at 100 for consistency with other GAN implementations.
        self.randomDim = 100
        # Load data and rescale pixels from [0, 255] to [-1, 1] to match the
        # generator's tanh output range.
        (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
        X_train = (X_train.astype(np.float32) - 127.5)/127.5
        #self.X_train = np.expand_dims(X_train, -1)
        # Insert a channel axis: (N, 28, 28) -> (N, 1, 28, 28), channels-first
        # as configured via set_image_data_format at the top of the notebook.
        self.X_train = X_train[:, np.newaxis, :, :]
        self.tensorboard = keras.callbacks.TensorBoard(log_dir=JOB_DIR+ "/logs",
                                                       batch_size=BATCH,
                                                       write_graph=True,
                                                       histogram_freq=0,
                                                       write_images=True,
                                                       write_grads=True)
        #self.checkpointer = keras.callbacks.ModelCheckpoint(filepath=f'{FLAGS.job_dir}/gan_model.best.hdf5', verbose = 1, save_best_only=True)
        # Optimizer (shared by both sub-models and the combined model)
        self.optimizer = Adam(lr=0.0002, beta_1=0.5)
        self.discriminator = self.build_D()
        self.generator = self.build_G()
        # Combined network: freeze the discriminator so gan.train_on_batch
        # updates only the generator's weights.
        self.discriminator.trainable = False
        ganInput = Input(shape=(self.randomDim,))
        x = self.generator(ganInput)
        ganOutput = self.discriminator(x)
        self.gan = Model(inputs=ganInput, outputs=ganOutput)
        self.gan.compile(loss='binary_crossentropy', optimizer=self.optimizer)
        self.dLosses = []  # one discriminator loss recorded per epoch
        self.gLosses = []  # one generator loss recorded per epoch

    def build_G(self):
        """Build and compile the generator: 100-d noise -> (1, 28, 28) image in [-1, 1]."""
        # NOTE(review): the scope name "Generatoe" is a typo, but it is a
        # runtime string, so it is left unchanged here.
        with tf.variable_scope("Generatoe"):
            # Generator
            generator = Sequential()
            generator.add(Dense(128*7*7, input_dim=self.randomDim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
            generator.add(LeakyReLU(0.2))
            generator.add(Reshape((128,7, 7)))
            # Two 2x upsamplings take the 7x7 feature map to 28x28.
            generator.add(UpSampling2D(size=(2, 2)))
            generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
            generator.add(LeakyReLU(0.2))
            generator.add(UpSampling2D(size=(2, 2)))
            # tanh keeps pixels in [-1, 1], matching the training-data scaling.
            generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
            generator.compile(loss='binary_crossentropy', optimizer=self.optimizer)
            generator.summary()
            return generator

    def build_D(self):
        """Build and compile the discriminator: (1, 28, 28) image -> real/fake probability."""
        with tf.variable_scope("Discriminator"):
            # Discriminator
            discriminator = Sequential()
            discriminator.add(Conv2D(64, kernel_size=(5, 5), strides=(2, 2), padding='same', input_shape=(1,28, 28), kernel_initializer=initializers.RandomNormal(stddev=0.02)))
            discriminator.add(LeakyReLU(0.2))
            discriminator.add(Dropout(0.3))
            discriminator.add(Conv2D(128, kernel_size=(5, 5), strides=(2, 2), padding='same'))
            discriminator.add(LeakyReLU(0.2))
            discriminator.add(Dropout(0.3))
            discriminator.add(Flatten())
            discriminator.add(Dense(1, activation='sigmoid'))
            discriminator.compile(loss='binary_crossentropy', optimizer=self.optimizer)
            discriminator.summary()
            return discriminator

    def __call__(self, epochs=1, batchSize=BATCH):
        """Run adversarial training for *epochs* epochs of batchCount batches each."""
        batchCount = self.X_train.shape[0] // batchSize
        print (f'Epochs:{epochs}\nBatch size: {batchSize}\t | Batches per epoch: {batchCount}')
        for e in range(1, epochs+1):
            print ('-'*15, 'Epoch %d' % e, '-'*15)
            for _ in tqdm(range(batchCount)):
                # Get a random set of input noise and images
                noise = np.random.normal( 0,1, size=[batchSize, self.randomDim])
                imageBatch = self.X_train[np.random.randint(0, self.X_train.shape[0], size=batchSize)]
                # Generate fake images
                generatedImages = self.generator.predict(noise)
                X = np.concatenate([imageBatch, generatedImages])
                # Labels for generated and real data
                yDis = np.zeros(2*batchSize)
                # One-sided label smoothing: real images get 0.9 instead of 1.0
                yDis[:batchSize] = 0.9
                # Train discriminator
                self.discriminator.trainable = True
                dloss = self.discriminator.train_on_batch(X, yDis)
                # Train generator (discriminator frozen; generator tries to
                # make the combined model output "real" = 1)
                noise = np.random.normal(0, 1, size=[batchSize,self.randomDim])
                yGen = np.ones(batchSize)
                self.discriminator.trainable = False
                gloss = self.gan.train_on_batch(noise, yGen)
            # NOTE(review): the "acc." field below prints 100*dloss, i.e. the
            # loss again, not an accuracy metric -- confirm intent.
            print ("%d/%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (e,epochs, dloss, 100*dloss, gloss))
            self.dLosses.append(dloss)
            self.gLosses.append(gloss)
            # write tensorboard logs (losses of the last batch of the epoch;
            # "accuracy" is fed dloss for both models -- see NOTE above)
            self.tensorboard.set_model(self.discriminator)
            self.tensorboard.on_epoch_end(e,{"loss":dloss , "accuracy":dloss})
            self.tensorboard.set_model(self.generator)
            self.tensorboard.on_epoch_end(e,{"loss":gloss , "accuracy":dloss})
            # Sample and checkpoint on the first epoch and every 5th thereafter.
            if e == 1 or e % 5 == 0:
                noise = np.random.normal(0, 1, size=[25, self.randomDim])
                imgs = self.generator.predict(noise)
                plot_generated_images(imgs,e)
                self.save_models(e)
        # Plot losses from every epoch
        plot_loss(e , self.dLosses,self.gLosses)

    # Save the generator and discriminator networks (and weights) for later use
    def save_models(self,epoch):
        # `epoch` is accepted but unused: each call overwrites the same files.
        self.generator.save(f'{JOB_DIR}/dcgan_generator.h5')
        self.discriminator.save(f'{JOB_DIR}/dcgan_discriminator.h5')
        self.gan.save(f'{JOB_DIR}/dcgan_combined.h5')

    def named_logs(self,model, logs):
        """Zip a model's metric names with a list of values into a dict."""
        result = {}
        for l in zip(model.metrics_names, logs):
            result[l[0]] = l[1]
        return result
# + [markdown] id="VH10PtEVv3Yc" colab_type="text"
# ## Start the training
# + id="jPbMdi3pBTOx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3003} outputId="a89f845c-63ed-423e-c7ed-2eede57cfcf0"
model = DCGAN_V2()
model(epochs=EPOCHS)
# + [markdown] id="uqR6KmNMv-pi" colab_type="text"
# ## You can run this cell below as a standalone
#
#
# * you need the weights file of this model : <a href="https://colab.research.google.com/github/motkeg/Msc-Project/blob/master/fashion_cnn_tpu.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
#
# * you need the weights of the model that trained above
#
#
#
# ```
# the files need to be like this:
# - dcgan_generator.h5
# - fashion-cnn-weights.best.hdf5
# ```
#
#
# + id="PBxMVINwjDQ7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 127} outputId="7df5b6cb-33a6-4ebb-ce59-6db12593b1fb"
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
LABEL_NAMES = ['t_shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boots']
PREDICT = 'trouser'
# Create a wall of generated images
def plot_images(imgs, dim=(5,5), figsize=(5, 5)):
    """Display a grid of channels-first images in inverted grey-scale.

    imgs is indexed as imgs[i, 0], i.e. channel 0 of each (C, H, W) image.
    """
    plt.figure(figsize=figsize)
    total = imgs.shape[0]
    for idx in range(total):
        plt.subplot(dim[0], dim[1], idx + 1)
        plt.imshow(imgs[idx, 0], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.show()
label = " "
runs =0
fashion_cnn = keras.models.Sequential()
fashion_cnn.add(keras.layers.Reshape((28,28,1)))
generator = load_model("dcgan_generator.h5")
model = load_model('fashion-cnn-weights.best.hdf5')
fashion_cnn.add(model)
#fashion_cnn = keras.models.Model(keras.layers.Reshape((28,28,1)), model.output)
while label != PREDICT:
runs +=1
noise = np.random.normal(0, 1, size=[1, 100]) # generate one image
imgs = generator.predict(noise)
score = fashion_cnn.predict(imgs)
indx = list(score[0]).index(max(score[0]))
#print(score[0])
#print(indx)
label = LABEL_NAMES[indx]
print("Runs:",runs)
print(label)
plot_images(imgs)
|
GAN/fashion_mnist_gan/fashion_mnist_gan_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Generate Timeline from profiling trace from nvvp
#
# by <NAME>
#
# * Twitter: @yu_leiming
# * Blog: http://www1.coe.neu.edu/~ylm/
# ### prerequisite
#
# profile your application using nvprof
#
# ```bash
# nvprof --print-gpu-trace --csv Application_XXX 2> trace.csv
# ```
import pandas as pd
import numpy as np
import operator
# ### read trace file
# +
trace_file = "trace.csv"
# There are max 17 columns in the output csv
col_name = ["Start","Duration","Grid X","Grid Y","Grid Z","Block X","Block Y","Block Z","Registers Per Thread","Static SMem","Dynamic SMem","Size","Throughput","Device","Context","Stream","Name"]
df_trace = pd.read_csv(trace_file, names=col_name, engine='python')
rows_to_skip = 0
## find out the number of rows to skip
for index, row in df_trace.iterrows():
if row['Start'] == 'Start':
rows_to_skip = index
break
# read the input csv again
df_trace = pd.read_csv(trace_file, skiprows=rows_to_skip)
# -
class transfer():
    """A [start, end] time interval in milliseconds for one API call."""

    def __init__(self, start=0.0, end=0.0):
        # Store the endpoints under explicit unit-bearing attribute names.
        self.start_time_ms = start
        self.end_time_ms = end
class streams():
    """Per-stream buckets of `transfer` intervals, grouped by API type."""

    def __init__(self):
        self.h2d = []     # host -> device copies
        self.d2h = []     # device -> host copies
        self.kernel = []  # kernel executions
def time_coef_ms(df_trace):
    """Return factors converting the trace's 'Start' and 'Duration' columns
    to milliseconds.

    The first data row of an nvprof CSV holds the unit strings ('s', 'ms',
    'us'); unrecognised units fall through to 1.0 (already milliseconds).
    """
    n_rows, n_cols = df_trace.shape  # shape read kept from the original (unused)
    # Map a unit string to its to-milliseconds multiplier.
    unit_to_ms = {'s': 1e3, 'us': 1e-3}
    start_unit = df_trace['Start'].iloc[0]
    duration_unit = df_trace['Duration'].iloc[0]
    start_coef = unit_to_ms.get(start_unit, 1.0)
    duration_coef = unit_to_ms.get(duration_unit, 1.0)
    return start_coef, duration_coef
# read data for the current row
def read_row(df_row, start_coef_ms, duration_coef_ms):
    """Extract (stream_id, api_type, start_ms, end_ms) from a one-row slice.

    df_row is a single-row DataFrame (produced by df_trace.iloc[[i]]);
    float()/int() coerce its one-element columns to scalars.
    NOTE(review): float()/int() on a one-element Series is deprecated in
    recent pandas versions -- confirm the pinned pandas version before upgrading.
    """
    start_time_ms = float(df_row['Start']) * start_coef_ms
    end_time_ms = start_time_ms + float(df_row['Duration']) * duration_coef_ms
    stream_id = int(df_row['Stream'])
    # to_string() on the one-element Name column includes the index prefix,
    # but the substring tests below still classify the call correctly.
    api_name = df_row['Name'].to_string()
    if "DtoH" in api_name:
        api_type = 'd2h'        # device-to-host memcpy
    elif "HtoD" in api_name:
        api_type = 'h2d'        # host-to-device memcpy
    else:
        api_type = 'kernel'     # anything else is treated as a kernel launch
    return stream_id, api_type, start_time_ms, end_time_ms
# ### extract data from the trace
# +
# NOTE(review): this cell is Python 2 (xrange, print statement), matching
# the notebook's conda-root-py kernel; it will not run under Python 3 as-is.
streamList = []
# read the number of unique streams
stream_id_list = df_trace['Stream'].unique()
stream_id_list = stream_id_list[~np.isnan(stream_id_list)] # remove nan
# stream_id_list = list(stream_id_list)
num_streams = len(stream_id_list)
# One `streams` bucket object per unique stream id.
for i in xrange(num_streams):
    streamList.append(streams())
# +
start_coef, duration_coef = time_coef_ms(df_trace)
# read row by row (row 0 holds the unit strings, so start at 1)
for rowID in xrange(1, df_trace.shape[0]):
    # extract info from the current row
    stream_id, api_type, start_time_ms, end_time_ms = read_row(df_trace.iloc[[rowID]], start_coef, duration_coef)
    # find out index of the stream
    # NOTE(review): sid is a length-1 ndarray from np.where; indexing a list
    # with it relies on legacy NumPy integer coercion -- verify on current NumPy.
    sid, = np.where(stream_id_list==stream_id)
    # add the start/end time for different api calls
    if api_type == 'h2d':
        streamList[sid].h2d.append(transfer(start_time_ms, end_time_ms))
    elif api_type == 'd2h':
        streamList[sid].d2h.append(transfer(start_time_ms, end_time_ms))
    elif api_type == 'kernel':
        streamList[sid].kernel.append(transfer(start_time_ms, end_time_ms))
    else:
        print "Unknown. Error."
# -
# ### generate timeline
#
# You may need to adjust the font size for the annotation. The default is 10.
import matplotlib.pyplot as plt
import numpy as np

# +
fig, ax = plt.subplots()

# each bar will be 1 in height, the interval between centers of each bar is 2
# for example, bar 1 is at 1 with width 1 (1, 1), then bar 2 is at 3 with width 1 (3, 1), so on and so forth
transfer_color = '#C5EDEE'
kernel_color = '#D2E307'

stream_num = len(streamList)
ylim_max = 1 + stream_num * 2.0

stream_tag_pos = []
stream_tag = []

# FIX: xrange -> range throughout this cell (Python 3).
for i in range(stream_num):
    ii = i + 1
    bar_center = ylim_max - ii * 2.0
    bar_loc = (bar_center, 1)  # width 1

    # y label
    stream_tag_pos.append(bar_center + 0.5)  # 0.5 interv
    stream_tag.append('stream-' + str(i))

    current_stream = streamList[i]
    api_call_seq = []
    api_color_seq = []

    # h2d
    for j in range(len(current_stream.h2d)):
        start_time = current_stream.h2d[j].start_time_ms
        duration = current_stream.h2d[j].end_time_ms - current_stream.h2d[j].start_time_ms  # add start and duration
        api_call_seq.append((start_time, duration))
        api_color_seq.append(transfer_color)  # add the color for bar
        # pos for the annotation: shift left 0.0015 in the middle of the bar
        ax.annotate('h2d', (start_time + duration * 0.35, bar_center + 0.25), fontsize=10)

    # d2h
    for j in range(len(current_stream.d2h)):
        start_time = current_stream.d2h[j].start_time_ms
        duration = current_stream.d2h[j].end_time_ms - current_stream.d2h[j].start_time_ms
        api_call_seq.append((start_time, duration))
        api_color_seq.append(transfer_color)
        # pos for the annotation: shift left 0.0015 in the middle of the bar
        ax.annotate('d2h', (start_time + duration * 0.35, bar_center + 0.25), fontsize=10)

    # kernel
    for j in range(len(current_stream.kernel)):
        start_time = current_stream.kernel[j].start_time_ms
        duration = current_stream.kernel[j].end_time_ms - current_stream.kernel[j].start_time_ms
        api_call_seq.append((start_time, duration))
        api_color_seq.append(kernel_color)
        # offset 0.0007 for kernel annotation
        ax.annotate('K', (start_time + duration * 0.35, bar_center + 0.25), fontsize=10)

    # add the bar to the plot for current stream
    ax.broken_barh(api_call_seq,
                   bar_loc,
                   facecolors=api_color_seq)
# +
# finalize the axes: y spans all stream bars, labeled by stream index
ax.set_ylim(0, ylim_max)
ax.set_xlabel('timeline (ms)')
ax.set_yticks(stream_tag_pos)
ax.set_yticklabels(stream_tag)

# squash the plot vertically to a fixed height/width aspect ratio
aspectratio=0.2
ratio_default=(ax.get_xlim()[1]-ax.get_xlim()[0])/(ax.get_ylim()[1]-ax.get_ylim()[0])
ax.set_aspect(ratio_default*aspectratio)

plt.show()
# +
#fig.savefig('stream_timeline.pdf')
|
mem_mem/read_trace.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
import seaborn as sns
from tabpy.tabpy_tools.client import Client
# load the Kaggle ad-conversion dataset
df = pd.read_csv("~/Desktop/TC20/KAG_conversion_data.csv")
df.head()

# drop identifier and post-outcome (leakage) columns before modeling
df_model = df.drop(labels=['ad_id','Spent', 'Total_Conversion','Approved_Conversion'],axis=1)
df_model.head()

# class balance of the target
# NOTE(review): 'conversion_bool' is never created in this notebook —
# presumably it already exists in the CSV; verify.
sns.catplot(x='conversion_bool',data=df_model,kind='count')

y = df_model['conversion_bool']
X = df_model.drop(labels=['conversion_bool'],axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.head()

# one-hot encode 'age' — encoder fitted on the TRAINING split only
enc_age = OneHotEncoder(handle_unknown='ignore')
hot = enc_age.fit_transform(X_train[['age']]).toarray()
age_hot = pd.DataFrame(hot)
X_train.drop(axis=1,labels=['age'],inplace=True)
X_train = pd.concat([X_train.reset_index(drop=True),age_hot.reset_index(drop=True)], axis=1)
X_train.head()

# one-hot encode 'gender' — encoder fitted on the TRAINING split only
enc_gen = OneHotEncoder(handle_unknown='ignore')
hot_gender = enc_gen.fit_transform(X_train[['gender']]).toarray()
gender_hot = pd.DataFrame(hot_gender)
X_train = X_train.drop(axis=1,labels=['gender'])
X_train = pd.concat([X_train.reset_index(drop=True),gender_hot.reset_index(drop=True)], axis=1)
X_train.head()
# Gradient-boosted trees. Only settings that differ from (or pin) defaults
# are listed.
# FIX: the original spelled out every default, including presort='auto'
# and loss='deviance' — both removed from scikit-learn (presort in 0.24,
# 'deviance' renamed 'log_loss' in 1.3), so the call now raises TypeError.
gbm = GradientBoostingClassifier(learning_rate=0.1,
                                 max_depth=2,
                                 min_samples_leaf=2,
                                 n_estimators=200,
                                 subsample=1.0,
                                 tol=0.001,
                                 validation_fraction=0.1)
gbm.fit(X_train, y_train)
# Encode the test split with the encoders FITTED ON TRAIN.
hot_test = enc_age.transform(X_test[['age']]).toarray()
age_hot_test = pd.DataFrame(hot_test)

# FIX: the original called enc_gen.fit_transform on the TEST data,
# re-fitting the gender encoder on the test split (data leakage, and the
# dummy-column order could silently differ from training). transform only.
hot_gender_test = enc_gen.transform(X_test[['gender']]).toarray()
gender_hot_test = pd.DataFrame(hot_gender_test)

X_test.drop(labels=['age', 'gender'],axis=1, inplace=True)
X_test = pd.concat([X_test.reset_index(drop=True),
                    age_hot_test.reset_index(drop=True),
                    gender_hot_test.reset_index(drop=True)], axis=1)

preds = gbm.predict(X_test)
confusion_matrix(y_test,preds)

# connect to the local TabPy server for model deployment
client = Client('http://localhost:9004/')
def conversion_prediction(xyz_campaign_id, fb_campaign_id, age, gender, interest, Impressions, Clicks):
    """Score ad impressions with the trained GBM.

    Each argument is a list-like column of equal length. Returns a list of
    0/1 conversion predictions. Relies on the module-level `enc_age`,
    `enc_gen` and `gbm` objects fitted above.
    """
    frame = pd.DataFrame({
        'xyz_campaign_id': xyz_campaign_id,
        'fb_campaign_id': fb_campaign_id,
        'age': age,
        'gender': gender,
        'interest': interest,
        'Impressions': Impressions,
        'Clicks': Clicks,
    })
    print(frame.head())

    # one-hot encode the categoricals with the encoders fitted on train
    age_dummies = pd.DataFrame(enc_age.transform(frame[['age']]).toarray())
    gender_dummies = pd.DataFrame(enc_gen.transform(frame[['gender']]).toarray())

    frame.drop(labels=['age', 'gender'], axis=1, inplace=True)
    frame = pd.concat([frame.reset_index(drop=True),
                       age_dummies.reset_index(drop=True),
                       gender_dummies.reset_index(drop=True)], axis=1)
    print(frame.head())

    feature_matrix = frame.to_numpy()
    print(feature_matrix[0])
    return gbm.predict(feature_matrix).tolist()
# sample input: one ad impression, each field as a single-element list
test = [[936],[115484],['45-49'], ['F'],[15], [3569], [0]]
xyz_campaign_id = [936]
fb_campaign_id = [115484]
age = ['45-49']
gender = ['F']
interest = [15]
Impressions = [3569]
Clicks = [0]

# local smoke-test of the prediction function
conversion_prediction(xyz_campaign_id,fb_campaign_id, age, gender, interest, Impressions, Clicks)

# publish the function to the TabPy server so Tableau can call it
client.deploy('conversion_prediction_test',
              conversion_prediction,
              'Takes xyz_campaign_id,fb_campaign_id, age, gender, interest, \
Impressions, Clicks as predictors as returns a prediction as to \
whether the customer will convert (1) or not (0)',override = True)

# round-trip: query the deployed endpoint with the same sample input
client.query('conversion_prediction_test', xyz_campaign_id,fb_campaign_id, age, gender, interest, Impressions, Clicks)
|
TC20 Data Science/Model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as mt
# %matplotlib inline
# load the Titanic training data
train=pd.read_csv("train.csv")
train.head()
# first we find the null values in given data set
train.isnull().head()
# now it become difficult to trace null values because there can be millions of data so we use seaborn library to plot a heatmap
sn.heatmap(train.isnull(), xticklabels=True,cbar=False,cmap='gist_earth')
# now we need to know how may peope survived or how many not....!!
# using again seaborn library
sn.set_style('whitegrid')
sn.countplot(x='Survived',data=train)
# now we distiguise on the basis of sex e.i; how many male and female survived
sn.set_style('whitegrid')
sn.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
# survival split by passenger class
sn.set_style('whitegrid')
sn.countplot(x='Survived',hue='Pclass',data=train,palette='RdBu_r')
# now we extract information about the age of people with no. of people with same ages
sn.distplot(train['Age'].dropna(),kde=False,color='black',bins=40)
# now we have to find people having no of spouse or siblings
sn.countplot(x='SibSp',data=train)
# fare distribution, two ways
sn.distplot(train['Fare'],kde=False)
# #or
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
# relation between passenger class and age, used below to impute missing ages
# FIX: the pyplot factory is mt.figure(...); mt.Figure(...) constructs a bare
# Figure object that is never attached, so figsize was silently ignored.
mt.figure(figsize=(12,7))
sn.boxplot(x='Pclass',y='Age',data=train)
# now here we find that the average age of 1 class=38(approx),2 class=28(approx),3 class=25(approx)
# so we define a function to remove nan values
# +
def impute_age(cols):
    """Fill a missing Age with a class-typical age (38/28/25 for classes
    1/2/3, read off the boxplot above); pass existing ages through.

    `cols` is a row Series with labels ['Age', 'Pclass'] (applied row-wise
    below). FIX: positional access (cols[0]) on a labelled Series is
    deprecated in pandas — index by label instead.
    """
    Age = cols['Age']
    Pclass = cols['Pclass']
    if pd.isnull(Age):
        if Pclass == 1:
            return 38
        elif Pclass == 2:
            return 28
        else:
            return 25
    else:
        return Age
# -
# now apply that function
# impute missing ages row-by-row using the class-based values above
train['Age']=train[['Age','Pclass']].apply(impute_age,axis=1)
(pd.isnull(train['Age']))
sn.heatmap(train.isnull())
# Cabin is mostly missing, so drop the whole column
train.drop('Cabin',axis=1,inplace=True)
train
pd.isnull(train)
# + active=""
# ufffff
# -
train.info()
# one-hot encode categoricals, dropping the first level to avoid collinearity
embark=pd.get_dummies(train['Embarked'],drop_first=True)
sex=pd.get_dummies(train['Sex'],drop_first=True)
train.drop(['Name','Sex','Embarked','Ticket'],axis=1,inplace=True)
train.head()
train=pd.concat([train,sex,embark],axis=1)
train.head()
# Great! now our data is ready for our model
# BUILDING A logistic regression model
from sklearn.model_selection import train_test_split
# 70/30 train/test split on the engineered features
x_train,x_test,y_train,y_test=train_test_split(train.drop(['Survived'],axis=1),train['Survived'],test_size=0.30,random_state=101)
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(x_train,y_train)
prediction=logmodel.predict(x_test)
from sklearn.metrics import confusion_matrix
# NOTE(review): this first `accuracy` holds the confusion MATRIX, not a
# scalar score — the name is misleading; it is rebound to the real
# accuracy two lines later.
accuracy=confusion_matrix(y_test,prediction)
accuracy
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(y_test,prediction)
accuracy
prediction
# all done
|
exploratory data analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/blakelobato/DS-Unit-2-Kaggle-Challenge/blob/master/module4/224A_kaggle_challenge_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zm3fbEXnRg48" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 4*
#
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# # Classification Metrics
#
# ## Assignment
# - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Plot a confusion matrix for your Tanzania Waterpumps model.
# - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).
# - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_
# - [ ] Commit your notebook to your fork of the GitHub repo.
# - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student <NAME>. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
#
#
# ## Stretch Goals
#
# ### Reading
# - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
# - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
# - [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by <NAME>, with video
# - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
#
#
# ### Doing
# - [ ] Share visualizations in our Slack channel!
# - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)
# - [ ] More Categorical Encoding. (See module 2 assignment notebook)
# - [ ] Stacking Ensemble. (See below)
#
# ### Stacking Ensemble
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```
# + colab_type="code" id="lsbRiKBoB5RE" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    # pull data straight from GitHub and install the encoder package
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# + colab_type="code" id="BVA1lph8CcNX" colab={}
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# + id="Ifi1x-fKRg5D" colab_type="code" colab={}
import category_encoders as ce
import numpy as np
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
# + id="sqpRxHd9RycI" colab_type="code" colab={}
# %matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
    """Apply identical cleaning to the train / validation / test frames."""
    X = X.copy()

    # Parse the record date and explode it into year/month/day features,
    # then drop the raw column.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    X['year_recorded'] = X['date_recorded'].dt.year
    X['month_recorded'] = X['date_recorded'].dt.month
    X['day_recorded'] = X['date_recorded'].dt.day
    X = X.drop(columns='date_recorded')

    # Pump age at the time the record was taken.
    X['years'] = X['year_recorded'] - X['construction_year']

    # recorded_by never varies, id always does, and quantity_group
    # duplicates another column — none carry signal.
    X = X.drop(columns=['recorded_by', 'id'])
    X = X.drop(columns=['quantity_group'])

    # About 3% of latitudes are tiny near-zero values outside Tanzania;
    # treat them as missing.
    X['latitude'] = X['latitude'].replace(-2e-08, np.nan)

    # Zeros in these columns really mean "unknown".
    for col in ('construction_year', 'longitude', 'latitude', 'gps_height', 'population'):
        X[col] = X[col].replace(0, np.nan)

    return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
                              stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# + id="UetgLJC-Sxey" colab_type="code" colab={}
target = 'status_group'
# Arrange data into X features matrix and y target vector
# (equivalent to the cell above, written via an explicit feature list)
features = train.columns.drop(target)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# + id="hPzksFjxU601" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 200} outputId="bb4cadfc-52a1-4ad3-a7db-0cb9ebaf3f27"
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
import category_encoders as ce

# encode -> impute -> forest, tuned by randomized search
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(),
    RandomForestClassifier()
)

param = {
    'simpleimputer__strategy': ['median'],
    'randomforestclassifier__n_estimators': [150, 200, 220, 230],
    'randomforestclassifier__bootstrap': [True],
    'randomforestclassifier__max_depth': [10,20,25, 30],
    'randomforestclassifier__min_samples_leaf': [1,3],
    'randomforestclassifier__min_samples_split': [4,5,6,7,8]
}

# If you're on Colab, decrease n_iter & cv parameters # estar n_iter=100 e n_iter=10
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param,
    random_state=44,
    n_iter=5,
    cv=4,
    scoring= 'accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
search.fit(X_train, y_train)

print('Best hyperparameters', search.best_params_)
# FIX: scoring='accuracy' produces a positive score; negating it (a habit
# carried over from neg_* scorers) printed a wrong negative value.
print('Cross-validation Accuracy', search.best_score_)
# + id="JYPpf4ROSDX2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66} outputId="7237ade2-8cba-482a-97e7-afc89e6dd2ef"
# %%time
### try ordinal encoding
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
# Arrange data to include all categorical features and not ignore those > 50
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# pipeline, but with ordinal encoder
# (hyperparameters fixed to the best values found by the search above)
pipeline = make_pipeline(
    #ce.OneHotEncoder(use_cat_names=True, cols=['basin']),
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=150, bootstrap=True, max_depth=30, min_samples_leaf=1, min_samples_split=5, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# past runs for reference:
#81.68965 with 250, true, 35, 1, 5
#81.717 with 275, true, 40, 1, 5
#81.72447 with 275, true, 42, 1, 5 (81.0 on kaggle ouch)
# + id="-HFlI-HIUr-A" colab_type="code" colab={}
# predict the test set and write the Kaggle submission file
y_pred = pipeline.predict(X_test)
sumbission = sample_submission.copy()  # NOTE(review): variable name typo ("sumbission")
sumbission['status_group'] = y_pred
sumbission.to_csv('submission_BL_8.csv', index=False)
# + id="hn3H5T8ddtCw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="313edd6b-acca-48b0-e0b5-f6c6240ec8d0"
# !pip install matplotlib==3.1.0
# + id="Y5lr9OyDdtHS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66} outputId="2d72b23f-11f5-406f-d2af-61c799deba4d"
# Compute the confusion_matrix on the VALIDATION split.
from sklearn.metrics import confusion_matrix
# FIX: the original compared y_val against y_pred, which was predicted from
# X_test — a different dataset that merely happens to have the same length.
# Predict on X_val so labels and predictions refer to the same rows.
y_val_pred = pipeline.predict(X_val)
confusion_matrix(y_val, y_val_pred)
# + id="wdy-w7MKdtFI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="10df0909-f9d6-4da6-8490-cbe635515b6a"
# Get the unique labels
from sklearn.utils.multiclass import unique_labels
unique_labels(y_val)
# + id="Wvl6gjHjd0OI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 422} outputId="c50fe529-3e5a-475d-ac27-216ee623c6c8"
def plot_confusion_matrix(y_true, y_pred):
    """Heatmap of the confusion matrix with human-readable row/column labels."""
    labels = unique_labels(y_true)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    df = pd.DataFrame(confusion_matrix(y_true, y_pred),
                      columns = columns,
                      index = index)
    return sns.heatmap(df, annot=True, fmt='d', cmap='Blues')

# FIX: plot validation labels against VALIDATION predictions; the original
# reused y_pred, which was computed from X_test.
plot_confusion_matrix(y_val, pipeline.predict(X_val));
# + id="fsGyJqIFfF1m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="c337132a-1a41-4350-ce4c-73067024ea1f"
# !pip install scikit-plot
# + id="PZFYeg9XfF38" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="8d95fefc-b407-4560-e0ed-b382af6468e7"
import scikitplot as skplt
# NOTE(review): y_pred here was predicted from X_test, not X_val, so this
# matrix compares unrelated rows — it should use pipeline.predict(X_val).
skplt.metrics.plot_confusion_matrix(y_val, y_pred,
                                    figsize=(8,6),
                                    title=f'Confusion Matrix (n={len(y_val)})',
                                    normalize=False);
# + id="AHkaBYm1fF6J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 180} outputId="91938176-ca34-46b0-81b5-c45efdd92311"
from sklearn.metrics import classification_report
# NOTE(review): same y_pred / y_val mismatch as above — verify before trusting.
print(classification_report(y_val, y_pred))
# + id="X9MpN1WdiRU1" colab_type="code" colab={}
|
module4/224A_kaggle_challenge_4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import seaborn as sns
import numpy as np
import pickle
import torch
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# cell line / epigenome identifier used to build the data paths below
cl = 'E003'
# +
# Load precomputed Shapley attributions; element [1] is kept and each class
# entry is reshaped to (-1, 5, 100) — presumably (nodes, 5 histone marks,
# 100 bins), matching the mark labels used in the plots below; verify.
with open('/gpfs_home/spate116/data/spate116/GCN/%s/res/shapley_0.res' % cl, 'rb') as f:
    x = pickle.load(f)[1]
x[0] = x[0][1].reshape(-1, 5, 100)
x[1] = x[1][1].reshape(-1, 5, 100)

with open('/gpfs_home/spate116/data/spate116/GCN/%s/data/data_class1_unflattened.pickle' % cl, 'rb') as f:
    data = pickle.load(f)
# add a channel dimension for downstream models
data_embedding = data.x.reshape(data.x.shape[0], 1, data.x.shape[1], data.x.shape[2]).float()
torch.tensor(list(map(lambda x: x[0], data.y)), dtype=torch.long)[100:200]
# -
def plt_node(x, idx):
    """Plot Shapley attributions for one node `idx`.

    Top/middle: heatmaps of down- / up-regulation importance (x[0] / x[1])
    per histone mark x bin, sharing one symmetric, horizontal colorbar.
    Right column: per-mark row sums as barplots.
    """
    df = pd.DataFrame(x[0][idx])
    df1 = pd.DataFrame(x[1][idx])
    df.index = ['H3K27me3', 'H3K36me3', 'H3K4me1', 'H3K4me3', 'H3K9me3']
    df1.index = ['H3K27me3', 'H3K36me3', 'H3K4me1', 'H3K4me3', 'H3K9me3']
    # 3x2 grid: two heatmap rows + slim colorbar row; wide plots + slim bars
    grid_kws = {"height_ratios": (.5, .5, .05), "hspace": .2, "wspace": 0, "width_ratios": (.8, .15)}
    fig, ax = plt.subplots(3, 2, figsize=(15,10), dpi=300, gridspec_kw=grid_kws)
    # symmetric color range so 0 sits at the diverging palette's midpoint
    lower = min(np.min(df.to_numpy()), np.min(df1.to_numpy()))
    upper = max(np.max(df.to_numpy()), np.max(df1.to_numpy()))
    bound = max(abs(lower), abs(upper))
    ax[0, 0] = sns.heatmap(df, vmin=-bound, vmax=bound, center=0, ax=ax[0, 0], xticklabels=False, cmap="RdBu", cbar_ax = ax[2, 0], cbar_kws={"orientation": "horizontal", 'label': 'Shapley Score'})
    ax[1, 0] = sns.heatmap(df1, vmin=-bound, vmax=bound, center=0, ax=ax[1, 0], xticklabels=False, cmap="RdBu", cbar_ax = ax[2, 0], cbar_kws={"orientation": "horizontal", 'label': 'Shapley Score'})
    ax[0, 0].set_title('Down Regulation Importance')
    ax[1, 0].set_title('Up Regulation Importance')
    ax[2, 1].axis('off')
    ax[0, 1] = sns.barplot(x = df.sum(axis=1), y = df.index, ax = ax[0, 1])
    ax[0, 1].axis('off')
    ax[1, 1] = sns.barplot(x = df1.sum(axis=1), y = df1.index, ax = ax[1, 1])
    ax[1, 1].axis('off')

# example: node 5
plt_node(x, 5)
def plt_cl(x):
    """Same layout as plt_node, but attributions are summed over ALL nodes,
    giving a whole-cell-line view of per-mark / per-bin importance.
    """
    df = pd.DataFrame(np.sum(x[0], axis=0))
    df1 = pd.DataFrame(np.sum(x[1], axis=0))
    df.index = ['H3K27me3', 'H3K36me3', 'H3K4me1', 'H3K4me3', 'H3K9me3']
    df1.index = ['H3K27me3', 'H3K36me3', 'H3K4me1', 'H3K4me3', 'H3K9me3']
    # 3x2 grid: two heatmap rows + slim colorbar row; wide plots + slim bars
    grid_kws = {"height_ratios": (.5, .5, .05), "hspace": .2, "wspace": 0, "width_ratios": (.8, .15)}
    fig, ax = plt.subplots(3, 2, figsize=(15,10), dpi=300, gridspec_kw=grid_kws)
    # symmetric color range so 0 sits at the diverging palette's midpoint
    lower = min(np.min(df.to_numpy()), np.min(df1.to_numpy()))
    upper = max(np.max(df.to_numpy()), np.max(df1.to_numpy()))
    bound = max(abs(lower), abs(upper))
    ax[0, 0] = sns.heatmap(df, vmin=-bound, vmax=bound, center=0, ax=ax[0, 0], xticklabels=False, cmap="coolwarm", cbar_ax = ax[2, 0], cbar_kws={"orientation": "horizontal", 'label': 'Shapley Score'})
    ax[1, 0] = sns.heatmap(df1, vmin=-bound, vmax=bound, center=0, ax=ax[1, 0], xticklabels=False, cmap="coolwarm", cbar_ax = ax[2, 0], cbar_kws={"orientation": "horizontal", 'label': 'Shapley Score'})
    ax[0, 0].set_title('Down Regulation Importance')
    ax[1, 0].set_title('Up Regulation Importance')
    ax[2, 1].axis('off')
    ax[0, 1] = sns.barplot(x = df.sum(axis=1), y = df.index, ax = ax[0, 1])
    ax[0, 1].axis('off')
    ax[1, 1] = sns.barplot(x = df1.sum(axis=1), y = df1.index, ax = ax[1, 1])
    ax[1, 1].axis('off')

plt_cl(x)
|
notebooks/Shapley/plots.ipynb
|
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # broken_weights
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/broken_weights.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/broken_weights.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Broken weights problem in Google CP Solver.
From http://www.mathlesstraveled.com/?p=701
'''
Here's a fantastic problem I recently heard. Apparently it was first
posed by <NAME> in a book of arithmetic problems
published in 1612, and can also be found in Heinrich Dorrie's 100
Great Problems of Elementary Mathematics.
A merchant had a forty pound measuring weight that broke
into four pieces as the result of a fall. When the pieces were
subsequently weighed, it was found that the weight of each piece
was a whole number of pounds and that the four pieces could be
used to weigh every integral weight between 1 and 40 pounds. What
were the weights of the pieces?
Note that since this was a 17th-century merchant, he of course used a
balance scale to weigh things. So, for example, he could use a 1-pound
weight and a 4-pound weight to weigh a 3-pound object, by placing the
3-pound object and 1-pound weight on one side of the scale, and
the 4-pound weight on the other side.
'''
Compare with the following problems:
* MiniZinc: http://www.hakank.org/minizinc/broken_weights.mzn
* ECLiPSE: http://www.hakank.org/eclipse/broken_weights.ecl
* Gecode: http://www.hakank.org/gecode/broken_weights.cpp
* Comet: http://hakank.org/comet/broken_weights.co
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp

# Create the solver.
solver = pywrapcp.Solver('Broken weights')

#
# data
#
# FIX: m and n were defined at the very bottom of the cell, after every
# use — the script died with NameError on the first print. Define them
# up front in the data section.
m = 40  # total weight in pounds
n = 4   # number of broken pieces
print('total weight (m):', m)
print('number of pieces (n):', n)
print()

#
# variables
#
weights = [solver.IntVar(1, m, 'weights[%i]' % j) for j in range(n)]
x = {}
for i in range(m):
    for j in range(n):
        # -1: piece on the object's pan, 1: opposite pan, 0: unused
        x[i, j] = solver.IntVar(-1, 1, 'x[%i,%i]' % (i, j))
x_flat = [x[i, j] for i in range(m) for j in range(n)]

#
# constraints
#

# symmetry breaking: pieces in strictly increasing order
for j in range(1, n):
    solver.Add(weights[j - 1] < weights[j])

# the pieces must add up to the original weight
solver.Add(solver.SumEquality(weights, m))

# Check that all weights from 1 to 40 can be made.
#
# Since all weights can be on either side
# of the side of the scale we allow either
# -1, 0, or 1 or the weights, assuming that
# -1 is the weights on the left and 1 is on the right.
#
for i in range(m):
    solver.Add(i + 1 == solver.Sum([weights[j] * x[i, j] for j in range(n)]))

# objective: make the largest piece as small as possible
objective = solver.Minimize(weights[n - 1], 1)

#
# search and result
#
db = solver.Phase(weights + x_flat, solver.CHOOSE_FIRST_UNBOUND,
                  solver.ASSIGN_MIN_VALUE)

search_log = solver.SearchLog(1)
solver.NewSearch(db, [objective])

num_solutions = 0
while solver.NextSolution():
    num_solutions += 1
    print('weights: ', end=' ')
    for w in [weights[j].Value() for j in range(n)]:
        print('%3i ' % w, end=' ')
    print()
    print('-' * 30)
    for i in range(m):
        print('weight %2i:' % (i + 1), end=' ')
        for j in range(n):
            print('%3i ' % x[i, j].Value(), end=' ')
        print()
    print()
    print()

solver.EndSearch()
print('num_solutions:', num_solutions)
print('failures :', solver.Failures())
print('branches :', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
|
examples/notebook/contrib/broken_weights.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
class ImageTransformationNN(torch.nn.Module):
    """Feed-forward image transformation network:
    downsampling encoder -> residual trunk -> upsampling decoder.
    """

    def __init__(self):
        super(ImageTransformationNN, self).__init__()
        self.down_sample = DownSampleConv()
        self.res = ResidualNet()
        self.up_sample = UpSampleConv()

    def forward(self, X):
        # encode, transform, decode
        features = self.down_sample(X)
        features = self.res(features)
        return self.up_sample(features)
class DownSampleConv(torch.nn.Module):
    """Encoder: 3 -> 32 -> 64 -> 128 channels; the two stride-2 convs
    halve the spatial resolution (no padding, so edges shrink too).
    """

    def __init__(self):
        super(DownSampleConv, self).__init__()
        self.conv2d1 = torch.nn.Conv2d(3, 32, kernel_size=9, stride=1)
        self.norm1 = torch.nn.InstanceNorm2d(32, affine=True)
        self.relu1 = torch.nn.ReLU()
        self.conv2d2 = torch.nn.Conv2d(32, 64, kernel_size=3, stride=2)
        self.norm2 = torch.nn.InstanceNorm2d(64, affine=True)
        self.relu2 = torch.nn.ReLU()
        self.conv2d3 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=2)
        self.norm3 = torch.nn.InstanceNorm2d(128, affine=True)
        self.relu3 = torch.nn.ReLU()

    def forward(self, X):
        # conv -> norm -> relu, three stages, written sequentially instead
        # of as one deeply nested expression
        out = self.relu1(self.norm1(self.conv2d1(X)))
        out = self.relu2(self.norm2(self.conv2d2(out)))
        return self.relu3(self.norm3(self.conv2d3(out)))
# +
# Residual Block
# http://torch.ch/blog/2016/02/04/resnets.html
# There should be 5 of these
class RBlock(torch.nn.Module):
    """conv-norm-relu-conv-norm with an identity skip connection."""

    def __init__(self, channels: int):
        super(RBlock, self).__init__()
        # FIX: padding=1 keeps the spatial size so the residual addition in
        # forward() is shape-compatible (unpadded 3x3 convs shrank y_hat).
        self.conv2d1 = torch.nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.norm1 = torch.nn.InstanceNorm2d(channels, affine=True)
        self.relu = torch.nn.ReLU()
        # FIX: the second conv took 3 input channels instead of `channels`,
        # so it could never consume the first conv's output.
        self.conv2d2 = torch.nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = torch.nn.InstanceNorm2d(channels, affine=True)

    # FIX: forward() had no input parameter and referenced a global X.
    def forward(self, X):
        residual = X
        y_hat = self.norm2(self.conv2d2(self.relu(self.norm1(self.conv2d1(X)))))
        return y_hat + residual
class ResidualNet(torch.nn.Module):
    """Stack of five residual blocks operating on 128-channel feature maps."""

    def __init__(self):
        super(ResidualNet, self).__init__()
        self.block1 = RBlock(128)
        self.block2 = RBlock(128)
        self.block3 = RBlock(128)
        self.block4 = RBlock(128)
        self.block5 = RBlock(128)

    def forward(self, X):
        # Fixes: forward now takes X explicitly (it previously referenced an
        # undefined global) and returns the result (the original body had no
        # return statement, so callers received None).
        y = self.block5(self.block4(self.block3(self.block2(self.block1(X)))))
        return y
# +
class UpSampleConv(torch.nn.Module):
    """Upsampling decoder: two transposed convs that enlarge the resolution,
    then a final 9x9 transposed conv back down to 3 channels with a tanh output.

    Fixes over the original:
    - ``ConvTranspose2d`` requires a positive integer stride; ``stride=0.5``
      raises an error. A 2x upsample is expressed as ``stride=2`` on a
      transposed convolution.
    - ``forward`` now accepts the input tensor ``X`` (it previously referenced
      an undefined global ``X``).
    """

    def __init__(self):
        super(UpSampleConv, self).__init__()
        # 128 -> 64 channels; stride 2 roughly doubles the spatial resolution.
        self.conv2d1 = torch.nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2)
        self.norm1 = torch.nn.InstanceNorm2d(64, affine=True)
        self.relu1 = torch.nn.ReLU()
        # 64 -> 32 channels; second 2x upsampling stage.
        self.conv2d2 = torch.nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2)
        self.norm2 = torch.nn.InstanceNorm2d(32, affine=True)
        self.relu2 = torch.nn.ReLU()
        # 32 -> 3 channels with a large kernel, mirroring the encoder's first layer.
        self.conv2d3 = torch.nn.ConvTranspose2d(32, 3, kernel_size=9, stride=1)
        self.norm3 = torch.nn.InstanceNorm2d(3, affine=True)
        # tanh bounds the output image values to [-1, 1].
        self.tanh = torch.nn.Tanh()

    def forward(self, X):
        out = self.relu1(self.norm1(self.conv2d1(X)))
        out = self.relu2(self.norm2(self.conv2d2(out)))
        return self.tanh(self.norm3(self.conv2d3(out)))
# -
# Instantiate the full image-transformation network; evaluating ``A`` on its
# own line prints the module hierarchy in the notebook output.
A = ImageTransformationNN()
A
|
src/dev-notebooks/ImageTransformationNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ### if Else
#
# The if else statement
#
# An if-else statement is a great tool for the developer trying to return an output based on a condition. In R, the syntax is:
# + active=""
# if (condition) {
# Expr1
# } else {
# Expr2
# }
# -
# 
#
#
#
# We want to examine whether a variable stored as "quantity" is above 20. If quantity is greater than 20, the code will print "You sold a lot!"; otherwise it will print "Not enough for today".
# +
# Create vector quantity
quantity <- 25
# Set up the if-else statement: choose which message to print based on
# whether more than 20 units were sold.
if (quantity > 20)
{
    print('You sold a lot!')
} else
{
    print('Not enough for today')
}
# -
# Read an integer from the user and report whether it is even or odd.
a <- readline(prompt = "Please Enter the Number:")
a <- as.integer(a)
if (a %% 2 == 0) {
  # paste() is needed to include the number in the message: print() matches
  # an extra positional argument to its `digits` parameter and silently
  # ignores it for character input, so the original never showed the number.
  print(paste("Given Number is Even:", a))
} else {
  print(paste("Given Number is Odd:", a))
}
# ###### Check Whether a Given Number is Positive or Negative
# Read an integer and report its sign.
a <- readline(prompt = "Please Enter the Number:")
x <- as.integer(a)
if (x > 0) {
  print("Given Number is Positive Number")  # fixed message typo ("Postive")
} else {
  # NOTE(review): zero falls into this branch and is reported as negative;
  # see the three-way else-if version below for the full split.
  print("Given Number is Negative Number")  # fixed message typo ("Nagative")
}
# #### The else if statement
#
#
# We can further customize the control level with the else-if statement. With else if, you can add as many conditions as you want. The syntax is:
# + active=""
# if (condition1) {
# expr1
# } else if (condition2) {
# expr2
# } else if (condition3) {
# expr3
# } else {
# expr4
# }
# -
# Three-way sign test using an else-if chain.
a <- readline(prompt = "Please Enter the Number:")
x <- as.integer(a)
if (x > 0) {
  # paste() is needed to include the number in the output: print() matches an
  # extra positional argument to `digits` and silently drops it, so the
  # original printed only the message text. Typos ("Postive"/"Nagative") fixed.
  print(paste("Given Number is Positive Number:", x))
} else if (x < 0) {
  print(paste("Given Number is Negative Number:", x))
} else {
  print(paste("Given Number is Zero:", x))
}
# Vectorized conditional: ifelse() tests each element of the vector at once
# and returns "Even" or "odd" per element.
a = c(5,7,2,9,25,28,27,3)
ifelse(a %% 2 ==0 ,"Even","odd")
# Compare the two teams' scores and announce the winner.
# NOTE(review): a tie (team_A == team_B) falls into the else branch and is
# reported as a Team B win — confirm this is the intended behavior.
team_A <- 3
team_B <- 1
if (team_A > team_B){
    print("Team A Wins")
}else{
    print("Team B Wins")
}
# #### Example 2:
#
# VAT has different rate according to the product purchased. Imagine we have three different kind of products with different VAT applied:
#
# |Categories| Products| VAT|
# |----------|------------|----------|
# |A| Book, magazine, newspaper, etc..| 8%|
# |B| Vegetable, meat, beverage, etc..| 10%|
# |C| Tee-shirt, jean, pant, etc.. |20%|
# Apply the VAT rate that matches the product category entered by the user.
category <- readline(prompt = "Please Enter your Category:")
price <- 10
if (category =='A'){
    # Category A (books, magazines, newspapers, ...): 8% VAT
    cat('A vat rate of 8% is applied.','The total price is',price *1.08)
} else if (category =='B'){
    # Category B (vegetables, meat, beverages, ...): 10% VAT
    cat('A vat rate of 10% is applied.','The total price is',price *1.10)
} else {
    # Any other input falls through to category C (clothing): 20% VAT
    cat('A vat rate of 20% is applied.','The total price is',price *1.20)
}
|
14. IF, ELSE, ELSE IF Statement in R.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Adding a variable to capture NA
#
# In previous notebooks we learnt how to replace missing values by the mean, median or by extracting a random value. In other words we learnt about mean / median and random sample imputation. These methods assume that the data are missing completely at random (MCAR).
#
# There are other methods that can be used when values are not missing at random, for example arbitrary value imputation or end of distribution imputation. However, these imputation techniques will affect the variable distribution dramatically, and are therefore not suitable for linear models.
#
# **So what can we do if data are not MCAR and we want to use linear models?**
#
# If data are not missing at random, it is a good idea to replace missing observations by the mean / median / mode AND **flag** those missing observations as well with a **Missing Indicator**. A Missing Indicator is an additional binary variable, which indicates whether the data was missing for an observation (1) or not (0).
#
#
# ### For which variables can I add a missing indicator?
#
# We can add a missing indicator to both numerical and categorical variables.
#
# #### Note
#
# Adding a missing indicator is never used alone. On the contrary, it is always used together with another imputation technique, which can be mean / median imputation for numerical variables, or frequent category imputation for categorical variables. We can also use random sample imputation together with adding a missing indicator for both categorical and numerical variables.
#
# Commonly used together:
#
# - Mean / median imputation + missing indicator (Numerical variables)
# - Frequent category imputation + missing indicator (Categorical variables)
# - Random sample Imputation + missing indicator (Numerical and categorical)
#
# ### Assumptions
#
# - Data is not missing at random
# - Missing data are predictive
#
# ### Advantages
#
# - Easy to implement
# - Captures the importance of missing data if there is one
#
# ### Limitations
#
# - Expands the feature space
# - Original variable still needs to be imputed to remove the NaN
#
# Adding a missing indicator will increase 1 variable per variable in the dataset with missing values. So if the dataset contains 10 features, and all of them have missing values, after adding a missing indicator we will have a dataset with 20 features: the original 10 features plus additional 10 binary features, which indicate for each of the original variables whether the value was missing or not. This may not be a problem in datasets with tens to a few hundreds variables, but if our original dataset contains thousands of variables, by creating an additional variable to indicate NA, we will end up with very big datasets.
#
# #### Important
#
# In addition, data tends to be missing for the same observation across multiple variables, which often leads to many of the missing indicator variables to be actually similar or identical to each other.
#
# ### Final note
#
# Typically, mean / median / mode imputation is done together with adding a variable to capture those observations where the data was missing, thus covering 2 angles: if the data was missing completely at random, this would be contemplated by the mean / median / mode imputation, and if it wasn't this would be captured by the missing indicator.
#
# Both methods are extremely straight forward to implement, and therefore are a top choice in data science competitions. See for example the winning solution of the KDD 2009 cup: ["Winning the KDD Cup Orange Challenge with Ensemble Selection](http://www.mtome.com/Publications/CiML/CiML-v3-book.pdf).
#
#
# ## In this demo:
#
# We will use the Ames House Price and Titanic Datasets.
#
# - To download the datasets please refer to the lecture **Datasets** in **Section 1** of this course.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# to split the datasets
from sklearn.model_selection import train_test_split
# +
# load the Titanic Dataset with a few variables for demonstration
data = pd.read_csv('../titanic.csv', usecols=['age', 'fare', 'survived'])
data.head()
# +
# let's look at the percentage of NA
data.isnull().mean()
# -
# To add a binary missing indicator, we don't necessarily need to learn anything from the training set, so in principle we could do this in the original dataset and then separate into train and test. However, I do not recommend this practice.
# In addition, if you are using scikit-learn to add the missing indicator, the indicator as it is designed, needs to learn from the train set, which features to impute, this is, which are the features for which the binary variable needs to be added. We will see more about different implementations of missing indicators in future notebooks. For now, let's see how to create a binary missing indicator manually.
# +
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']], # predictors
data['survived'], # target
test_size=0.3, # percentage of obs in test set
random_state=0) # seed to ensure reproducibility
X_train.shape, X_test.shape
# +
# Let's explore the missing data in the train set
# the percentages should be fairly similar to those
# of the whole dataset
X_train.isnull().mean()
# +
# add the missing indicator
# this is done very simply by using np.where from numpy
# and isnull from pandas:
X_train['Age_NA'] = np.where(X_train['age'].isnull(), 1, 0)
X_test['Age_NA'] = np.where(X_test['age'].isnull(), 1, 0)
X_train.head()
# +
# the mean of the binary variable, coincides with the
# perentage of missing values in the original variable
X_train['Age_NA'].mean()
# +
# yet the original variable, still shows the missing values
# which need to be replaced by any of the techniques
# we have learnt
X_train.isnull().mean()
# +
# for example median imputation
median = X_train['age'].median()
X_train['age'] = X_train['age'].fillna(median)
X_test['age'] = X_test['age'].fillna(median)
# check that there are no more missing values
X_train.isnull().mean()
# -
# ### House Prices dataset
# +
# we are going to use the following variables,
# some are categorical some are numerical
cols_to_use = [
'LotFrontage', 'MasVnrArea', # numerical
'BsmtQual', 'FireplaceQu', # categorical
'SalePrice' # target
]
# +
# let's load the House Prices dataset
data = pd.read_csv('../houseprice.csv', usecols=cols_to_use)
print(data.shape)
data.head()
# +
# let's inspect the variables with missing values
data.isnull().mean()
# +
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data,
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# +
# let's make a function to add a missing indicator
# binary variable
def missing_indicator(df, variable):
    """Return a 0/1 NumPy array flagging rows of ``df[variable]`` that are missing."""
    is_missing = df[variable].isna()
    return np.where(is_missing, 1, 0)
# +
# let's loop over all the variables and add a binary
# missing indicator with the function we created
for variable in cols_to_use:
X_train[variable+'_NA'] = missing_indicator(X_train, variable)
X_test[variable+'_NA'] = missing_indicator(X_test, variable)
X_train.head()
# +
# now let's evaluate the mean value of the missing indicators
# first I capture the missing indicator variables with a
# list comprehension
missing_ind = [col for col in X_train.columns if 'NA' in col]
# calculate the mean
X_train[missing_ind].mean()
# +
# the mean of the missing indicator
# coincides with the percentage of missing values
# in the original variable
X_train.isnull().mean()
# +
# let's make a function to fill missing values with a value:
# we have use a similar function in our previous notebooks
# so you are probably familiar with it
def impute_na(df, variable, value):
    """Return ``df[variable]`` with missing entries replaced by ``value``.

    The original column in ``df`` is left untouched; a new Series is returned.
    """
    column = df[variable]
    return column.fillna(value)
# +
# let's impute the NA with the median for numerical
# variables
# remember that we calculate the median using the train set
median = X_train['LotFrontage'].median()
X_train['LotFrontage'] = impute_na(X_train, 'LotFrontage', median)
X_test['LotFrontage'] = impute_na(X_test, 'LotFrontage', median)
median = X_train['MasVnrArea'].median()
X_train['MasVnrArea'] = impute_na(X_train, 'MasVnrArea', median)
X_test['MasVnrArea'] = impute_na(X_test, 'MasVnrArea', median)
# let's impute the NA in categorical variables by the
# most frequent category (aka the mode)
# the mode needs to be learnt from the train set
mode = X_train['BsmtQual'].mode()[0]
X_train['BsmtQual'] = impute_na(X_train, 'BsmtQual', mode)
X_test['BsmtQual'] = impute_na(X_test, 'BsmtQual', mode)
mode = X_train['FireplaceQu'].mode()[0]
X_train['FireplaceQu'] = impute_na(X_train, 'FireplaceQu', mode)
X_test['FireplaceQu'] = impute_na(X_test, 'FireplaceQu', mode)
# -
# and now let's check there are no more NA
X_train.isnull().mean()
# As you can see, we have now the double of features respect to the original dataset. The original dataset had 4 variables, the pre-processed dataset contains 8, plus the target.
#
# **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
|
notebooks/feature-engineering/Section-04-Missing-Data-Imputation/04.08-Missing-Indicator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={"grade": false, "grade_id": "q1_prompt", "locked": true, "schema_version": 1, "solution": false}
# # Q1
#
# This question will focus entirely on NumPy arrays: vectorized programming, slicing, and broadcasting.
# + [markdown] nbgrader={"grade": false, "grade_id": "q1a_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part A
#
# In this question, you'll implement the vector dot product.
#
# Write a function which:
#
# - is named `dot`
# - takes two NumPy arrays as arguments
# - returns one number: the floating-point dot product of the two input vectors
#
# Recall how a [dot product](https://en.wikipedia.org/wiki/Dot_product) works: corresponding elements of two arrays are multiplied together, then all these products are summed.
#
# For example: if I have two NumPy arrays `[1, 2, 3]` and `[4, 5, 6]`, their dot product would be `(1*4) + (2*5) + (3*6)`, or `4 + 10 + 18`, or 32.
#
# You can use NumPy arrays, and the `np.sum()` function, but no other NumPy functions.
# + nbgrader={"grade": false, "grade_id": "q1a", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1a_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(57442)
x1 = np.random.random(10)
x2 = np.random.random(10)
np.testing.assert_allclose(x1.dot(x2), dot(x1, x2))
# + nbgrader={"grade": true, "grade_id": "q1a_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
np.random.seed(495835)
x1 = np.random.random(100)
x2 = np.random.random(100)
np.testing.assert_allclose(x1.dot(x2), dot(x1, x2))
# + [markdown] nbgrader={"grade": false, "grade_id": "q1b_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part B
#
# Write a function which:
#
# - is named `subarray`
# - takes two arguments, both NumPy arrays: one containing data, one containing indices
# - returns one NumPy array
#
# The function should return a NumPy array that corresponds to the elements of the input array of data selected by the indices array.
#
# For example, `subarray([1, 2, 3], [2])` should return a NumPy array of `[3]`.
#
# You cannot use any built-in functions, NumPy functions, or loops!
# + nbgrader={"grade": false, "grade_id": "q1b", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1b_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(5381)
x1 = np.random.random(43)
i1 = np.random.randint(0, 43, 10)
a1 = np.array([ 0.24317871, 0.16900041, 0.20687451, 0.38726974, 0.49798077,
0.32797843, 0.18801287, 0.29021025, 0.65418547, 0.78651195])
np.testing.assert_allclose(a1, subarray(x1, i1), rtol = 1e-5)
# + nbgrader={"grade": true, "grade_id": "q1b_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
x2 = np.random.random(74)
i2 = np.random.randint(0, 74, 5)
a2 = np.array([ 0.96372034, 0.84256813, 0.08188566, 0.71852542, 0.92384611])
np.testing.assert_allclose(a2, subarray(x2, i2), rtol = 1e-5)
# + [markdown] nbgrader={"grade": false, "grade_id": "q1c_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part C
#
# Write a function which:
#
# - is named `less_than`
# - takes two arguments: a NumPy array, and a floating-point number
# - returns a NumPy array
#
# You should use a boolean mask to return only the values in the NumPy array that are less than the specified floating-point value (the second parameter). No loops or built-in functions are allowed.
#
# For example, `less_than([1, 2, 3], 2.5)` should return a NumPy array of `[1, 2]`.
# + nbgrader={"grade": false, "grade_id": "q1c", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1c_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(85928)
x = np.random.random((10, 20, 30))
t = 0.001
y = np.array([ 0.0005339 , 0.00085714, 0.00091265, 0.00037283])
np.testing.assert_allclose(y, less_than(x, t))
# + nbgrader={"grade": true, "grade_id": "q1c_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
np.random.seed(8643)
x2 = np.random.random((100, 100, 10))
t2 = 0.0001
y2 = np.array([ 2.91560413e-06, 6.80065620e-06, 3.63294064e-05,
7.50659065e-05, 1.61602031e-06, 9.37205052e-05])
np.testing.assert_allclose(y2, less_than(x2, t2), rtol = 1e-05)
# + [markdown] nbgrader={"grade": false, "grade_id": "q1d_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part D
#
# Write a function which:
#
# - is named `greater_than`
# - takes two arguments: a NumPy array, and a threshold number (float)
# - returns a NumPy array
#
# You should use a boolean mask to return only the values in the NumPy array that are greater than the specified `threshold` value (the second parameter). No loops are allowed, or built-in functions, or NumPy functions.
#
# For example, `greater_than([1, 2, 3], 2.5)` should return a NumPy array of `[3]`.
# + nbgrader={"grade": false, "grade_id": "q1d", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1d_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(592582)
x = np.random.random((10, 20, 30))
t = 0.999
y = np.array([ 0.99910167, 0.99982779, 0.99982253, 0.9991043 ])
np.testing.assert_allclose(y, greater_than(x, t))
# + nbgrader={"grade": true, "grade_id": "q1d_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
np.random.seed(689388)
x2 = np.random.random((100, 100, 10))
t2 = 0.9999
y2 = np.array([ 0.99997265, 0.99991169, 0.99998906, 0.99999012, 0.99992325,
0.99993289, 0.99996637, 0.99996416, 0.99992627, 0.99994388,
0.99993102, 0.99997486, 0.99992968, 0.99997598])
np.testing.assert_allclose(y2, greater_than(x2, t2), rtol = 1e-05)
# + [markdown] nbgrader={"grade": false, "grade_id": "q1e_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part E
#
# Write a function which:
#
# - is named `in_between`
# - takes three parameters: a NumPy array, a lower threshold (float), and an upper threshold (float)
# - returns a NumPy array
#
# You should use a boolean mask to return only the values in the NumPy array that are *in between* the two specified threshold values, `lower` and `upper`. No loops are allowed, or built-in functions, or NumPy functions.
#
# For example, `in_between([1, 2, 3], 1, 3)` should return a NumPy array of `[2]`.
#
# Hint: you can use your functions from Parts C and D to help!
# + nbgrader={"grade": false, "grade_id": "q1e", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1e_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(7472)
x = np.random.random((10, 20, 30))
lo = 0.499
hi = 0.501
y = np.array([ 0.50019884, 0.50039172, 0.500711 , 0.49983418, 0.49942259,
0.4994417 , 0.49979261, 0.50029046, 0.5008376 , 0.49985266,
0.50015914, 0.50068227, 0.50060399, 0.49968918, 0.50091042,
0.50063015, 0.50050032])
np.testing.assert_allclose(y, in_between(x, lo, hi))
# + nbgrader={"grade": true, "grade_id": "q1e_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(14985)
x = np.random.random((30, 40, 50))
lo = 0.49999
hi = 0.50001
y = np.array([ 0.50000714, 0.49999045])
np.testing.assert_allclose(y, in_between(x, lo, hi))
# + [markdown] nbgrader={"grade": false, "grade_id": "q1f_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part F
#
# Write a function which:
#
# - is named `not_in_between`
# - takes three parameters: a NumPy array, a lower threshold (float), and an upper threshold (float)
# - returns a NumPy array
#
# You should use a boolean mask to return only the values in the NumPy array that are *NOT in between* the two specified threshold values, `lower` and `upper`. No loops are allowed, or built-in functions, or NumPy functions.
#
# For example, `not_in_between([1, 2, 3, 4], 1, 3)` should return a NumPy array of `[4]`.
#
# Hint: you can use your functions from Parts C and D to help!
# + nbgrader={"grade": false, "grade_id": "q1f", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1f_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(475185)
x = np.random.random((10, 20, 30))
lo = 0.001
hi = 0.999
y = np.array([ 9.52511605e-04, 8.62993716e-04, 3.70243252e-04,
9.99945849e-01, 7.21751759e-04, 9.36931041e-04,
5.10792605e-04, 6.44911672e-04])
np.testing.assert_allclose(y, not_in_between(x, lo, hi))
# + nbgrader={"grade": true, "grade_id": "q1f_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
np.random.seed(51954)
x = np.random.random((30, 40, 50))
lo = 0.00001
hi = 0.99999
y = np.array([ 8.46159001e-06, 9.99998669e-01, 9.99993873e-01,
5.58488698e-06, 9.99993348e-01])
np.testing.assert_allclose(y, not_in_between(x, lo, hi))
# + [markdown] nbgrader={"grade": false, "grade_id": "q1g_prompt", "locked": true, "schema_version": 1, "solution": false}
# ### Part G
#
# Write a function which:
#
# - is named `reverse_array`
# - takes 1 parameter: a 1D NumPy array of data
# - returns the 1D NumPy array, reversed
#
# This function uses fancy indexing to reverse the ordering of the elements in the input array, and returns the reversed array. You *cannot* use the `[::-1]` notation, nor the built-in `reversed` method, or any other Python function or loops. You **can** use the `list()`, `range()`, and `np.arange()` functions, however, and only some or all of those (but again, no loops!).
#
# Hint: Construct a list of indices and use NumPy fancy indexing to reverse the ordering of the elements in the input list, then return the reversed array.
# + nbgrader={"grade": false, "grade_id": "q1g", "locked": false, "schema_version": 1, "solution": true}
# + nbgrader={"grade": true, "grade_id": "q1g_test1", "locked": true, "points": 5, "schema_version": 1, "solution": false}
import numpy as np
np.random.seed(5748)
x1 = np.random.random(75)
y1 = x1[::-1] # Sorry, you're not allowed to do this!
np.testing.assert_allclose(y1, reverse_array(x1))
# + nbgrader={"grade": true, "grade_id": "q1g_test2", "locked": true, "points": 5, "schema_version": 1, "solution": false}
np.random.seed(68382)
x2 = np.random.random(581)
y2 = x2[::-1] # Sorry, you're not allowed to do this!
np.testing.assert_allclose(y2, reverse_array(x2))
|
assignments/A5/A5_Q1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook serves to make some simple plots of the 1) losses and 2) entities and relations following training with the PyKEEN pipeline.
# +
import os
import sys
import time
import numpy as np
import pykeen
from matplotlib import pyplot as plt
from pykeen.pipeline import pipeline
from pykeen.triples import TriplesFactory
# -
# %config InlineBackend.figure_format = 'svg'
print(sys.version)
print(pykeen.get_version(with_git_hash=True))
print(time.asctime())
# ## Toy Example
#
# Following the discussions proposed in https://github.com/pykeen/pykeen/issues/97, a very small set of triples are trained and visualized.
# +
triples = '''
Brussels locatedIn Belgium
Belgium partOf EU
EU hasCapital Brussels
'''.strip()
triples = np.array([triple.split('\t') for triple in triples.split('\n')])
tf = TriplesFactory.from_labeled_triples(triples=triples)
# -
# Training with default arguments
results = pipeline(
training=tf,
testing=tf,
model = 'TransE',
model_kwargs=dict(embedding_dim=2),
training_kwargs=dict(use_tqdm_batch=False),
evaluation_kwargs=dict(use_tqdm=False),
random_seed=1,
device='cpu',
)
results.plot(er_kwargs=dict(plot_relations=True))
plt.savefig(os.path.expanduser('~/Desktop/toy_1.png'), dpi=300)
# Training with slower learning and more epochs
results = pipeline(
training=tf,
testing=tf,
model = 'TransE',
model_kwargs=dict(embedding_dim=2),
optimizer_kwargs=dict(lr=1.0e-1),
training_kwargs=dict(num_epochs=128, use_tqdm_batch=False),
evaluation_kwargs=dict(use_tqdm=False),
random_seed=1,
device='cpu',
)
results.plot(er_kwargs=dict(plot_relations=True))
plt.savefig(os.path.expanduser('~/Desktop/toy_2.png'), dpi=300)
# Training with appropriate softplus
toy_results = pipeline(
training=tf,
testing=tf,
model='TransE',
loss='softplus',
model_kwargs=dict(embedding_dim=2),
optimizer_kwargs=dict(lr=1.0e-1),
training_kwargs=dict(num_epochs=128, use_tqdm_batch=False),
evaluation_kwargs=dict(use_tqdm=False),
random_seed=1,
device='cpu',
)
toy_results.plot(er_kwargs=dict(plot_relations=True))
plt.savefig(os.path.expanduser('~/Desktop/toy_3.png'), dpi=300)
# ## Benchmark Dataset Example
nations_results = pipeline(
dataset='Nations',
model='TransE',
model_kwargs=dict(embedding_dim=8),
optimizer_kwargs=dict(lr=1.0e-1),
training_kwargs=dict(num_epochs=80, use_tqdm_batch=False),
evaluation_kwargs=dict(use_tqdm=False),
random_seed=1,
device='cpu',
)
nations_results.plot(er_kwargs=dict(plot_relations=True))
# Filter the ER plot down to a specific set of entities and relations
nations_results.plot_er(
relations={'treaties'},
apply_limits=False,
plot_relations=True,
);
|
notebooks/Pipeline Plots Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] id="oXpxFPlMirr0"
# # Varying Noise Experiment
# In this notebook, we test our algorithm in setting where there is a lot of noise variance. We then compare our results to other methods on the same task.
# + cellView="form" colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 87726, "status": "ok", "timestamp": 1649778758315, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="4cqcUgjzirr2" outputId="b680bf53-f23b-4ab2-d16a-8e68a8fa7c04"
# #@title Run this to setup libraries and mount drive
# # %load_ext autoreload
# # swig required for building wheel for orthnet package
# # !apt-get install swig
# # !pip3 install orthnet
# # !pip install GPUtil
# # !pip3 install cox
# # !pip install tensorboardX
# # !pip install delphi.ai --no-cache-dir
# # mount google drive locally onto colab
# from google.colab import drive
# drive.mount('/content/drive', force_remount=True)
import sys
sys.path.append('/Users/patroklos/Desktop/delphi_')
import subprocess
import torch as ch
from torch import Tensor
import torch.linalg as LA
from torch.distributions import Uniform
from torch.distributions.multivariate_normal import MultivariateNormal
import pandas as pd
import numpy as np
import csv
import json
from cox.store import Store
from cox.readers import CollectionReader
import os
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
import math
import IPython
from delphi.stats.truncated_linear_regression import TruncatedLinearRegression
from delphi import oracle
from delphi.utils.helpers import Parameters
# commands and arguments
COMMAND = 'Rscript'
PATH2SCRIPT = './truncreg.R'
TMP_FILE = 'tmp.csv'
RESULT_FILE = 'result.csv'
# mean squared error loss
mse_loss = ch.nn.MSELoss(reduction='mean')
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1649778758317, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="3ivyYgVIirr5"
args = Parameters({
"samples": 5000, # number of samples to generate for ground-truth
"c": 0, # left truncation parameter
"trials": 20,
"x_lower": -10, # lower bound for generating input features
"x_upper": 10, # upper bound for generating input features
"lower": -1, # lower bound for generating ground-truth
"upper": 1, # lower bound for generating ground-truth
'var': 10, # maximum variance to use in procedure
'fit_intercept': True,
'exp': 'revival',
'out_dir': 'drive/MyDrive/Results/trunc_reg/',
})
# + [markdown] id="gMT3FjBfirr6"
# I will first begin, by showing how to use our algorithm in a 1 dimensional setting. I will sample input features from a Uniform distribution, I will also sample my ground-truth from a Uniform distribution, and I will add Gaussian noise with noise variance 10.
# + colab={"base_uri": "https://localhost:8080/", "height": 461} executionInfo={"elapsed": 615, "status": "ok", "timestamp": 1649778758925, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="sVeJPMnWirr6" outputId="318f1b93-7803-41ad-c473-f27ffe73816f"
# distribution for generating feature vectors
d, k = 10, 1  # input and output dimensions
W = Uniform(args.lower, args.upper)      # distribution for ground-truth weights
M = Uniform(args.x_lower, args.x_upper)  # distribution for input features
# Left-truncation oracle. Fix: the truncation parameter is defined as
# lowercase "c" in the ``args`` Parameters above, so ``args.C`` failed to
# resolve; use ``args.c``.
phi = oracle.Left_Regression(Tensor([args.c]))
# phi = oracle.Identity()
# generate ground truth
gt = ch.nn.Linear(in_features=d, out_features=k)
gt.weight = ch.nn.Parameter(W.sample(ch.Size([k, d])))
# gt.bias = ch.nn.Parameter(ch.ones(1, 1)) if args.bias else None
gt.bias = ch.nn.Parameter(W.sample(ch.Size([1, 1]))) if args.fit_intercept else None
weight, intercept = gt.weight, gt.bias
print(f"gt weight: {weight}")
if args.fit_intercept:
    print(f"gt intercept: {intercept}")
# create base classifier
with ch.no_grad():
# generate data
X = M.sample(ch.Size([args.samples, d])) if isinstance(M, Uniform) else M.sample(ch.Size([args.samples]))
y = gt(X)
noise_var = Tensor([1.0])[...,None]
# remove synthetic data from the computation graph
with ch.no_grad():
# add noise to ground-truth pedictions
noised = y + ch.sqrt(noise_var) * ch.randn(X.size(0), 1)
# truncate based off of the standardized data
indices = phi(noised).nonzero()[:,0]
y_trunc, x_trunc = noised[indices], X[indices]
print('y trunc size: {}'.format(y_trunc.size()))
print('x truncc size: {}'.format(x_trunc.size()))
alpha = y_trunc.size(0) / y.size(0)
print(f"alpha: {alpha}")
# normalize input features
x_trunc_mu = x_trunc.mean(0)
x_trunc_centered = x_trunc - x_trunc_mu
l_inf = LA.norm(x_trunc_centered, dim=-1, ord=float('inf')).max()
beta = l_inf * (X.size(1) ** .5)
x_trunc_norm_centered = x_trunc_centered / beta
X_norm_centered = (X - x_trunc_mu) / beta
y_trunc_mu = y_trunc.mean(0)
y_trunc_centered = y_trunc - y_trunc_mu
noised_centered = noised - y_trunc_mu
# ground-truth OLS
gt_ols = LinearRegression(fit_intercept=args.fit_intercept)
gt_ols.fit(X_norm_centered, noised_centered)
if args.fit_intercept:
gt_ = ch.from_numpy(np.concatenate([gt_ols.coef_.flatten(), gt_ols.intercept_]))
print("gt ols coef: ", gt_ols.coef_)
print("gt ols intercept: ", gt_ols.intercept_)
else:
gt_ = gt_ols.coef_.flatten()
print("gt ols coef: ", gt_ols.coef_)
trunc_ols = LinearRegression(fit_intercept=args.fit_intercept)
trunc_ols.fit(x_trunc_norm_centered, y_trunc_centered)
trunc_ols_pred = trunc_ols.predict(x_trunc_norm_centered)
print("trunc ols coef: ", trunc_ols.coef_)
if args.fit_intercept:
print("trunc ols intercept: ", trunc_ols.intercept_)
if X.size(1) == 1:
# data for plotting regressions
data = np.linspace(X_norm_centered.min(), X_norm_centered.max(), 100).reshape(100, 1)
ax = plt.subplot(1, 1, 1)
plt.scatter(X_norm_centered, noised_centered)
plt.scatter(x_trunc_norm_centered, y_trunc_centered)
plt.plot(data, gt_ols.predict(data), color='green', label='gt ols')
plt.plot(data, trunc_ols.predict(data), color='red', label='trunc ols')
plt.legend()
plt.title("Empirical and Ground Truth Dataset and Model")
ax.set_xlabel("x")
ax.set_ylabel("y")
emp_noise_var = (y_trunc - trunc_ols_pred).var(0)
print("emp noise var: ", emp_noise_var)
# -
# # Check Truncated Gradients
# Here, I check my gradients with an identity oracle. To show the correctness of my custom gradients.
# +
# Compare the custom truncated-loss gradients against the plain MSE gradient
# at the ground-truth weights; with the truncation oracle they should differ
# in a way consistent with the truncated likelihood.
from delphi.grad import TruncatedMSE, TruncatedUnknownVarianceMSE
# stack weight and bias into a single (d+1, 1) parameter column
weight = ch.cat([gt.weight, gt.bias], axis=1).T
# weight.requires_grad = True
# append a column of ones so the bias is handled by the same matmul
features = ch.cat([X_norm, ch.ones(X_norm.size(0), 1)], axis=1)
pred = features@weight
loss = mse_loss(pred, y)
mse_grad = ch.autograd.grad(loss, weight)[0].flatten()
print('MSE grad: {}'.format(mse_grad))
pred = features@weight
# trunc_loss = TruncatedMSE.apply(pred, y, oracle.Identity(), noise_var, 1000)
trunc_loss = TruncatedMSE.apply(pred, y, phi, noise_var, 10000)
# print(f'trunc loss: {trunc_loss}')
trunc_mse_grad = ch.autograd.grad(trunc_loss, weight)[0].flatten()
print('Trunc MSE grad: {}'.format(trunc_mse_grad))
pred = features@weight
# lambda_ = inverse of the (empirical) noise variance, the reparameterization
# used by the unknown-variance loss
lambda_ = ch.nn.Parameter(ch.Tensor([[emp_noise_var]])).inverse()
# trunc_unknown_loss = TruncatedUnknownVarianceMSE.apply(pred, y, lambda_, oracle.Identity(), 1000)
trunc_unknown_loss = TruncatedUnknownVarianceMSE.apply(pred, y, lambda_, phi, 10000)
# print(f'trunc unknown loss: {trunc_unknown_loss}')
trunc_unknown_mse_grad = ch.autograd.grad(trunc_unknown_loss, weight)[0].flatten()
print('Trunc Unknown Variance MSE grad: {}'.format(trunc_unknown_mse_grad))
pred = features@weight
lambda_ = ch.nn.Parameter(ch.Tensor([[emp_noise_var]])).inverse()
trunc_unknown_loss = TruncatedUnknownVarianceMSE.apply(pred, y, lambda_, phi, 10000)
# gradient with respect to the variance parameter lambda_
trunc_unknown_lambda_grad = ch.autograd.grad(trunc_unknown_loss, lambda_)[0].flatten()
print('Trunc Unknown Variance Lambda grad: {}'.format(trunc_unknown_lambda_grad))
# + [markdown] id="htfzeVveirsA"
# We will now run our procedure for truncated regression with known variance, assuming that the empirical noise variance is the actual noise variance. Since we applied feature transformations to our truncated dataset, we now need to reverse the transformations after the completion of our procedure.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2142, "status": "ok", "timestamp": 1649778791037, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="QLimQrI3irsA" outputId="db66212e-6aad-413f-91a1-a6a90766bfdf"
# Fit the known-variance truncated regression, treating the empirical
# residual variance (emp_noise_var) as if it were the true noise variance.
# NOTE(review): this cell fits on x_trunc_norm / y_trunc (uncentered),
# unlike the later cells which use the centered variants — confirm intended.
train_kwargs = Parameters({
    'phi': phi,
    'alpha': alpha,
    'noise_var': float(emp_noise_var),
    'epochs': 10,
    # 'momentum': .25,
    'verbose': True,
    'trials': 1,
})
known_emp_trunc_reg = TruncatedLinearRegression(train_kwargs)
known_emp_trunc_reg.fit(x_trunc_norm, y_trunc)
# + colab={"base_uri": "https://localhost:8080/", "height": 622} executionInfo={"elapsed": 191, "status": "error", "timestamp": 1649778793161, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="hpCEf73CirsB" outputId="627681b0-17e3-4c78-c3ce-810e000eb8c9"
# Visualize (1-D only) and score the known/empirical-variance fit against
# the ground-truth OLS parameters.
if X.size(1) == 1:
    ax = plt.subplot(1, 1, 1)
    plt.plot(data, trunc_ols.predict(data), color='red', label='ols')
    plt.plot(data, known_emp_trunc_reg.predict(Tensor(data)).detach().numpy(), label='known', color='blue')
    plt.plot(data, gt_ols.predict(data), color='green', label='gt')
    plt.scatter(X_norm, noised, label='entire dataset')
    plt.scatter(x_trunc_norm, y_trunc, label='truncated dataset')
    plt.legend()
    plt.title("Empirical Known Noise Variance Results - UnNormalized")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
# parameter MSE versus the ground-truth OLS parameter vector gt_
known_emp_ = ch.cat([known_emp_trunc_reg.coef_.flatten(), known_emp_trunc_reg.intercept_])
loss = mse_loss(known_emp_, gt_)
print(f'mse loss: {loss}')
# + [markdown] id="MAwZxXAKirsC"
# We now run our procedure for truncated regression with known variance, assuming that we are given the ground-truth noise variance.
# -
# Open a local store for the debug logs produced by the next training run.
store = Store('/Users/patroklos/Desktop/exp/id_debug')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9269, "status": "ok", "timestamp": 1649779026800, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="nL4aV6gGirsD" outputId="e6b2bb57-b2d0-4bab-eb64-d9fd4eff2588"
# Standardize the truncated responses by the (known) true noise variance and
# fit the known-variance truncated regression on the centered features.
y_trunc_scale = y_trunc_centered / ch.sqrt(noise_var)
# BUG FIX: the truncation threshold must undergo the same affine transform as
# y, i.e. (left - mu) / sigma.  The original expression was missing the
# parentheses, so only y_trunc_mu was divided by sqrt(noise_var) — compare
# with phi_emp_scale / emp_stand_phi in the other cells of this notebook.
phi_scale = oracle.Left_Regression((phi.left - y_trunc_mu) / ch.sqrt(noise_var))
train_kwargs = Parameters({
    'phi': phi_scale,
    'alpha': alpha,
    # after standardization the effective noise variance is 1
    # 'noise_var': float(noise_var),
    'noise_var': 1.0,
    'epochs': 5,
    'trials': 3,
    'batch_size': 1,
    # 'step_lr_gamma': 1.0,
    'verbose': True,
    # 'lr': 0.0,
    # 'momentum': .9,
    'fit_intercept': args.fit_intercept
})
known_trunc_reg = TruncatedLinearRegression(train_kwargs)
known_trunc_reg.fit(x_trunc_norm_centered, y_trunc_scale)
# -
# undo the sqrt(noise_var) scaling to recover coefficients on the y scale
w = known_trunc_reg.coef_*ch.sqrt(noise_var)
w0 = known_trunc_reg.intercept_*ch.sqrt(noise_var)
known_ = ch.cat([w, w0]).flatten()
# + colab={"base_uri": "https://localhost:8080/", "height": 493} executionInfo={"elapsed": 734, "status": "error", "timestamp": 1649779032758, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="TlnlULWuirsE" outputId="f6fb92e8-ef67-4476-ad18-39c225d00800"
# Plot the known-variance fit (1-D only), score it against ground truth,
# then read back the training/validation loss curves from the debug store.
if X.size(1) == 1:
    ax = plt.subplot(1, 1, 1)
    plt.scatter(X_norm_centered, noised_centered, label='entire dataset', alpha=.75)
    plt.scatter(x_trunc_norm_centered, y_trunc_centered, label='truncated dataset', alpha=.75)
    plt.plot(X_norm_centered, trunc_ols.predict(X_norm_centered), color='r', label='ols')
    plt.plot(X_norm_centered, gt_ols.predict(X_norm_centered), color='green', label='gt')
    plt.plot(X_norm_centered, known_trunc_reg.predict(X_norm_centered), label='known', color='blue')
    plt.legend()
    plt.title("Known Noise Variance - UnNormalized")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    plt.show()
loss = mse_loss(known_, gt_)
print(f'mse loss: {loss}')
# -
store.close()
# load the per-epoch loss logs written during training
reader = CollectionReader('/Users/patroklos/Desktop/exp/trunc_reg_debug_')
logs = reader.df('logs')
reader.close()
logs.head()
sns.lineplot(data=logs, x='epoch', y='val_loss', label='Val Loss')
sns.lineplot(data=logs, x='epoch', y='train_loss', label='Train Loss')
# + [markdown] id="QfeFBu4oirsE"
# Truncated Regression with Unknown Noise Variance
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 7347, "status": "ok", "timestamp": 1649697736730, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="lEgsoberirsF" outputId="019105d9-449f-487a-92bb-ce23e5f69d99"
# Truncated regression with UNKNOWN noise variance: standardize by the
# empirical residual variance, fit, then rescale the learned parameters
# back to the original y scale.
y_trunc_emp_scale = y_trunc_centered / ch.sqrt(emp_noise_var)
# threshold transformed with the same shift/scale as the responses
phi_emp_scale = oracle.Left_Regression((phi.left - y_trunc_mu) / ch.sqrt(emp_noise_var))
train_kwargs = Parameters({
    'phi': phi_emp_scale,
    'alpha': alpha,
    'epochs': 10,
    'trials': 3,
    'batch_size': 10,
    'lr': 1e-1,
    'var_lr': 1e-2,
    # 'step_lr_gamma': 1.0,
    'momentum': .5,
    'verbose': True,
    'fit_intercept': args.fit_intercept,
})
unknown_trunc_reg = TruncatedLinearRegression(train_kwargs)
unknown_trunc_reg.fit(x_trunc_norm_centered, y_trunc_emp_scale)
# -
# map coefficients back to the unstandardized response scale
w = unknown_trunc_reg.coef_*ch.sqrt(emp_noise_var)
w0 = unknown_trunc_reg.intercept_ * ch.sqrt(emp_noise_var)
unknown_ = ch.cat([w.flatten(), w0])
# + colab={"base_uri": "https://localhost:8080/", "height": 313} executionInfo={"elapsed": 449, "status": "ok", "timestamp": 1649697738902, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="0M_DqiiuirsF" outputId="53ec10c3-557b-4381-ef6a-a7480208ac63"
# Visualize (1-D only) and score the unknown-variance fit against the
# ground-truth OLS parameters.
if X.size(1) == 1:
    ax = plt.subplot(1, 1, 1)
    plt.scatter(X_norm_centered, noised_centered, label='entire dataset', alpha=.75)
    plt.scatter(x_trunc_norm_centered, y_trunc_centered, label='truncated dataset', alpha=.75)
    plt.plot(X_norm_centered, trunc_ols.predict(X_norm_centered), label='ols', color='red')
    plt.plot(X_norm_centered, gt_ols.predict(X_norm_centered), color='g', label='gt')
    plt.plot(X_norm_centered, unknown_trunc_reg.predict(X_norm_centered) * ch.sqrt(emp_noise_var), color='blue', label='unknown')
    plt.legend()
    ax.set_title("Unknown Noise Variance")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    plt.show()
loss = mse_loss(unknown_, gt_)
print(f'mse loss: {loss}')
# + [markdown] id="zw80XoNLirsG"
# # Generate Ground-Truth and Run Experiment
# + executionInfo={"elapsed": 159, "status": "ok", "timestamp": 1649697743116, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="fohS234airsG"
# experiment parameters
# weight prior range and problem dimensions (d features, k outputs)
w_lower, w_upper = -1, 1
d, k = 10, 1
# distribution for generating feature vectors
W = Uniform(w_lower, w_upper)
dist = Uniform(-5, 5)
# left-truncation oracle: only samples with y > 0 are observed
phi = oracle.Left_Regression(0.0)
def gen_ground_truth():
    """Sample a random linear model and draw a dataset (X, y) from it.

    Relies on the module-level globals ``d``, ``k`` (dimensions), ``W``
    (prior over weights/bias), ``dist`` (feature distribution) and
    ``args`` (``bias`` flag, ``samples`` count).  Returns the feature
    matrix ``X`` and the noiseless responses ``y = model(X)``.
    """
    # draw a random ground-truth linear model
    model = ch.nn.Linear(in_features=d, out_features=k)
    model.weight = ch.nn.Parameter(W.sample(ch.Size([k, d])))
    if args.bias:
        model.bias = ch.nn.Parameter(W.sample(ch.Size([1, 1])))
    else:
        model.bias = None
    # sample features and evaluate the model without tracking gradients
    with ch.no_grad():
        if isinstance(dist, Uniform):
            X = dist.sample(ch.Size([args.samples, d]))
        else:
            X = dist.sample(ch.Size([args.samples]))
        y = model(X)
    return X, y
# + colab={"base_uri": "https://localhost:8080/", "height": 546} executionInfo={"elapsed": 8451, "status": "error", "timestamp": 1649698152046, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="iL4PUkN_irsG" outputId="88d5609c-7bc0-4e47-fcad-ee353578230f"
# create store and add table
# Each row of the table records one (trial, noise_var) configuration, with
# r2 / parameter-error / variance-error / wall-time columns for every method.
store = Store(args.out_dir + args.exp)
store.add_table(args.table_name, {
    'ols_r2': float,
    'ols_param_mse': float,
    'ols_var_l1': float,
    'known_emp_r2': float,
    'known_emp_param_mse': float,
    'known_emp_time': int,
    'known_r2': float,
    'known_param_mse': float,
    'known_time': int,
    'unknown_r2': float,
    'unknown_param_mse': float,
    'unknown_var_l1': float,
    'unknown_time': int,
    'trunc_reg_r2': float,
    'trunc_reg_param_mse': float,
    'trunc_reg_var_mse': float,
    'trunc_reg_time': int,
    'alpha': float,
    'num_samples': int,
    'noise_var': float,
})
# store.add_table('poor_results', {
#     'train_nll': float,
#     'val_nll': float,
#     'converge': bool,
#     'noise_var': int,
# })
# counters for diagnosing badly-converged unknown-variance runs
large_grad = 0
large_mse = 0
phi = oracle.Left_Regression(ch.zeros(1))
# Main experiment loop: for each trial draw a fresh ground-truth model, then
# sweep the noise variance comparing naive OLS, the truncated-regression
# procedures (known / unknown variance) and R's `truncreg` baseline.
for i in range(args.trials):
    # generate ground truth
    X, y = gen_ground_truth()
    for noise_var in range(1, args.var + 1):
        noise_var = Tensor([noise_var])[None,...]
        # add noise to ground-truth predictions
        noised = y + ch.sqrt(noise_var) * ch.randn(X.size(0), 1)
        # truncate based off of the standardized data
        indices = phi(noised).flatten().nonzero(as_tuple=False).flatten()
        y_trunc, x_trunc = noised[indices], X[indices]
        alpha = y_trunc.size(0) / args.samples
        # mean for dependent variable
        y_trunc_mu = y_trunc.mean(0)
        # normalize input features
        # NOTE(review): X is rescaled in place, so the scaling compounds
        # across noise levels; benign for the refit OLS below, but confirm
        # it is intentional.
        l_inf = LA.norm(x_trunc, dim=-1, ord=float('inf')).max()
        beta = l_inf * (x_trunc.size(1) ** .5)
        X /= beta
        x_trunc /= beta
        # ground-truth ols (fit on the full dataset)
        gt_ols = LinearRegression()
        gt_ols.fit(X, noised)
        gt_params = ch.cat([Tensor(gt_ols.coef_).T, Tensor(gt_ols.intercept_)[..., None]]).flatten()
        # empirical linear regression on the truncated sample only
        ols = LinearRegression()
        ols.fit(x_trunc, y_trunc)
        ols_var = (y_trunc - ols.predict(x_trunc)).var(0)
        ols_params = ch.cat([Tensor(ols.coef_).T, Tensor(ols.intercept_)[..., None]]).flatten()
        # check r2 for entire dataset
        ols_pred = ols.predict(X)
        # ols results
        store[args.table_name].update_row({
            'ols_r2': r2_score(noised.flatten(), ols_pred.flatten()),
            'ols_var_l1': ch.abs(ols_var - noise_var),
            'ols_param_mse': mse_loss(ols_params, gt_params),
        })
        """
        Run dataset on truncated regression with known variance, while
        assuming that the empirical noise variance is the underlying noise
        variance of our linear regression. This means that we want to standardize
        our dependent variable by the empirical noise variance.
        """
        # variance of the residuals
        # standardize y trunc by the empirical noise variance
        emp_stand_y_trunc = (y_trunc - y_trunc_mu) / ch.sqrt(ols_var)
        # standardize noised by the empirical noise variance
        emp_stand_noised = noised / ch.sqrt(ols_var)
        emp_stand_phi = oracle.Left_Regression((phi.left - y_trunc_mu) / ch.sqrt(ols_var))
        st = datetime.datetime.now()
        train_kwargs = Parameters({
            'phi': emp_stand_phi,
            'alpha': alpha,
            # BUG FIX: use this iteration's empirical residual variance
            # (ols_var) — the standardizer of emp_stand_y_trunc above — not
            # the stale module-level emp_noise_var from an earlier cell.
            'noise_var': float(ols_var),
            'epochs': 5,
            'momentum': .25,
            'verbose': True,
            'num_trials': 1,
        })
        known_emp_trunc_reg = TruncatedLinearRegression(train_kwargs)
        known_emp_trunc_reg.fit(x_trunc, emp_stand_y_trunc)
        total_time = int((datetime.datetime.now() - st).total_seconds())
        # BUG FIX: rescale by ols_var for the same reason as above.
        w, w0 = (known_emp_trunc_reg.coef_ * ch.sqrt(ols_var)), known_emp_trunc_reg.intercept_[..., None] * ch.sqrt(ols_var) + y_trunc_mu
        known_emp_params = ch.cat([w, w0], dim=1).flatten()
        # known emp results
        store[args.table_name].update_row({
            'known_emp_r2': r2_score(noised.flatten(), X@w.T + w0),
            'known_emp_param_mse': mse_loss(known_emp_params, gt_params),
            'known_emp_time': total_time,
        })
        """
        Run dataset on truncated regression with known variance. This means that we want to standardize
        our dependent variable by the true noise variance.
        """
        # standardize y trunc by actual noise variance
        stand_y_trunc = (y_trunc - y_trunc_mu) / ch.sqrt(noise_var)
        # standardize noised by actual noise variance
        stand_phi = oracle.Left_Regression((phi.left - y_trunc_mu) / ch.sqrt(noise_var))
        st = datetime.datetime.now()
        train_kwargs = Parameters({
            'phi': stand_phi,
            'alpha': alpha,
            'noise_var': 1.0,
            'epochs': 5,
            'momentum': .25,
            'verbose': True,
            'num_trials': 1,
        })
        known_trunc_reg = TruncatedLinearRegression(train_kwargs)
        known_trunc_reg.fit(x_trunc, stand_y_trunc)
        total_time = int((datetime.datetime.now() - st).total_seconds())
        w, w0 = (known_trunc_reg.coef_ * ch.sqrt(noise_var)), known_trunc_reg.intercept_[..., None] * ch.sqrt(noise_var) + y_trunc_mu
        known_params = ch.cat([w, w0], dim=1).flatten()
        # known results
        store[args.table_name].update_row({
            'known_r2': r2_score(noised.flatten(), X@w.T + w0),
            'known_param_mse': mse_loss(gt_params, known_params),
            'known_time': total_time,
        })
        # truncated regression with UNKNOWN noise variance
        st = datetime.datetime.now()
        train_kwargs = Parameters({
            'phi': emp_stand_phi,
            'alpha': alpha,
            'epochs': 5,
            'momentum': .25,
            'verbose': True,
            'num_trials': 1,
        })
        unknown_trunc_reg = TruncatedLinearRegression(train_kwargs)
        unknown_trunc_reg.fit(x_trunc, emp_stand_y_trunc)
        total_time = int((datetime.datetime.now() - st).total_seconds())
        # learned variance is relative to the standardized scale -> rescale
        unknown_var = unknown_trunc_reg.variance * ols_var
        w, w0 = (unknown_trunc_reg.coef_ * ch.sqrt(ols_var)), (unknown_trunc_reg.intercept_ * ch.sqrt(ols_var) + y_trunc_mu)[...,None]
        unknown_params = ch.cat([w, w0], dim=1).flatten()
        unknown_param_mse = mse_loss(gt_params, unknown_params)
        # diagnostics for runs where the unknown-variance fit is far off
        if unknown_param_mse >= 2.0:
            large_mse += 1
            for x, y_ in unknown_trunc_reg.val_loader:
                val_nll = unknown_trunc_reg.trunc_reg.calc_nll(x, y_)
                grad = unknown_trunc_reg.trunc_reg.calc_grad(x, y_)
                converge = (grad <= args.tol).all()
            for x, y_ in unknown_trunc_reg.train_loader:
                train_nll = unknown_trunc_reg.trunc_reg.calc_nll(x, y_)
                break
            # store['poor_results'].update_row({
            #     'train_nll': train_nll,
            #     'val_nll': val_nll,
            #     'converge': converge,
            #     'noise_var': noise_var
            # })
            # store['poor_results'].flush_row()
        # unknown-variance results
        store[args.table_name].update_row({
            'unknown_r2': r2_score(noised.flatten(), X@w.T + w0),
            'unknown_param_mse': mse_loss(gt_params, unknown_params),
            'unknown_var_l1': float(ch.abs(unknown_var - noise_var)),
            'unknown_time': total_time,
        })
        # spawn subprocess to run truncreg experiment
        concat = ch.cat([x_trunc, y_trunc], dim=1).numpy()
        """
        DATA FORMAT:
        -First n-1 columns are independent variables
        -nth column is dependent variable
        """
        concat_df = pd.DataFrame(concat)
        concat_df.to_csv(args.out_dir + '/' + TMP_FILE) # save data to csv
        """
        Arguments
        - c - truncation point (float)
        - dir - left or right -> type of truncation (str)
        """
        cmd = [COMMAND, PATH2SCRIPT] + [str(0), str(d), 'left', args.out_dir]
        # check_output will run the command and store the result
        st = datetime.datetime.now()
        result = subprocess.check_output(cmd, universal_newlines=True)
        total_time = int((datetime.datetime.now() - st).total_seconds())
        trunc_res = Tensor(pd.read_csv(args.out_dir + '/' + RESULT_FILE)['x'].to_numpy())
        # truncreg returns [intercept, coefs..., sigma]
        trunc_reg_params = ch.cat([trunc_res[1:-1].flatten(), trunc_res[0][None,...]])
        trunc_reg_pred = X@trunc_reg_params[:-1] + trunc_reg_params[-1]
        # truncreg results
        store[args.table_name].update_row({
            'trunc_reg_r2': r2_score(noised.flatten(), trunc_reg_pred.flatten()),
            'trunc_reg_param_mse': mse_loss(trunc_reg_params, gt_params),
            'trunc_reg_var_mse': float(ch.abs(trunc_res[-1].pow(2)[None,...] - noise_var)),
            'trunc_reg_time': total_time,
        })
        # add additional exp data to store
        store[args.table_name].update_row({
            # BUG FIX: alpha is a python float here (int / int), so the
            # original float(alpha.flatten()) raised AttributeError; also
            # coerce noise_var to float to match the declared table schema.
            'alpha': float(alpha),
            'num_samples': x_trunc.size(0),
            'noise_var': float(noise_var),
        })
        # append row to table
        store[args.table_name].flush_row()
        IPython.display.clear_output()
store.close()
# + id="j3-Z9urWirsI" outputId="9997bf90-715d-426a-89ed-08f03c9483ed"
# Load the experiment logs back from the store and plot parameter error and
# variance error as a function of the true noise variance.
reader = CollectionReader(args.out_dir + '/' + args.exp)
logs = reader.df(args.table_name)
# poor_results = reader.df('poor_results')
reader.close()
logs.head(10)
# + id="ZH6U1qxOirsI"
# combined "epsilon" error = parameter error + variance error
logs['unknown_epsilon'] = logs['unknown_param_mse'] + logs['unknown_var_l1']
logs['trunc_reg_epsilon'] = logs['trunc_reg_param_mse'] + logs['trunc_reg_var_mse']
logs['ols_epsilon'] = logs['ols_param_mse'] + logs['ols_var_l1']
# + id="RWqovbKDirsI"
# take square roots so the *_param_mse columns read as l2 distances
logs['unknown_param_mse'] = logs['unknown_param_mse'] **(1/2)
logs['known_param_mse'] = logs['known_param_mse'] **(1/2)
logs['ols_param_mse'] = logs['ols_param_mse'] **(1/2)
# known_emp_logs['known_emp_param_mse'] = known_emp_logs['known_emp_param_mse'] **(1/2)
logs['trunc_reg_param_mse'] = logs['trunc_reg_param_mse'] **(1/2)
# + id="OYAxTUFlirsJ" outputId="11f6dda3-eeeb-402c-ec70-dee9824bdaa1"
sns.lineplot(data=logs, x='noise_var', y='ols_param_mse', label='ols', color='r')
# sns.lineplot(data=known_emp_logs, x='noise_var', y='known_emp_param_mse', color='purple', label='known - $\sigma_{0}^{2}$')
sns.lineplot(data=logs, x='noise_var', y='known_param_mse', label='known - $\sigma^{*2}$', color='blue')
sns.lineplot(data=logs, x='noise_var', y='trunc_reg_param_mse', label='truncreg', color='orange')
ax = sns.lineplot(data=logs, x='noise_var', y='unknown_param_mse', label='unknown', color='green')
ax.set(xlabel='$\sigma^{*2}$', ylabel='$||\hat w - w^{*}||_{2}$')
ax.set_title(args.exp + " : Model Weights")
ax.set_title("10 Dimensions: Model Weights")
plt.show()
# second figure: variance estimation error per method
sns.lineplot(data=logs, x='noise_var', y='ols_var_l1', label='ols', color='red')
sns.lineplot(data=logs, x='noise_var', y='trunc_reg_var_mse', label='truncreg', color='orange')
ax = sns.lineplot(data=logs, x='noise_var', y='unknown_var_l1', label='unknown', color='green')
ax.set(xlabel='$\sigma^{*2}$', ylabel='$|\hat \sigma^{2} - \sigma^{*2}|$')
ax.set_title("10 Dimensions : Variance")
# + id="ehM4afVBirsJ"
# Bootstrap (100 resamples over the trials) the median and a 95% confidence
# interval of the OLS parameter/variance errors at each noise level.
alpha = 5.0
lower_p = (alpha / 2.0) / 100
upper_p = ((100 - alpha) + (alpha / 2.0)) / 100
trunc_reg_w_scores, trunc_reg_var_scores = [], []
unknown_w_score, unknown_var_scores = [], []
known_scores = []
ols_scores = {}
for var in range(1, args.var + 1):
    trunc_reg_w_scores, trunc_reg_var_scores = [], []
    unknown_w_score, unknown_var_scores = [], []
    known_scores = []
    ols_w_scores, ols_var_scores = [], []
    temp_logs = logs[logs.noise_var == var]
    for i in range(100):
        # # bootstrap sample
        # indices = randint(0, args.trials, args.trials)
        # sample args.trials rows with replacement and average each metric
        ols_w_scores.append(temp_logs.ols_param_mse.reset_index().loc[list(ch.randint(args.trials, (args.trials,)))].ols_param_mse.mean())
        ols_var_scores.append(temp_logs.ols_var_l1.reset_index().loc[list(ch.randint(args.trials, (args.trials,)))].ols_var_l1.mean())
        # trunc_reg_w_scores.append()
        # break
    ols_w_scores, ols_var_scores = Tensor(ols_w_scores), Tensor(ols_var_scores)
    w_median = ols_w_scores.median()
    var_median = ols_var_scores.median()
    # calculate 95% confidence interval (100 - alpha)
    w_lower, w_upper = ch.quantile(ols_w_scores, lower_p), ch.quantile(ols_w_scores, upper_p)
    var_lower, var_upper = ch.quantile(ols_var_scores, lower_p), ch.quantile(ols_var_scores, upper_p)
    ols_scores['w_' + str(var)] = (float(w_median), float(w_lower), float(w_upper))
    ols_scores['var_' + str(var)] = (float(var_median), float(var_lower), float(var_upper))
# + id="_1cZOzk1irsK" outputId="6414a843-44d8-439e-e5fb-47da4e47c9c3"
temp_logs.ols_param_mse.reset_index().loc[list(ch.randint(args.trials, (args.trials,)))].ols_param_mse
# + id="2dK91owTirsL" outputId="bff6e6c3-1b47-446d-aabc-59483496ddcc"
ols_scores
# + id="zluTnQw-irsL"
# NOTE(review): this cell is broken — ols_scores values are tuples of floats
# (set above), so .mean() raises AttributeError and ch.quantile on a tuple
# fails; it appears to be leftover scratch and is superseded by the loop above.
alpha = 5.0
ols_ = {}
for key in ols_scores.keys():
    median = ols_scores[key].mean()
    # calculate 95% confidence interval (100 - alpha)
    lower_p = alpha / 2.0
    lower = ch.quantile(ols_scores[key], lower_p / 100)
    upper_p = (100 - alpha) + (alpha / 2.0)
    upper = ch.quantile(ols_scores[key], upper_p / 100)
# + id="ZJ7dTX9NirsL" outputId="9ca571ea-16bf-4cd6-c488-61d0b78680b7"
median, lower, upper
# + id="p79q1REzirsL" outputId="0334e0c1-c49e-4110-8671-185b3c2bf6c1"
# + id="0kGw1hhiirsM"
# Scratch cells: ad-hoc inspection of normalization, fitted parameters and
# intermediate tensors from the runs above.
import torch.linalg as LA
l_inf = LA.norm(X, dim=-1, ord=float('inf')).max()
beta = l_inf * X.size(1) ** (.5)
# + id="fdmfA7IlirsM"
x_norm = X / beta
# + id="pYEPETXEirsM" outputId="3a4eda8d-7375-4711-a908609ddf1f"
gt_norm = LinearRegression()
gt_norm.fit(x_norm, y)
# + id="JMmtG2QIirsO"
norm_w = np.concatenate([gt_norm.coef_, gt_norm.intercept_.reshape(-1, 1)], axis=1)
# + id="F5kTbh9lirsO"
w_ = np.concatenate([gt_ols.coef_, gt_ols.intercept_.reshape(-1, 1)], axis=1)
# + id="GnRYx3SUirsO" outputId="a32ff816-8990-4ecb-bc18-082f05b58878"
# ratio between coefficients on normalized vs. raw features
norm_w / w_
# + id="bdTMorzMirsO" outputId="fce49dbf-d993-4b9e-efaf-bc42cdccbd77"
beta
# + id="94ubURPsirsO" outputId="17253010-2b68-494c-ef6c-4726d2024cb1"
poor_results_schema = {
    'train_nll': float,
    'val_nll': float,
    'converge': bool,
    'noise_var': int,
}
# + id="6H-AG--9irsP" outputId="aadb0329-dcd3-4c4a-fd83-3a0177c50f43"
# NOTE(review): gt_unknown_params is not defined anywhere in this notebook;
# this cell presumably relied on a deleted cell.
gt_unknown_params[:,-1]
# + id="UfMqFYxeirsP" outputId="5b9988f1-b1ac-4fd5-bb2b-050d15f77f47"
gt_params
# + id="OoCd5UiAirsP" outputId="22c772b6-ae00-4fe4-8ee8-0168e6f2199f"
known_params
# + id="GphuJCU7irsQ" outputId="9a286b93-8eb2-4d0b-b80a-ff68908e9f66"
unknown_params
# + id="n4mVUxjuirsQ" outputId="424c1290-4276-41da-fee8-ea94b805d000"
ols_params
# + id="bGKDSNLJirsQ" outputId="060865b3-7fd6-4615-cce3-4939f11ec41f"
mse_loss(gt_params, known_params)
# + id="fc0WZhthirsQ" outputId="f60dcdff-a41b-4e57-d32b-da5e8bac989b"
mse_loss(gt_params, ols_params)
# + id="Q_11Dy_kirsR" outputId="be7a709b-96b9-49b4-8895-4265a3eb2de7"
mse_loss(gt_params, unknown_params)
# + id="OUqfB_BOirsS" outputId="aa73204f-c7df-440f-8eb2-5d890739a5f4"
mse_loss(gt_params, trunc_reg_params)
# + id="uPFasuB0irsS" outputId="ab239429-30e0-4ec3-89bd-de8bcef44604"
unknown_var
# + id="ld2qAjUCirsT" outputId="d94bd6d2-4d73-42db-8473-cf80608a3441"
trunc_res[-1].pow(2)[None,...]
# + id="XblaM2ekirsT" outputId="f4cab947-2d66-47a2-ef26-88303e875ee0"
noise_var
# + id="EKatsUo_irsU"
my_list = ch.randn(100000, 1)
# + id="rMOgNOwxirsU" outputId="c1c1ee55-6350-4074-eaa9-048434aa3838"
# !tqdm --version
# + id="ihBaIx9WirsV" outputId="6cb8e921-f89a-40ec-ecc3-12af728dfa08"
noised = y + ch.sqrt(noise_var) * ch.randn(X.size(0), 1)
# + id="dLTUCQmjirsV" outputId="b796476c-89f2-49eb-9739-32f6b75af385"
stand_y_trunc.var()
# + id="ZMvrm-HairsV" outputId="d65de950-a741-4f6b-b6e2-1d94469711aa"
X.size(0)
# + id="eTDfc_HVirsV" outputId="574f38c4-173a-48a3-95bc-413e57c562cd"
noise_var
# + id="7zSXlVuoirsW"
x_, y_ = gen_ground_truth()
# + id="L42BvJpeirsW" outputId="8983fc38-1aca-44ca-ac96-b1b8c8884021"
x_.size(), y_.size()
# + id="RDiqZ7OZirsW" outputId="03e3a128-5d33-4120-837a-6146282f390e"
y.size()
# + id="_g-ZvKNMirsW" outputId="3bc8d6be-221c-4a9a-b5aa-062106d2aa27"
noise_var, stand_y_trunc.var()
# + id="JSNSbGabirsX" outputId="57ed9231-0fc3-4ec2-bb70-9913cfeea1c7"
known_params
# + id="NGwkKgeSirsX" outputId="76494a66-0ba8-4567-8297-73ec66d708b8"
gt_params
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5167, "status": "ok", "timestamp": 1649699309512, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="rPTRnkK8irsX" outputId="adb4b622-fe50-4679-bbbb-ca8b3df58c1c" language="R"
# install.packages('truncreg')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 158, "status": "ok", "timestamp": 1649699828703, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="qcGY-ARMirsX" outputId="6ddef497-a1c8-45f0-b2a9-a979f42e3510"
# Move the truncated dataset into the embedded R session via rpy2 magics,
# so the R `truncreg` baseline can be run on the same data.
# %load_ext rpy2.ipython
X_ = x_trunc.numpy()
# %R -i X_
y_ = y_trunc.numpy()
# %R -i y_
# + colab={"base_uri": "https://localhost:8080/", "height": 824} executionInfo={"elapsed": 316, "status": "error", "timestamp": 1649700286393, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="jMM6GW8OpJSR" outputId="d6e9ac1c-4da0-4896-d59c-9ce7ed9fe1c7" language="R"
# library(truncreg)
# X <- as.matrix(X_)
# y <- as.matrix(y_)
# df <- data.frame(X=X, y=y)
# # truncated regression procedure
# trunc_reg <- truncreg(df$y ~ X, data=df, point=0, direction='left', scaled=TRUE)
# # return model coefficients
# # coef_df <- coef(trunc_reg)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2215, "status": "ok", "timestamp": 1649700298807, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="ED9TPlI7r0GF" outputId="05dec1a8-dcec-4066-b90a-eded00f2e155" language="R"
# print(colnames(df))
# dimnames(df)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 187, "status": "ok", "timestamp": 1649699258939, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="ozfBiJj9r_j-" outputId="a91cd73e-ebc4-4501-c19f-9ce780a2057a"
# shapes of the truncated design matrix and responses passed to R
x_trunc.size(), y_trunc.size()
# + executionInfo={"elapsed": 2, "status": "ok", "timestamp": 1649698888535, "user": {"displayName": "<NAME>", "userId": "03550109589279644114"}, "user_tz": 240} id="5z1wXtu-toeX"
from torch.nn import MSELoss
# + id="-VGlt5Hnt1lW"
# -
# shape of the most recent predictions tensor
pred.size()
# +
# Sanity check: SGD on the full (untruncated) data with an identity oracle,
# initialized at the OLS solution; gradients printed per epoch should stay
# near zero if the custom loss is correct.
X_ = ch.cat([X, ch.ones(X.size(0), 1)], axis=1)
# calculate original ols
gt = LinearRegression(fit_intercept=False)
gt.fit(X_.numpy(), y)
weight = ch.nn.Parameter(ch.from_numpy(gt.coef_).T)
opt = ch.optim.SGD([weight], lr=1e-2)
loader = ch.utils.data.DataLoader(ch.utils.data.TensorDataset(X_, y), batch_size=100, shuffle=True)
# iterate over epochs
for i in range(10):
    for batch in loader:
        opt.zero_grad()
        x_, y_ = batch
        pred = x_@weight
        # NOTE(review): earlier cells call TruncatedMSE.apply with a fifth
        # num_samples argument; confirm this four-argument call is valid.
        loss = TruncatedMSE.apply(pred, y_, oracle.Identity(), noise_var)
        loss.backward()
        opt.step()
    print('grad: {}'.format(weight.grad))
# -
gt.coef_
|
notebooks/Varying Noise Variance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spark et MLlib - ML
#
# Régression logistique avec [Spark](https://spark.apache.org/).
# Load the "adult" income dataset, keep only numeric columns, and re-export
# it as headerless tab-separated text so Spark can read it as an RDD.
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# [MLlib](http://spark.apache.org/docs/latest/ml-guide.html) est la librairie de machine learning distribué implémenté sur Spark et qui explique en partie son succès. La première mouture de la librairie était [Mahout](http://mahout.apache.org/) implémentée sur [Hadoop](http://hadoop.apache.org/). [MLlib](http://spark.apache.org/docs/latest/ml-guide.html) est devenu le standard. [ML](http://spark.apache.org/docs/latest/ml-guide.html) est la dernière version et s'appuie sur les [DataFrame](http://spark.apache.org/docs/latest/ml-pipeline.html#dataframe). On retrouve les mêmes concepts que ceux de [scikit-learn](http://scikit-learn.org/) tels que les [Pipeline](http://spark.apache.org/docs/latest/ml-pipeline.html#main-concepts-in-pipelines).
# ## Data
import os
if not os.path.exists("data_adult.txt"):
    from pyquickhelper.filehelper import unzip_files
    unzip_files("data_adult.zip", where_to=".")
assert os.path.exists("data_adult.txt")
import pandas
df = pandas.read_csv("data_adult.txt", sep="\t", encoding="utf-8")
df.head()
df.dtypes
# (column index, dtype) pairs for the non-object (numeric) columns
cols = list(filter(lambda tu: tu[1] != object, zip(range(len(df.columns)), df.dtypes)))
cols
column_keep = set(_[0] for _ in cols)
column_keep
# re-export without header/index so Spark can parse it as plain text lines
df.to_csv("adult.txt", sep="\t", index=False, header=None)
data = sc.textFile("adult.txt")
col = data.take(2)
col
# ## Régression logistique (RDD)
#
# On reprend l'exemple de la documentation :
# [Linear Methods - RDD-based API](http://spark.apache.org/docs/latest/mllib-linear-methods.html). On exclue les variables catégorielles pour garder l'exemple concis.
# +
# BUG FIX: LabeledPoint was only imported in a later cell, so running the
# notebook top-to-bottom made parsedData.collect() below fail with a
# NameError.  Import it here so this cell is self-contained.
from pyspark.mllib.regression import LabeledPoint
def parsePoint(line):
    """Parse one tab-separated line of the adult dataset into a LabeledPoint.

    Keeps only the numeric columns listed in the module-level ``column_keep``
    and uses the last (income) field, ``<=50K``, as the binary target.
    """
    spl = line.split('\t')
    values = [float(x) for i, x in enumerate(spl) if i in column_keep]
    target = float(spl[-1].strip() == "<=50K")
    return LabeledPoint(target, values)
# We prepare the training data (collect() forces evaluation of the lazy RDD)
parsedData = data.map(parsePoint)
parsedData.collect()[:2]
# +
# Train a logistic regression (RDD-based MLlib API, LBFGS solver).
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
# Load and parse the data
def parsePoint(line):
    """Parse one tab-separated line into a LabeledPoint.

    Duplicate of the definition in the previous cell; kept only the numeric
    columns in ``column_keep``, target is ``<=50K`` from the last field.
    """
    spl = line.split('\t')
    values = [float(x) for i, x in enumerate(spl) if i in column_keep]
    target = float(spl[-1].strip() == "<=50K")
    return LabeledPoint(target, values)
# We prepare the training data
parsedData = data.map(parsePoint)
# Build the model
model = LogisticRegressionWithLBFGS.train(parsedData)
# -
# Pendant que ça tourne, il faut regarder la fenêtre terminal qui affiche les messages du serveur de notebook.
# Inspect the fitted model: number of classes, feature count, weight vector.
model.numClasses, model.numFeatures, model.weights
# +
from pyquickhelper.filehelper import remove_folder
def clean(folder):
    """Remove *folder* recursively if it exists.

    Returns whatever ``remove_folder`` reports (the removed entries),
    or an empty list when the folder was not present to begin with.
    """
    if not os.path.exists(folder):
        return []
    return remove_folder(folder)
# wipe any previous checkpoint, then round-trip the model through disk
clean("target/pythonLogisticRegressionWithLBFGSModel")
# Save and load model
model.save(sc, "target/pythonLogisticRegressionWithLBFGSModel")
sameModel = LogisticRegressionModel.load(sc, "target/pythonLogisticRegressionWithLBFGSModel")
# -
# Evaluating the model on training data: pair each true label with the
# model's prediction on the same features.
labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
def filter_error(ys):
    """Return True when a (label, prediction) pair disagrees."""
    label, prediction = ys[0], ys[1]
    return label != prediction
# training error = fraction of mismatched (label, prediction) pairs
trainErr = labelsAndPreds.filter(filter_error).count() / float(parsedData.count())
print("Training Error = " + str(trainErr))
# ## Régression logistique (DataFrame)
#
# On s'inspire de l'exemple :
# [Régression Logistique](http://spark.apache.org/docs/latest/ml-classification-regression.html#logistic-regression). Le code change, la préparation des données aussi. Les modèles acceptent comme entrées un vecteur colonne créé par un [VectorAssembler](http://spark.apache.org/docs/latest/api/python/pyspark.ml.html?highlight=vector#pyspark.ml.feature.VectorAssembler).
# DataFrame-based pipeline: build the Spark DataFrame, create the binary
# target Y, and assemble the numeric columns into a single feature vector.
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("Python Spark SQL basic example").getOrCreate()
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
training = spark.createDataFrame(df)
# binary target: 1.0 when income is " <=50K" (note the leading space)
training = training.withColumn('Y', training.target == " <=50K")
training = training.withColumn("Y", training.Y.astype('float'))
training = training.select(["age", "fnlwgt", "education_num", "capital_gain", "capital_loss",
                            "hours_per_week", "Y"])
# pack the six numeric columns into the single "features" vector column
# expected by Spark ML estimators
assembler = VectorAssembler(
    inputCols=["age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week"],
    outputCol="features")
training = assembler.transform(training)
training.explain()
head = training.take(2)
head
training.schema
training.groupby("Y").count().collect()
from pyspark.ml.classification import LogisticRegression
# +
# Elastic-net regularized logistic regression on the assembled features.
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8, labelCol='Y', featuresCol="features")
# Fit the model
lrModel = lr.fit(training)
# Print the coefficients and intercept for logistic regression
print("Coefficients: " + str(lrModel.coefficients))
print("Intercept: " + str(lrModel.intercept))
# -
# transform() appends prediction/probability columns to the DataFrame
prediction = lrModel.transform(training)
prediction.take(2)
|
_doc/notebooks/spark_local/spark_mllib.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Outline
# #### 1. Design choices and code structure
# #### 2. Demo of usage
# #### 3. Summary of results
# ### Design choices and code structure
#
# The tasks require fast querying of relevant courses, usc or ucla, given a string or course id. Initial tests show that a real-time training/prediction is too slow to be usable.
#
# The system is therefore separated into the following two parts.
#
# 1. Offline part.
# 1. crawler.py : crawl functions to crawl, parse and organize the USC and UCLA websites.
# 2. preprocessor.py: preprocess the description fields into usable informations
#     3. model.py: estimator classes for the trainable algorithms, enabling sklearn-style training and prediction. The training is done on ordered course pairs and the ground truth is taken from a bert model.
# 3. dataPreparer.py: call crawl and preprocessor functions to save useful data structures locally in the `data` directory.
#
# 2. Online part
# 1. api.py: offer usable apis the end user can use to perform the required tasks
#
# **The two parts are decoupled and the offline part only runs to obtain data/model and the online part runs making use of the data and model.**
#
# The user can perform the tasks without knowing how the offline part works.
#
# Notes:
#
# 1. For obtaining ground truth, please refer to the `_pseudoGroundTruth()` function in dataPreparer.py. The t-SNE plot of the ground truth is in the root directory.
# 2. The training with cross-validation is very hard to obtain on laptop. Therefore, I obtain best parameters from small scale cross-validation and used the parameters to train doc2vec and word2vec models with whole datasets.
# 3. The similarity scores obtained on USC datasets from various algorithms are zeroed and normalized to fall in the range [-1,1] roughly. However, it is still not recommended to compare them numerically across algorithms.
# 4. Pre-trained `glove` model is large and requires downloading, not recommended in general. Our trained word2vec model works actually better.
# 5. Only UCLA Computer Science courses are obtained based on the link provided. It is recommended to only query USC *CSCI*, *EE* or other CS-relevant courses to obtain sensible prediction.
# 6. There are some *magic numbers* in the code. For example, description shorter than 140 characters are discarded. This is done by manual intervention.
# 7. The `bert_vecs.npy` is **required** if you want to build all the data/model from scratch.
#
# The t-SNE visualization of USC courses, the pseudo ground truth.
# <img src="bert_tsne.png">
# ### How to use
#
# #### set up an environment and install dependencies
#
# `
# virtualenv cour-sine
# source cour-sine/bin/activate
# pip3 install -r requirements.txt
# `
# #### run this notebook in the root directory
from crawler import *
from preprocessor import *
from model import Model,word2vecEstimator, doc2vecEstimator
from dataPreparer import initPrepare, splitDataSets
from api import API, dataSaver, uclaDataSaver
# %reload_ext autoreload
# %autoreload 2
# If you didn't download the data directory, you should run the following functions to obtain essential data.
# #### Optional, prepare data/model
# dataSaver() process usc data
#dataSaver()
#uclaDataSaver()
"""
This may take long time to prepare all the data/model.
It is recommended to download the json and pickle files in data directory instead.
"""
# #### task 1 (step 5)
api1 = API(model_algo="word2vec")
# obtain an api
api1.stringQuery("Computer science and information technology")
# we obtain the top 10 usc courses
api1.stringQuery("Medical biology and its chemical nature.")
# try another string
api2 = API(model_algo="Jaccard")
api2.stringQuery("Computer science and information technology")
# we obtain the top 10 usc courses but the results now are different. Note the scores are now in a different range.
api2.stringQuery("cancer and neural biology")
# try another string
# #### task 2 (step 7.a)
# need to explicitly import the courses
import json
import numpy as np
with open("data/coursenames.json","r") as fp:
    coursenames = json.load(fp)
count1 = api1.queryPrereqsCIDs(list(coursenames.keys()))
# Let's only look at those courses who have prerequisites
np.mean(np.array(list(filter(lambda x: x >= 0, count1)))>0)
# Try another model
count2 = api2.queryPrereqsCIDs(list(coursenames.keys()))
np.mean(np.array(list(filter(lambda x: x >= 0, count2)))>0)
# #### task 3 (step 7.b)
#
import random
# randomly select 10 courses
courseIDs = random.sample(list(coursenames.keys()),10)
res = api1.querySameSchoolCIDs(courseIDs)
# For each of the selected course, the ratio that related courses are from the same school
for course, ratio in zip(courseIDs, res):
    print(str(ratio*100)+"% of related courses of "+ course + " are from the same school of the course." )
# #### task 4 (step 7.c)
# CSCI courses, looks pretty accurate
for usccourse in ["CSCI570", "CSCI585", "EE457", "CSCI567", "CSCI402"]:
    print(usccourse)
    print(api1.queryUCLACID(usccourse))
# However, the Jaccard method looks bad
for usccourse in ["CSCI570", "CSCI585", "EE457", "CSCI567", "CSCI402"]:
    print(usccourse)
    print(api2.queryUCLACID(usccourse))
# #### Some Visualization and offline part code demonstration
with open("data/pseudoTruthTable.json","r") as fp:
    TRUTHTABLE = json.load(fp)
uids = list(TRUTHTABLE.keys())
random.seed(2019)
random.shuffle(uids)
uids[:3] # paired ids
from dataPreparer import splitDataSets
from preprocessor import Preprocessor
p1 = Preprocessor()
trainSets, testSets = splitDataSets(uids)
model = Model(algo="doc2vec")
uids, desc1, desc2, name1, name2 = model.augmentData(trainSets)
sentences1 = [p1.process(desc) for desc in desc1]
sentences2 = [p1.process(desc) for desc in desc2]
size = 100
# arguments have to be constructed such that it meets the Estimator's standard in order to work with sklearn module.
model.trainCV(uids[:size],[(sentences1[i],sentences2[i]) for i in range(size)])
model.bestParam
# Take a visual impression of the model's performance.
import matplotlib.pyplot as plt
# %matplotlib inline
# Note that for some model, the absolute values are not comparable with the pseudo ground truth at the same scale.
plt.figure(figsize=(18,4))
random.seed(100)
testUIDs = random.sample(testSets,100)
# FIX: corrected typo in the user-facing plot title ("andomly" -> "randomly").
plt.title("Comparison of similarity data on randomly selected 100 course pairs")
plt.plot(np.array(model.predict(testUIDs)),label = "predicted")
plt.plot(np.array([TRUTHTABLE[uid] for uid in testUIDs]),label = "pseudo truth")
_ = plt.legend()
# ### Summary of results
# 1. By visual inspections, most of the functionalities work well.
# 2. The document vector model actually works worse than the word vector model. This may be due to the limited number of total documents.
# 3. About 15% - 20% of total USC courses which have prerequisites have prerequisite courses in their top3 most relevant courses.
# 4. The same-school test shows that most relevant courses do almost all come from same school
# 5. UCLA CS course querying is accurate for typical courses like algorithm, database, machine learning, etc.
|
doc/documentation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Dataset: matches.csv
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss
import keras
from keras import Sequential
from keras.layers import Dense
from keras.utils import plot_model, print_summary
from sklearn.preprocessing import Imputer, StandardScaler
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
# Loading data
data = pd.read_csv("matches.csv")
data.head()
data.toss_decision.unique()
# Encoding data
# Map categorical toss/result columns onto small integer codes.
data.toss_decision = data.toss_decision.map({'bat':1, 'field':0})
data.result.unique()
data.result = data.result.map({'normal':1, 'tie':2, 'no result':0})
# ## field is 0 and bat is 1 <br>
# ## normal is 1, tie is 2, no result is 0
# Drop identifier / free-text columns that are not used as features.
data.drop(columns=['venue', 'player_of_match', 'dl_applied','umpire1','umpire2','umpire3','date','city','season','id'], inplace=True)
data.head()
data.team1.unique()
r = len(data.team2.unique())  # NOTE(review): computed but never used below
teams = data.team1.unique()
mapping = {}
# NOTE(review): hard-codes 14 teams; presumably len(teams) == 14 for this
# dataset — confirm, otherwise this raises IndexError or skips teams.
for i in range(14):
    mapping[teams[i]] = i
mapping
# Encode every team-name column with the same integer mapping.
data.team1 = data.team1.map(mapping)
data.team2 = data.team2.map(mapping)
data.head()
data.toss_winner = data.toss_winner.map(mapping)
data.winner = data.winner.map(mapping)
data.head()
data.isna().sum()
# Missing winners (e.g. abandoned matches) are encoded as team 0.
data.winner.fillna(0, axis=0, inplace=True)
data.isna().sum()
data.winner = data.winner.astype(int)
data.head()
# Scaling win_by_runs and win_by_wickets
data.win_by_runs
# Binarize the victory-margin columns: 1 marks a "strong" win, 0 otherwise.
# FIX: the original mutated via chained indexing (data['col'][i] = ...),
# which pandas may apply to a temporary copy (SettingWithCopyWarning) and
# which loops in Python; the vectorized form is both reliable and O(n) in C.
data['win_by_runs'] = (data['win_by_runs'] >= 20).astype(int)  # Strong team: won by >= 20 runs
data.win_by_runs
data.win_by_wickets
data['win_by_wickets'] = (data['win_by_wickets'] >= 7).astype(int)  # Strong team: won by >= 7 wickets
data.head()
# Extracting features and labels
labels = data.winner.values
features = data.drop(columns=["winner"]).values
features.shape
ndim = features.shape[1]
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, random_state=3, shuffle=True)
len(features_train)
len(labels_test)
# As the out is multiclass, we perform one hot encoding
from keras.utils import to_categorical
labelsc = data.winner.values
featuresc = data.drop(columns=["winner"]).values
# One-hot encode the 14 team labels for the softmax network below.
labelsc = to_categorical(labelsc, num_classes=14)
labelsc[0]
# Building model
# Small fully-connected network: four hidden ReLU layers and a
# 14-way softmax output (one unit per team).
model = Sequential()
model.add(Dense(100, input_dim = featuresc.shape[1], activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(10, activation="relu"))
model.add(Dense(100, activation="relu"))
model.add(Dense(14, activation="softmax"))
model.summary()
model.compile(optimizer="adam", loss=keras.losses.categorical_crossentropy, metrics=["accuracy"])
featuresc_train, featuresc_test, labelsc_train, labelsc_test = train_test_split(featuresc, labelsc, random_state=3, shuffle=True)
len(featuresc_train)
model.fit(featuresc_train, labelsc_train, epochs=200, validation_data=(featuresc_test, labelsc_test), batch_size=100)
model.evaluate(featuresc_test, labelsc_test)
# Round the softmax outputs to get a hard one-hot prediction per row.
pred = np.round(model.predict(featuresc_test))
pred[0]
featuresc_train[0]
mapping
pred[0]
pred[1]
pred[2]
pred[3]
# NOTE(review): one-hot encoding the *feature* matrix in place looks like a
# leftover experiment; featuresc_train is not used for training afterwards.
featuresc_train = to_categorical(featuresc_train, num_classes=14)
featuresc_train.shape[1]
pred[0]
labelsc_test[0]
# Inspect a single test example reshaped to a (1, n_features) batch.
one = features_test[0]
one
one = one.reshape(1,-1)
one.ndim
np.round(model.predict(one))
labelsc_test[0]
mapping
features_train
labels_train
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
# Random-forest baseline trained on the integer-label split.
clf2 = RandomForestClassifier(n_estimators=120)
clf2.fit(features_train, labels_train)
# BUGFIX: the fitted classifier is `clf2`; `clf` was never defined anywhere
# in this notebook, so the original two `clf.predict` calls raised NameError.
pred = clf2.predict(features_test)
pred[0]
one
clf2.predict(one.reshape(1,-1))
np.where(pred != labels_test)
clf2.score(features_test, labels_test)
model.predict(one.reshape(1,-1))
pred = np.round(model.predict(features_test))
pred
np.where(pred!=labelsc_test)
# NOTE(review): this evaluates the keras model on features_test (the split of
# the *integer*-label pipeline) against one-hot labelsc_test — confirm the
# splits line up as intended.
model.evaluate(features_test, labelsc_test)
features_train
labels_train
model.predict_classes(one.reshape(1,-1))
features_train[0]
mapping
labelsc_train
from keras.preprocessing.text import one_hot
# NOTE(review): team1 has already been mapped to integers above, but
# keras' one_hot expects a text string — confirm this call still works.
one_hot(data.team1.values[11],n=5)
from sklearn.preprocessing import LabelEncoder
# Quick demo of LabelEncoder on a toy vocabulary, then on the team names.
encoder = LabelEncoder()
encoder.fit(["ada","sdad"])
encoder.transform(["ada","sdad"])
teams
encoder.fit(teams)
encoder.transform(teams)
y = to_categorical(encoder.transform(teams))
y[0]
y.shape
# NOTE(review): the scaler is fit on the training split only, yet the models
# below are scored on the *unscaled* features_test — verify intent.
features_train = StandardScaler().fit_transform(features_train)
features_train
from sklearn.linear_model import LinearRegression
lg = LinearRegression()
lg.fit(features_train, labels_train)
lg.score(features_test, labels_test)
from sklearn.preprocessing import PolynomialFeatures
p = PolynomialFeatures()
features_train = p.fit_transform(features_train)
to_categorical(data.team1.values).shape
to_categorical(labels_train)[4]
features_train[0]
|
matches-ml-model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # See Through Gradients: Image Batch Recovery via GradInversion
# This notebook shows an example for a **small batch image gradient inversion** as described in "See Through Gradients: Image Batch Recovery via GradInversion".
#
# Paper URL: https://openaccess.thecvf.com/content/CVPR2021/html/Yin_See_Through_Gradients_Image_Batch_Recovery_via_GradInversion_CVPR_2021_paper.html
# **This is only a partial re-implementation** of the original attack for which no code is available. If you have a better idea for these hyperparameters, don't hesitate to message us!
# #### Abstract
# Training deep neural networks requires gradient estimation from data batches to update parameters. Gradients per parameter are averaged over a set of data and this has been presumed to be safe for privacy-preserving training in joint, collaborative, and federated learning applications. Prior work only showed the possibility of recovering input data given gradients under very restrictive conditions - a single input point, or a network with no non-linearities, or a small 32x32 px input batch. Therefore, averaging gradients over larger batches was thought to be safe. In this work, we introduce GradInversion, using which input images from a larger batch (8 - 48 images) can also be recovered for large networks such as ResNets (50 layers), on complex datasets such as ImageNet (1000 classes, 224x224 px). We formulate an optimization task that converts random noise into natural images, matching gradients while regularizing image fidelity. We also propose an algorithm for target class label recovery given gradients. We further propose a group consistency regularization framework, where multiple agents starting from different random seeds work together to find an enhanced reconstruction of original data batch. We show that gradients encode a surprisingly large amount of information, such that all the individual images can be recovered with high fidelity via GradInversion, even for complex datasets, deep networks, and large batch sizes.
# ### Startup
# +
try:
    import breaching
except ModuleNotFoundError:
    # You only really need this safety net if you want to run these notebooks directly in the examples directory
    # Don't worry about this if you installed the package or moved the notebook to the main directory.
    import os; os.chdir("..")
    import breaching
import torch
# %load_ext autoreload
# %autoreload 2
# Redirects logs directly into the jupyter notebook
import logging, sys
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stdout)], format='%(message)s')
logger = logging.getLogger()
# -
# ### Initialize cfg object and system setup:
# This will load the full configuration object. This includes the configuration for the use case and threat model as `cfg.case` and the hyperparameters and implementation of the attack as `cfg.attack`. All parameters can be modified below, or overriden with `overrides=` as if they were cmd-line arguments.
# +
cfg = breaching.get_config(overrides=["case=5_small_batch_imagenet", "attack=seethroughgradients"])
device = torch.device(f'cuda:0') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.benchmark = cfg.case.impl.benchmark
setup = dict(device=device, dtype=getattr(torch, cfg.case.impl.dtype))
setup
# -
# ### Modify config options here
# You can use `.attribute` access to modify any of these configurations for the attack, or the case:
# +
# Attack user 0's single data point from a unique-class partition.
cfg.case.data.partition="unique-class"
cfg.case.user.user_idx = 0
cfg.case.user.num_data_points = 1
cfg.case.model = "resnet50" # also options are resnet50ssl or resnetmoco
# In this paper, there are no public buffers, but users send their batch norm statistic updates in combination
# with their gradient updates to the server:
cfg.case.server.provide_public_buffers = False
cfg.case.user.provide_buffers = True
# +
# cfg.attack.optim.langevin_noise = 1e-4
# cfg.attack.objective.scale = 1e-3
# -
# ### Instantiate all parties
# The following lines generate "server, "user" and "attacker" objects and print an overview of their configurations.
user, server, model, loss_fn = breaching.cases.construct_case(cfg.case, setup)
attacker = breaching.attacks.prepare_attack(server.model, server.loss, cfg.attack, setup)
breaching.utils.overview(server, user, attacker)
# ### Simulate an attacked FL protocol
# This exchange is a simulation of a single query in a federated learning protocol. The server sends out a `server_payload` and the user computes an update based on their private local data. This user update is `shared_data` and contains, for example, the parameter gradient of the model in the simplest case. `true_user_data` is also returned by `.compute_local_updates`, but of course not forwarded to the server or attacker and only used for (our) analysis.
server_payload = server.distribute_payload()
shared_data, true_user_data = user.compute_local_updates(server_payload)
user.plot(true_user_data)
# ### Reconstruct user data:
# Now we launch the attack, reconstructing user data based on only the `server_payload` and the `shared_data`.
#
# You can interrupt the computation early to see a partial solution.
reconstructed_user_data, stats = attacker.reconstruct([server_payload], [shared_data], {}, dryrun=cfg.dryrun)
# Next we'll evaluate metrics, comparing the `reconstructed_user_data` to the `true_user_data`.
metrics = breaching.analysis.report(reconstructed_user_data, true_user_data, [server_payload],
                                    server.model, order_batch=True, compute_full_iip=False,
                                    cfg_case=cfg.case, setup=setup)
# And finally, we also plot the reconstructed data:
user.plot(reconstructed_user_data)
# ### Notes:
# * Reconstructions with the Deep Inversion regularization take a while to compute.
# * Likely settings for the Deep Inversion regularization are reverse-engineered from the related NVIDIA repository https://github.com/NVlabs/DeepInversion
# * This is only a partial implementation of See Through Gradients: The Group Registration part is not included (and not entirely clear to me (Jon<NAME>) from the paper only)
# * In some configurations of this attacks, the gradient reconstruction objective can actually be completely disabled and images can be recovered just as well based only on their batch norm statistics!
|
examples/See through Gradients - Optimization-based Attack - ResNet50 on ImageNet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.1 64-bit
# language: python
# name: python3
# ---
# +
from pulp import *
import numpy as np
import pandas as pd
import math
# Problem sizes: 30 members covering 26 meeting days, 4 shift roles per day.
members_count = 30
days_count = 26
members = list(range(1, members_count + 1))
days = list(range(1, days_count + 1))
shifts = list(range(1,5))
# defining the variables
# x[m][d][r] == 1 iff member m takes role r on day d.
x = LpVariable.dicts("x",(members,days,shifts),0,1,LpInteger)
# y[m] counts the total shifts assigned to member m (linked to x below).
y = LpVariable.dicts("y",(members),0,None,LpInteger)
# binary variable
# NOTE(review): b is declared but never used in any constraint or objective.
b = LpVariable.dicts("b",(members),0,1,LpInteger)
model = LpProblem("Shifts", LpMaximize)
# NOTE(review): the "== 4 per day" constraint below fixes sum(y) to
# days_count*4, so this objective is constant — any feasible assignment
# is optimal; confirm whether a different objective was intended.
model += lpSum(y[si] for si in members)
# # 4 shifts per meeting
for d in days:
    model += lpSum(x[m][d][r] for m in members for r in shifts) == 4
# # max and min number of meetings per member
for m in members:
    model += lpSum(x[m][d][r] for d in days for r in shifts) <= math.ceil(days_count*4 / members_count)
    model += lpSum(x[m][d][r] for d in days for r in shifts) >= math.floor(days_count*4 / members_count)
# only one role per member per meeting
for m in members:
    for d in days:
        model += lpSum(x[m][d][r] for r in shifts) <= 1
# every role in the meeting must be occupied
for d in days:
    for r in shifts:
        model += lpSum(x[m][d][r] for m in members) == 1
# number of shifts that a member has done
for m in members:
    model += lpSum(x[m][d][r] for d in days for r in shifts) - y[m] == 0
model.solve(PULP_CBC_CMD(msg=0))
# +
# NOTE(review): this alternative weighted formulation looks unfinished:
#  - `shifts` repeats each of 1..4 once per day, so LpVariable.dicts
#    (keyed by value) collapses it to only 4 distinct variable keys;
#  - `weight_vars` is created but never used;
#  - the objective indexes `shift_weights[w]` where w iterates the list's
#    *values* (1.5/1); `shift_weights[1.5]` raises TypeError at build time.
# Left byte-identical pending clarification of the intended weighting.
days = [i for i in range(1, 26)]
shifts = [i for _ in days for i in range(1,5)]
# Role 1 is weighted 1.5x, all other roles 1x.
shift_weights = [1.5 if i == 1 else 1 for i in shifts]
members = [i for i in range(1, 31)]
has_shift = LpVariable.dicts("hasshift", (members, shifts), 0, 1, LpInteger)
member_vars = LpVariable.dicts("membervars", (members), 0, None, LpInteger)
weight_vars = LpVariable.dicts("weightvars", (shift_weights), 0)
model = LpProblem("Shifts", LpMinimize)
model += lpSum(member_vars[m]*shift_weights[w] for m in members for w in shift_weights)
# # 4 shifts per meeting
model += lpSum(has_shift[m][r] for m in members for r in shifts) == 4
# only one role per member per meeting
for m in members:
    model += lpSum(has_shift[m][r] for r in shifts) <= 1
# every role in the meeting must be occupied
for r in shifts:
    model += lpSum(has_shift[m][r] for m in members) == 1
for m in members:
    model += lpSum(has_shift[m][r] for r in shifts) - member_vars[m] == 0
model.solve()
# +
# Collect the solved x[person][day][role] values into a tidy DataFrame.
people = []
days = []
shifts = []
value = []
for vi in model.variables():
    # PuLP names the variables "x_person_day_role"; keep only the x's.
    if vi.name.split("_")[0] == 'x':
        person = vi.name.split("_")[1]
        day = vi.name.split("_")[2]
        role = vi.name.split("_")[3]
        people.append(int(person))
        days.append(int(day))
        shifts.append(role)
        value.append(vi.varValue)
data = {'Person':people, 'Day':days, 'Role':shifts, 'Value':value}
df = pd.DataFrame(data)
pd.set_option('display.max_columns', None)
def mapper(row):
    """Render one assignment row as text: the role number when the
    solver assigned this (person, day, role), otherwise the (zero) value."""
    assigned = row['Value'] == 1
    shown = row['Role'] if assigned else row['Value']
    return str(int(shown))
# Human-readable cell: the role number if assigned, otherwise "0".
df['y'] = df.apply(mapper,axis = 1)
# -
# +
df['z'] = df['y'].apply(int)
df.sort_index()
# Pivot into a Person x Day roster; each cell holds the assigned role number.
table = pd.pivot_table(df, values='z', index=['Person'],
                    columns=['Day'], aggfunc=np.sum)
table
|
migrations/scheduling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas import *
# -
# Load the flights dataset and inspect its basic structure.
# FIX: modernized the Python 2 `print` statements (the rest of the notebook
# header declares Python 3) and corrected the undefined `flights_df` name —
# the frame is loaded as `flights`, so the first three prints raised
# NameError (and were SyntaxError under Python 3).
flights = pd.read_csv('flights.csv')
print(flights.head())
print(flights.tail())
print(flights.describe())
print(flights.shape)
print(flights.columns)
print(flights.dtypes)
flights.dest.unique()
flights.head(10)
# # Create a new dataframe df_1 with flights from NYC to SEA
flights[(flights.origin=='JFK') & (flights.dest=='SEA')]
# Flights in spring months (March through June).
flights[(flights.month >= 3) & (flights.month <= 6)]
|
Flights_Dataset/Problem set 1 solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import numpy as np
import random
class Layer:
    """
    Base class for layers in the network.

    Arguments:

        `inbound_layers`: A list of layers with edges into this layer.
    """
    def __init__(self, inbound_layers=None):
        """
        Layer's constructor (runs when the object is instantiated). Sets
        properties that all layers need.
        """
        # BUGFIX: the default used to be a mutable `[]`, which Python shares
        # across every call; a fresh list is now created per instance.
        # A list of layers with edges into this layer.
        self.inbound_layers = [] if inbound_layers is None else inbound_layers
        # The eventual value of this layer.
        self.value = None
        # A list of layers that this layer outputs to.
        self.outbound_layers = []
        # Keys are the inputs to this layer and their values are the
        # partials of this layer with respect to that input.
        self.gradients = {}
        # Sets this layer as an outbound layer for all of
        # this layer's inputs.
        for layer in self.inbound_layers:
            layer.outbound_layers.append(self)

    def forward(self):
        """
        Every layer that uses this class as a base class will
        need to define its own `forward` method.
        """
        # BUGFIX: `self` was missing, so calling the base method raised
        # TypeError instead of the intended NotImplementedError.
        raise NotImplementedError

    def backward(self):
        """
        Every layer that uses this class as a base class will
        need to define its own `backward` method.
        """
        # BUGFIX: `self` was missing here as well (see forward()).
        raise NotImplementedError
class Input(Layer):
    """
    A generic input into the network.
    """
    def __init__(self):
        # Run the base constructor with no inbound layers. The only
        # important property of an Input is `value`, which is assigned
        # externally during `topological_sort`.
        Layer.__init__(self)

    def forward(self):
        # Nothing to compute: the value is supplied from the feed dict.
        pass

    def backward(self):
        # An Input layer has no inputs of its own, so its derivative
        # starts at zero. The key `self` refers to this object.
        self.gradients = {self: 0}
        # Weights and biases may be fed in as Inputs, so accumulate the
        # cost gradient from every layer this Input feeds into.
        for consumer in self.outbound_layers:
            self.gradients[self] += consumer.gradients[self]
class Linear(Layer):
    """
    Represents a layer that performs a linear transform: value = X @ W + b.
    """
    def __init__(self, X, W, b):
        # The base class (Layer) constructor. Weights and bias
        # are treated like inbound layers.
        Layer.__init__(self, [X, W, b])

    def forward(self):
        """
        Performs the math behind a linear transform.
        """
        X = self.inbound_layers[0].value
        W = self.inbound_layers[1].value
        b = self.inbound_layers[2].value
        self.value = np.dot(X, W) + b

    def backward(self):
        """
        Calculates the gradient based on the output values.
        """
        # Initialize a partial for each of the inbound_layers.
        self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_layers}
        # Cycle through the outputs. The gradient will change depending
        # on each output, so the gradients are summed over all outputs.
        for n in self.outbound_layers:
            # Get the partial of the cost with respect to this layer.
            grad_cost = n.gradients[self]
            # Set the partial of the loss with respect to this layer's inputs:
            # d(XW+b)/dX contributes grad_cost @ W.T.
            self.gradients[self.inbound_layers[0]] += np.dot(grad_cost, self.inbound_layers[1].value.T)
            # Set the partial of the loss with respect to this layer's weights:
            # d(XW+b)/dW contributes X.T @ grad_cost.
            self.gradients[self.inbound_layers[1]] += np.dot(self.inbound_layers[0].value.T, grad_cost)
            # Set the partial of the loss with respect to this layer's bias:
            # the bias is broadcast over the batch, so sum over axis 0.
            self.gradients[self.inbound_layers[2]] += np.sum(grad_cost, axis=0, keepdims=False)
class Sigmoid(Layer):
    """
    Represents a layer that performs the sigmoid activation function.
    """
    def __init__(self, layer):
        # The base class constructor; a Sigmoid has exactly one input.
        Layer.__init__(self, [layer])

    def _sigmoid(self, x):
        """
        This method is separate from `forward` because it
        will be used with `backward` as well.

        `x`: A numpy array-like object.
        """
        return 1. / (1. + np.exp(-x))

    def forward(self):
        """
        Perform the sigmoid function and set the value.
        """
        self.value = self._sigmoid(self.inbound_layers[0].value)

    def backward(self):
        """
        Calculates the gradient using the derivative of the sigmoid:
        sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
        """
        # Initialize the gradients to 0.
        self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_layers}
        source = self.inbound_layers[0]
        # Sum the contribution from every layer this activation feeds into.
        for consumer in self.outbound_layers:
            upstream = consumer.gradients[self]
            # self.value already holds sigmoid(x) from the forward pass.
            self.gradients[source] += self.value * (1.0 - self.value) * upstream
class MSE(Layer):
    def __init__(self, y, a):
        """
        The mean squared error cost function.
        Should be used as the last layer for a network.

        `y`: the ground-truth Input layer; `a`: the prediction layer.
        """
        # Call the base class' constructor.
        Layer.__init__(self, [y, a])

    def forward(self):
        """
        Calculates the mean squared error.
        """
        # NOTE: We reshape these to avoid possible matrix/vector broadcast
        # errors.
        #
        # For example, if we subtract an array of shape (3,) from an array of shape
        # (3,1) we get an array of shape(3,3) as the result when we want
        # an array of shape (3,1) instead.
        #
        # Making both arrays (3,1) insures the result is (3,1) and does
        # an elementwise subtraction as expected.
        y = self.inbound_layers[0].value.reshape(-1, 1)
        a = self.inbound_layers[1].value.reshape(-1, 1)
        # m is the batch size; cached for averaging in backward() too.
        self.m = self.inbound_layers[0].value.shape[0]
        # Save the computed output for backward.
        self.diff = y - a
        self.value = np.mean(self.diff**2)

    def backward(self):
        """
        Calculates the gradient of the cost.

        This is the final layer of the network so outbound layers
        are not a concern.
        """
        # d(mean((y-a)^2))/dy =  (2/m) * (y - a)
        self.gradients[self.inbound_layers[0]] = (2 / self.m) * self.diff
        # d(mean((y-a)^2))/da = -(2/m) * (y - a)
        self.gradients[self.inbound_layers[1]] = (-2 / self.m) * self.diff
def topological_sort(feed_dict):
    """
    Sort the layers in topological order using Kahn's Algorithm.

    `feed_dict`: A dictionary where the key is a `Input` Layer and the value is the respective value feed to that Layer.

    Returns a list of sorted layers.
    """
    # Discover every layer reachable from the inputs, recording the
    # remaining in/out edge sets per layer.
    edges = {}
    frontier = list(feed_dict.keys())
    while frontier:
        node = frontier.pop(0)
        if node not in edges:
            edges[node] = {'in': set(), 'out': set()}
        for succ in node.outbound_layers:
            if succ not in edges:
                edges[succ] = {'in': set(), 'out': set()}
            edges[node]['out'].add(succ)
            edges[succ]['in'].add(node)
            frontier.append(succ)

    ordered = []
    ready = set(feed_dict.keys())
    while ready:
        node = ready.pop()
        # Input layers receive their concrete value from the feed dict.
        if isinstance(node, Input):
            node.value = feed_dict[node]
        ordered.append(node)
        for succ in node.outbound_layers:
            edges[node]['out'].remove(succ)
            edges[succ]['in'].remove(node)
            # Once a layer has no remaining unresolved inputs it is ready.
            if not edges[succ]['in']:
                ready.add(succ)
    return ordered
def forward_pass(graph):
    """
    Performs a forward pass through a list of sorted Layers.

    Arguments:

        `graph`: The result of calling `topological_sort`.
    """
    # The list is already topologically ordered, so each layer's inputs
    # have been computed before its own forward() runs.
    for layer in graph:
        layer.forward()
def forward_and_backward(graph):
    """
    Performs a forward pass and a backward pass through a list of sorted Layers.

    Arguments:

        `graph`: The result of calling `topological_sort`.
    """
    # Forward pass in topological order...
    for layer in graph:
        layer.forward()
    # ...then backward pass in reverse topological order, so every
    # layer's consumers have produced their gradients first.
    for layer in reversed(graph):
        layer.backward()
# +
y, a = Input(), Input()
cost = MSE(y, a)
y_ = np.array([1, 2, 3])
a_ = np.array([4.5, 5, 10])
feed_dict = {y: y_, a: a_}
graph = topological_sort(feed_dict)
# forward pass
forward_pass(graph)
"""
Expected output
23.4166666667
"""
print(cost.value)
# -
def gradient_descent_update(x, gradx, learning_rate):
    """
    Performs a gradient descent update.
    """
    # Move against the gradient direction, scaled by the step size.
    step = gradx * learning_rate
    return x - step
def f(x):
    """
    Quadratic function.

    It's easy to see the minimum value of the function
    is 5 when is x=0.
    """
    return 5 + x * x
def df(x):
    """
    Derivative of `f` with respect to `x`.
    """
    return x + x
# +
# Random number between 0 and 10,000. Feel free to set x whatever you like.
x = random.randint(0, 10000)
learning_rate = 0.1
epochs = 100
# Plain gradient descent on the quadratic f defined above.
for i in range(epochs+1):
    cost = f(x)
    gradx = df(x)
    # BUGFIX: report the current position x — the original printed `gradx`
    # while labeling it "x" in the format string.
    print("EPOCH {}: Cost = {:.3f}, x = {:.3f}".format(i, cost, x))
    x = gradient_descent_update(x, gradx, learning_rate)
# +
X, W, b = Input(), Input(), Input()
y = Input()
f = Linear(X, W, b)
a = Sigmoid(f)
cost = MSE(y, a)
X_ = np.array([[-1., -2.], [-1, -2]])
W_ = np.array([[2.], [3.]])
b_ = np.array([-3.])
y_ = np.array([1, 2])
feed_dict = {
X: X_,
y: y_,
W: W_,
b: b_,
}
graph = topological_sort(feed_dict)
forward_and_backward(graph)
gradients = [t.gradients[t] for t in [X, y, W, b]]
"""
Expected output
[array([[ -3.34017280e-05, -5.01025919e-05],
[ -6.68040138e-05, -1.00206021e-04]]), array([[ 0.9999833],
[ 1.9999833]]), array([[ 5.01028709e-05],
[ 1.00205742e-04]]), array([ -5.01028709e-05])]
"""
print(gradients)
# -
|
labs/miniflow/miniflow_back.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TorchDyn Quickstart
#
# **TorchDyn is the toolkit for continuous models in PyTorch. Play with state-of-the-art architectures or use its powerful libraries to create your own.**
#
#
# Central to the `torchdyn` approach are continuous neural networks, where *width*, *depth* (or both) are taken to their infinite limit. On the optimization front, we consider continuous "data-stream" regimes and gradient flow methods, where the dataset represents a time-evolving signal processed by the neural network to adapt its parameters.
#
# By providing a centralized, easy-to-access collection of model templates, tutorial and application notebooks, we hope to speed-up research in this area and ultimately contribute to turning neural differential equations into an effective tool for control, system identification and common machine learning tasks.
#
from torchdyn.models import *
from torchdyn.data_utils import *
from torchdyn import *
# ## Generate data from a static toy dataset
#
# We’ll be generating data from toy datasets. We provide a wide range of datasets often use to benchmark and understand neural ODEs. Here we will use the classic moons dataset and train a neural ODE for binary classification
# Generate the two-moons toy dataset (520 samples) for binary classification.
d = ToyDataset()
X, yn = d.generate(n_samples=520, dataset_type='moons')
# +
import matplotlib.pyplot as plt
# Scatter plot of the two classes in the two input dimensions.
colors = ['orange', 'blue']
fig = plt.figure(figsize=(3,3))
ax = fig.add_subplot(111)
for i in range(len(X)):
    ax.scatter(X[i,0], X[i,1], color=colors[yn[i].int()])
# -
# Generated data can be easily loaded in the dataloader with standard `PyTorch` calls
# +
import torch
import torch.utils.data as data
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
X_train = torch.Tensor(X).to(device)
y_train = torch.LongTensor(yn.long()).to(device)
train = data.TensorDataset(X_train, y_train)
# Single full-batch loader (batch_size == dataset size, no shuffling).
trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)
# -
# The learner is defined as....
# +
import torch.nn as nn
import pytorch_lightning as pl
class Learner(pl.LightningModule):
    """Minimal PyTorch-Lightning wrapper that trains `model` with
    cross-entropy on the module-level `trainloader`."""

    def __init__(self, model:nn.Module, settings:dict=None):
        """
        `model`: the NeuralDE (or any nn.Module) to train.
        `settings`: optional dict of extra options, stored on the learner.
        """
        super().__init__()
        # BUGFIX: the original did `defaults.update(settings)` but `defaults`
        # was never defined, raising a NameError on construction. The dict is
        # copied so a caller's dict is never mutated, and the former mutable
        # default argument ({}) is replaced by None.
        self.settings = dict(settings) if settings else {}
        self.model = model
        self.c = 0

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        # Standard supervised classification step.
        x, y = batch
        y_hat = self.model(x)
        loss = nn.CrossEntropyLoss()(y_hat, y)
        logs = {'train_loss': loss}
        return {'loss': loss, 'log': logs}

    def configure_optimizers(self):
        # Fixed learning rate, as in the notebook text.
        return torch.optim.Adam(self.model.parameters(), lr=0.01)

    def train_dataloader(self):
        # Uses the module-level `trainloader` defined earlier in the notebook.
        return trainloader
# -
# ## Define a Neural ODE
# Analogously to most forward neural models we want to realize a map
#
# $$
# x \mapsto \hat y
# $$
#
# where $\hat y$ becomes the best approximation of a true output $y$ given an input $x$.\
# In torchdyn you can define very simple neural ODE models of the form
#
# $$ \left\{
# \begin{aligned}
# \dot{h}(s) &= f(h(s), \theta)\\
# h(0) &= x\\
# \hat y & = h(1)
# \end{aligned}
# \right. \quad s\in[0,1]
# $$
#
# by just specifying a neural network $f$ and giving some simple settings.
#
# **Note:** This neural ODE model is of *depth-invariant* type, as neither $f$ explicitly depends on $s$ nor are the parameters $\theta$ depth-varying. This model, together with its *depth-variant* counterpart (with $s$ concatenated into the vector field), was first proposed and implemented by [[Chen et al., 2018]](https://arxiv.org/abs/1806.07366)
# ### Define the vector field (DEFunc)
#
# The first step is to define a `torch.nn.Sequential` object and wrap it with the `DEFunc` class from torchdyn. This automatically defines the vector field $f(h,\theta)$ of the neural ODE
# Vector field f(h, theta): a 2 -> 64 -> 2 MLP with tanh activation,
# wrapped in torchdyn's DEFunc.
f = DEFunc(nn.Sequential(
        nn.Linear(2, 64),
        nn.Tanh(),
        nn.Linear(64, 2)
    )
)
# In this case we chose $f$ to be a simple MLP with one hidden layer and $\tanh$ activation
# ### Define the NeuralDE
# The final step to define a neural ODE object is to instantiate an object of the torchdyn's class `NeuralDE` passing some preferences and `f`.
#
# In this case with `settings` we just specify that:
#
# * we want a `'classic'` neural ODE;
# * we will use the `'dopri5'` (Dormand-Prince) ODE solver from `torchdiffeq`;
# * we compute backward gradients with the `'adjoint'` method.
settings = {'type':'classic', 'solver':'dopri5', 'backprop_style':'adjoint'}
model = NeuralDE(f, settings).to(device)
# ## Train the Model
learn = Learner(model)
# NOTE(review): min_nb_epochs/max_nb_epochs are the old pytorch-lightning
# argument names (renamed min_epochs/max_epochs in later releases) — confirm
# the pinned lightning version.
trainer = pl.Trainer(min_nb_epochs=200, max_nb_epochs=300)
trainer.fit(learn)
# With the method `trajectory` of `NeuralDE` objects you can quickly evaluate the entire trajectory of each data point in `X_train` on an interval `s_span`
# Evaluate each point's flow at 100 depths in [0, 1].
s_span = torch.linspace(0,1,100)
trajectory = model.trajectory(X_train, s_span).detach().cpu()
# ### Plot the Training Results
# We can first plot the trajectories of the data points in the depth domain $s$
# +
# Plot per-dimension depth trajectories h_0(s), h_1(s), coloured by class.
color=['orange', 'blue']
fig = plt.figure(figsize=(8,2))
ax0 = fig.add_subplot(121)
ax1 = fig.add_subplot(122)
# NOTE(review): only the first 500 of the 520 generated samples are
# plotted — presumably to keep the plot light; confirm this is intended.
for i in range(500):
    ax0.plot(s_span, trajectory[:,i,0], color=color[int(yn[i])], alpha=.1);
    ax1.plot(s_span, trajectory[:,i,1], color=color[int(yn[i])], alpha=.1);
ax0.set_xlabel(r"$s$ [Depth]")
ax0.set_ylabel(r"$h_0(s)$")
ax0.set_title("Dimension 0")
ax1.set_xlabel(r"$s$ [Depth]")
ax1.set_ylabel(r"$h_1(s)$")
ax1.set_title("Dimension 1")
# -
# Then the trajectory in the *state-space*
# +
# Same trajectories, now shown as curves in the (h_0, h_1) plane.
fig = plt.figure(figsize=(3,3))
ax = fig.add_subplot(111)
for i in range(500):
    ax.plot(trajectory[:,i,0], trajectory[:,i,1], color=color[int(yn[i])], alpha=.1);
ax.set_xlabel(r"$h_0$")
ax.set_ylabel(r"$h_1$")
ax.set_title("Flows in the state-space")
# -
# Visualize the learned vector field f over the state space.
plot_static_vector_field(model, trajectory)
# **Sweet! You trained your first neural ODE! Now go on and learn more advanced models with the next tutorials**
|
tutorials/00_quickstart.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import undetected_chromedriver.v2 as uc

# Configure a hardened incognito Chrome session for scraping YouTube.
chrome_options = uc.ChromeOptions()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-popup-blocking")
chrome_options.add_argument("--profile-directory=Default")
chrome_options.add_argument("--ignore-certificate-errors")
chrome_options.add_argument("--disable-plugins-discovery")
chrome_options.add_argument("--incognito")
# Fixed: the original passed "user_agent=DN", which is not a valid Chrome
# switch and was silently ignored; the correct flag is --user-agent=<value>.
chrome_options.add_argument("--user-agent=DN")
driver = uc.Chrome(options=chrome_options)
driver.delete_all_cookies()
driver.get("https://www.youtube.com/?gl=TW&tab=w1")
|
rpa/youtube.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from scipy.stats.stats import ttest_ind
import warnings
warnings.filterwarnings('ignore')
# +
# Connection parameters for the hosted studentsperformance database.
postgres_user = 'dsbc_student'
postgres_pw = '<PASSWORD>'
postgres_host = '172.16.31.10'
postgres_port = '5432'
postgres_db = 'studentsperformance'
engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(
    postgres_user, postgres_pw, postgres_host, postgres_port, postgres_db))
# Pull the whole table into a DataFrame, then release the connection.
student_df = pd.read_sql_query('select * from studentsperformance',con=engine)
engine.dispose()
# -
# Mean scores by gender.
student_df.groupby("gender").mean()[["math score", "reading score", "writing score"]]
# +
# Two-sample t-test on all three score columns, female vs male.
ttest_ind(student_df[student_df.gender == "female"][["math score", "reading score", "writing score"]],
          student_df[student_df.gender == "male"][["math score", "reading score", "writing score"]])
# +
# Mean scores by race/ethnicity group.
student_df.groupby("race/ethnicity").mean()[["math score", "reading score", "writing score"]]
# -
# Pairwise t-tests between every pair of ethnicity groups, per score column.
ethnicities = student_df["race/ethnicity"].unique()
grouped_df = student_df.groupby("race/ethnicity")
for var in ["math score", "reading score", "writing score"]:
    print("------------------------------------------------")
    print("Comparisons for variable: {}".format(var))
    print("------------------------------------------------")
    for i in range(0, len(ethnicities)):
        for j in range(i+1, len(ethnicities)):
            print(
                "t-test between groups {0} and {1}:".format(ethnicities[i], ethnicities[j]))
            print(ttest_ind(
                student_df[student_df["race/ethnicity"]
                           == ethnicities[i]][var],
                student_df[student_df["race/ethnicity"] == ethnicities[j]][var]
            ))
# Mean scores by parental level of education.
student_df.groupby("parental level of education").mean()[["math score", "reading score", "writing score"]]
# Pairwise t-tests between every pair of parental-education levels,
# for each of the three score columns.
# Fixed: the original reused the name `ethnicities` for education levels,
# which was misleading; renamed to `edu_levels` (behavior unchanged).
edu_levels = student_df["parental level of education"].unique()
grouped_df = student_df.groupby("parental level of education")
for var in ["math score", "reading score", "writing score"]:
    print("------------------------------------------------")
    print("Comparisons for variable: {}".format(var))
    print("------------------------------------------------")
    for i in range(0, len(edu_levels)):
        for j in range(i+1, len(edu_levels)):
            print(
                "t-test between groups {0} and {1}:".format(edu_levels[i], edu_levels[j]))
            print(ttest_ind(
                student_df[student_df["parental level of education"]
                           == edu_levels[i]][var],
                student_df[student_df["parental level of education"]
                           == edu_levels[j]][var]
            ))
# Mean scores and t-test by lunch type (free/reduced vs standard).
student_df.groupby("lunch").mean()[["math score", "reading score", "writing score"]]
ttest_ind(student_df[student_df.lunch == "free/reduced"][["math score", "reading score", "writing score"]],
          student_df[student_df.lunch == "standard"][["math score", "reading score", "writing score"]])
# Mean scores and t-test by test-preparation-course completion.
student_df.groupby("test preparation course").mean()[["math score", "reading score", "writing score"]]
ttest_ind(student_df[student_df["test preparation course"] == "completed"][["math score", "reading score", "writing score"]],
          student_df[student_df["test preparation course"] == "none"][["math score", "reading score", "writing score"]])
# Pairwise correlations between the three scores.
student_df[["math score", "reading score", "writing score"]].corr()
|
Thinkful Program/Model Preparation/Thinkful 15.6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
from tqdm import tqdm as tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, ConcatDataset
# # Initialization
torch.cuda.init()
device = torch.device('cuda:1')
print(torch.cuda.get_device_name(device))
# +
# class AutoEncoder(nn.Module):
# def __init__(self):
# super(AutoEncoder, self).__init__()
# #encode
# self.e1 = nn.Linear(9216,6000)
# self.e2 = nn.Linear(6000,4000)
# # self.e3 = nn.Linear(4000,2000)
# # self.e4 = nn.Linear(2000,1000)
# #decode
# # self.d1 = nn.Linear(1000,2000)
# # self.d3 = nn.Linear(2000,4000)
# self.d1 = nn.Linear(4000,6000)
# self.d2 = nn.Linear(6000,9216)
# def forward(self, x):
# encode = self.e2(F.relu(self.e1(x)))
# return self.d2(F.relu(self.d1(encode)))
# -
class AutoEncoder(nn.Module):
    """Two-layer MLP autoencoder: 9216 -> 4608 -> 1024 -> 4608 -> 9216.

    Attribute names (e1, e2, d1, d2) are preserved so previously saved
    state_dicts remain loadable.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        # Encoder: compress 9216-dim embeddings down to a 1024-dim code.
        self.e1 = nn.Linear(9216, 4608)
        self.e2 = nn.Linear(4608, 1024)
        # Decoder: mirror of the encoder, reconstructing 9216 dims.
        self.d1 = nn.Linear(1024, 4608)
        self.d2 = nn.Linear(4608, 9216)

    def forward(self, x):
        """Encode `x`, then decode; returns the reconstruction (raw linear output)."""
        hidden = F.relu(self.e1(x))
        code = self.e2(hidden)
        expanded = F.relu(self.d1(code))
        return self.d2(expanded)
# +
base_path = '/ssd2/chetanp'  # root directory for dataset shards and checkpoints
epochs = 250  # main training epochs
epochs0 = 4  # warm-up epochs run at the higher learning rate lr0
batch_size = int((2 ** 15))  # 32768 samples per batch
lr = 1e-5  # main-phase learning rate
lr0 = 1e-4  # warm-up learning rate
# -
# Load the four pre-extracted embedding shards and concatenate them
# into one dataset.
dataset = []
for i in range(4):
    path = os.path.join(base_path,'brown_e_base'+str(i)+'.pt')
    data = torch.load(path)
    dataset.append(TensorDataset(data))
dataset = ConcatDataset(dataset)
data_loader = DataLoader(dataset,batch_size = batch_size,num_workers = 16)
# Wrap the autoencoder for multi-GPU execution and resume from the
# previously saved checkpoint.
model = nn.DataParallel(AutoEncoder(),device_ids =[1,2]).to(device)
path = os.path.join(base_path,'brown_e_base_compressor_superfine.pth')
model.load_state_dict(torch.load(path))
# +
# Loss: 1 - cosine similarity between input and reconstruction, averaged
# over the batch (MSE alternative kept commented out).
criterion = nn.CosineSimilarity()
loss_function = lambda x,y: (1-criterion(x,y)).mean()
# loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(
    model.parameters(), lr=lr0, weight_decay=1e-5)
# -
# # Training The Model
# +
# Warm-up phase: a few epochs at the higher learning rate lr0.
for i in range(epochs0):
    running_loss = 0
    count = 0
    for data in tqdm(data_loader):
        ## Get Data
        sample = data[0].to(device)
        ## Pass forward
        output = model(sample)
        # Reconstruction target is the input itself (autoencoding).
        loss = loss_function(output,sample)
        ## Update Parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ## Update Running Loss
        running_loss += float(loss)
        count += 1
    # if(i == 0): print('Enough GPU Memory Present')
    print('Epoch ', str(i+1), ' Loss: ', str(running_loss/count))
# +
# Switch to the lower main-phase learning rate and restore optimizer state
# from the previous run.
optimizer = torch.optim.Adam(
    model.parameters(), lr=lr, weight_decay=1e-5)
path = os.path.join(base_path,'optimizer_superfine.pth')
optimizer.load_state_dict(torch.load(path))
# -
# Exponential decay: multiply the LR by 0.98^epoch.
lambda1 = lambda epoch: 0.98 ** epoch
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
# +
# Main training phase; per-epoch mean losses are collected for later export.
losses = []
for i in range(epochs):
    running_loss = 0
    count = 0
    for data in tqdm(data_loader):
        ## Get Data
        sample = data[0].to(device)
        ## Pass forward
        output = model(sample)
        loss = loss_function(output,sample)
        ## Update Parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ## Update Running Loss
        running_loss += float(loss)
        count += 1
    # if(i == 0): print('Enough GPU Memory Present')
    print('Epoch ', str(i+1), ' Loss: ', str(running_loss/count))
    losses.append(running_loss/count)
# -
# Second pass with per-epoch LR decay via the scheduler.
# NOTE(review): this loop does NOT append to `losses`, so the exported CSV
# only covers the first pass — confirm this is intentional.
for i in range(epochs):
    running_loss = 0
    count = 0
    for data in tqdm(data_loader):
        ## Get Data
        sample = data[0].to(device)
        ## Pass forward
        output = model(sample)
        loss = loss_function(output,sample)
        ## Update Parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ## Update Running Loss
        running_loss += float(loss)
        count += 1
    print('Epoch ', str(i+1), ' Loss: ', str(running_loss/count))
    scheduler.step()
# +
# Persist model and optimizer checkpoints, plus the per-epoch loss curve.
path = os.path.join(base_path,'brown_e_base_compressor_superfine.pth')
torch.save(model.state_dict(), path)
path = os.path.join(base_path,'optimizer_superfine.pth')
torch.save(optimizer.state_dict(), path)
path = os.path.join(base_path,'loss.csv')
import csv
# Write all epoch losses as a single CSV row.
with open(path, 'w') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerow(losses)
# -
# %reset -f
|
Experiments/Compression/Implement_AutoEncoder.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# importing libraries
import pandas as pd
import numpy as np
#importing data
df = pd.read_csv("Tweets.csv")
df.head()
df.isnull().sum()
df.shape
df.describe()
# #%null
((len(df)-df.count())/len(df))*100
df.drop(['airline_sentiment_gold','negativereason_gold','tweet_coord'],inplace=True,axis=1)
df.head()
sentiment_count = df.airline_sentiment.value_counts()
#Airline Review Count
airline_total = df['airline'].value_counts()
sentiment_count
import matplotlib.pyplot as plt
import seaborn as sns
index = [1,2,3]
plt.figure(1,figsize=(20,10))
plt.subplot(221)
plt.bar(index,sentiment_count,color=['red','blue','green'])
plt.xticks(index,['negative','neutral','positive'],rotation=0)
plt.xlabel('Sentiment Type')
plt.ylabel('Sentiment Count')
plt.title('Count of Type of Sentiment')
Index=[1,2,3,4,5,6]
my_colors = 'rgbkym'
plt.subplot(222)
plt.bar(Index,airline_total,color=my_colors)
plt.xticks(Index,['United','US Airways','American','Southwest','Delta','Virgin America'],rotation=90)
plt.xlabel('Airline')
plt.ylabel('Review Count')
plt.title('Airline Review Count')
airline_count = df.groupby('airline')['airline_sentiment'].value_counts()
airline_count
def plot_sentiment_airline(airline):
    """Bar-plot negative/neutral/positive tweet counts for one airline.

    Relies on the module-level DataFrame `df` and draws into the
    currently active matplotlib axes.
    """
    subset = df[df['airline'] == airline]
    sentiment_counts = subset['airline_sentiment'].value_counts()
    positions = [1, 2, 3]
    plt.bar(positions, sentiment_counts, color=['red', 'blue', 'green'])
    plt.xticks(positions, ['negative', 'neutral', 'positive'], rotation=0)
    plt.xlabel('Sentiment Type')
    plt.ylabel('Sentiment Count')
    plt.title('Count of Sentiment Type of ' + airline)
# One subplot (2x3 grid) of sentiment counts per airline.
airlines = ['US Airways','Virgin America','United','Delta','American','Southwest']
for i in range(len(airlines)):
    plt.figure(1,figsize=(20,12))
    temp = 231+i  # subplot position in a 2x3 grid
    plt.subplot(temp)
    plot_sentiment_airline(airlines[i])
df.isnull().sum()
df.columns
df.negativereason.unique()
df.negativereason_confidence.unique()
timezone_count = df['user_timezone'].value_counts()
tweet = df.groupby(['airline','airline_sentiment'])['user_timezone'].value_counts()
# Airlines' Negative Sentiment Count by Date
# Reduce tweet_created to a calendar date, then count negative tweets
# per (date, airline) pair.
df['tweet_created']=pd.to_datetime(df['tweet_created'])
df['tweet_created'] = df['tweet_created'].dt.date
day = df.groupby(['tweet_created','airline'])['airline_sentiment'].value_counts(sort=True)
# Select only the 'negative' slice of the multi-index.
date = day.loc(axis=0)[:,:,'negative']
date.groupby(['tweet_created','airline']).sum().unstack().plot(kind='bar',figsize=(15,5))
plt.xlabel('Date')
plt.ylabel('Negative Sentiment Count')
plt.title("Airlines' Negative Sentiment Count by Date")
plt.show()
# +
#Negative Reason Count
nr_count = df['negativereason'].value_counts()
nr_dict = dict(df['negativereason'].value_counts())
# NOTE(review): these hard-coded labels must match the descending order of
# value_counts() above for the plot ticks to be correct — verify against data.
nr = ["Customer Service Issue","Late Flight","Can't Tell", "Cancelled Flight", "Lost Luggage", "Bad Flight","Flight Booking Problems",
      "Flight Attendant Complaints","longlines","Damaged Luggage"]
def plot_negativereason_count(reason,reason_count):
    """Bar-plot the frequency of each of the ten negative-reason categories."""
    positions = list(range(10))
    plt.figure(figsize=(15, 10))
    bar_colors = 'rgbykcm'  # matplotlib single-letter colour codes, cycled
    plt.bar(positions, reason_count, color=bar_colors)
    plt.xticks(positions, reason, rotation=90)
    plt.xlabel('Negative Reason Type')
    plt.ylabel('Reason Count')
    plt.title('Count of Negative Reasons Type')
plot_negativereason_count(nr,nr_count)
# -
# Negative-reason breakdown per airline, shown as grouped bars.
negative_df = df.groupby('airline')['negativereason'].value_counts(ascending=False)
#Negative Reason Count for Airlines
negative_df.groupby(['airline','negativereason']).sum().unstack().plot(kind='bar',figsize=(20,10))
plt.xlabel('Airline')
plt.ylabel('Negative Reason Count')
plt.title("Negative Reason Count for Airlines")
plt.show()
|
Twitter Sentiment Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # BUILD DATABASE
# +
import os
#importem la funcio get_params de params (funcio params)
from params import get_params
# definim la funcio build_database
def build_database(params):
    """Write the list of image filenames for the current split to disk.

    Reads <root>/<database>/<split>/images and writes one filename per
    line to <root>/<root_save>/<image_lists>/<split>.txt.

    Improvements over the original: the output file is opened with a
    `with` block so it is always closed, and the local no longer shadows
    the `file` builtin. Comments translated to English.
    """
    # List images for the current split (train/val/test).
    image_names = os.listdir(os.path.join(params['root'],
                  params['database'],params['split'],'images'))

    # Destination text file, named after the split.
    out_path = os.path.join(params['root'], params['root_save'],
                            params['image_lists'],
                            params['split'] + '.txt')

    # Save the image list to disk, one name per line.
    with open(out_path, 'w') as out_file:
        for imname in image_names:
            out_file.write(imname + "\n")
if __name__=="__main__":
# crida a la funcio get_params dins de params.py
# obtenim tots els parametres necesaris per cridar la funcio build_database
params = get_params()
# cridem la funcio build_database amb la que creem el fitxer de la llista d'imatges per cada grup
# train, val i test
for split in ['train','val','test']:
params['split'] = split
build_database(params)
# -
# # GET FEATURES
# +
from params import get_params
import sys
# We need to add the source code path to the python path if we want to call modules such as 'utils'
params = get_params()
sys.path.insert(0,params['src'])
from utils.rootsift import RootSIFT
import os, time
import numpy as np
import pickle
import cv2
from sklearn.cluster import MiniBatchKMeans
from sklearn.preprocessing import normalize, StandardScaler
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings("ignore")
def get_features(params,pca=None,scaler=None):
    """Compute a bag-of-words feature vector per image and pickle them.

    Uses the codebook trained by train_codebook; images with no local
    features get a zero vector.

    Bug fixed: the original called scaler.transform(feats) and
    pca.transform(feats) without using the return value — sklearn
    transformers return a new array, so scaling/whitening was silently
    never applied. The results are now assigned back to `feats`.
    """
    # Read image names
    readfile = os.path.join(params['root'],params['root_save'],
                            params['image_lists'],params['split'] + '.txt')
    with open(readfile,'r') as f:
        image_list = f.readlines()

    # Initialize keypoint detector and feature extractor
    detector, extractor = init_detect_extract(params)

    # Initialize feature dictionary (image name -> bow vector)
    features = {}

    # Get trained codebook
    km = pickle.load(open(os.path.join(params['root'],params['root_save'],
                                       params['codebooks_dir'],'codebook_'
                                       + str(params['descriptor_size']) + "_"
                                       + params['descriptor_type']
                                       + "_" + params['keypoint_type'] + '.cb')
                          ,'rb'))

    for image_name in image_list:

        # Read image
        im = cv2.imread(os.path.join(params['root'],params['database'],
                                     params['split'],
                                     'images',image_name.rstrip()))

        # Resize image
        im = resize_image(params,im)

        # Extract local features
        feats = image_local_features(im,detector,extractor)

        if feats is not None:

            if params['normalize_feats']:
                feats = normalize(feats)

            # If we scaled training features, apply the same scaling here
            if scaler is not None:
                feats = scaler.transform(feats)

            # Whiten if needed
            if pca is not None:
                feats = pca.transform(feats)

            # Compute assignments of descriptors to codebook clusters
            assignments = get_assignments(km,feats)

            # Generate bow vector
            feats = bow(assignments,km)
        else:
            # Empty features
            feats = np.zeros(params['descriptor_size'])

        # Add entry to dictionary
        features[image_name] = feats

    # Save dictionary to disk with unique name
    save_file = os.path.join(params['root'],params['root_save'],
                             params['feats_dir'],
                             params['split'] + "_" +
                             str(params['descriptor_size']) + "_"
                             + params['descriptor_type'] + "_"
                             + params['keypoint_type'] + '.p')

    pickle.dump(features,open(save_file,'wb'))
def resize_image(params,im):
    """Resize `im` so its width is min(params['max_size'], original width),
    preserving aspect ratio. Never upscales."""
    # Get image dimensions (im.shape is (height, width[, channels])).
    height, width = im.shape[:2]

    # If the image width is smaller than the proposed small dimension,
    # keep the original size !
    resize_dim = min(params['max_size'],width)

    # We don't want to lose aspect ratio. Floor division keeps the height
    # an int as cv2.resize requires; the original's '/' only worked under
    # Python 2's integer-division semantics.
    dim = (resize_dim, height * resize_dim // width)

    # Resize and return new image
    return cv2.resize(im,dim)
def image_local_features(im,detector,extractor):
    '''
    Extract local features for the given image.

    Detects keypoints with `detector`, then computes their descriptors
    with `extractor`; returns the descriptor matrix (may be None when
    extraction yields nothing).
    '''
    keypoints = detector.detect(im, None)
    keypoints, descriptors = extractor.compute(im, keypoints)
    return descriptors
def init_detect_extract(params):
    '''
    Initialize detector and extractor from parameters.

    Returns a (detector, extractor) pair. The extractor is RootSIFT or
    plain SIFT depending on params['descriptor_type'].

    NOTE(review): params['keypoint_type'] is not consulted here — the
    detector is always SIFT; confirm that is intended.
    '''
    if params['descriptor_type'] == 'RootSIFT':
        extractor = RootSIFT()
    else:
        extractor = cv2.xfeatures2d.SIFT_create()
    #extractor = cv2.DescriptorExtractor_create(params['descriptor_type'])
    #detector = cv2.FeatureDetector_create(params['keypoint_type'])
    detector = cv2.xfeatures2d.SIFT_create()
    return detector, extractor
def stack_features(params):
    '''
    Get local features for all training images together.

    Returns (X, pca, scaler): the stacked descriptor matrix plus the
    optional fitted PCA and StandardScaler objects (None when disabled).

    Improvements over the original: descriptors are collected in a list
    and stacked once (the per-image np.vstack was quadratic), and the
    fit_transform results are assigned back to X — previously the
    whitened/scaled data was computed and discarded.
    '''
    # Init detector and extractor
    detector, extractor = init_detect_extract(params)

    # Read image names
    readfile = os.path.join(params['root'],params['root_save'],
                            params['image_lists'],params['split'] + '.txt')
    with open(readfile,'r') as f:
        image_list = f.readlines()

    # Collect per-image descriptor arrays; stack once at the end.
    chunks = []
    for image_name in image_list:

        # Read image
        im = cv2.imread(os.path.join(params['root'],
                                     params['database'],params['split'],
                                     'images',image_name.rstrip()))

        # Resize image
        im = resize_image(params,im)

        feats = image_local_features(im,detector,extractor)

        if feats is not None:
            chunks.append(feats)

    # Keep the original's [] sentinel when no image produced features.
    X = np.vstack(chunks) if chunks else []

    if params['normalize_feats']:
        X = normalize(X)

    # Whiten (PCA) if requested; fit on the training descriptors.
    if params['whiten']:
        pca = PCA(whiten=True)
        X = pca.fit_transform(X)
    else:
        pca = None

    # Scale data to 0 mean and unit variance
    if params['scale']:
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
    else:
        scaler = None

    return X, pca, scaler
def train_codebook(params,X):
    '''
    Fit a MiniBatchKMeans codebook on the stacked descriptors X,
    pickle it to disk under a parameter-derived filename, and return it.
    '''
    # Init kmeans instance; descriptor_size doubles as the cluster count.
    km = MiniBatchKMeans(params['descriptor_size'])

    # Training the model with our descriptors
    km.fit(X)

    # Save to disk
    pickle.dump(km,open(os.path.join(params['root'],params['root_save'],
                                     params['codebooks_dir'],'codebook_'
                                     + str(params['descriptor_size']) + "_"
                                     + params['descriptor_type']
                                     + "_" + params['keypoint_type'] + '.cb'),
                        'wb'))

    return km
def get_assignments(km,descriptors):
    """Map each local descriptor to the id of its nearest codebook cluster."""
    return km.predict(descriptors)
def bow(assignments,km):
    """Build an L2-normalised bag-of-words histogram over codebook clusters.

    Returns a (1, n_clusters) array counting how many local descriptors
    were assigned to each cluster, normalised to unit L2 norm.
    """
    # One bin per cluster centre in the codebook.
    n_clusters = np.shape(km.cluster_centers_)[0]
    hist = np.zeros(n_clusters)

    # Count descriptor-to-cluster assignments.
    for cluster_id in assignments:
        hist[cluster_id] += 1

    # sklearn's normalize expects a 2-D array; L2 is its default norm.
    return normalize(hist.reshape(1, -1))
if __name__ == "__main__":
params = get_params()
# Change to training set
params['split'] = 'train'
print "Stacking features together..."
# Save features for training set
t = time.time()
X, pca, scaler = stack_features(params)
print "Done. Time elapsed:", time.time() - t
print "Number of training features", np.shape(X)
print "Training codebook..."
t = time.time()
train_codebook(params,X)
print "Done. Time elapsed:", time.time() - t
for split in ['train','val','test']:
params['split'] = split
print "Storing bow features for %s set..."%(params['split'])
t = time.time()
get_features(params, pca,scaler)
print "Done. Time elapsed:", time.time() - t
# -
# # RANKING
# +
import os
import pickle
import numpy as np
from params import get_params
from sklearn.metrics.pairwise import pairwise_distances
def rank(params):
    """For every val/test image, rank all training images by L2 distance
    between bow vectors and write the ranking to one text file per query.

    NOTE(review): `ranking` comes from train_features.keys() and `X` from
    train_features.values(); this relies on Python 2 dicts yielding keys
    and values in matching order (true in CPython) — confirm before any
    Python 3 port.
    """
    # Bow features of the training (database) images.
    train_features = pickle.load(open(os.path.join(params['root'],
                                      params['root_save'],params['feats_dir'],
                                      'train' + "_" + str(params['descriptor_size'])
                                      + "_" + params['descriptor_type'] +
                                      "_" + params['keypoint_type'] + '.p'),'rb'))

    for split in ['val','test']:

        # Bow features of the query images for this split.
        features = pickle.load(open(os.path.join(params['root'],
                                    params['root_save'],params['feats_dir'],
                                    split + "_" +
                                    str(params['descriptor_size']) + "_"
                                    + params['descriptor_type'] + "_"
                                    + params['keypoint_type'] + '.p'),'rb'))

        # For each image id in the validation set
        for id in features.keys():

            # Get its feature
            bow_feats = features[id]

            # The ranking is composed with the ids of all training images
            ranking = train_features.keys()

            X = np.array(train_features.values())

            # The .squeeze() method reduces the dimensions of an array to the
            # minimum. E.g. if we have a numpy array of shape (400,1,100)
            # it will transform it to (400,100)
            distances = pairwise_distances(bow_feats,X.squeeze())

            # Sort the ranking according to the distances.
            # We convert 'ranking' to numpy.array to sort it, and then back to list
            # (although we could leave it as numpy array).
            ranking = list(np.array(ranking)[np.argsort(distances.squeeze())])

            # Save to text file, one ranked image id per line
            # (file extensions stripped).
            outfile = open(os.path.join(params['root'],params['root_save'],
                                        params['rankings_dir'],params['descriptor_type'],
                                        split,id.split('.')[0] + '.txt'),'w')

            for item in ranking:
                outfile.write(item.split('.')[0] + '\n')
            outfile.close()

if __name__ == "__main__":

    params = get_params()
    rank(params)
# +
# Ens dona un error que no sabem a què es degut.
|
Jupyter Notebooks/team3_project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Gumbel Softmax / Concrete VAE with BayesFlow
#
# Implements a categorical VAE using the technique introduced in [The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables (Maddison et al. 2016)](https://arxiv.org/abs/1611.00712) and [Categorical Reparameterization with Gumbel-Softmax (Jang et al. 2016)](https://arxiv.org/abs/1611.01144). The VAE architecture shown here are a bit different than the models presented in the papers, this one has 1 stochastic 20x10-ary layer with 2-layer deterministic encoder/decoders and a fixed prior.
#
# 17 Feb 2017
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
slim=tf.contrib.slim
Bernoulli = tf.contrib.distributions.Bernoulli
OneHotCategorical = tf.contrib.distributions.OneHotCategorical
RelaxedOneHotCategorical = tf.contrib.distributions.RelaxedOneHotCategorical
# black-on-white MNIST (harder to learn than white-on-black MNIST)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size=100
tau0=1.0 # initial temperature
K=10 # number of classes
N=200//K # number of categorical distributions
straight_through=False # if True, use Straight-through Gumbel-Softmax
kl_type='relaxed' # choose between ('relaxed', 'categorical')
learn_temp=False
# Encoder: binarize the input, then 512-256 MLP producing N sets of
# K-way categorical logits.
x=tf.placeholder(tf.float32, shape=(batch_size,784), name='x')
net = tf.cast(tf.random_uniform(tf.shape(x)) < x, x.dtype) # dynamic binarization
net = slim.stack(net,slim.fully_connected,[512,256])
logits_y = tf.reshape(slim.fully_connected(net,K*N,activation_fn=None),[-1,N,K])
# Relaxed (Gumbel-Softmax) sample of the latent code at temperature tau.
tau = tf.Variable(tau0,name="temperature",trainable=learn_temp)
q_y = RelaxedOneHotCategorical(tau,logits_y)
y = q_y.sample()
if straight_through:
    # Forward pass uses the hard one-hot; gradients flow through the soft sample.
    y_hard = tf.cast(tf.one_hot(tf.argmax(y,-1),K), y.dtype)
    y = tf.stop_gradient(y_hard - y) + y
# Decoder: mirror MLP producing Bernoulli logits over the 784 pixels.
net = slim.flatten(y)
net = slim.stack(net,slim.fully_connected,[256,512])
logits_x = slim.fully_connected(net,784,activation_fn=None)
p_x = Bernoulli(logits=logits_x)
x_mean = p_x.mean()
# +
# Per-example reconstruction log-likelihood under the Bernoulli decoder.
recons = tf.reduce_sum(p_x.log_prob(x),1)

# Uniform prior over the K classes of each categorical.
logits_py = tf.ones_like(logits_y) * 1./K

if kl_type=='categorical' or straight_through:
    # Analytical KL with Categorical prior
    p_cat_y = OneHotCategorical(logits=logits_py)
    q_cat_y = OneHotCategorical(logits=logits_y)
    KL_qp = tf.contrib.distributions.kl(q_cat_y, p_cat_y)
else:
    # Monte Carlo KL with Relaxed prior
    p_y = RelaxedOneHotCategorical(tau,logits=logits_py)
    KL_qp = q_y.log_prob(y) - p_y.log_prob(y)
# -
# Negative ELBO = -(reconstruction - KL), averaged over the batch.
KL = tf.reduce_sum(KL_qp,1)
mean_recons = tf.reduce_mean(recons)
mean_KL = tf.reduce_mean(KL)
loss = -tf.reduce_mean(recons-KL)
train_op=tf.train.AdamOptimizer(learning_rate=3e-4).minimize(loss)
# Train for ~50k steps, logging metrics every 100 steps.
data = []
with tf.train.MonitoredSession() as sess:
    for i in range(1,50000):
        batch = mnist.train.next_batch(batch_size)
        res = sess.run([train_op, loss, tau, mean_recons, mean_KL], {x : batch[0]})
        if i % 100 == 1:
            # [step, loss, temperature, recons, KL]
            data.append([i] + res[1:])
        if i % 1000 == 1:
            print('Step %d, Loss: %0.3f' % (i,res[1]))
    # end training - do an eval: reconstruct one test batch.
    batch = mnist.test.next_batch(batch_size)
    np_x = sess.run(x_mean, {x : batch[0]})
# Transpose so each metric is a row for plotting.
data = np.array(data).T
# +
# Training curves: loss, temperature, reconstruction term and KL term.
f,axarr=plt.subplots(1,4,figsize=(18,6))
axarr[0].plot(data[0],data[1])
axarr[0].set_title('Loss')
axarr[1].plot(data[0],data[2])
axarr[1].set_title('Temperature')
axarr[2].plot(data[0],data[3])
axarr[2].set_title('Recons')
axarr[3].plot(data[0],data[4])
axarr[3].set_title('KL')
# -
# Tile the 100 reconstructed test digits into a 10x10 image grid.
tmp = np.reshape(np_x,(-1,280,28)) # (10,280,28)
img = np.hstack([tmp[i] for i in range(10)])
plt.imshow(img)
plt.grid('off')
|
gumbel_softmax_vae_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorFlow Semi-supervised Self-training Classification with mnist Dataset
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
print(tf.__version__)
# ## Get data
# +
# Load MNIST and scale pixel values into [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# -
print("x_train.shape = {}".format(x_train.shape))
print("y_train.shape = {}".format(y_train.shape))
print("x_test.shape = {}".format(x_test.shape))
print("y_test.shape = {}".format(y_test.shape))
HEIGHT = 28
WIDTH = 28
NCLASSES = 10
# One-hot encode the integer labels via identity-matrix row lookup.
y_train = np.eye(N = NCLASSES)[y_train]
y_test = np.eye(N = NCLASSES)[y_test]
print("x_train.shape = {}".format(x_train.shape))
print("y_train.shape = {}".format(y_train.shape))
print("x_test.shape = {}".format(x_test.shape))
print("y_test.shape = {}".format(y_test.shape))
# ## Create fully supervised model for comparison
# +
# Estimator input fns: infinite shuffled batches for training, one
# deterministic pass for evaluation.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {"image": x_train},
    y = y_train,
    batch_size = 100,
    num_epochs = None,
    shuffle = True,
    queue_capacity = 5000)

eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {"image": x_test},
    y = y_test,
    batch_size = 100,
    num_epochs = 1,
    shuffle = False,
    queue_capacity = 5000)
# +
def linear_model(img, mode, hparams):
    """Single dense layer over the flattened image; returns (logits, NCLASSES)."""
    flat = tf.reshape(tensor = img, shape = [-1, HEIGHT * WIDTH])  # flatten
    logits = tf.layers.dense(inputs = flat, units = NCLASSES, activation = None)
    return logits, NCLASSES
def dnn_model(img, mode, hparams):
    """ReLU MLP with hidden widths 300/100/30; returns (logits, NCLASSES)."""
    net = tf.reshape(tensor = img, shape = [-1, HEIGHT * WIDTH])  # flatten
    for width in (300, 100, 30):
        net = tf.layers.dense(inputs = net, units = width, activation = tf.nn.relu)
    ylogits = tf.layers.dense(inputs = net, units = NCLASSES, activation = None)
    return ylogits, NCLASSES
def dnn_dropout_model(img, mode, hparams):
    """Same 300/100/30 MLP as dnn_model, with dropout before the output layer.

    Returns (logits, NCLASSES). Dropout rate comes from hparams["dprob"]
    (default 0.1) and is only active in TRAIN mode.
    """
    dprob = hparams.get("dprob", 0.1)

    X = tf.reshape(tensor = img, shape = [-1, HEIGHT * WIDTH]) #flatten
    h1 = tf.layers.dense(inputs = X, units = 300, activation = tf.nn.relu)
    h2 = tf.layers.dense(inputs = h1, units = 100, activation = tf.nn.relu)
    h3 = tf.layers.dense(inputs = h2, units = 30, activation = tf.nn.relu)
    h3d = tf.layers.dropout(
        inputs = h3,
        rate = dprob,
        training = (mode == tf.estimator.ModeKeys.TRAIN)) # only dropout when training
    ylogits = tf.layers.dense(inputs = h3d, units = NCLASSES, activation = None)
    return ylogits, NCLASSES
def cnn_model(img, mode, hparams):
    """Two conv+pool stages, dense head with optional batch norm and dropout.

    Returns (logits, NCLASSES). Bug fixed: the original passed `x = ...`
    to tf.layers.batch_normalization and tf.nn.relu, but those functions
    take `inputs` and `features` respectively, so the keyword raised a
    TypeError whenever hparams["batch_norm"] was set.
    """
    ksize1 = hparams.get("ksize1", 5)
    ksize2 = hparams.get("ksize2", 5)
    nfil1 = hparams.get("nfil1", 10)
    nfil2 = hparams.get("nfil2", 20)
    dprob = hparams.get("dprob", 0.25)

    c1 = tf.layers.conv2d(inputs = img, filters = nfil1,
                          kernel_size = ksize1, strides = 1, # ?x28x28x10
                          padding = "same", activation = tf.nn.relu)
    p1 = tf.layers.max_pooling2d(inputs = c1, pool_size = 2, strides = 2) # ?x14x14x10
    c2 = tf.layers.conv2d(inputs = p1, filters = nfil2,
                          kernel_size = ksize2, strides = 1,
                          padding = "same", activation = tf.nn.relu)
    p2 = tf.layers.max_pooling2d(inputs = c2, pool_size = 2, strides = 2) # ?x7x7x20

    outlen = p2.shape[1] * p2.shape[2] * p2.shape[3] #980
    p2flat = tf.reshape(tensor = p2, shape = [-1, outlen]) # flattened

    # Apply batch normalization
    if hparams["batch_norm"]:
        h3 = tf.layers.dense(inputs = p2flat, units = 300, activation = None)
        h3 = tf.layers.batch_normalization(
            inputs = h3,
            training = (mode == tf.estimator.ModeKeys.TRAIN)) # only batchnorm when training
        h3 = tf.nn.relu(h3)
    else:
        h3 = tf.layers.dense(inputs = p2flat, units = 300, activation = tf.nn.relu)

    # Apply dropout
    h3d = tf.layers.dropout(
        inputs = h3, rate = dprob, training = (mode == tf.estimator.ModeKeys.TRAIN))

    ylogits = tf.layers.dense(inputs = h3d, units = NCLASSES, activation = None)

    # Apply batch normalization once more
    if hparams["batch_norm"]:
        ylogits = tf.layers.batch_normalization(
            inputs = ylogits,
            training = (mode == tf.estimator.ModeKeys.TRAIN))

    return ylogits, NCLASSES
# -
def image_classifier(features, labels, mode, params):
    """Estimator model_fn: dispatches to the model named in params["model"] and
    builds the loss / train_op / eval metrics appropriate to the mode.

    Args:
        features: dict with key "image" (image tensor fed to the chosen model).
        labels: one-hot label tensor; only consumed in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value.
        params: hyperparameter dict; must contain "model" and "learning_rate".

    Returns:
        tf.estimator.EstimatorSpec with probabilities and class_ids predictions.
    """
    print("\nfeatures = \n{}".format(features))
    print("labels = \n{}".format(labels))
    print("mode = \n{}".format(mode))
    print("params = \n{}".format(params))
    # Dispatch table: model name -> model-building function defined above.
    model_functions = {
        "linear":linear_model,
        "dnn":dnn_model,
        "dnn_dropout":dnn_dropout_model,
        "cnn":cnn_model}
    model_function = model_functions[params["model"]]
    ylogits, nclasses = model_function(features["image"], mode, params)
    print("ylogits = \n{}".format(ylogits))
    probabilities = tf.nn.softmax(logits = ylogits) # shape = (current_batch_size, NCLASSES)
    print("probabilities = \n{}".format(probabilities))
    # Predicted class = argmax over the probability distribution.
    class_ids = tf.cast(
        x = tf.argmax(
            input = probabilities, axis = 1), dtype = tf.uint8) # shape = (current_batch_size,)
    print("class_ids = \n{}".format(class_ids))
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        # Soft labels are allowed here: softmax_cross_entropy_with_logits_v2
        # accepts a full probability distribution as labels.
        loss = tf.reduce_mean(
            input_tensor = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits = ylogits, labels = labels))
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels = tf.argmax(input = labels, axis = 1), predictions = class_ids)}
        if mode == tf.estimator.ModeKeys.TRAIN:
            # This is needed for batch normalization, but has no effect otherwise
            update_ops = tf.get_collection(key = tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = tf.contrib.layers.optimize_loss(
                    loss = loss,
                    global_step = tf.train.get_global_step(),
                    learning_rate = params["learning_rate"],
                    optimizer = "Adam")
        else:
            train_op = None
    else:
        # PREDICT mode: no labels, so no loss, train op, or metrics.
        loss = None
        train_op = None
        eval_metric_ops = None
    return tf.estimator.EstimatorSpec(
        mode = mode,
        predictions = {"probabilities": probabilities, "class_ids": class_ids},
        loss = loss,
        train_op = train_op,
        eval_metric_ops = eval_metric_ops,
        export_outputs = {
            "classes": tf.estimator.export.PredictOutput(
                {"probabilities": probabilities,
                 "class_ids": class_ids})})
def serving_input_fn():
    """Serving-time input receiver.

    Accepts rank-3 float64 image batches (batch, HEIGHT, WIDTH) and appends a
    trailing channel dimension, since the model functions expect rank-4 input.
    """
    placeholders = {
        "image": tf.placeholder(dtype = tf.float64, shape = [None, HEIGHT, WIDTH])}
    # Rank 3 -> rank 4: add the channels axis the model requires.
    model_features = {
        "image": tf.expand_dims(input = placeholders["image"], axis = -1)}
    return tf.estimator.export.ServingInputReceiver(
        features = model_features,
        receiver_tensors = placeholders)
def train_and_evaluate(output_dir, hparams):
    """Build an Estimator around image_classifier and run train_and_evaluate.

    Args:
        output_dir: model/checkpoint directory.
        hparams: hyperparameter dict; must contain "train_steps".

    Returns:
        The trained tf.estimator.Estimator.
    """
    # Ensure filewriter cache is clear for TensorBoard events file
    tf.summary.FileWriterCache.clear()
    EVAL_INTERVAL = 60  # seconds between checkpoints (and hence evaluations)
    supervised_estimator = tf.estimator.Estimator(
        model_fn = image_classifier,
        params = hparams,
        config = tf.estimator.RunConfig(
            save_checkpoints_secs = EVAL_INTERVAL),
        model_dir = output_dir)
    train_spec = tf.estimator.TrainSpec(
        input_fn = train_input_fn,
        max_steps = hparams["train_steps"])
    # Export a saved model for serving after each evaluation.
    exporter = tf.estimator.LatestExporter(
        name = "exporter",
        serving_input_receiver_fn = serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn = eval_input_fn,
        steps = None,  # evaluate on the full eval set
        exporters = exporter,
        throttle_secs = EVAL_INTERVAL)
    tf.estimator.train_and_evaluate(
        estimator = supervised_estimator,
        train_spec = train_spec,
        eval_spec = eval_spec)
    return supervised_estimator
# Hyperparameters for the fully supervised baseline run.
hparams = {}
hparams["train_batch_size"] = 100
hparams["learning_rate"] = 0.01
hparams["train_steps"] = 1000
hparams["ksize1"] = 5
hparams["ksize2"] = 5
hparams["nfil1"] = 10
hparams["nfil2"] = 20
hparams["dprob"] = 0.1
hparams["batch_norm"] = False
hparams["model"] = "linear"
SUPERVISED_MODEL_DIR = "supervised_trained"
shutil.rmtree(path = SUPERVISED_MODEL_DIR, ignore_errors = True) # start fresh each time
supervised_estimator = train_and_evaluate(SUPERVISED_MODEL_DIR, hparams)
eval_metrics = supervised_estimator.evaluate(input_fn = eval_input_fn, steps = None)
# ## Now create semi-supervised model
# Keep only 1% of the training labels; the rest become the "unlabeled" pool.
percent_labeled = 0.01
number_of_train_examples = x_train.shape[0]
print("number_of_train_examples = {}".format(number_of_train_examples))
number_of_labeled_train_examples = int(number_of_train_examples * percent_labeled)
number_of_unlabeled_train_examples = number_of_train_examples - number_of_labeled_train_examples
print("number_of_labeled_train_examples = {} & number_of_unlabeled_train_examples = {}".format(number_of_labeled_train_examples, number_of_unlabeled_train_examples))
# Simple split: first 1% of rows keep their labels, the remainder do not.
semi_supervised_labeled_x_train_original_arr = x_train[0:number_of_labeled_train_examples]
semi_supervised_labeled_y_train_original_arr = y_train[0:number_of_labeled_train_examples]
semi_supervised_unlabeled_x_train_original_arr = x_train[number_of_labeled_train_examples:]
print("semi_supervised_labeled_x_train_original_arr.shape = {}".format(semi_supervised_labeled_x_train_original_arr.shape))
print("semi_supervised_labeled_y_train_original_arr.shape = {}".format(semi_supervised_labeled_y_train_original_arr.shape))
print("semi_supervised_unlabeled_x_train_original_arr.shape = {}".format(semi_supervised_unlabeled_x_train_original_arr.shape))
# ## Create semi-supervised model using sparse labels
# Self-training loop: train on the labeled pool, pseudo-label unlabeled
# examples the model is very confident about, fold them into the labeled
# pool, and repeat until accuracy stops improving or nothing new is labeled.
SEMI_SUPERVISED_MODEL_DIR = "semi_supervised_trained"
EVAL_INTERVAL = 30
semi_supervised_estimator = tf.estimator.Estimator(
    model_fn = image_classifier,
    params = hparams,
    config = tf.estimator.RunConfig(
        save_checkpoints_secs = EVAL_INTERVAL),
    model_dir = SEMI_SUPERVISED_MODEL_DIR)
# Minimum max-probability for a prediction to be accepted as a pseudo-label.
confidence_threshold = 0.99
# +
shutil.rmtree(path = SEMI_SUPERVISED_MODEL_DIR, ignore_errors = True) # start fresh each time
semi_supervised_labeled_x_train_arr = semi_supervised_labeled_x_train_original_arr
semi_supervised_labeled_y_train_arr = semi_supervised_labeled_y_train_original_arr
semi_supervised_unlabeled_x_train_arr = semi_supervised_unlabeled_x_train_original_arr
# Non-empty sentinel so the loop condition passes on the first iteration.
new_labeled_x_train_arr = np.zeros([1])
accuracy = 0.000001
old_accuracy = 0.0
loop_counter = 0
while semi_supervised_unlabeled_x_train_arr.shape[0] > 0 and new_labeled_x_train_arr.shape[0] > 0 and accuracy > old_accuracy:
    print("\nloop_counter = {}, number_of_labeled_examples = {}, number_of_unlabeled_examples = {}\n".format(loop_counter, semi_supervised_labeled_x_train_arr.shape[0], semi_supervised_unlabeled_x_train_arr.shape[0]))
    # Train on currently labeled data
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x = {"image": semi_supervised_labeled_x_train_arr},
        y = semi_supervised_labeled_y_train_arr,
        batch_size = 32,
        num_epochs = None,
        shuffle = True)
    semi_supervised_estimator.train(
        input_fn = train_input_fn,
        steps = 2000)
    # Check evaluation metrics on held out evaluation set now that training is over
    eval_metrics = semi_supervised_estimator.evaluate(
        input_fn = eval_input_fn,
        steps = None)
    old_accuracy = accuracy
    accuracy = eval_metrics["accuracy"]
    # Now predict from the unlabeled set
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x = {"image": semi_supervised_unlabeled_x_train_arr},
        y = None,
        batch_size = 512,
        num_epochs = 1,
        shuffle = False)
    predictions = [prediction
                   for prediction in semi_supervised_estimator.predict(
                       input_fn = predict_input_fn)]
    # Get the probabilities from the prediction list generated from the estimator
    probabilities = np.array(object = [prediction["probabilities"] for prediction in predictions])
    # Check if our predictions are above the confidence threshold
    confidence_condition = np.amax(a = probabilities, axis = 1) > confidence_threshold
    # Keep the confidently predicted examples; their predicted probability
    # vectors become soft labels for the next training round.
    new_labeled_x_train_arr = semi_supervised_unlabeled_x_train_arr[confidence_condition]
    new_labeled_y_train_arr = probabilities[confidence_condition]
    # NOTE(review): np.concatenate's first argument is positional-only in newer
    # NumPy releases; `seq=` relies on an older NumPy — confirm the pinned version.
    semi_supervised_labeled_x_train_arr = np.concatenate(
        seq = [semi_supervised_labeled_x_train_arr, new_labeled_x_train_arr], axis = 0)
    semi_supervised_labeled_y_train_arr = np.concatenate(
        seq = [semi_supervised_labeled_y_train_arr, new_labeled_y_train_arr], axis = 0)
    # Remove the confident predictions leaving only the unconfident predictions to go another round through the loop
    semi_supervised_unlabeled_x_train_arr = semi_supervised_unlabeled_x_train_arr[~confidence_condition]
    loop_counter += 1
# -
# ## Use kmeans to improve results
# ### First use PCA to reduce the dimensionality going into kmeans
# +
# PCA via SVD: project the flattened images onto the top 10 singular
# directions before clustering.
number_of_dimensions = 10
s, u, v = tf.svd(
    tensor = tf.convert_to_tensor(
        value = x_train.reshape([-1, HEIGHT * WIDTH]),
        dtype = tf.float32),
    full_matrices = False,
    compute_uv = True)
print("s = \n{}".format(s))
print("u = \n{}".format(u))
print("v = \n{}".format(v))
sigma = tf.diag(diagonal = s)
print("sigma = \n{}".format(sigma))
# Scores = U * Sigma restricted to the first number_of_dimensions columns.
x_train_pca = tf.matmul(a = u, b = sigma[:, 0:number_of_dimensions])
print("x_train_pca = \n{}".format(x_train_pca))
with tf.Session() as sess:
    x_train_pca_arr = sess.run(fetches = x_train_pca)
print("x_train_pca_arr.shape = \n{}".format(x_train_pca_arr.shape))
# -
KMEANS_MODEL_DIR = "kmeans_estimator"
# +
shutil.rmtree(path = KMEANS_MODEL_DIR, ignore_errors = True) # start fresh each time
def input_fn():
    # One pass over the PCA-reduced training data per call.
    return tf.train.limit_epochs(
        tensor = tf.convert_to_tensor(
            value = x_train_pca_arr,
            dtype = tf.float32),
        num_epochs = 1)
num_clusters = 10
kmeans = tf.contrib.factorization.KMeansClustering(
    num_clusters = num_clusters,
    model_dir = KMEANS_MODEL_DIR,
    initial_clusters = tf.contrib.factorization.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
    use_mini_batch = True)
# Train
num_iterations = 30
previous_centers = None
for _ in range(num_iterations):
    kmeans.train(input_fn = input_fn)
    cluster_centers = kmeans.cluster_centers()
    previous_centers = cluster_centers
print("cluster centers = \n{}".format(cluster_centers))
# Map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn = input_fn))
# -
cluster_indices_arr = np.array(object = cluster_indices)
cluster_indices_arr.shape
# Look up each point's assigned cluster center.
point_clusters_arr = cluster_centers[cluster_indices_arr, :]
point_clusters_arr.shape
squared_error_arr = (x_train_pca_arr - point_clusters_arr)**2
squared_error_arr.shape
squared_euclidean_distance = np.sum(a = squared_error_arr, axis = 1)
squared_euclidean_distance.shape
kmeans_df = pd.DataFrame({"cluster_id": cluster_indices_arr,
                          "squared_euclidean_distance": squared_euclidean_distance})
kmeans_df.head()
kmeans_df.groupby("cluster_id").count()
kmeans_df.groupby("cluster_id").mean()
kmeans_df.groupby("cluster_id")["squared_euclidean_distance"].nsmallest(n = 5)
# The 100 points closest to each cluster center; level 1 of the MultiIndex
# holds the original row indices into x_train.
closest_indices = np.array(
    object = kmeans_df.groupby("cluster_id")["squared_euclidean_distance"].nsmallest(n = 100).index.get_level_values(1))
closest_indices.shape
# ## Try semi-supervised again
# Re-seed the labeled pool with the kmeans-selected examples (the points
# closest to each cluster center) instead of an arbitrary prefix of x_train.
semi_supervised_labeled_x_train_original_arr = x_train[closest_indices]
semi_supervised_labeled_y_train_original_arr = y_train[closest_indices]
# Everything not selected by kmeans goes into the unlabeled pool.
semi_supervised_unlabeled_x_train_original_arr = x_train[np.isin(
    element = np.arange(number_of_train_examples),
    test_elements = closest_indices,
    assume_unique = True,
    invert = True)]
print("semi_supervised_labeled_x_train_original_arr.shape = {}".format(semi_supervised_labeled_x_train_original_arr.shape))
print("semi_supervised_labeled_y_train_original_arr.shape = {}".format(semi_supervised_labeled_y_train_original_arr.shape))
print("semi_supervised_unlabeled_x_train_original_arr.shape = {}".format(semi_supervised_unlabeled_x_train_original_arr.shape))
# +
# Same self-training loop as above, starting from the kmeans-seeded pools.
shutil.rmtree(path = SEMI_SUPERVISED_MODEL_DIR, ignore_errors = True) # start fresh each time
semi_supervised_labeled_x_train_arr = semi_supervised_labeled_x_train_original_arr
semi_supervised_labeled_y_train_arr = semi_supervised_labeled_y_train_original_arr
semi_supervised_unlabeled_x_train_arr = semi_supervised_unlabeled_x_train_original_arr
# Non-empty sentinel so the loop condition passes on the first iteration.
new_labeled_x_train_arr = np.zeros([1])
accuracy = 0.000001
old_accuracy = 0.0
loop_counter = 0
while semi_supervised_unlabeled_x_train_arr.shape[0] > 0 and new_labeled_x_train_arr.shape[0] > 0 and accuracy > old_accuracy:
    print("\nloop_counter = {}, number_of_labeled_examples = {}, number_of_unlabeled_examples = {}\n".format(loop_counter, semi_supervised_labeled_x_train_arr.shape[0], semi_supervised_unlabeled_x_train_arr.shape[0]))
    # Train on currently labeled data
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x = {"image": semi_supervised_labeled_x_train_arr},
        y = semi_supervised_labeled_y_train_arr,
        batch_size = 32,
        num_epochs = None,
        shuffle = True)
    semi_supervised_estimator.train(
        input_fn = train_input_fn,
        steps = 2000)
    # Check evaluation metrics on held out evaluation set now that training is over
    eval_metrics = semi_supervised_estimator.evaluate(
        input_fn = eval_input_fn,
        steps = None)
    old_accuracy = accuracy
    accuracy = eval_metrics["accuracy"]
    # Now predict from the unlabeled set
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x = {"image": semi_supervised_unlabeled_x_train_arr},
        y = None,
        batch_size = 512,
        num_epochs = 1,
        shuffle = False)
    predictions = [prediction
                   for prediction in semi_supervised_estimator.predict(
                       input_fn = predict_input_fn)]
    # Get the probabilities from the prediction list generated from the estimator
    probabilities = np.array(object = [prediction["probabilities"]
                                       for prediction in predictions])
    # Check if our predictions are above the confidence threshold
    confidence_condition = np.amax(a = probabilities, axis = 1) > confidence_threshold
    # Keep the confidently predicted examples; their predicted probability
    # vectors become soft labels for the next training round.
    new_labeled_x_train_arr = semi_supervised_unlabeled_x_train_arr[confidence_condition]
    new_labeled_y_train_arr = probabilities[confidence_condition]
    # NOTE(review): np.concatenate's first argument is positional-only in newer
    # NumPy releases; `seq=` relies on an older NumPy — confirm the pinned version.
    semi_supervised_labeled_x_train_arr = np.concatenate(
        seq = [semi_supervised_labeled_x_train_arr, new_labeled_x_train_arr], axis = 0)
    semi_supervised_labeled_y_train_arr = np.concatenate(
        seq = [semi_supervised_labeled_y_train_arr, new_labeled_y_train_arr], axis = 0)
    # Remove the confident predictions leaving only the unconfident predictions to go another round through the loop
    semi_supervised_unlabeled_x_train_arr = semi_supervised_unlabeled_x_train_arr[~confidence_condition]
    loop_counter += 1
# -
|
courses/machine_learning/asl/open_project/semi_supervised_self_training/tf_semi_supervised_self_training_classification_mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ## Inputs
# ### subscript conventions
# subscript| meaning
# ---|---
# S | shaft
# H | hub
# O | outer
# I | inner
# R | room temp
# h | high temp
# Max | maximum
# Min | minimum
# f | is a function
# fg | is a function generator
R.Version()$nickname
# input parameters (US customary units; see subscript conventions above)
nu_H <- 0.33 # Poisson's number for the hub
nu_S <- 0.35 # Poisson's number for the shaft
E_H <- 10e6 # psi, modulus of elasticity of the hub
E_S <- 29e6 # psi, modulus of elasticity of the shaft
mu_s <- 0.47 # unitless, static coefficient of friction (hub and shaft)
D_SOR <- 0.375 # inches, shaft outer diameter at room temp
D_HOR <- 0.650 # inches, hub outer diameter at room temp
D_SIR <- 0.120 # inches, shaft inner diameter at room temp
L <- 1 # inches, interface length
alpha_H <- 13.5e-6 # 1/degF, CTE for the hub
alpha_S <- 5.6e-7 # 1/degF, CTE for the shaft
T_R <- 70 # degF, expected working temp
delta_h <- -0.003 # inches; -delta_h is reported as the assembly clearance
Gamma_Max_seed <- 100*12 # inch pounds, restraining torque
# ## main routines:
# +
# a fxn that will evaluate the routine in the current env.
# Because mainQuote is eval'd in the caller's frame, all intermediate
# variables (p_des, D_HIR, delta_R, Gamma_Max, T_h_prescribed, ...) are
# created there, where reportValues() later reads them.
mainCalc <- function() eval(mainQuote, parent.frame())
mainQuote <- quote({ # main routine to execute
# the interference pressure as a function of approximate desired
# restraining torque
p_torque_f <- function(Gamma_Max) 2*Gamma_Max/(mu_s*L*pi*D_SOR^2)
p_des <- p_torque_f(Gamma_Max=Gamma_Max_seed)
# some possible inner hub diameters (room temp)
D_HIR_vec <- seq(from=D_SOR-0.01, to=D_SOR, length.out=1e3)
# get the interference pressure from the diameters
p_int_f <- function(D_SO, D_SI, D_HO, D_HI)
(D_SO - D_HI) / (
D_HI/E_H*((D_HO^2+D_HI^2)/(D_HO^2-D_HI^2)+nu_H)
+D_SO/E_S*((D_SO^2+D_SI^2)/(D_SO^2-D_SI^2)-nu_S)
)
# some possible interference pressures
# concordant with hub inner diameters
p_vec <- p_int_f(D_SOR, D_SIR, D_HOR, D_HIR_vec)
# plot(D_HIR_vec, p_vec, type='l')
# abline(h=p_des)
# lazy root finding: pick the candidate diameter whose pressure is
# closest to the desired pressure
D_HIR <- D_HIR_vec[which.min(abs(p_des-p_vec))]
cat('calculated inner hub diameter at room temp: ', D_HIR, '\n')
# round to nearest thou
D_HIR <- round(D_HIR*1e3)/1e3
# get temperature to have the desired assembly clearance
T_h_prescribed <- T_R+(delta_h-D_SOR+D_HIR)/(alpha_S*D_SOR-alpha_H*D_HIR)
# working interference (how hard will this be to machine?)
delta_R <- D_SOR-D_HIR
# actual restraining torque
Gamma_Max <- p_des*mu_s*L*pi*D_SOR^2/2
})
# Print the quantities computed by mainCalc(); relies on those variables
# existing in the calling environment (mainQuote is eval'd there).
reportValues <- function()
{
cat('assembly clearance: ', -delta_h, '\n')
cat('working-temp interference: ', delta_R, '\n')
cat('hub inner diameter (room temp): ', D_HIR, '\n')
cat('shaft outer diameter (room temp): ', D_SOR, '\n')
cat('working temp restraining torque: ', Gamma_Max/12, ' foot pounds\n')
# Fixed typo in the printed message: "achive" -> "achieve".
cat('required temp to achieve assembly clearance: ', T_h_prescribed, '\n')
}
# -
# ## Cases:
# ### aluminum hub, steel shaft
alpha_H <- 13.5e-6 # 1/degF, CTE for the hub
alpha_S <- 5.6e-7 # 1/degF, CTE for the shaft
# #### very tight tolerances
# This temp is achievable with the oven in EB 480, but requires machining tolerances under 1 thou.
delta_h <- -0.001
Gamma_Max_seed <- 50*12
mainCalc()
reportValues()
# #### reasonable tolerances
# We could roll with machining tolerances of +/- 1 thou and use a torch to heat the parts.
# Although, it might be tricky to actually get uniformly heated soot-free parts.
delta_h <- -0.002
Gamma_Max_seed <- 200*12
mainCalc()
reportValues()
|
cad/module/jigs/shrinkFit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework 5 - Bayesian Networks
# **Name**: <NAME>
# **Student Number**: 96100114
# **Sources**: My brain, notes and computer
# ### Install & import the required packages
# You are to implement the algorithms yourself; you are forbidden from using dedicated packages!
# !pip install numpy tqdm scipy seaborn matplotlib
import numpy as np
import random
import scipy as sp
from scipy.stats import norm
from tqdm import *
import seaborn as sns
import matplotlib.pyplot as plt
# ### Load input data
# `input.txt` contains a sample log of Vahid's location, and `ground_truth.txt` holds the initial state and the hidden state's correct location at each time-frame; employ it to validate your findings.
# Parse the sensor log: header line is "n k mean_pace t" (n sensors,
# pace decay factor k, mean step length, t time frames), followed by one
# "x y mu sigma" line per sensor, then t lines of observed distances.
with open('input.txt', 'r') as file:
    n, k, mean_pace, t = file.readline().split(' ')
    n, t, k, mean_pace = int(n), int(t), float(k), float(mean_pace)
    sen_error_mean, sen_error_std, sen_xs, sen_ys = list(), list(), list(), list()
    for i in range(n):
        x,y, mu, sig = map(float, file.readline().split(' '))
        sen_xs.append(x)
        sen_ys.append(y)
        sen_error_mean.append(mu)
        sen_error_std.append(sig)
    sen_error_mean = np.array(sen_error_mean)
    sen_error_std = np.array(sen_error_std)
    # Sensor positions as an (n, 2) array of (x, y) pairs.
    sens = np.stack([np.array(sen_xs), np.array(sen_ys)], axis=1)
    # One length-n distance vector per time frame.
    distances = [np.array(list(map(float, file.readline().split(' ')))) for i in range(t)]
with open('ground_truth.txt', 'r') as file:
    ground_truth = [np.array(list(map(float, line.split(' ')))) for line in file.readlines()]
# ### Implement utility functions
# Implement the utility functions required for particle filtering (the following structure is merely a suggestion and you can choose to discard it).
# Scratch cell: np.minimum broadcasts the scalar upper bound over the list.
np.minimum([2, 3, 4, 5], 3)
def move(particles: np.array, mean_pace: float, reverse=False):
    """Take one random step for every particle.

    Each step has an independently chosen sign per axis and an exponentially
    distributed magnitude with mean *mean_pace*.  The *reverse* flag is
    accepted for interface compatibility but is not used.
    """
    def random_step():
        signs = np.array([2 * np.random.randint(2) - 1,
                          2 * np.random.randint(2) - 1])
        return signs * np.random.exponential(mean_pace)

    moved = []
    for particle in particles:
        moved.append(particle + random_step())
    return moved
def distance(particles: np.array, sensors: np.array):
    """Pairwise Euclidean distances, one row per sensor, clipped to [0, 200]."""
    raw = [[np.linalg.norm(particle - sensor) for particle in particles]
           for sensor in sensors]
    return np.clip(raw, 0, 200)
def weights(distances:np.array, gt_distances:np.array, sen_err_means:np.array, sen_err_stds:np.array):
    """Per-particle likelihoods: product over sensors of N(mu, sigma).pdf of
    the elementwise difference between the two distance rows.

    NOTE(review): despite the parameter names, the notebook calls this with the
    observed readings as *distances* and the particle-predicted distances as
    *gt_distances*; only the difference matters when the error mean is zero.
    """
    per_sensor = np.array([
        norm(mu, sigma).pdf(row_a - row_b)
        for mu, sigma, row_a, row_b
        in zip(sen_err_means, sen_err_stds, gt_distances, distances)])
    # After transposing, each column holds one particle's per-sensor
    # densities; multiply them to get the joint likelihood.
    return [np.prod(column) for column in per_sensor.transpose()]
def resample(liklihood:np.array, particles:np.array):
    """Resample particles with replacement, each drawn with probability
    proportional to its likelihood; returns as many particles as weights."""
    total = sum(liklihood)
    normalized = [w / total for w in liklihood]
    return random.choices(particles, weights=normalized, k=len(liklihood))
# ## A. Particle Filtering
# Forward particle filter: predict (move), weight by the observed sensor
# distances, record the best particle, then resample.
top_loc = []
all_loc = []
SAMPLES = 5000
ws = np.ones(SAMPLES)
# Uniform initial particles over the 0..200 grid (inclusive).
particles = np.array([[np.random.randint(201), np.random.randint(201)] for _ in range(SAMPLES)])
pace = mean_pace
for d in tqdm(distances):
    particles = move(particles, pace)
    distances_p = distance(particles, sens)
    ws = weights(d, distances_p, sen_error_mean, sen_error_std)
    # Highest-weight particle is this frame's location estimate.
    top_loc.append(particles[np.argmax(ws)])
    all_loc = all_loc + particles
    particles = resample(ws, particles)
    # Pace shrinks (k < 1) or grows (k > 1) geometrically each frame.
    pace *= k
top_loc = np.array(top_loc)
all_loc = np.array(all_loc)
# Three highest-weight particles at the final frame vs. the ground truth.
tops = np.argsort(ws)[-3:]
print(particles[tops[0]], particles[tops[1]], particles[tops[2]])
print('ground truth', *ground_truth[-1])
# Elaborate on why your findings vary from the ground truth? (if they do)
#
# ## B. Plot your trajectory prediction
# Quiver plot of the estimated trajectory: arrows point from each frame's
# estimate to the next.
plt.figure(figsize=(20, 20))
diff = top_loc[1:] - top_loc[:-1]
# for i in range(len(top_loc) - 1):
#     plt.arrow(*top_loc[i], *top_loc[i+1])
plt.quiver(top_loc[:,0], top_loc[:,1], diff[:, 0], diff[:,1], linewidths=1, headaxislength=3, scale=40, width=0.004)
sns.scatterplot(x=top_loc[:,0], y=top_loc[:,1], s=500, hue=1)
# Provide a short analysis of your resulting graph.
# Scatter of every particle from every frame (the posterior cloud).
plt.figure(figsize=(20, 20))
sns.scatterplot(x=all_loc[:, 0], y=all_loc[:, 1], alpha=0.4)
# ## C. Predict the trajectory's initial point
# Backward pass: start all particles at the final ground-truth location and
# run the filter over the observations in reverse to recover the start point.
init = ground_truth[-1]
SAMPLES = 5000
ws = np.ones(SAMPLES)
particles = np.array([init for _ in range(SAMPLES)])
# Resume from the pace the forward pass ended with and undo the decay
# (divide by k) while stepping backwards.
pace_r = pace
for d in tqdm(reversed(distances), total=len(distances)):
    particles = move(particles, pace_r)
    distances_p = distance(particles, sens)
    ws = weights(d, distances_p, sen_error_mean, sen_error_std)
    particles = resample(ws, particles)
    pace_r /= k
tops = np.argsort(ws)[-3:]
print(particles[tops[0]], particles[tops[1]], particles[tops[2]])
print('ground truth', *ground_truth[0])
# ## D.
# Elaborate on whether this process is always a possibility for HMMs?
# <div dir="rtl">
# خیر، در صورتی که توابع گذر از استیت به استیت بعد معکوس پذیر باشد، ممکن است و اگر معکوس پذیر نبوده یا پیچیدگی محاسباتی بالا داشته باشد، ممکن نیست.
# </div>
# ## E. Analysis of different k values
# Briefly explain what values of $k$ would eventually lead to better predictions?
# <div dir="rtl">
# به ازای مقادیر کوچکتر از یک چون به مرور مکان فرد به محل مشخصی میل می کند، مسئله خوب حل می شود و ذرات به مقدار مشخصی همگرا می شوند اما اگر از ۱ بیشتر باشد عملا در هر مرحله، یک جهش تصادفی با گام بلندتر به محلی تصادفی می کند.
# </div>
|
AI/AI HW5/Practical/Practical-Notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="312d1fc2"
# -*- coding: utf-8 -*-
import pymaterials as pm
import inventory
import shipping
import sales
import report
import pandas as pd
import constants as cn
# -
# Run the materials pipeline; presumably df is the materials table and ro the
# reorder data — TODO confirm against pymaterials.run().
df, ro = pm.run()
# Rows whose released-available quantity is at or below the reorder point.
df[(df['R-Avail'] < df['Reorder'] * 1.00 )]
# Rows low on either on-hand stock (vs. released demand) or available stock
# (vs. a 25%-padded reorder point).
df[(df['On Hand'] < df['Released'] * 4 ) | (df['R-Avail'] < df['Reorder'] * 1.25 )]
|
materials.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Income Prediction Explanations
#
# 
#
# We will use an SKLearn classifier built on the [1996 US Census DataSet](https://archive.ics.uci.edu/ml/datasets/adult) which predicts high (>50K$) or low (<=50K$) income based on the Census demographic data.
#
# The Kfserving resource provides:
# * A pretrained sklearn model stored on a Google bucket
# * A pretrained Tabular [Seldon Alibi](https://github.com/SeldonIO/alibi) Explainer. The training has taken samples of the training data and stored the categorical mapping to allow for human readable results. See the [Alibi Docs](https://docs.seldon.io/projects/alibi/en/stable/) for further details of training and setting up a model explainer for your data.
#
# ** For users of KFServing v0.3.0 please follow the [notebook for that branch](https://github.com/kubeflow/kfserving/blob/v0.3.0/docs/samples/explanation/alibi/income/income_explanations.ipynb).
# !pip install -r requirements.txt
# !pygmentize income.yaml
# !kubectl apply -f income.yaml
# IPython shell-capture lines (the `!(...)` syntax): grab the istio ingress IP
# and the inference service hostname from the cluster.
CLUSTER_IPS=!(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
CLUSTER_IP=CLUSTER_IPS[0]
print(CLUSTER_IP)
SERVICE_HOSTNAMES=!(kubectl get inferenceservice income -o jsonpath='{.status.url}' | cut -d "/" -f 3)
SERVICE_HOSTNAME=SERVICE_HOSTNAMES[0]
print(SERVICE_HOSTNAME)
import sys
sys.path.append('../')
from alibi_helper import *
from alibi.datasets import fetch_adult
adult = fetch_adult()
# Build a readable mapping: feature index -> {category code -> category name}.
cmap = dict.fromkeys(adult.category_map.keys())
for key, val in adult.category_map.items():
    cmap[key] = {i: v for i, v in enumerate(val)}
# One low-income and one high-income example row from the dataset.
idxLow = 0
idxHigh = 32554
for idx in [idxLow,idxHigh]:
    show_row([getFeatures([adult.data[idx]], cmap)],adult)
    show_prediction(predict(adult.data[idx:idx+1].tolist(),"income",adult,SERVICE_HOSTNAME,CLUSTER_IP))
# ## Get Explanation for Low Income Prediction
exp = explain(adult.data[idxLow:idxLow+1].tolist(),"income",SERVICE_HOSTNAME,CLUSTER_IP)
show_anchors(exp['data']['anchor'])
# Show precision. How likely predictions using the Anchor features would produce the same result.
show_bar([exp['data']['precision']],[''],"Precision")
show_bar([exp['data']['coverage']],[''],"Coverage")
show_feature_coverage(exp['data'])
show_examples(exp['data'],0,adult)
show_examples(exp['data'],0,adult,False)
# ## Get Explanation for High Income Example
exp = explain(adult.data[idxHigh:idxHigh+1].tolist(),"income", SERVICE_HOSTNAME,CLUSTER_IP)
show_anchors(exp['data']['anchor'])
# Show precision. How likely predictions using the Anchor features would produce the same result.
show_bar([exp['data']['precision']],[''],"Precision")
show_bar([exp['data']['coverage']],[''],"Coverage")
show_feature_coverage(exp['data'])
show_examples(exp['data'],0,adult)
show_examples(exp['data'],0,adult,False)
# ## Teardown
# !kubectl delete -f income.yaml
|
docs/samples/explanation/alibi/income/income_explanations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:gae]
# language: python
# name: conda-env-gae-py
# ---
# # Network Visualizations and Statistics
# ---
# 1. Use matplotlib to generate **visualizations** of each Facebook ego network and each randomly generated NetworkX network. Save the visualizations in `.pdf` files in `visualizations` folder.
# 2. Use networkx to **analyze** each Facebook ego network and each randomly generated NetworkX network. Save the results as dictionaries in `.pkl` files in `network-statistics` folder. (Numbers to crunch: Num. connected components, Num. nodes, Num. edges, Density, Diameter, Avg. clustering coefficient, Avg. degree, Transitivity)
import networkx as nx
import matplotlib.pyplot as plt
import pickle
from math import log
# +
def save_visualization(g, file_name, title):
    """Draw *g* with a spring layout and save the figure to *file_name*.

    Node sizes scale with log(degree + 1).  The *title* parameter is currently
    unused (the plt.title call is commented out).

    NOTE(review): `degrees.keys()` / `degrees.values()` treats nx.degree(g) as
    a dict, which matches networkx 1.x; in networkx >= 2 nx.degree returns a
    DegreeView — confirm the pinned networkx version.
    """
    plt.figure(figsize=(18,18))
    degrees = nx.degree(g)
    # Draw networkx graph -- scale node size by log(degree+1)
    nx.draw_spring(g, with_labels=False,
                   linewidths=2.0,
                   nodelist=degrees.keys(),
                   node_size=[log(degree_val+1) * 100 for degree_val in degrees.values()], \
                   node_color='r')
    # Create black border around node shapes
    ax = plt.gca()
    ax.collections[0].set_edgecolor("#000000")
    # plt.title(title)
    plt.savefig(file_name)
    plt.clf()
# -
def get_network_statistics(g):
    """Compute summary statistics for graph *g* and return them as a dict.

    Diameter is None when the graph has more than one connected component
    (path length between components is infinite).

    NOTE(review): `g.degree().values()` treats the degree result as a dict,
    which matches networkx 1.x — confirm the pinned networkx version.
    """
    num_connected_components = nx.number_connected_components(g)
    num_nodes = nx.number_of_nodes(g)
    num_edges = nx.number_of_edges(g)
    density = nx.density(g)
    avg_clustering_coef = nx.average_clustering(g)
    # Average degree = sum of all node degrees / node count.
    avg_degree = sum(g.degree().values()) / float(num_nodes)
    transitivity = nx.transitivity(g)
    if num_connected_components == 1:
        diameter = nx.diameter(g)
    else:
        diameter = None # infinite path length between connected components
    network_statistics = {
        'num_connected_components':num_connected_components,
        'num_nodes':num_nodes,
        'num_edges':num_edges,
        'density':density,
        'diameter':diameter,
        'avg_clustering_coef':avg_clustering_coef,
        'avg_degree':avg_degree,
        'transitivity':transitivity
    }
    return network_statistics
def save_network_statistics(g, file_name):
    """Compute summary statistics for graph *g* and pickle them to *file_name*."""
    with open(file_name, 'wb') as out_file:
        pickle.dump(get_network_statistics(g), out_file)
# ## Facebook Ego Networks
# +
FB_EGO_USERS = [0, 107, 1684, 1912, 3437, 348, 3980, 414, 686, 698]
# Individual ego networks
# Each pickle holds an (adjacency matrix, feature matrix) pair; only the
# adjacency matrix is used to build the graph.
for user in FB_EGO_USERS:
    network_dir = './fb-processed/{0}-adj-feat.pkl'.format(user)
    with open(network_dir, 'rb') as f:
        adj, features = pickle.load(f)
    G = nx.Graph(adj)
    visualization_file_name = './visualizations/fb-ego-{0}-visualization.pdf'.format(user)
    statistics_file_name = './network-statistics/fb-ego-{0}-statistics.pkl'.format(user)
    title = 'Facebook Ego Network: ' + str(user)
    save_visualization(G, visualization_file_name, title)
    save_network_statistics(G, statistics_file_name)
# -
# Combined FB network
combined_dir = './fb-processed/combined-adj-sparsefeat.pkl'
with open(combined_dir, 'rb') as f:
    adj, features = pickle.load(f)
G = nx.Graph(adj)
visualization_file_name = './visualizations/fb-combined-visualization.pdf'
statistics_file_name = './network-statistics/fb-combined-statistics.pkl'
title = 'Facebook Ego Networks: Combined'
save_visualization(G, visualization_file_name, title)
save_network_statistics(G, statistics_file_name)
# ## NetworkX Random Networks
# +
# NOTE(review): this cell is Python 2 (dict.iteritems(), the print statement
# below, and [N_SMALL/10]*10 relying on integer division) — it will not run
# unmodified under Python 3.
RANDOM_SEED = 0
# Dictionary to store all nx graphs
nx_graphs = {}
# Small graphs
N_SMALL = 200
nx_graphs['er-small'] = nx.erdos_renyi_graph(n=N_SMALL, p=.02, seed=RANDOM_SEED) # Erdos-Renyi
nx_graphs['ws-small'] = nx.watts_strogatz_graph(n=N_SMALL, k=5, p=.1, seed=RANDOM_SEED) # Watts-Strogatz
nx_graphs['ba-small'] = nx.barabasi_albert_graph(n=N_SMALL, m=2, seed=RANDOM_SEED) # Barabasi-Albert
nx_graphs['pc-small'] = nx.powerlaw_cluster_graph(n=N_SMALL, m=2, p=.02, seed=RANDOM_SEED) # Powerlaw Cluster
nx_graphs['sbm-small'] = nx.random_partition_graph(sizes=[N_SMALL/10]*10, p_in=.1, p_out=.01, seed=RANDOM_SEED) # Stochastic Block Model
# Larger graphs
N_LARGE = 1000
nx_graphs['er-large'] = nx.erdos_renyi_graph(n=N_LARGE, p=.01, seed=RANDOM_SEED) # Erdos-Renyi
nx_graphs['ws-large'] = nx.watts_strogatz_graph(n=N_LARGE, k=3, p=.1, seed=RANDOM_SEED) # Watts-Strogatz
nx_graphs['ba-large'] = nx.barabasi_albert_graph(n=N_LARGE, m=2, seed=RANDOM_SEED) # Barabasi-Albert
nx_graphs['pc-large'] = nx.powerlaw_cluster_graph(n=N_LARGE, m=2, p=.02, seed=RANDOM_SEED) # Powerlaw Cluster
nx_graphs['sbm-large'] = nx.random_partition_graph(sizes=[N_LARGE/10]*10, p_in=.05, p_out=.005, seed=RANDOM_SEED) # Stochastic Block Model
# -
# Remove isolates from random graphs
for g_name, nx_g in nx_graphs.iteritems():
    isolates = nx.isolates(nx_g)
    if len(isolates) > 0:
        for isolate_node in isolates:
            nx_graphs[g_name].remove_node(isolate_node)
# Visualize and summarize each random graph, flagging disconnected ones.
for name, g in nx_graphs.iteritems():
    if nx.number_connected_components(g) > 1:
        print 'Unconnected graph: ', name
    visualization_file_name = './visualizations/{0}-visualization.pdf'.format(name)
    statistics_file_name = './network-statistics/{0}-statistics.pkl'.format(name)
    title = "Random NetworkX Graph: " + name
    save_visualization(g, visualization_file_name, title)
    save_network_statistics(g, statistics_file_name)
|
network-visualizations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def getRow(rowIndex):
    """Return row `rowIndex` (0-indexed) of Pascal's triangle.

    Improvements over the previous version:
    - O(rowIndex) extra space: a single row is updated in place instead of
      materialising every row of the triangle.
    - no shadowing of the builtin `sum`.

    :param rowIndex: non-negative row index (row 0 is [1]).
    :return: list of binomial coefficients for that row.
    """
    row = [1]
    for _ in range(rowIndex):
        # Update right-to-left so each cell still sees the previous row's
        # value of its left neighbour.
        for j in range(len(row) - 1, 0, -1):
            row[j] += row[j - 1]
        row.append(1)
    return row
getRow(5)
# -
|
Anjani/Leetcode/Array/Pascal's Triangle II.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Function rgb2hsv
# ## Synopse
# Convert RGB to HSV image.
#
# - **g = rgb2hsv(rgb_img) **
#
# - **g**: Image.
# - **rgb_img**: Image rgb.
# +
def rgb2hsv(rgb_img):
    """Convert an RGB image (H x W x 3) to HSV.

    Applies rgb2hsvmap to every (r, g, b) pixel triple and reassembles
    the results into an array with the same shape as the input.
    """
    import numpy as np
    channels = [rgb_img[:, :, c].ravel() for c in range(3)]
    triples = [rgb2hsvmap(red, green, blue)
               for red, green, blue in zip(*channels)]
    return np.array(triples).reshape(rgb_img.shape)
def rgb2hsvmap(r, g, b):
    """Convert one RGB triple to (h, s, v).

    HSV: Hue (position in the spectrum, returned as a fraction of the
    360-degree circle in [0, 1)), Saturation (color "purity") and Value
    (brightness). Adapted from CPython's Lib/colorsys.py rgb_to_hsv().
    """
    v = max(r, g, b)            # value = brightest channel
    lo = min(r, g, b)
    if lo == v:
        # Achromatic (grey) pixel: hue and saturation are zero by convention.
        return 0.0, 0.0, v
    spread = v - lo
    s = spread / v              # saturation: chroma relative to value
    # Normalised distance of each channel from the maximum.
    rc = (v - r) / spread
    gc = (v - g) / spread
    bc = (v - b) / spread
    if v == r:
        h = bc - gc
    elif v == g:
        h = 2.0 + rc - bc
    else:
        h = 4.0 + gc - rc
    return (h / 6.0) % 1.0, s, v
# -
# ## Description
# Returns a image using HSV color model: the H (Hue), S (Saturation), V (Value) of a RGB image. The HSV model is a representation in cylindrical coordinates of the points of the RGB version.
# ## Examples
# - **Example 1**
# Demo/test harness: only runs when the file is executed directly.
testing = (__name__ == "__main__")
if testing:
    # %matplotlib inline
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    import numpy as np
    import matplotlib.colors as mcolor
    # Load a sample image and show the original RGB version.
    rgb_img = mpimg.imread('../data/boat.tif')
    print('rgb_img.shape: ' , rgb_img.shape)
    plt.figure(1)
    plt.title('RGB')
    plt.imshow(rgb_img)
    r = rgb_img[:,:,0].ravel()
    g = rgb_img[:,:,1].ravel()
    b = rgb_img[:,:,2].ravel()
    print('r: ', r)
    print('g: ',g)
    print('b: ',b)
    # Convert with our implementation and compare visually against
    # matplotlib's reference converter (figures 2 vs 3 should match).
    hsv_img = rgb2hsv(rgb_img)
    print('hsv_img.shape: ' , hsv_img.shape)
    h = hsv_img[:,:,0].ravel()
    s = hsv_img[:,:,1].ravel()
    v = hsv_img[:,:,2].ravel()
    plt.figure(2)
    plt.title('rgb2hsv')
    plt.imshow(hsv_img)
    mcolor_hsv = mcolor.rgb_to_hsv(rgb_img)
    plt.figure(3)
    plt.title('mcolor.rgb_to_hsv')
    plt.imshow(mcolor_hsv)
    print('h: ', h)
    print('s: ',s)
    print('v: ',v)
# - **Example 2**
# Second demo: tiny hand-written 2x3 image, again compared side by side
# against matplotlib's rgb_to_hsv.
if testing:
    import sys,os
    ea979path = os.path.abspath('../../')
    if ea979path not in sys.path:
        sys.path.append(ea979path)
    import ea979.src as ia
    # First row uses 0-255-style values, second row unit values;
    # rgb2hsvmap normalises per pixel so both work.
    pixels = np.array([
        [[243., 114., 25.],
        [111., 19., 115.],
        [43., 188., 69.]],
        [[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]]
    ])
    print("Shape: ", pixels.shape)
    hsv_img = rgb2hsv(pixels)
    fig = plt.figure(figsize=(10, 10))
    fig.add_subplot(1, 3, 1)
    plt.imshow(pixels)
    plt.title('Original image RGB')
    fig.add_subplot(1, 3, 2)
    plt.imshow(hsv_img, cmap='hsv')
    plt.title('Image HSV rgb2hsv')
    # .copy() because mcolor.rgb_to_hsv may operate on the array contents.
    mcolor_hsv = mcolor.rgb_to_hsv(pixels.copy())
    fig.add_subplot(1, 3, 3)
    plt.imshow(mcolor_hsv, cmap='hsv')
    plt.title('Image HSV mcolor')
    plt.show()
# ## Contributions
#
# <NAME>, 1st semester 2017
|
src/rgb2hsv.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (positive integers whose only
        prime factors are 2, 3 and 5), counting 1 as the first.

        Classic three-pointer dynamic programming: each pointer marks
        the smallest ugly number not yet multiplied by its prime.
        """
        ugly = [1] * n
        i2 = i3 = i5 = 0  # indices into `ugly` for factors 2, 3, 5
        for k in range(1, n):
            by2, by3, by5 = 2 * ugly[i2], 3 * ugly[i3], 5 * ugly[i5]
            smallest = min(by2, by3, by5)
            ugly[k] = smallest
            # Advance every pointer that produced `smallest`, so values
            # reachable two ways (e.g. 6 = 2*3 = 3*2) appear only once.
            if smallest == by2:
                i2 += 1
            if smallest == by3:
                i3 += 1
            if smallest == by5:
                i5 += 1
        return ugly[-1]
# +
# 好好看题解
# -
|
剑指offer/.ipynb_checkpoints/#49-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2.7
# language: python
# name: python_2.7
# ---
# # Test suite for Jupyter-notebook
# Sample example of use of PyCOMPSs from Jupyter
# ## First step
# Import ipycompss library
import pycompss.interactive as ipycompss
# ## Second step
# Initialize COMPSs runtime
# Parameters indicates if the execution will generate task graph, tracefile, monitor interval and debug information. The parameter taskCount is a work around for the dot generation of the legend
ipycompss.start(graph=True, trace=True, debug=True, project_xml='../project.xml', resources_xml='../resources.xml')
# ## Third step
# Import task module before annotating functions or methods
from pycompss.api.task import task
# ## Fourth step
# Declare functions and decorate with @task those that should be tasks
@task(returns=int)
def test(val1):
    # Squares its argument; runs remotely as a COMPSs task.
    return val1 * val1
@task(returns=int)
def test2(val2, val3):
    # Adds its arguments; runs remotely as a COMPSs task.
    return val2 + val3
# ## Fifth step
# Invoke tasks. The returns are future objects until synchronized.
a = test(2)
b = test2(a, 5)
# ## Sixth step
# Import compss_wait_on module and synchronize tasks
from pycompss.api.api import compss_wait_on
result = compss_wait_on(b)
# ### Only those results being synchronized with compss_wait_on will have a valid value
print("Results: ")
print("a: ", a)
print("b: ", b)
print("result: ", result)
# ### Stop COMPSs runtime. All data will be synchronized in the main program
ipycompss.stop(sync=True)
print("Results after stopping PyCOMPSs: ")
print("a: ", a)
print("b: ", b)
print("result: ", result)
# ### CHECK THE RESULTS FOR THE TEST
# `a` was synchronized by stop(sync=True); `b` is checked to still be a
# Future inside the stopped runtime; `result` was explicitly waited on.
from pycompss.runtime.binding import Future
if a == 4 and isinstance(b, Future) and result == 9:
    print("RESULT=EXPECTED")
else:
    print("RESULT=UNEXPECTED")
|
tests/sources/python/9_jupyter_notebook/src/simple.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
import scipy.stats as st
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
# NOTE(review): the export cell below writes to "../output_data/..." instead
# of this constant — confirm which path is intended.
output_data_file = "output_data/cities_weather.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # If the city is unique, then add it to our cities list
    # (many random coordinates map to the same nearest city).
    if city not in cities:
        cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
#Creating URLs
base_url = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
query_url = f"{base_url}appid={weather_api_key}&units={units}&q="
#Print statement
print("Beginning Data Retrieval")
print ("---------------------------------------")
#Variables to hold data, and record & set counters for loop to match output
lat = []
lng = []
max_temp = []
humidity = []
cloudiness = []
wind_speed = []
city_name = []
country = []
date = []
record_counter = 0
set_counter = 0
records = []
sets = []
#Testing API here
response = requests.get(query_url + "Indianapolis").json()
response
#Beginning loop for cities
# NOTE(review): each iteration issues the request twice (here and inside the
# try block) — the first response is discarded, doubling API usage.
# NOTE(review): the record/set counters advance even for cities that are
# skipped, so the printed record numbers include failed lookups.
for city in cities:
    response = requests.get(query_url + city).json()
    if record_counter < 50:
        record_counter += 1
    else:
        set_counter += 1
        record_counter = 0
    print (f'Processing record {record_counter} of set {set_counter} : {city.capitalize()}')
    #Try fields to match output file columns for easier reading
    try:
        response = requests.get(query_url + city).json()
        city_name.append(response["name"])
        cloudiness.append(response["clouds"]["all"])
        country.append(response["sys"]["country"])
        date.append(response["dt"])
        humidity.append(response["main"]["humidity"])
        lat.append(response["coord"]["lat"])
        lng.append(response["coord"]["lon"])
        max_temp.append(response['main']['temp_max'])
        wind_speed.append(response["wind"]["speed"])
    #Exception to handle error if city is not found in API
    # NOTE(review): bare except also swallows network errors and KeyErrors,
    # not just "city not found" — consider catching KeyError explicitly.
    except:
        print(f"City not found. Skipping...")
        pass
print("---------------------------------------")
print("Data Retrieval Complete")
print("---------------------------------------")
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
#Create DataFrame to house data
# All the per-city lists were appended inside the same try block above,
# so they stay the same length and align row-wise here.
weather_df = pd.DataFrame({
    "City" : city_name,
    "Cloudiness" : cloudiness,
    "Country" : country,
    "Date" : date,
    "Humidity" : humidity,
    "Lat" : lat,
    "Lng" : lng,
    "Max Temp" : max_temp,
    "Wind Speed" : wind_speed,
})
weather_df
# -
#Output of DataFrame to csv file
# NOTE(review): path differs from output_data_file defined in setup — confirm.
weather_df.to_csv("../output_data/cities_weather.csv", index = False)
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# +
# Get the indices of cities that have humidity over 100%.
# Sanity check: rows reporting impossible humidity values (> 100%).
over_100 = weather_df.loc[weather_df['Humidity'] > 100 ]
over_100
#No Cities have > 100% humidity
# +
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# This Step not needed with this dataset
# -
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# +
#Creating Plot
# NOTE(review): the analysis date (1/24/21) is hard-coded in every title —
# consider deriving it from the retrieved "Date" column.
plt.scatter(weather_df["Lat"], weather_df["Max Temp"], edgecolor = 'black')
plt.title("Latitude vs Max Temperature on 1/24/21")
plt.xlabel("Latitude")
plt.ylabel("Temperature (F)")
plt.grid(linestyle= '-')
#Saving Plot Image
plt.savefig("../Images/Latitude vs Temperature.png")
# -
# Temperatures hold steady between -40 and 20 latitudes, but start to regress after 20 Latitude.
# ## Latitude vs. Humidity Plot
# +
#Creating Plot
plt.scatter(weather_df["Lat"], weather_df["Humidity"], edgecolor = 'black')
plt.title("Latitude vs Humidity on 1/24/21")
plt.xlabel("Latitude")
plt.ylabel("Humidity %")
plt.grid(linestyle= '-')
#Saving Plot Image
plt.savefig("../Images/Latitude vs Humidity.png")
# -
# There does not appear to be any correlation between latitude and humidity percent.
# ## Latitude vs. Cloudiness Plot
# +
#Creating Plot
plt.scatter(weather_df["Lat"], weather_df["Cloudiness"], edgecolor = 'black')
plt.title("Latitude vs Cloudiness on 1/24/21")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness %")
plt.grid(linestyle= '-')
#Saving Plot Image
plt.savefig("../Images/Latitude vs Cloudiness.png")
# -
# No relation between latitude and cloudiness it seems.
# ## Latitude vs. Wind Speed Plot
# +
#Creating Plot
plt.scatter(weather_df["Lat"], weather_df["Wind Speed"], edgecolor = 'black')
plt.title("Latitude vs Wind Speed on 1/24/21")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.grid(linestyle= '-')
# Clamp the y-axis so a few extreme wind readings don't flatten the plot.
plt.ylim(0,40)
#Saving Plot Image
plt.savefig("../Images/Latitude vs Wind Speed.png")
# -
# It was relatively mild winds on 1/24/21 for all cities, with most falling between 0-15 mph for the random city sample.
# ## Linear Regression
# +
#Splitting data into north & south hemispheres
# (the row counts in the comments are from one particular run; the random
# city sample changes them every execution)
northern_hemisphere = weather_df.loc[weather_df["Lat"] >= 0] #389 count
southern_hemisphere = weather_df.loc[weather_df["Lat"] < 0] #181 count
#Making life easy and creating a function for linear regression
#Also came back to add scatter plot here as well
def LinearRegression(x_values, y_values):
    """Scatter-plot y_values against x_values, overlay the least-squares
    regression line, annotate its equation, print the r-squared value
    and show the figure.

    Parameters: x_values, y_values — equal-length pandas Series
    (y_values.name is used as the y-axis label).
    Side effects: draws on the current matplotlib figure and calls plt.show().
    """
    (slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_values,y_values)
    line_values = x_values * slope + intercept
    line_eq = "y = " + str(round(slope,2)) + "x +" + str(round(intercept,2))
    plt.scatter(x_values, y_values)
    plt.plot (x_values,line_values,"r-")
    #Labels for plots
    plt.xlabel('Latitude')
    plt.ylabel(str(y_values.name))
    # NOTE(review): (0, 0) is in data coordinates, so the equation text can
    # fall outside the visible axes for some data ranges.
    plt.annotate(line_eq,(0,0), color="red")
    # BUG FIX: the message promised r-squared but printed r (linregress
    # returns the correlation coefficient); square it before printing.
    print(f"The r-squared value is: {rvalue**2}")
    plt.show()
# -
import scipy.stats as st
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
#Create plot & line
# Each cell below follows the same recipe: select one hemisphere's column,
# run LinearRegression (scatter + fitted line + r-squared printout),
# retitle the current figure and save it under ../Images.
x_values = northern_hemisphere["Lat"]
y_values = northern_hemisphere["Max Temp"]
LinearRegression(x_values, y_values)
plt.title("Northern Hemisphere - Latitude vs Temp")
#Save Image of plot & line
plt.savefig("../Images/Northern Hemisphere Latitude vs Max Temp.png")
# There is a strong negative correlation in the northern hemisphere between latitude and temperature.
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
#Create plot & line
x_values = southern_hemisphere["Lat"]
y_values = southern_hemisphere["Max Temp"]
LinearRegression(x_values, y_values)
plt.title("Southern Hemisphere - Latitude vs Temp")
#Save Image of plot & line
plt.savefig("../Images/Southern Hemisphere Latitude vs Max Temp.png")
# -
# There is a moderate positive correlation between latitude and temperature in the southern hemisphere.
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
#Create plot & line
x_values = northern_hemisphere["Lat"]
y_values = northern_hemisphere["Humidity"]
LinearRegression(x_values, y_values)
plt.title("Northern Hemisphere - Latitude vs Humidity")
#Save Image of plot & line
plt.savefig("../Images/Northern Hemisphere Latitude vs Humidity.png")
# -
# There is a moderate positive correlation between latitude and humidity in the northern hemisphere.
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
x_values = southern_hemisphere["Lat"]
y_values = southern_hemisphere["Humidity"]
LinearRegression(x_values, y_values)
plt.title("Southern Hemisphere - Latitude vs Humidity")
#Save Image of plot & line
plt.savefig("../Images/Southern Hemisphere Latitude vs Humidity.png")
# There is a moderate positive correlation between latitude and humidity in the southern hemisphere.
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
x_values = northern_hemisphere["Lat"]
y_values = northern_hemisphere["Cloudiness"]
LinearRegression(x_values, y_values)
plt.title("Northern Hemisphere - Latitude vs Cloudiness")
#Save Image of plot & line
plt.savefig("../Images/Northern Hemisphere Latitude vs Cloudiness.png")
# -
# There is a slight positive correlation between latitude and cloudiness in the northern hemisphere.
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
x_values = southern_hemisphere["Lat"]
y_values = southern_hemisphere["Cloudiness"]
LinearRegression(x_values, y_values)
plt.title("Southern Hemisphere - Latitude vs Cloudiness")
#Save Image of plot & line
plt.savefig("../Images/Southern Hemisphere Latitude vs Cloudiness.png")
# There is a slight positive correlation between latitude and cloudiness in the southern hemisphere.
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
x_values = northern_hemisphere["Lat"]
y_values = northern_hemisphere["Wind Speed"]
LinearRegression(x_values, y_values)
plt.title("Northern Hemisphere - Latitude vs Wind Speed")
#Save Image of plot & line
plt.savefig("../Images/Northern Hemisphere Latitude vs Wind Speed.png")
# There is no correlation between wind speed and latitude in the northern hemisphere.
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
x_values = southern_hemisphere["Lat"]
y_values = southern_hemisphere["Wind Speed"]
LinearRegression(x_values, y_values)
plt.title("Southern Hemisphere - Latitude vs Wind Speed")
#Save Image of plot & line
plt.savefig("../Images/Southern Hemisphere Latitude vs Wind Speed.png")
# There is a moderate negative correlation between latitude and wind speed in the southern hemisphere.
|
starter_code/WeatherPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Estadística con Python
#
# ### GitHub repository: https://github.com/jorgemauricio/python_statistics
#
# ### Instructor: <NAME>
# ## Covarianza y Correlación
#
# La covarianza mide como dos variables pueden variar entre si de acuerdo a sus medias
#
# Tomemos como ejemplo la base de datos de las estaciones del Estado de Aguascalientes.
#
# Correlación = 1: Correlación perfecta positiva
# Correlación = 0: No correlación
# Correlación = -1: Correlación perfecta negativa
# libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Weather-station data for the state of Aguascalientes.
df = pd.read_csv("data/estado1.csv")
df.head()
# Correlation of mean temperature vs relative humidity
# (column names presumably: tmed = mean temp, humr = relative humidity,
# tmax = max temp, prec = precipitation — TODO confirm against the CSV).
X = np.array(df["tmed"])
y = np.array(df["humr"])
np.corrcoef(X,y)
plt.scatter(X,y)
# Correlation of mean temperature vs max temperature
X = np.array(df["tmed"])
y = np.array(df["tmax"])
np.corrcoef(X,y)
plt.scatter(X,y)
# Correlation of precipitation vs relative humidity
X = np.array(df["prec"])
y = np.array(df["humr"])
np.corrcoef(X,y)
plt.scatter(X,y)
|
Covarianza_Correlacion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: widgets-tutorial
# language: python
# name: widgets-tutorial
# ---
# # ipympl: The Matplotlib Jupyter Widget Backend
#
# ## https://github.com/matplotlib/jupyter-matplotlib
#
#
# Enabling interaction with matplotlib charts in the Jupyter notebook and JupyterLab
#
# - BSD-3-Clause
#
# **Installation:**
#
# ```bash
# conda install -c conda-forge ipympl
# ```
# Enabling the `widget` backend. This requires jupyter-matplotlib a.k.a. ipympl. ipympl can be installed via pip or conda.
# %matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import VBox, FloatSlider
# When using the `widget` backend from ipympl, fig.canvas is a proper Jupyter interactive widget, which can be embedded in Layout classes like HBox and Vbox.
#
# One can bound figure attributes to other widget values.
# +
# Interactive demo: a slider bound to the frequency of a sine plot.
plt.ioff()
plt.clf()
slider = FloatSlider(
    value=1.0,
    min=0.02,
    max=2.0
)
fig1 = plt.figure(1)
x1 = np.linspace(0, 20, 500)
lines = plt.plot(x1, np.sin(slider.value * x1))
def update_lines(change):
    # Slider callback: redraw sin(freq * x) with the new slider value.
    lines[0].set_data(x1, np.sin(change.new * x1))
    fig1.canvas.draw()
    fig1.canvas.flush_events()
slider.observe(update_lines, names='value')
# fig1.canvas is itself a widget, so it can be stacked with the slider.
VBox([slider, fig1.canvas])
# +
# 3D wireframe demo on a second interactive canvas.
from mpl_toolkits.mplot3d import axes3d
fig2 = plt.figure(2)
ax = fig2.add_subplot(111, projection='3d')
# Grab some test data.
X, Y, Z = axes3d.get_test_data(0.05)
# Plot a basic wireframe.
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
fig2.canvas
# +
# Histogram variants in a 2x2 grid (seeded for reproducibility).
np.random.seed(0)
n_bins = 10
x2 = np.random.randn(1000, 3)
fig3, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
colors = ['red', 'tan', 'lime']
ax0.hist(x2, n_bins, density=1, histtype='bar', color=colors, label=colors)
ax0.legend(prop={'size': 10})
ax0.set_title('bars with legend')
ax1.hist(x2, n_bins, density=1, histtype='bar', stacked=True)
ax1.set_title('stacked bar')
ax2.hist(x2, n_bins, histtype='step', stacked=True, fill=False)
ax2.set_title('stack step (unfilled)')
# Make a multiple-histogram of data-sets with different length.
x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]]
ax3.hist(x_multi, n_bins, histtype='bar')
ax3.set_title('different sample sizes')
fig3.tight_layout()
fig3.canvas
# -
|
notebooks/10.14-ipympl.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Train a toy churn classifier (age -> churn yes/no) and register it
# with an Azure ML workspace.
from sklearn import svm
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23+;
# newer code should `import joblib` directly — confirm the pinned version.
from sklearn.externals import joblib
import numpy as np
# customer ages
X_train = np.array([50, 17, 35, 23, 28, 40, 31, 29, 19, 62])
X_train = X_train.reshape(-1, 1)
# churn y/n
y_train = ["yes", "no", "no", "no", "yes", "yes", "yes", "no", "no", "yes"]
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(X_train, y_train)
# Serialise the fitted model for upload below.
joblib.dump(value=clf, filename="churn-model.pkl")
# -
# Azure subscription id is kept out of the notebook in a local module.
from azure_subscription import subscription_id
ws_name='azure-pune-meet'
res_gp='workshop'
# +
# One-time workspace creation, kept for reference (workspace already exists):
# from azureml.core import Workspace
# ws = Workspace.create(name=ws_name,
#                      subscription_id=subscription_id,
#                      resource_group=res_gp,
#                      create_resource_group=True,
#                      location='eastus'
#                     )
# -
from azureml.core import Workspace
ws = Workspace.get(name=ws_name, subscription_id=subscription_id, resource_group=res_gp)
# Upload and register the pickled model under a named entry.
from azureml.core.model import Model
model = Model.register(workspace=ws, model_path="churn-model.pkl", model_name="churn-model-test")
|
1a_train_submit_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="bD6DUkgzmFoR"
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# + [markdown] colab_type="text" id="Jj6j6__ZmFoW"
# # Absolute camera orientation given set of relative camera pairs
#
# This tutorial showcases the `cameras`, `transforms` and `so3` API.
#
# The problem we deal with is defined as follows:
#
# Given an optical system of $N$ cameras with extrinsics $\{g_1, ..., g_N | g_i \in SE(3)\}$, and a set of relative camera positions $\{g_{ij} | g_{ij}\in SE(3)\}$ that map between coordinate frames of randomly selected pairs of cameras $(i, j)$, we search for the absolute extrinsic parameters $\{g_1, ..., g_N\}$ that are consistent with the relative camera motions.
#
# More formally:
# $$
# g_1, ..., g_N =
# {\arg \min}_{g_1, ..., g_N} \sum_{g_{ij}} d(g_{ij}, g_i^{-1} g_j),
# $$,
# where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras $g_i$ and $g_j$.
#
# Visually, the problem can be described as follows. The picture below depicts the situation at the beginning of our optimization. The ground truth cameras are plotted in purple while the randomly initialized estimated cameras are plotted in orange:
# 
#
# Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows:
# 
#
# In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \in SO(3); T \in \mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exp_map`) of the axis-angle representation of the rotation `log_R_absolute`.
#
# Note that the solution to this problem could only be recovered up to an unknown global rigid transformation $g_{glob} \in SE(3)$. Thus, for simplicity, we assume knowledge of the absolute extrinsics of the first camera $g_0$. We set $g_0$ as a trivial camera $g_0 = (I, \vec{0})$.
#
# + [markdown] colab_type="text" id="nAQY4EnHmFoX"
# ## 0. Install and Import Modules
# + [markdown] colab_type="text" id="WAHR1LMJmP-h"
# Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:
# + colab={"base_uri": "https://localhost:8080/", "height": 717} colab_type="code" id="uo7a3gdImMZx" outputId="bf07fd03-dec0-4294-b2ba-9cf5b7333672"
import os
import sys
import torch
# Detect whether pytorch3d is importable; if not, install it below
# (wheel when a matching prebuilt exists, otherwise from source).
need_pytorch3d=False
try:
    import pytorch3d
except ModuleNotFoundError:
    need_pytorch3d=True
if need_pytorch3d:
    if torch.__version__.startswith("1.10.") and sys.platform.startswith("linux"):
        # We try to install PyTorch3D via a released wheel.
        # version_str encodes python minor version, CUDA version and
        # torch version to pick the matching wheel directory.
        version_str="".join([
            f"py3{sys.version_info.minor}_cu",
            torch.version.cuda.replace(".",""),
            f"_pyt{torch.__version__[0:5:2]}"
        ])
        # !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
    else:
        # We try to install PyTorch3D from source.
        # CUB headers are required for the CUDA build.
        # !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
        # !tar xzf 1.10.0.tar.gz
        os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
        # !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UgLa7XQimFoY" outputId="16404f4f-4c7c-4f3f-b96a-e9a876def4c1"
# imports
import torch
from pytorch3d.transforms.so3 import (
    so3_exp_map,
    so3_relative_angle,
)
from pytorch3d.renderer.cameras import (
    SfMPerspectiveCameras,
)
# add path for demo utils
import sys
import os
sys.path.append(os.path.abspath(''))
# set for reproducibility
torch.manual_seed(42)
# Pick GPU when available; the whole tutorial uses this module-level `device`.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
    print("WARNING: CPU only, this will be slow!")
# + [markdown] colab_type="text" id="u4emnRuzmpRB"
# If using **Google Colab**, fetch the utils file for plotting the camera scene, and the ground truth camera positions:
# + colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="kOvMPYJdmd15" outputId="9f2a601b-891b-4cb6-d8f6-a444f7829132"
# !wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/camera_visualization.py
from camera_visualization import plot_camera_scene
# !mkdir data
# !wget -P data https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/data/camera_graph.pth
# + [markdown] colab_type="text" id="L9WD5vaimw3K"
# OR if running **locally** uncomment and run the following cell:
# + colab={} colab_type="code" id="ucGlQj5EmmJ5"
# from utils import plot_camera_scene
# + [markdown] colab_type="text" id="7WeEi7IgmFoc"
# ## 1. Set up Cameras and load ground truth positions
# + colab={} colab_type="code" id="D_Wm0zikmFod"
# load the SE3 graph of relative/absolute camera positions
# The .pth file packs three items: ground-truth absolute extrinsics,
# relative extrinsics for sampled camera pairs, and the pair indices.
camera_graph_file = './data/camera_graph.pth'
(R_absolute_gt, T_absolute_gt), \
(R_relative, T_relative), \
relative_edges = \
torch.load(camera_graph_file)
# create the relative cameras
cameras_relative = SfMPerspectiveCameras(
    R = R_relative.to(device),
    T = T_relative.to(device),
    device = device,
)
# create the absolute ground truth cameras
cameras_absolute_gt = SfMPerspectiveCameras(
    R = R_absolute_gt.to(device),
    T = T_absolute_gt.to(device),
    device = device,
)
# the number of absolute camera positions
N = R_absolute_gt.shape[0]
# + [markdown] colab_type="text" id="-f-RNlGemFog"
# ## 2. Define optimization functions
#
# ### Relative cameras and camera distance
# We now define two functions crucial for the optimization.
#
# **`calc_camera_distance`** compares a pair of cameras. This function is important as it defines the loss that we are minimizing. The method utilizes the `so3_relative_angle` function from the SO3 API.
#
# **`get_relative_camera`** computes the parameters of a relative camera that maps between a pair of absolute cameras. Here we utilize the `compose` and `inverse` class methods from the PyTorch3D Transforms API.
# + colab={} colab_type="code" id="xzzk88RHmFoh"
def calc_camera_distance(cam_1, cam_2):
    """
    Compute a scalar divergence between two batches of camera extrinsics.

    The distance is the sum of two terms, each averaged over the batch:
    (1 - cos(relative angle)) between the rotation components, and the
    squared L2 distance between the translation vectors.
    """
    # rotation term: 1 - cosine of the relative rotation angle
    rot_term = (1. - so3_relative_angle(cam_1.R, cam_2.R, cos_angle=True)).mean()
    # translation term: mean squared euclidean distance
    trans_diff = cam_1.T - cam_2.T
    trans_term = (trans_diff ** 2).sum(1).mean()
    return rot_term + trans_term
def get_relative_camera(cams, edges):
    """
    For each pair of indices (i,j) in "edges" generate a camera
    that maps from the coordinates of the camera cams[i] to
    the coordinates of the camera cams[j]

    NOTE(review): relies on the module-level `device` variable set in the
    setup cell rather than taking it as a parameter.
    """
    # first generate the world-to-view Transform3d objects of each
    # camera pair (i, j) according to the edges argument
    trans_i, trans_j = [
        SfMPerspectiveCameras(
            R = cams.R[edges[:, i]],
            T = cams.T[edges[:, i]],
            device = device,
        ).get_world_to_view_transform()
        for i in (0, 1)
    ]
    # compose the relative transformation as g_i^{-1} g_j
    trans_rel = trans_i.inverse().compose(trans_j)
    # generate a camera from the relative transform
    # (row-major 4x4 matrix: rotation in [:3, :3], translation in row 3)
    matrix_rel = trans_rel.get_matrix()
    cams_relative = SfMPerspectiveCameras(
        R = matrix_rel[:, :3, :3],
        T = matrix_rel[:, 3, :3],
        device = device,
    )
    return cams_relative
# + [markdown] colab_type="text" id="Ys9J7MbMmFol"
# ## 3. Optimization
# Finally, we start the optimization of the absolute cameras.
#
# We use SGD with momentum and optimize over `log_R_absolute` and `T_absolute`.
#
# As mentioned earlier, `log_R_absolute` is the axis angle representation of the rotation part of our absolute cameras. We can obtain the 3x3 rotation matrix `R_absolute` that corresponds to `log_R_absolute` with:
#
# `R_absolute = so3_exp_map(log_R_absolute)`
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="iOK_DUzVmFom" outputId="4195bc36-7b84-4070-dcc1-d3abb1e12031"
# initialize the absolute log-rotations/translations with random entries
log_R_absolute_init = torch.randn(N, 3, dtype=torch.float32, device=device)
T_absolute_init = torch.randn(N, 3, dtype=torch.float32, device=device)
# furthermore, we know that the first camera is a trivial one
# (see the description above)
log_R_absolute_init[0, :] = 0.
T_absolute_init[0, :] = 0.
# instantiate a copy of the initialization of log_R / T
log_R_absolute = log_R_absolute_init.clone().detach()
log_R_absolute.requires_grad = True
T_absolute = T_absolute_init.clone().detach()
T_absolute.requires_grad = True
# the mask that specifies which cameras are going to be optimized
# (since we know the first camera is already correct,
# we only optimize over the 2nd-to-last cameras)
camera_mask = torch.ones(N, 1, dtype=torch.float32, device=device)
camera_mask[0] = 0.
# init the optimizer
optimizer = torch.optim.SGD([log_R_absolute, T_absolute], lr=.1, momentum=0.9)
# run the optimization
n_iter = 2000 # fix the number of iterations
for it in range(n_iter):
    # re-init the optimizer gradients
    optimizer.zero_grad()
    # compute the absolute camera rotations as
    # an exponential map of the logarithms (=axis-angles)
    # of the absolute rotations
    # (multiplying by camera_mask pins camera 0 at the identity)
    R_absolute = so3_exp_map(log_R_absolute * camera_mask)
    # get the current absolute cameras
    cameras_absolute = SfMPerspectiveCameras(
        R = R_absolute,
        T = T_absolute * camera_mask,
        device = device,
    )
    # compute the relative cameras as a composition of the absolute cameras
    cameras_relative_composed = \
        get_relative_camera(cameras_absolute, relative_edges)
    # compare the composed cameras with the ground truth relative cameras
    # camera_distance corresponds to $d$ from the description
    camera_distance = \
        calc_camera_distance(cameras_relative_composed, cameras_relative)
    # our loss function is the camera_distance
    camera_distance.backward()
    # apply the gradients
    optimizer.step()
    # plot and print status message every 200 iterations and at the end
    if it % 200==0 or it==n_iter-1:
        status = 'iteration=%3d; camera_distance=%1.3e' % (it, camera_distance)
        plot_camera_scene(cameras_absolute, cameras_absolute_gt, status)
print('Optimization finished.')
# + [markdown] colab_type="text" id="vncLMvxWnhmO"
# ## 4. Conclusion
#
# In this tutorial we learnt how to initialize a batch of SfM Cameras, set up loss functions for bundle adjustment, and run an optimization loop.
|
docs/tutorials/bundle_adjustment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# +
import h5py
import array
import time
# Seconds between consecutive LMT samples; multiplying a rate column by this
# recovers a count.  NOTE: this notebook's kernel is Python 2 (print
# statements, `reduce` builtin).
_LMT_TIMESTEP = 5
f = h5py.File( '/global/project/projectdirs/pma/www/daily/2016-06-04/edison_snx11025.h5lmt', 'r')
# -
# ## Find the time corresponding to each column
# FSStepsDataSet holds the epoch timestamp of each column/timestep.
for x in f['FSStepsGroup/FSStepsDataSet'][0:3]:
    print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(x))
# ## Find peak metadata op rate
#
# For each metadata operation we track, print out the highest op rate (ops/sec) observed. To convert this op rate into an op count, multiply by `_LMT_TIMESTEP`.
for i in range(len(f['MDSOpsGroup/MDSOpsDataSet'].attrs['OpNames'])):
    op_name = f['MDSOpsGroup/MDSOpsDataSet'].attrs['OpNames'][i]
    max_op_rate = f['MDSOpsGroup/MDSOpsDataSet'][i,:].max()
    # skip operations that never occurred during the day
    if max_op_rate > 0.0:
        print op_name, max_op_rate
# ## Find the bytes read by one OST over the whole day
#
# Note that we multiply by `_LMT_TIMESTEP` to get total bytes read.
ost_target = 'snx11025-OST005c'
ost_target_idx = list(f['OSTReadGroup/OSTBulkReadDataSet'].attrs[u'OSTNames']).index(ost_target)
print "Bytes read: ", f['OSTReadGroup/OSTBulkReadDataSet'][ost_target_idx,:].sum() * _LMT_TIMESTEP
# ## Find the fraction of data points that are marked as missing
tot_missing = f['FSMissingGroup/FSMissingDataSet'][:].sum()
# The following multiplies all of the dimensions of the given dataset together
# to get the total number of elements contained
tot_data = reduce(lambda a, b: a*b, f['FSMissingGroup/FSMissingDataSet'].shape)
print "Total data points missing:", tot_missing, "out of", tot_data
print "Fraction of data missing:", float(tot_missing) / tot_data
# ## Plot the observed bandwidth across the file system for the entire HDF5 file
# Build the array that we will plot: per-timestep read rate summed over all OSTs.
bytes_per_sec = array.array(
    'd',
    [ f['OSTReadGroup/OSTBulkReadDataSet'][:,x].sum() for x in range(f['OSTReadGroup/OSTBulkReadDataSet'].shape[1]) ] )
from matplotlib import pyplot as plt
plt.xlabel('Time')
plt.ylabel('GiB Read')
plt.title('Read activity')
# x axis: hours since midnight.  NOTE(review): the y values are rates
# (bytes/sec scaled to GiB), so the 'GiB Read' label is approximate --
# multiply by _LMT_TIMESTEP for true totals; confirm intended semantics.
plt.plot([ x0 * _LMT_TIMESTEP / 3600.0 for x0 in range(len(bytes_per_sec)) ],
         [ x / 2.0**30 for x in bytes_per_sec ])
|
h5lmt_demo_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit (conda)
# metadata:
# interpreter:
# hash: ecf5722fdaf1897a315d257d89d94520bfcaa453217d5becf09b39e73618b0de
# name: python3
# ---
import re
# Check-code alphabet for Taiwanese ID validation: a letter's numeric code is
# the two-digit string of its index in this table (A->10, B->11, ...; note
# that I and O sort last).
idchk = "0123456789ABCDEFGHJKLMNPQRSTUVXYWZIO"
# +
# idchk.find("E")
# -
# National ID shape: one uppercase letter, gender digit 1/2, then 8 digits.
pattern = '^[A-Z]{1}(1|2)\\d{8}$'
def cal_id_check_code(id_number):
    """Validate the check digit of a Taiwanese National ID card or a
    new-style (2021+) Resident Certificate number.

    Args:
        id_number: ten-character ID such as "A123456789" (uppercase).

    Returns:
        True if the trailing check digit matches the weighted checksum.
    """
    # BUGFIX: the alphabet here was a redacted "<KEY>" placeholder, which
    # made idchk.find() return -1 and broke validation for every input.
    idchk = "0123456789ABCDEFGHJKLMNPQRSTUVXYWZIO"
    const_list = [1, 9, 8, 7, 6, 5, 4, 3, 2, 1]
    # Replace the leading letter with its two-digit code; drop the check digit.
    p1 = idchk.find(id_number[0])
    id_list = list(str(p1) + id_number[1:-1])
    weighted_list = [(int(a) * b) % 10 for a, b in zip(id_list, const_list)]
    # BUGFIX: (10 - n) % 10 maps a zero remainder to check digit 0; the
    # previous code produced 10 and wrongly rejected such IDs.
    chk_code = (10 - sum(weighted_list) % 10) % 10
    return chk_code == int(id_number[-1])
def cal_id_check_code_for_old_resident_cert(id_number):
    """Validate the check digit of an old-style (pre-2021) Resident
    Certificate number (two leading letters, e.g. "AB12345677").

    Returns:
        True if the trailing check digit matches the weighted checksum.
    """
    idchk = "0123456789ABCDEFGHJKLMNPQRSTUVXYWZIO"
    const_list = [1, 9, 8, 7, 6, 5, 4, 3, 2, 1]
    # The first letter contributes its full two-digit code; the second
    # letter contributes only the last digit of its code.
    p1 = idchk.find(id_number[0])
    p2 = str(idchk.find(id_number[1]))[-1:]
    id_list = list(str(p1) + p2 + id_number[2:-1])
    weighted_list = [(int(a) * b) % 10 for a, b in zip(id_list, const_list)]
    # BUGFIX: (10 - n) % 10 so a zero remainder yields check digit 0, not 10.
    chk_code = (10 - sum(weighted_list) % 10) % 10
    return chk_code == int(id_number[-1])
def tw_id_check(id_number):
    """Validate a Taiwanese National ID or Resident Certificate number.

    Dispatches on format: new-style numbers (letter + 1/2/8/9 + 8 digits)
    go through cal_id_check_code; old-style Resident Certificates
    (letter + A-D + 8 digits) through cal_id_check_code_for_old_resident_cert.

    Returns:
        True when the number's check digit validates, False otherwise.
    """
    is_valid = False
    id_number = id_number.upper()
    # National ID & new-style (2021+) Resident Certificate.
    # BUGFIX: the old class "[1-2|8-9]" also matched a literal '|';
    # "[1289]" matches exactly the four legal gender/type digits.
    id_pattern = '^[A-Z][1289][0-9]{8}$'
    if re.match(id_pattern, id_number):
        if id_number[1] in ["1", "2"]:
            print("國民身分證")
        else:
            print("新式居留證")
        # verify the check digit
        is_valid = cal_id_check_code(id_number)
    # Old-style (pre-2021) Resident Certificate.
    old_pattern = '^[A-Z][A-D][0-9]{8}$'
    if re.match(old_pattern, id_number):
        print("舊式居留證")
        # verify the check digit
        is_valid = cal_id_check_code_for_old_resident_cert(id_number)
    return is_valid
# tw_id_check("AC01234567")
tw_id_check("E123215808")
# +
def checkID(id_number):
    """Validate a Taiwanese ID / Resident Certificate number (self-contained
    variant combining the new-style and old-style checksum rules).

    Returns:
        True when the trailing check digit validates, False otherwise.
    """
    is_valid = False
    # Letter code table: a letter's code is the two-digit string of its index.
    idchk = "0123456789ABCDEFGHJKLMNPQRSTUVXYWZIO"
    id_number = id_number.upper()
    const_list = [1, 9, 8, 7, 6, 5, 4, 3, 2, 1]
    # National ID & new-style (2021+) Resident Certificate.
    # BUGFIXES vs. original: the pattern "(1-2|8-9)" only matched the literal
    # strings "1-2"/"8-9"; id_number[2] was compared against ints (never
    # true); and a checksum remainder of 0 produced chk_code 10 instead of 0.
    native_pattern = '^[A-Z][1289]\\d{8}$'
    if re.match(native_pattern, id_number):
        if id_number[1] in ["1", "2"]:
            print("國民身分證")
        else:
            print("新式居留證")
        p1 = idchk.find(id_number[0])
        id_list = list(str(p1) + id_number[1:-1])
        weighted_list = [(int(a) * b) % 10 for a, b in zip(id_list, const_list)]
        chk_code = (10 - sum(weighted_list) % 10) % 10
        is_valid = chk_code == int(id_number[-1])
    # Old-style (pre-2021) Resident Certificate: two leading letters, the
    # second contributing only the last digit of its code.
    foreigner_pattern = '^[A-Z][A-D][0-9]{8}$'
    if re.match(foreigner_pattern, id_number):
        print("舊式居留證")
        p1 = idchk.find(id_number[0])
        p2 = str(idchk.find(id_number[1]))[-1:]
        id_list = list(str(p1) + p2 + id_number[2:-1])
        weighted_list = [(int(a) * b) % 10 for a, b in zip(id_list, const_list)]
        chk_code = (10 - sum(weighted_list) % 10) % 10
        is_valid = chk_code == int(id_number[-1])
    return is_valid
# -
# Ad-hoc scratch cells: exercise checkID and walk the checksum arithmetic
# by hand (the leading 1, 0 in `a` are the two digits of letter code "10").
checkID("AC01234567")
list("A123456789")
a = [1,4,1,2,3,2,1,5,8,0]
b = [1,9,8,7,6,5,4,3,2,1]
# Per-digit weighted products mod 10, then the remainder of their sum.
bs = [(a * b) % 10 for a, b in zip(a, b)]
sum(bs, 0) % 10
|
60_case_study/身分證、居留證檢查驗證/tw_id.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="AnHKP3tQ6Yer" outputId="9f751d79-6030-40a8-f01f-bd3cca1fc363"
# !pip install transformers
# # !pip install datasets
# + id="j89Alu-C6jiH"
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification, AdamW, Trainer, TrainingArguments
from sklearn.metrics import accuracy_score, f1_score
from scipy import stats
import pandas as pd
import numpy as np
import joblib
import torch
# + id="fiyTbnF4Bxhw"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="AAzx8RKq6vfY" outputId="41728139-0e6a-451f-f41c-688794195670"
# %cd '/content/drive/My Drive/IIITD/SEM-7/ML/ML Project/Code/Dataset'
# + id="hwQB9lGc2isq"
train_data = pd.read_csv('train_data_cleaned.csv')
valid_data = pd.read_csv('valid_data_cleaned.csv')
# + id="KxZeEMKlB0co" colab={"base_uri": "https://localhost:8080/"} outputId="e93dfcbe-0ac3-4a86-e58a-03bd4dc3d22c"
train_data.columns
# + [markdown] id="ve1T1JmsM9vC"
# # Load BERT
# + colab={"base_uri": "https://localhost:8080/"} id="E-CAaEPGdDry" outputId="98753ceb-c34d-4cb6-f950-f3a6005a377a"
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
# + id="b_ZkjlU-OMpb"
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
# + [markdown] id="OCtdIqB5M_d1"
# ## Freeze encoder
# Use only weights of head layers
# + id="3MsMZGcIM5kz"
# for param in model.base_model.parameters():
# param.requires_grad = False
# + [markdown] id="Gx5pNHn2tByV"
# ## Generate Encodings
# + id="uRCh6e2VOXBN"
train_text = train_data.cleaned.tolist()
valid_text = valid_data.cleaned.tolist()
# + id="o5cDHKhgOdca"
train_encodings = tokenizer(train_text,truncation=True,padding=True)
valid_encodings = tokenizer(valid_text,truncation=True,padding=True)
# + id="bldPcemxORMr"
class OLIdDataset(torch.utils.data.Dataset):
    """Torch dataset pairing tokenizer encodings with integer labels.

    Each item is a dict of tensors: one entry per encoding field plus a
    'labels' entry, the layout expected by the HuggingFace Trainer.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # Slice every encoding field at idx and tensorize it.
        sample = {}
        for field, values in self.encodings.items():
            sample[field] = torch.tensor(values[idx])
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
# + id="f9HCxwi8Oriz"
train_dataset = OLIdDataset(train_encodings,train_data.label.tolist())
valid_dataset = OLIdDataset(valid_encodings,valid_data.label.tolist())
# + id="Z7HnXy-V4YEJ"
def compute_metrics(pred):
    """Compute accuracy and macro-averaged F1 for a HuggingFace
    EvalPrediction (`label_ids` + raw `predictions` logits)."""
    y_true = pred.label_ids
    # Predicted class = argmax over the last (logit) dimension.
    y_pred = pred.predictions.argmax(-1)
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'macro f1': f1_score(y_true, y_pred, average='macro'),
    }
# Training configuration for the HuggingFace Trainer; checkpoints land in
# ./results, TensorBoard logs in ./logs.
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=20,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=64,
    warmup_steps=500,          # linear LR warmup steps
    weight_decay=0.01,
    logging_dir='./logs',
)
# Trainer wires the model, data, and metric function together.
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_dataset,
    eval_dataset=valid_dataset
)
# + colab={"base_uri": "https://localhost:8080/", "height": 588} id="GbzQvWES5YP5" outputId="82681307-ce04-4027-ab7c-3fe4dabfa734"
trainer.train()
# + colab={"base_uri": "https://localhost:8080/", "height": 105} id="GK1IPWFB52OK" outputId="6198564a-61b6-4029-fad2-a0662aa646cd"
trainer.evaluate()
# + colab={"base_uri": "https://localhost:8080/", "height": 838} id="w2LK-0gEPk8p" outputId="e56df961-69e1-423f-9389-51b32f6575ab"
# %load_ext tensorboard
# %tensorboard --logdir logs
|
Experiments/DistilBERT_Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import obspy
from obspy.imaging.cm import obspy_sequential
from obspy.signal.tf_misfit import cwt
from scipy import signal
import torch
import torch.nn as nn
import torch.nn.functional as F
import pywt
# +
st = obspy.read()
tr = st[0]             # grab the bundled default example trace
npts = tr.stats.npts   # total number of samples
dt = tr.stats.delta    # sample spacing; 100 Hz sampling -> 0.01 s
t = np.linspace(0, dt * npts, npts)
f_min = 1
f_max = 50
# Continuous wavelet transform via obspy (third arg is presumably the
# Morlet wavelet parameter w0 -- TODO confirm against obspy docs).
scalogram = cwt(tr.data, dt, 8, f_min, f_max)
fig = plt.figure()
ax = fig.add_subplot(111)
# Log-spaced frequency axis matching the scalogram's row spacing.
x, y = np.meshgrid(
    t,
    np.logspace(np.log10(f_min), np.log10(f_max), scalogram.shape[0]))
ax.pcolormesh(x, y, np.abs(scalogram), cmap=obspy_sequential)
ax.set_ylim(f_min, f_max)
plt.show()
# -
# PyWavelets version of the same transform, for visual comparison.
wavelet = 'morl'
scales = np.arange(1, f_max) # range of scales
coeffs, freqs = pywt.cwt(tr.data, scales, wavelet = wavelet)
plt.imshow(np.abs(scalogram), cmap = 'coolwarm', aspect = 'auto')
plt.imshow(coeffs, cmap = 'coolwarm', aspect = 'auto')
plt.plot(tr.data)
# +
# Prototype depthwise conv over a (batch, channel, freq, time) input:
# one filter per input channel (groups=c_in), stride 4 along time only.
c_in = 3
fmax = 50
kernel = 3
length = 512
conv1d_dw = nn.Conv2d(in_channels = c_in,
                      out_channels = c_in,
                      kernel_size=[3, kernel],
                      padding = [0,1],
                      groups = c_in,
                      bias = False,
                      stride = [1,4],
                      padding_mode="replicate"
                      )
input_ = torch.rand(1,c_in,fmax,length).float()
conv1d_dw(input_).shape
# -
class Freq_Forward_block(nn.Module):
    """Depthwise-separable 2D conv block.

    A per-channel (depthwise) conv that can downsample along the time axis,
    followed by a 1x1 pointwise conv that maps to `c_out` channels; each
    conv is followed by BatchNorm + ReLU.
    """

    def __init__(self,
                 c_in,
                 c_out,
                 kernel_size,
                 stride=1,
                 bias=False,
                 padding_mode="replicate"):
        super(Freq_Forward_block, self).__init__()
        half = int(kernel_size / 2)
        # Depthwise stage: groups == channels, stride applied to width only.
        self.dw_conv = nn.Conv2d(c_in, c_in,
                                 kernel_size=[kernel_size, kernel_size],
                                 padding=[half, half],
                                 groups=c_in,
                                 stride=[1, stride],
                                 bias=bias,
                                 padding_mode=padding_mode)
        self.batch_norm_1 = nn.BatchNorm2d(c_in)
        self.act_1 = nn.ReLU()
        # Pointwise stage: 1x1 conv collapses the channel dim to c_out.
        self.pw_conv = nn.Conv2d(c_in, c_out,
                                 kernel_size=1,
                                 padding=[0, 0],
                                 stride=1,
                                 bias=bias,
                                 padding_mode=padding_mode)
        self.batch_norm_2 = nn.BatchNorm2d(c_out)
        self.act_2 = nn.ReLU()

    def forward(self, x):
        out = self.act_1(self.batch_norm_1(self.dw_conv(x)))
        return self.act_2(self.batch_norm_2(self.pw_conv(out)))
# Smoke-test the depthwise-separable block on a 128-channel input.
c_in = 128
fmax = 20
kernel = 3
length = 512
# BUGFIX: `depthweise_seperable_convolution` is not defined anywhere in this
# notebook (NameError); Freq_Forward_block above is the depthwise-separable
# conv block these cells are exercising.
op_1 = Freq_Forward_block(c_in, 64, 3, stride=2)
# op_2 = Freq_Forward_block(64, 32, 3, stride=2)
# op_3 = Freq_Forward_block(32, 4, 3, stride=2)
# op_4 = Freq_Forward_block(4, 1, 3, stride=2)
input_ = torch.rand(2, c_in, fmax, length).float()
op_1(input_).shape
# op_4(op_3(op_2(op_1(input_)))).shape
# +
class Freq_TokenEmbedding(nn.Module):
    """Embed a (batch, channel, freq, time) spectrogram into a token sequence.

    A stack of Freq_Forward_block layers squeezes the channel dimension down
    to 1 (downsampling the time axis per layer when stride > 1); the
    remaining (batch, freq, time) map is projected along the frequency axis
    to `token_d_model` by a 1D conv, then LayerNorm'ed and ReLU'ed.

    Args:
        c_in          : number of input channels.
        token_d_model : embedding dimension of each output token.
        kernel_size   : conv kernel size used by every layer.
        stride        : time-axis downsampling factor per block.
        conv_bias     : whether the convs use a bias term.
        n_conv_layers : number of Freq_Forward_block layers.
        f_max         : number of frequency bins of the expected input.
        padding_mode  : conv padding mode.
        light_weight  : unused; kept for signature compatibility.
    """

    def __init__(self,
                 c_in,
                 token_d_model,
                 kernel_size = 3,
                 stride = 1,
                 conv_bias = False,
                 n_conv_layers = 1,
                 f_max = 100,
                 padding_mode = 'replicate',
                 light_weight = False):
        super(Freq_TokenEmbedding, self).__init__()
        # Channel schedule: c_in -> 50 -> 25 -> ... -> 1.
        n_filter_list = [c_in] + [max(1, int(100 / 2 ** (i + 1))) for i in range(n_conv_layers - 1)] + [1]
        self.conv_layers = nn.ModuleList(
            [Freq_Forward_block(c_in = n_filter_list[i],
                                c_out = n_filter_list[i + 1],
                                kernel_size = kernel_size,
                                stride = stride,
                                bias = conv_bias,
                                padding_mode = padding_mode)
             for i in range(n_conv_layers)])
        # BUGFIX: channel()/sequence_length() previously built their probe
        # tensors from the *global* `fmax` instead of the f_max argument,
        # so the layer silently depended on notebook state.
        self.conv = nn.Conv1d(in_channels = self.channel(c_in = c_in, f_max = f_max, length = 100),
                              out_channels = token_d_model,
                              kernel_size = kernel_size,
                              padding = int(kernel_size / 2),
                              stride = 1,
                              bias = conv_bias,
                              padding_mode = padding_mode)
        self.norm = nn.LayerNorm(token_d_model)
        self.activation = nn.ReLU()

    def forward(self, x):
        # (B, C, F, T) -> (B, 1, F, T') through the conv stack.
        for layer in self.conv_layers:
            x = layer(x)
        x = torch.squeeze(x, 1)   # (B, F, T')
        x = self.conv(x)          # (B, token_d_model, T')
        # (B, T', token_d_model) with LayerNorm over the embedding dim.
        x = self.activation(self.norm(x.permute(0, 2, 1)))
        return x

    def sequence_length(self, c_in = 100, f_max = 50, length = 100):
        """Time-axis length after the conv stack, determined by a dry run."""
        x = torch.rand(1, c_in, f_max, length).float()
        for layer in self.conv_layers:
            x = layer(x)
        return x.shape[3]

    def channel(self, c_in = 100, f_max = 50, length = 100):
        """Frequency-axis size after the conv stack, determined by a dry run."""
        x = torch.rand(1, c_in, f_max, length).float()
        for layer in self.conv_layers:
            x = layer(x)
        return x.shape[2]
# Smoke test: embed a batch of 20 spectrograms (128 ch x 50 freq x 512 time)
# into a token sequence with embedding dimension 60.
c_in = 128
token_d_model = 60
fmax = 50
kernel = 3
length = 512
token_embedding = Freq_TokenEmbedding(c_in = c_in,
                                      token_d_model = token_d_model,
                                      kernel_size = kernel,
                                      stride = 2, # downsampling factor along the time axis
                                      conv_bias = False,
                                      n_conv_layers = 4,
                                      f_max = fmax,
                                      padding_mode = 'replicate',
                                      light_weight = False)
input_ = torch.rand(20,c_in,fmax,length).float()
token_embedding(input_).shape
# -
n_filter_list = [1] + [max(1,int(100/2**(i+1))) for i in range(10 - 1)] + [1]
op.dw_conv.weight.shape
op.pw_conv.weight.shape
class_emb = nn.Parameter(torch.ones(1, 1, 10), requires_grad=True)
x = torch.zeros(1,100,10).float()
cls_token = class_emb.expand(x.shape[0], -1, -1)
class_emb
cls_token
torch.cat((cls_token, x), dim=1)
from einops import rearrange, repeat
# +
class_emb = nn.Parameter(torch.ones(1, 1, 10), requires_grad=True)
x = torch.zeros(1,100,10).float()
b, n, _ = x.shape
cls_tokens = repeat(class_emb, '() n d -> b n d', b = b)
# -
cls_tokens
# +
st = obspy.read()
tr = st[0]
npts = tr.stats.npts
dt = tr.stats.delta
t = np.linspace(0, dt * npts, npts)
f_min = 1
f_max = 50
scalogram = cwt(tr.data, dt, 8, f_min, f_max)
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.meshgrid(
t,
np.logspace(np.log10(f_min), np.log10(f_max), scalogram.shape[0]))
# t = np.linspace(0, npts, npts)
# x, y = np.meshgrid(
# t,
# np.logspace(0, f_max, scalogram.shape[0]))
ax.pcolormesh(x, y, np.abs(scalogram), cmap=obspy_sequential)
#ax.set_xlabel("Time after %s [s]" % tr.stats.starttime)
#ax.set_ylabel("Frequency [Hz]")
#ax.set_yscale('log')
ax.set_ylim(f_min, f_max)
plt.show()
|
notebooks/.ipynb_checkpoints/CWT_Spectrogram_Export-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.12 64-bit
# language: python
# name: python3
# ---
# ## Numpy 常用功能
#
# 首先一定要记住一点,Numpy 是列向量优先的,也就是说**一维数组就是列向量!转置后才是行向量**!
# 一维向量的索引是「行」,因此 Numpy 的默认索引是行索引(`axis=0`)
#
# >https://www.datacamp.com/cheat-sheet/numpy-cheat-sheet-data-analysis-in-python
#
# 
# +
import numpy as np
# 1. Arrays
## An array may have any number of dimensions (0 .. any positive integer);
## 1-3 dimensions are the common cases.  A 3-D array:
a1 = np.arange(27).reshape((3,3,3))
a1
# -
# 2. Indexing
## axis=0 -> row direction
## axis=1 -> column direction
## axis=2 -> third dimension (think: perpendicular to the screen)
a1[1,1,1] # all three indices are 1, i.e. the central element of the 3*3*3 array
|
data-science/numpy-cheat-sheet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Hello Seaborn (+ Matplotlib & Pandas)
#
# Dieses Notebook wurde über ein Conda Environment mithilfe von folgenden Modulen erstellt:
# * [Numpy](https://anaconda.org/anaconda/numpy)
# * [Pandas](https://anaconda.org/anaconda/pandas)
# * [Seaborn](https://anaconda.org/anaconda/seaborn)
# * [Matplotlib](https://anaconda.org/anaconda/matplotlib)
#
# Das ganze soll nur als eine kleine Einführung in die Welt des Data Plotting (mit Matplotlib, Seaborn, Pandas) dienen. Es gibt selbstredend wesentlich mehr Arten von Graphen.
#
# Für weitere Hilfen, oder Beispiele, gibt es die:
# * [Dokumentation Pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html)
# * [Dokumentation Seaborn](https://seaborn.pydata.org/examples/index.html)
# * [Dokumentation Matplotlib](https://matplotlib.org/3.2.1/contents.html)
#
# Bei sonstigen Fragen stehe ich (Long) euch gerne zur Verfügung.
import numpy as np # Importieren für lineare Algebra
import pandas as pd # Importieren, um Daten von der .csv zu importieren und zu verarbeiten
import seaborn as sns # Importieren, um Daten mithilfe von Seaborn visualisieren zu können
import matplotlib.pyplot as plt # Importieren, da Seaborn auf Matplotlib aufbaut
"""
Hier setzen wir den Grafikstil auf "darkgrid".
By default bringt Seaborn folgende Themes mit:
- darkgrid
- whitegrid
- dark
- white
- ticks
Referenz:
https://seaborn.pydata.org/tutorial/aesthetics.html
"""
sns.set(style = "darkgrid")
"""
Hier nutzen wir Pandas (als pd), um die .csv-Datei mit den Iris-Daten (Iris.csv) auszulesen.
Referenz:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
"""
iris = pd.read_csv('./Iris.csv')
# Gibt die ersten 10 Zeilen des Dataframes aus der .csv-Datei zurück
iris.head(10)
# Spalte 'Id' entfernen (ist irrelevant für Analyse)
iris.drop('Id',axis=1,inplace=True)
# Grundlegende Informationen zum Dataframe ausgeben (z. B. zur Überprüfung von fehlenden Werten)
iris.info()
# Wie viele Pflanzen gibt es pro Spezies
iris['Species'].value_counts()
"""
Ein "Count-Plot" entspricht einem Histogramm für einen bestimmten kategorialen Bereich.
Es zeigt einfach die Anzahl der Vorkommnisse eines Items, basierend auf einem bestimmten Kategorietyp.
Referenz:
https://seaborn.pydata.org/generated/seaborn.countplot.html
"""
# FIX: pass the column as a keyword argument -- positional use of `x` in
# countplot was deprecated in seaborn 0.12 and later removed.
sns.countplot(x='Species', # category to count
              data=iris)   # dataset to use
plt.show() # display the plot
"""
Eine weitere Methode diese Daten darzustellen, wäre das Kreis- / Tortendiagramm
Referenz (hier mal Matplotlib):
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.pie.html
"""
iris['Species'].value_counts().plot.pie(explode=[.05, .05, .05], # Abstand der Kreisabschnitte
autopct='%1.1f%%', # Label der einzelnen Abschnitte
shadow=True, # Schatteneffekt
figsize=(10,8)) # Größe des Graphen
plt.show() # Graph anzeigen lassen
"""
Joint-Plot ist Seaborn-spezifisch und wird verwendet, um:
- die Beziehung zwischen 2 Variablen zu visualisieren/analysieren (bivariat)
- individuelle Verteilungen im selben Graphen zu beschreiben (univariat)
Referenz:
https://seaborn.pydata.org/generated/seaborn.jointplot.html
"""
# FIX: seaborn >= 0.12 requires x/y as keyword arguments.
fig = sns.jointplot(x="SepalLengthCm", # x data
                    y="SepalWidthCm",  # y data
                    data=iris,         # dataset to use
                    kind="reg",        # plot kind (regression)
                    color='b',         # plot colour
                    space=0)           # margin between plot and axes
"""
Eine weitere Möglichkeit des Joint-Plots
"""
# FIX: seaborn >= 0.12 requires x/y as keyword arguments.
fig = sns.jointplot(x="SepalLengthCm", # x data
                    y="SepalWidthCm",  # y data
                    data=iris,         # dataset to use
                    kind="kde")        # plot kind (kernel density estimate)
"""
Scatter-Plots (dt. Streudiagramm) stellen die Beziehung zwischen zwei NUMERISCHEN Variablen dar,
wobei jedes Element als Punkt (x, y) dargestellt wird.
In diesem Beispiel wird direkt mit der .plot-Erweiterung von Pandas geplottet.
Referenz:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html
"""
sc = iris[iris.Species=='Iris-setosa'].plot(kind='scatter', # Art des Graphen
x='SepalLengthCm', # Daten für x
y='SepalWidthCm', # Daten für y
color='red', # Farbe der Spezies
label='Setosa') # Name für die Legende
iris[iris.Species=='Iris-versicolor'].plot(kind='scatter', # Art des Graphen
x='SepalLengthCm', # Daten für x
y='SepalWidthCm', # Daten für y
color='green', # Farbe der Spezies
label='Versicolor', # Name für die Legende
ax=sc) # Übernahme der Achsen vom ersten Graphen
iris[iris.Species=='Iris-virginica'].plot(kind='scatter', # Art des Graphen
x='SepalLengthCm', # Daten für x
y='SepalWidthCm', # Daten für y
color='orange', # Farbe der Spezies
label='Virginica', # Name für die Legende
ax=sc) # Übernahme der Achsen vom ersten Graphen
sc.set_xlabel('Sepal Length in cm') # Label der x-Achse
sc.set_ylabel('Sepal Width in cm') # Label der y-Achse
sc.set_title('Sepal Length Vs Sepal Width') # Label des gesamten Graphen
sc = plt.gcf() # gcf = get current figure
sc.set_size_inches(10,6) # Größe der Abbildung (width, height)
"""
Der Box-Plot, bzw. Box-Whisker-Plot, gibt eine gute statische Zusammenfassung der Merkmale, wobei:
- die obere Linie für den Maximalwert steht
- die obere Kante des Kastens für das 3. Quartil steht
- die mittlere Kante des Kastens für das 2. Quartil steht
- die untere Kante des Kastens für das 1. Quartil steht
- die unterste Linie für den Minimalwert des Merkmals steht
- die schwarzen Punkte für Ausreißer stehen
Die Höhe des Kastens wird auch als Interquartilbereich bezeichnet.
Referenz:
https://seaborn.pydata.org/generated/seaborn.boxplot.html
"""
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(10,7) # Größe der Abbildung (width, height)
fig = sns.boxplot(x='Species', # Daten für x
y='PetalLengthCm', # Daten für y
data=iris, # Datensatz, der verwendet wird
order=['Iris-setosa','Iris-versicolor','Iris-virginica'], # Reihenfolge auf x
linewidth=2, # Liniendicke im Graphen
orient='v') # Orientierung des Graphen (v = vertikal)
"""
Ein Strip-Plot ist eine Art Streudiagramm, in dem eine Variable kategorial ist.
Referenz:
https://seaborn.pydata.org/generated/seaborn.stripplot.html
"""
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(10,7) # Größe der Abbildung (width, height)
fig = sns.stripplot(x='Species', # Daten für x
y='SepalLengthCm', # Daten für y
data=iris, # Datensatz, der verwendet wird
jitter=True, # Hinzufügen, um Überlappung der Punkte zu verringern
edgecolor='grey', # Linienfarbe um jeden Punkt
size=8, # Radius der Punkte
orient='v') # Orientierung des Graphen (v = vertikal)
"""
Diese Graphen lassen sich auch beliebig für einen groben Überblick kombinieren
hier: Box-Plot + Strip-Plot
"""
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(10,7) # Größe der Abbildung (width, height)
fig = sns.boxplot(x='Species', # Daten für x
y='SepalLengthCm', # Daten für y
data=iris) # Datensatz, der verwendet wird
fig = sns.stripplot(x='Species', # Daten für x
y='SepalLengthCm', # Daten für y
data=iris, # Datensatz, der verwendet wird
jitter=True, # Hinzufügen, um Überlappung der Punkte zu verringern
edgecolor='gray') # Linienfarbe um jeden Punkt
"""
Eine übersichtlichere Alternative zu Strip-Plots, sind Swarm-Plots (kommt vom Bienenschwarm)
Der Vorteil hiervon ist, dass jeder Datenpunkt deutlich sichtbar ist und nichts verdeckt wird.
Referenz:
https://seaborn.pydata.org/generated/seaborn.swarmplot.html
"""
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(10,7) # Größe der Abbildung (width, height)
fig = sns.swarmplot(x="Species", # Daten für x
y="PetalLengthCm", # Daten für y
data=iris) # Datensatz, der verwendet wird
# +
"""
Violin-Plot ist Seaborn-spezifisch und eine Kombination aus einem Box-Plot + Density-Plot,
welcher gedreht und auf jeder Seite platziert wird, um die Verteilungsform der Daten anzuzeigen.
Hierbei stellt:
- der dicke schwarze Balken in der Mitte den Interquartilbereich dar
- die dünne schwarze Linie, die davon ausgeht, die 95%-Konfidenzintervalle dar
- der weiße Punkt den Median dar
Das ganze stellt eine Erweiterung des Box-Plots dar.
Referenz:
https://seaborn.pydata.org/generated/seaborn.violinplot.html
Subplots (mehrere Diagramme in einem gesamten Diagramm):
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.figure.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.subplots.html
"""
plt.figure(figsize=(15,10)) # Größe (width, height)
plt.subplot(2,2,1) # erster Teilgraph für das erste Merkmal
sns.violinplot(x='Species', # Daten für x
y='PetalLengthCm', # Daten für y
data=iris) # Datensatz, der verwendet wird
plt.subplot(2,2,2) # zweiter Teilgraph für das zweite Merkmal
sns.violinplot(x='Species', # Daten für x
y='PetalWidthCm', # Daten für y
data=iris) # Datensatz, der verwendet wird
plt.subplot(2,2,3) # dritter Teilgraph für das dritte Merkmal
sns.violinplot(x='Species', # Daten für x
y='SepalLengthCm', # Daten für y
data=iris) # Datensatz, der verwendet wird
plt.subplot(2,2,4) # vierter Teilgraph für das vierte Merkmal
sns.violinplot(x='Species', # Daten für x
y='SepalWidthCm', # Daten für y
data=iris) # Datensatz, der verwendet wird
# -
"""
Natürlich darf auch hier das einfache Histogramm nicht fehlen,
welches das Vorkommen der einzelnen Ausprägungen visualisiert. (zur Übersicht)
In diesem Beispiel wird mit der .hist-Erweiterung von Pandas geplottet.
Referenz:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.hist.html
"""
iris.hist(edgecolor='black', # Umrandungsfarbe
linewidth=1) # Dicke der Linie
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(12,6) # Größe der Abbildung (width, height)
"""
Die Heatmap wird verwendet, um die Korrelation zwischen den Merkmalen im Datensatz zu ermitteln.
Ein hoher positiver oder negativer Wert zeigt, dass die Merkmale eine hohe Korrelation aufweisen.
Dies ist hilfreich für die Auswahl der Parameter für das maschinelle Lernen.
Die Seaborn Heatmap ist hierbei im Python-Ecosystem die intuitivste.
In diesem Beispiel wird zunächst mit Pandas die Korrelationsmatrix ermittelt,
welche dann mit Seaborn dargestellt wird.
Referenz:
https://seaborn.pydata.org/generated/seaborn.heatmap.html
Pandas Korrelationsmatrix:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
"""
fig = plt.gcf() # gcf = get current figure
fig.set_size_inches(10,7) # Größe der Abbildung (width, height)
fig = sns.heatmap(iris.corr(), # Erstellen der Korrelationsmatrix aus dem Dataframe
annot=True, # Werte in einzelnen Zellen anzeigen
cmap='RdBu', # Farbpalette (colormap)
linewidths=1, # Dicke der Trennlinien
linecolor='k', # Farbe der Trennlinien
square=True, # Quadratische Form
mask=False, # Wenn True, werden Daten ausgeblendet
vmin=-1, # Niedrigster Wert
vmax=1, # Höchster Wert
cbar_kws={"orientation": "vertical"}, # Orientierung des Graphen (v = vertikal)
cbar=True) # Colorbar anzeigen
"""
Ein Pair-Plot ist auch als Streudiagramm bekannt,
bei dem eine Variable in derselben Datenzeile
mit dem Wert einer anderen Variable (paarweise) verglichen wird.
Referenz:
https://seaborn.pydata.org/generated/seaborn.pairplot.html
"""
fig = sns.pairplot(iris, # Datensatz aus Dataframe (Pandas)
hue='Species') # Farbe
# ## Schlusswort
#
# Danke für das Durcharbeiten meiner kleinen Übersicht. Ich hoffe, ich habe euch damit einen kleinen Einblick in die Welt der Datenvisualisierung in Python geben können. Am besten ihr probiert euch an eigenen Beispielen aus, denn nur Übung macht den Meister.
|
py_data_viz.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, accuracy_score
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('./data/houseprices.csv')
# rename the columns for easy manipulation
data.columns = ['house_age','nearest_station_distance','nearby_convenience_store','latitude','longitude','house_price']
data.head()
# +
# Remove rows with NaN in the columns we model.  FIX: dropna(inplace=True)
# mutates `data` and returns None, so the old `cdata = ...` binding was
# always None -- drop the misleading assignment (and the stray semicolon).
data.dropna(subset=['nearest_station_distance', 'house_price'], inplace=True)
# Extract X (independent) and Y (dependent) as column vectors for sklearn.
X = data.loc[:, 'nearest_station_distance'].values.reshape(-1, 1)
Y = data.loc[:, 'house_price'].values.reshape(-1, 1)
# +
# Create model
model = LinearRegression()
# train the model
model.fit(X,Y)
Y_pred = model.predict(X)
# +
# set min and max values for X and Y
plt.axis([10,6500,76000,1175000])
plt.title('House Price Regression')
plt.xlabel('Distance from Station Station')
plt.ylabel('House Price')
# draw line
plt.plot(X, Y_pred, 'b')
# plot the data
plt.scatter(X,Y,color='green')
plt.show()
# -
print(f'R Squared : {r2_score(Y,Y_pred)}')
|
DS_Linear_Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Active Learning dashboard
# +
import json
import psycopg2
import pandas as pd
import plotly
# NOTE(review): plotly.plotly was split out into the chart_studio package in
# plotly 4.x -- this import assumes a plotly 3.x environment; confirm pinning.
import plotly.plotly as py
import plotly.graph_objs as go
# Read DB and plotly credentials from a local (uncommitted) config file.
with open('config.json') as f:
    conf = json.load(f)
# Build the libpq-style connection string and open the single shared
# connection used by every query cell below.
conn_str = "host={} dbname={} user={} password={}".format(conf['host'], conf['database'], conf['user'], conf['passw'])
conn = psycopg2.connect(conn_str)
# Register plotly credentials so py.iplot can publish the charts online.
plotly.tools.set_credentials_file(username=conf['plotlyusername'], api_key=conf['plotlyapikey'])
# -
# Plotting a pie chart for current image tagging status:
# +
# Query per-tag-state image counts and render them as a pie chart.
cursor = conn.cursor()
cursor.execute('select count(a.imageid), b.tagstatename from Image_Tagging_State a join tagstate b ON a.tagstateid = b.tagstateid group by b.tagstatename')
rows = cursor.fetchall()
# Each row is (count, tag state name); split into parallel lists for plotly.
names = [name for _, name in rows]
numbers = [number for number, _ in rows]
fig = {
    'data': [{'labels': names,
              'values': numbers,
              'type': 'pie'}],
    'layout': {'title': 'Tag state by count'}
}
py.iplot(fig)
# -
# Plotting a time chart for the tagging activity over the last few days:
# +
# Daily tagging-activity counts, shown as a time-series chart.
cursor = conn.cursor()
cursor.execute("select count(imageid), date_trunc('day', modifieddtim) timerange from image_tagging_state_audit group by date_trunc('day', modifieddtim) order by timerange")
dates = []
numimages = []
for numimage, day in cursor.fetchall():
    # Convert DB timestamps to pandas datetimes for clean axis formatting.
    dates.append(pd.to_datetime(day))
    numimages.append(numimage)
# One scatter trace: activity count per day.
trace = go.Scatter(
    x=dates,
    y=numimages,
    name='Number of tagging activities'
)
data = [trace]
layout = dict(
    title='Number of tagging activities by date',
    xaxis=dict(title='Date'),
    yaxis=dict(title='Tagging activities')
)
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='basic-scatter')
# -
# Top taggers
# +
# Rank taggers by total number of classifications; render as a pie chart.
cursor = conn.cursor()
cursor.execute("SELECT e.username, count(*) as TagCount FROM image_info a join image_tags b on a.imageid = b.imageid join tags_classification c on b.imagetagid = c.imagetagid join classification_info d on c.classificationid = d.classificationid join user_info e on b.createdbyuser = e.userid group by e.username order by TagCount desc")
rows = cursor.fetchall()
# Rows arrive as (username, count); unzip into the two lists plotly needs.
usernames = [username for username, _ in rows]
tagcounts = [tagcount for _, tagcount in rows]
fig = {
    'data': [{'labels': usernames,
              'values': tagcounts,
              'type': 'pie'}],
    'layout': {'title': 'Top taggers by number of classifications'}
}
py.iplot(fig)
# +
# Per-user classification counts, rendered as a stacked bar chart
# (one trace per user, one bar segment per classification name).
class_data = {}
cursor = conn.cursor()
cursor.execute("SELECT e.username, d.classificationname, count(*) as TagCount FROM image_info a join image_tags b on a.imageid = b.imageid join tags_classification c on b.imagetagid = c.imagetagid join classification_info d on c.classificationid = d.classificationid join user_info e on b.createdbyuser = e.userid group by e.username, d.classificationname order by TagCount desc")
for (username, classname, tagcount) in cursor:
    # dict.setdefault replaces the manual "if username not in class_data"
    # initialization block -- one lookup, same behavior.
    user_entry = class_data.setdefault(username, {'classnames': [], 'tagcount': []})
    user_entry['classnames'].append(classname)
    user_entry['tagcount'].append(tagcount)
data = []
for key in class_data:
    trace = go.Bar(
        x=class_data[key]['classnames'],
        y=class_data[key]['tagcount'],
        name=key
    )
    data.append(trace)
layout = go.Layout(
    barmode='stack',
    title='Top taggers by classification info'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='stacked-bar')
# +
# Overall classification popularity as a simple bar chart.
cursor = conn.cursor()
cursor.execute('SELECT b.classificationname, count(*) AS ClassificationCount FROM tags_classification a join classification_info b on a.classificationid = b.classificationid group by classificationname order by ClassificationCount desc')
rows = cursor.fetchall()
# Unzip (name, count) rows into the x/y lists for the bar trace.
classnames = [classname for classname, _ in rows]
counts = [count for _, count in rows]
trace = go.Bar(x=classnames, y=counts)
data = [trace]
layout = dict(
    title='Top classifications',
    xaxis=dict(title='Classification'),
    yaxis=dict(title='Number of tags')
)
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='basic-plot')
# -
|
dashboard/dashboard.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="jrI6q7RmWQam"
# <table align="center">
# <td align="center"><a target="_blank" href="http://introtodeeplearning.com">
# <img src="https://i.ibb.co/Jr88sn2/mit.png" style="padding-bottom:5px;" />
# Visit MIT Deep Learning</a></td>
# <td align="center"><a target="_blank" href="https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab3/RL.ipynb">
# <img src="https://i.ibb.co/2P3SLwK/colab.png" style="padding-bottom:5px;" />Run in Google Colab</a></td>
# <td align="center"><a target="_blank" href="https://github.com/aamini/introtodeeplearning/blob/master/lab3/RL.ipynb">
# <img src="https://i.ibb.co/xfJbPmL/github.png" height="70px" style="padding-bottom:5px;" />View Source on GitHub</a></td>
# </table>
#
# # Copyright Information
# + id="wkd375upWYok"
# Copyright 2021 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved.
#
# Licensed under the MIT License. You may not use this file except in compliance
# with the License. Use and/or modification of this code outside of 6.S191 must
# reference:
#
# © MIT 6.S191: Introduction to Deep Learning
# http://introtodeeplearning.com
#
# + [markdown] id="WoXYKhfZMHiw"
# # Laboratory 3: Reinforcement Learning
#
# Reinforcement learning (RL) is a subset of machine learning which poses learning problems as interactions between agents and environments. It often assumes agents have no prior knowledge of a world, so they must learn to navigate environments by optimizing a reward function. Within an environment, an agent can take certain actions and receive feedback, in the form of positive or negative rewards, with respect to their decision. As such, an agent's feedback loop is somewhat akin to the idea of "trial and error", or the manner in which a child might learn to distinguish between "good" and "bad" actions.
#
# In practical terms, our RL agent will interact with the environment by taking an action at each timestep, receiving a corresponding reward, and updating its state according to what it has "learned".
#
# 
#
# While the ultimate goal of reinforcement learning is to teach agents to act in the real, physical world, games provide a convenient proving ground for developing RL algorithms and agents. Games have some properties that make them particularly well suited for RL:
#
# 1. In many cases, games have perfectly describable environments. For example, all rules of chess can be formally written and programmed into a chess game simulator;
# 2. Games are massively parallelizable. Since they do not require running in the real world, simultaneous environments can be run on large data clusters;
# 3. Simpler scenarios in games enable fast prototyping. This speeds up the development of algorithms that could eventually run in the real-world; and
# 4. ... Games are fun!
#
# In previous labs, we have explored both supervised (with LSTMs, CNNs) and unsupervised / semi-supervised (with VAEs) learning tasks. Reinforcement learning is fundamentally different, in that we are training a deep learning algorithm to govern the actions of our RL agent, that is trying, within its environment, to find the optimal way to achieve a goal. The goal of training an RL agent is to determine the best next step to take to earn the greatest final payoff or return. In this lab, we focus on building a reinforcement learning algorithm to master two different environments with varying complexity.
#
# 1. **Cartpole**: Balance a pole, protruding from a cart, in an upright position by only moving the base left or right. Environment with a low-dimensional observation space.
# 2. [**Pong**](https://en.wikipedia.org/wiki/Pong): Beat your competitors (whether other AI or humans!) at the game of Pong. Environment with a high-dimensional observation space -- learning directly from raw pixels.
#
# Let's get started! First we'll import TensorFlow, the course package, and some dependencies.
#
# + id="EvdePP-VyVWp"
#Install some dependencies for visualizing the agents
# !apt-get install -y xvfb python-opengl x11-utils > /dev/null 2>&1
# !pip install gym pyvirtualdisplay scikit-video > /dev/null 2>&1
# Import Tensorflow 2.0
# %tensorflow_version 2.x
import tensorflow as tf
import numpy as np
import base64, io, time, gym
import IPython, functools
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
# Download and import the MIT 6.S191 package
# !pip install mitdeeplearning
import mitdeeplearning as mdl
# + [markdown] id="zmrHSiXKTXTY"
# Before we dive in, let's take a step back and outline our approach, which is generally applicable to reinforcement learning problems in general:
#
# 1. **Initialize our environment and our agent**: here we will describe the different observations and actions the agent can make in the environment.
# 2. **Define our agent's memory**: this will enable the agent to remember its past actions, observations, and rewards.
# 3. **Define a reward function**: describes the reward associated with an action or sequence of actions.
# 4. **Define the learning algorithm**: this will be used to reinforce the agent's good behaviors and discourage bad behaviors.
#
# + [markdown] id="UT7YL8KBJIIc"
# # Part 1: Cartpole
#
# ## 3.1 Define the Cartpole environment and agent
#
# ### Environment
#
# In order to model the environment for both the Cartpole and Pong tasks, we'll be using a toolkit developed by OpenAI called [OpenAI Gym](https://gym.openai.com/). It provides several pre-defined environments for training and testing reinforcement learning agents, including those for classic physics control tasks, Atari video games, and robotic simulations. To access the Cartpole environment, we can use `env = gym.make("CartPole-v0")`, which we gained access to when we imported the `gym` package. We can instantiate different [environments](https://gym.openai.com/envs/#classic_control) by passing the enivronment name to the `make` function.
#
# One issue we might experience when developing RL algorithms is that many aspects of the learning process are inherently random: initializing game states, changes in the environment, and the agent's actions. As such, it can be helpful to set an initial "seed" for the environment to ensure some level of reproducibility. Much like you might use `numpy.random.seed`, we can call the comparable function in gym, `seed`, with our defined environment to ensure the environment's random variables are initialized the same each time.
# + id="quv9SC0iIYFm"
### Instantiate the Cartpole environment ###
# Create the CartPole environment and seed it so the environment's random
# initial states are reproducible across runs.
env = gym.make("CartPole-v0")
env.seed(1)
# + [markdown] id="mhEITUcKK455"
# In Cartpole, a pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pole starts upright, and the goal is to prevent it from falling over. The system is controlled by applying a force of +1 or -1 to the cart. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center of the track. A visual summary of the cartpole environment is depicted below:
#
# <img width="400px" src="https://danielpiedrahita.files.wordpress.com/2017/02/cart-pole.png"></img>
#
# Given this setup for the environment and the objective of the game, we can think about: 1) what observations help define the environment's state; 2) what actions the agent can take.
#
# First, let's consider the observation space. In this Cartpole environment our observations are:
#
# 1. Cart position
# 2. Cart velocity
# 3. Pole angle
# 4. Pole rotation rate
#
# We can confirm the size of the space by querying the environment's observation space:
#
# + id="UVJaEcbdIX82"
# Observation space: the 4 physical quantities listed above (cart position,
# cart velocity, pole angle, pole rotation rate).
n_observations = env.observation_space
print("Environment has observation space =", n_observations)
# + [markdown] id="ZibGgjrALgPM"
# Second, we consider the action space. At every time step, the agent can move either right or left. Again we can confirm the size of the action space by querying the environment:
# + id="qc9SIPxBIXrm"
# Number of discrete actions the agent can choose from (left / right).
n_actions = env.action_space.n
print("Number of possible actions that the agent can choose from =", n_actions)
# + [markdown] id="pPfHME8aRKkb"
# ### Cartpole agent
#
# Now that we have instantiated the environment and understood the dimensionality of the observation and action spaces, we are ready to define our agent. In deep reinforcement learning, a deep neural network defines the agent. This network will take as input an observation of the environment and output the probability of taking each of the possible actions. Since Cartpole is defined by a low-dimensional observation space, a simple feed-forward neural network should work well for our agent. We will define this using the `Sequential` API.
#
# + id="W-o_XK4oQ4eu"
### Define the Cartpole agent ###
# Defines a feed-forward neural network for the Cartpole agent.
# NOTE: this is a lab template -- the output Dense layer is an intentional
# TODO placeholder, so the model is incomplete until it is filled in.
def create_cartpole_model():
    model = tf.keras.models.Sequential([
        # First Dense layer
        tf.keras.layers.Dense(units=32, activation='relu'),
        '''TODO: Define the last Dense layer, which will provide the network's output.
        # Think about the space the agent needs to act in!'''
        # [TODO: Dense layer]
    ])
    return model
cartpole_model = create_cartpole_model()
# + [markdown] id="d5D5NSIYS2IW"
# Now that we have defined the core network architecture, we will define an *action function* that executes a forward pass through the network, given a set of observations, and samples from the output. This sampling from the output probabilities will be used to select the next action for the agent. We will also add support so that the `choose_action` function can handle either a single observation or a batch of observations.
#
# **Critically, this action function is totally general -- we will use this function for both Cartpole and Pong, and it is applicable to other RL tasks, as well!**
# + id="E_vVZRr8Q4R_"
### Define the agent's action function ###
# Function that takes observations as input, executes a forward pass through model,
#   and outputs a sampled action.
# Arguments:
#   model: the network that defines our agent
#   observation: observation(s) which is/are fed as input to the model
#   single: flag as to whether we are handling a single observation or batch of
#     observations, provided as an np.array
# Returns:
#   action: choice of agent action
def choose_action(model, observation, single=True):
    # add batch dimension to the observation if only a single example was provided
    observation = np.expand_dims(observation, axis=0) if single else observation
    '''TODO: feed the observations through the model to predict the log
    probabilities of each possible action.'''
    # Lab placeholder: replace '''TODO''' with the (batched) observations.
    logits = model.predict('''TODO''')
    '''TODO: Choose an action from the categorical distribution defined by the log
    probabilities of each possible action.'''
    # Lab placeholder: sample an action from the categorical distribution.
    action = ['''TODO''']
    # Flatten to a 1-D array of sampled action indices; presumably the sampling
    # op above returns a tensor (hence .numpy()) -- confirm once filled in.
    action = action.numpy().flatten()
    return action[0] if single else action
# + [markdown] id="_tR9uAWcTnkr"
# ## 3.2 Define the agent's memory
#
# Now that we have instantiated the environment and defined the agent network architecture and action function, we are ready to move on to the next step in our RL workflow:
# 1. **Initialize our environment and our agent**: here we will describe the different observations and actions the agent can make in the environment.
# 2. **Define our agent's memory**: this will enable the agent to remember its past actions, observations, and rewards.
# 3. **Define the learning algorithm**: this will be used to reinforce the agent's good behaviors and discourage bad behaviors.
#
# In reinforcement learning, training occurs alongside the agent's acting in the environment; an *episode* refers to a sequence of actions that ends in some terminal state, such as the pole falling down or the cart crashing. The agent will need to remember all of its observations and actions, such that once an episode ends, it can learn to "reinforce" the good actions and punish the undesirable actions via training. Our first step is to define a simple `Memory` buffer that contains the agent's observations, actions, and received rewards from a given episode. We will also add support to combine a list of `Memory` objects into a single `Memory`. This will be very useful for batching, which will help you accelerate training later on in the lab.
#
# **Once again, note the modularity of this memory buffer -- it can and will be applied to other RL tasks as well!**
# + id="8MM6JwXVQ4JG"
### Agent Memory ###
# Rolling buffer of (observation, action, reward) triples for one episode.
# add_to_memory contains intentional lab TODO placeholders.
class Memory:
    def __init__(self):
        self.clear()
    # Resets/restarts the memory buffer (empties all three parallel lists)
    def clear(self):
        self.observations = []
        self.actions = []
        self.rewards = []
    # Add observations, actions, rewards to memory (the three lists stay
    # index-aligned: entry i describes timestep i of the episode)
    def add_to_memory(self, new_observation, new_action, new_reward):
        self.observations.append(new_observation)
        '''TODO: update the list of actions with new action'''
        # TODO: your update code here
        '''TODO: update the list of rewards with new reward'''
        # TODO: your update code here
# Merge several per-episode Memory buffers into one batch Memory,
# preserving per-step (observation, action, reward) alignment.
# Useful for batched training across parallel episodes.
def aggregate_memories(memories):
    combined = Memory()
    for episode_memory in memories:
        steps = zip(episode_memory.observations,
                    episode_memory.actions,
                    episode_memory.rewards)
        for observation, action, reward in steps:
            combined.add_to_memory(observation, action, reward)
    return combined
# Instantiate a single Memory buffer
memory = Memory()
# + [markdown] id="D4YhtPaUVj5m"
# ## 3.3 Reward function
#
# We're almost ready to begin the learning algorithm for our agent! The next step is to compute the rewards of our agent as it acts in the environment. Since we (and the agent) are uncertain about if and when the game or task will end (i.e., when the pole will fall), it is useful to emphasize getting rewards **now** rather than later in the future -- this is the idea of discounting. Recall from lecture that we use reward discounting to give more preference to getting rewards now rather than later in the future; the idea of discounting rewards is similar to discounting money in the case of interest.
#
# To compute the expected cumulative reward, known as the **return**, at a given timestep in a learning episode, we sum the discounted rewards expected at that time step $t$, within a learning episode, and projecting into the future. We define the return (cumulative reward) at a time step $t$, $R_{t}$ as:
#
# >$R_{t}=\sum_{k=0}^\infty\gamma^kr_{t+k}$
#
# where $0 < \gamma < 1$ is the discount factor and $r_{t}$ is the reward at time step $t$, and the index $k$ increments projection into the future within a single learning episode. Intuitively, you can think of this function as depreciating any rewards received at later time steps, which will force the agent prioritize getting rewards now. Since we can't extend episodes to infinity, in practice the computation will be limited to the number of timesteps in an episode -- after that the reward is assumed to be zero.
#
# Take note of the form of this sum -- we'll have to be clever about how we implement this function. Specifically, we'll need to initialize an array of zeros, with length of the number of time steps, and fill it with the real discounted reward values as we loop through the rewards from the episode, which will have been saved in the agents memory. What we ultimately care about is which actions are better relative to other actions taken in that episode -- so, we'll normalize our computed rewards, using the mean and standard deviation of the rewards across the learning episode.
#
# + id="5_Q2OFYtQ32X"
### Reward function ###
# Helper function that normalizes x to zero mean and unit variance.
# Arguments:
#   x: array or array-like of rewards
# Returns:
#   float32 np.array with mean 0 and std 1
def normalize(x):
    # Work on a float copy: the original subtracted in place (`x -= ...`),
    # which mutates the caller's array and raises for integer-dtype or
    # plain-list input.
    x = np.asarray(x, dtype=np.float64)
    x = x - np.mean(x)
    x = x / np.std(x)
    return x.astype(np.float32)
# Compute normalized, discounted, cumulative rewards (i.e., return)
# Arguments:
#   rewards: reward at timesteps in episode
#   gamma: discounting factor
# Returns:
#   normalized discounted reward (float32 np.array, same shape as rewards)
def discount_rewards(rewards, gamma=0.95):
    # Force a float accumulator: plain np.zeros_like on an integer reward
    # list would silently truncate the fractional discounted returns.
    discounted_rewards = np.zeros_like(rewards, dtype=np.float32)
    R = 0
    # Walk backwards so R accumulates the gamma-discounted future rewards.
    for t in reversed(range(0, len(rewards))):
        # update the total discounted reward
        R = R * gamma + rewards[t]
        discounted_rewards[t] = R
    return normalize(discounted_rewards)
# + [markdown] id="QzbY-mjGYcmt"
# ## 3.4 Learning algorithm
#
# Now we can start to define the learning algorithm which will be used to reinforce good behaviors of the agent and discourage bad behaviors. In this lab, we will focus on *policy gradient* methods which aim to **maximize** the likelihood of actions that result in large rewards. Equivalently, this means that we want to **minimize** the negative likelihood of these same actions. We achieve this by simply **scaling** the probabilities by their associated rewards -- effectively amplifying the likelihood of actions that result in large rewards.
#
# Since the log function is monotonically increasing, this means that minimizing **negative likelihood** is equivalent to minimizing **negative log-likelihood**. Recall that we can easily compute the negative log-likelihood of a discrete action by evaluting its [softmax cross entropy](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits). Like in supervised learning, we can use stochastic gradient descent methods to achieve the desired minimization.
#
# Let's begin by defining the loss function.
# + id="fsgZ3IDCY_Zn"
### Loss function ###
# Policy-gradient loss: negative log-likelihood of the taken actions,
# scaled by their (discounted) rewards. Lab template -- the '''TODO'''
# placeholders must be filled in before this runs.
# Arguments:
#   logits: network's predictions for actions to take
#   actions: the actions the agent took in an episode
#   rewards: the rewards the agent received in an episode
# Returns:
#   loss
def compute_loss(logits, actions, rewards):
    '''TODO: complete the function call to compute the negative log probabilities'''
    # Lab placeholder: logits= the network outputs, labels= the taken actions.
    neg_logprob = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits='''TODO''', labels='''TODO''')
    '''TODO: scale the negative log probability by the rewards'''
    # Lab placeholder: mean of neg_logprob weighted by the rewards.
    loss = tf.reduce_mean('''TODO''')
    return loss
# + [markdown] id="Rr5vQ9fqbPpp"
# Now let's use the loss function to define a training step of our learning algorithm:
# + id="_50ada7nbZ7L"
### Training step (forward and backpropagation) ###
# One policy-gradient update: forward pass under a GradientTape, loss,
# gradients, optimizer step. Lab template -- '''TODO''' placeholders remain.
def train_step(model, optimizer, observations, actions, discounted_rewards):
    with tf.GradientTape() as tape:
        # Forward propagate through the agent network
        logits = model(observations)
        '''TODO: call the compute_loss function to compute the loss'''
        # Lab placeholder: compute_loss(logits, actions, discounted_rewards).
        loss = compute_loss('''TODO''', '''TODO''', '''TODO''')
    '''TODO: run backpropagation to minimize the loss using the tape.gradient method'''
    # Lab placeholder: differentiate the loss w.r.t. the model's variables.
    grads = tape.gradient('''TODO''', model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
# + [markdown] id="XsjKXh6BcgjR"
# ## 3.5 Run cartpole!
#
# Having had no prior knowledge of the environment, the agent will begin to learn how to balance the pole on the cart based only on the feedback received from the environment! Having defined how our agent can move, how it takes in new observations, and how it updates its state, we'll see how it gradually learns a policy of actions to optimize balancing the pole as long as possible. To do this, we'll track how the rewards evolve as a function of training -- how should the rewards change as training progresses?
# + id="XmOzc2rrcn8Q"
### Cartpole training! ###
# Learning rate and optimizer
learning_rate = 1e-3
optimizer = tf.keras.optimizers.Adam(learning_rate)
# instantiate cartpole agent
cartpole_model = create_cartpole_model()
# to track our progress: exponentially smoothed episode rewards + live plot
smoothed_reward = mdl.util.LossHistory(smoothing_factor=0.9)
plotter = mdl.util.PeriodicPlotter(sec=2, xlabel='Iterations', ylabel='Rewards')
if hasattr(tqdm, '_instances'): tqdm._instances.clear() # clear if it exists
for i_episode in range(500):
    plotter.plot(smoothed_reward.get())
    # Restart the environment at the start of each episode
    observation = env.reset()
    memory.clear()
    while True:
        # using our observation, choose an action and take it in the environment
        action = choose_action(cartpole_model, observation)
        next_observation, reward, done, info = env.step(action)
        # record the (observation, action, reward) triple for this timestep
        memory.add_to_memory(observation, action, reward)
        # is the episode over? did you crash or do so well that you're done?
        if done:
            # determine total reward and keep a record of this
            total_reward = sum(memory.rewards)
            smoothed_reward.append(total_reward)
            # initiate training - remember we don't know anything about how the
            # agent is doing until it has crashed! One gradient update is made
            # per finished episode, over the whole episode's memory.
            train_step(cartpole_model, optimizer,
                       observations=np.vstack(memory.observations),
                       actions=np.array(memory.actions),
                       discounted_rewards = discount_rewards(memory.rewards))
            # reset the memory for the next episode
            memory.clear()
            break
        # update our observations
        observation = next_observation
# + [markdown] id="mkcUtGF1VE-K"
# To get a sense of how our agent did, we can save a video of the trained model working on balancing the pole. Realize that this is a brand new environment that the agent has not seen before!
#
# Let's display the saved video to watch how our agent did!
#
# + id="PAYBkv6Zbk0J"
# Record a rollout of the trained agent in a fresh CartPole environment,
# then render the saved video inline.
saved_cartpole = mdl.lab3.save_video_of_model(cartpole_model, "CartPole-v0")
mdl.lab3.play_video(saved_cartpole)
# + [markdown] id="CSbVNDpaVb3_"
# How does the agent perform? Could you train it for shorter amounts of time and still perform well? Do you think that training longer would help even more?
# + [markdown] id="Eu6Mqxc720ST"
# #Part 2: Pong
#
# In Cartpole, we dealt with an environment that was static -- in other words, it didn't change over time. What happens if our environment is dynamic and unpredictable? Well that's exactly the case in [Pong](https://en.wikipedia.org/wiki/Pong), since part of the environment is the opposing player. We don't know how our opponent will act or react to our actions, so the complexity of our problem increases. It also becomes much more interesting, since we can compete to beat our opponent. RL provides a powerful framework for training AI systems with the ability to handle and interact with dynamic, unpredictable environments. In this part of the lab, we'll use the tools and workflow we explored in Part 1 to build an RL agent capable of playing the game of Pong.
#
# + [markdown] id="srZ4YE29isuA"
# ## 3.6 Define and inspect the Pong environment
#
# As with Cartpole, we'll instantiate the Pong environment in the OpenAI gym, using a seed of 1.
# + id="lbYHLr66i15n"
# Factory for a fresh Pong environment. frameskip=5 repeats each chosen
# action for 5 consecutive frames.
def create_pong_env():
    pong_environment = gym.make("Pong-v0", frameskip=5)
    return pong_environment
env = create_pong_env()
env.seed(1); # for reproducibility
# + [markdown] id="52uZ2Xhyi-MW"
# Let's next consider the observation space for the Pong environment. Instead of four physical descriptors of the cart-pole setup, in the case of Pong our observations are the individual video frames (i.e., images) that depict the state of the board. Thus, the observations are 210x160 RGB images (arrays of shape (210,160,3)).
#
# We can again confirm the size of the observation space by query:
# + id="0yX4GWvxjnHS"
# Observation space: raw 210x160 RGB frames, per the description above.
print("Environment has observation space =", env.observation_space)
# + [markdown] id="uuEC2TdSjx9D"
# In Pong, at every time step, the agent (which controls the paddle) has six actions to choose from: no-op (no operation), move right, move left, fire, fire right, and fire left. Let's confirm the size of the action space by querying the environment:
# + id="Iuy9oPc1kag3"
# Number of discrete Pong actions (six -- see the list above).
n_actions = env.action_space.n
print("Number of possible actions that the agent can choose from =", n_actions)
# + [markdown] id="9-fghDRigUE5"
# ## 3.7 Define the Pong agent
#
# As before, we'll use a neural network to define our agent. What network architecture do you think would be especially well suited to this game? Since our observations are now in the form of images, we'll add convolutional layers to the network to increase the learning capacity of our network. Note that you will be tasked with completing a template CNN architecture for the Pong agent -- but you should certainly experiment beyond this template to try to optimize performance!
# + id="IJiqbFYpgYRH"
### Define the Pong agent ###
# Functionally define layers for convenience
# All convolutional layers will have ReLu activation
Conv2D = functools.partial(tf.keras.layers.Conv2D, padding='same', activation='relu')
Flatten = tf.keras.layers.Flatten
Dense = tf.keras.layers.Dense
# Defines a CNN for the Pong agent. Lab template: several layers are
# intentional TODO placeholders, so the model is incomplete as written.
def create_pong_model():
    model = tf.keras.models.Sequential([
        # Convolutional layers
        # First, 32 5x5 filters and 2x2 stride
        Conv2D(filters=32, kernel_size=5, strides=2),
        '''TODO: define convolutional layers with 48 5x5 filters and 2x2 stride'''
        # [your Conv layer here]
        '''TODO: define two convolutional layers with 64 3x3 filters and 2x2 stride'''
        # [your Conv layers here]
        Flatten(),
        # Fully connected layer and output
        Dense(units=128, activation='relu'),
        '''TODO: define the output dimension of the last Dense layer.
        Pay attention to the space the agent needs to act in'''
        # [TODO: your Dense layer here]
    ])
    return model
pong_model = create_pong_model()
# + [markdown] id="yaeZ067olFiJ"
# Since we've already defined the action function, `choose_action(model, observation)`, we don't need to define it again. Instead, we'll be able to reuse it later on by passing in our new model we've just created, `pong_model`. This is awesome because our action function provides a modular and generalizable method for all sorts of RL agents!
# + [markdown] id="l0RvqOVkmc2r"
# ## 3.8 Pong-specific functions
#
# In Part 1 (Cartpole), we implemented some key functions and classes to build and train our RL agent -- `choose_action(model, observation)` and the `Memory` class, for example. However, in getting ready to apply these to a new game like Pong, we might need to make some slight modifications.
#
# Namely, we need to think about what happens when a game ends. In Pong, we know a game has ended if the reward is +1 (we won!) or -1 (we lost unfortunately). Otherwise, we expect the reward at a timestep to be zero -- the players (or agents) are just playing each other. So, after a game ends, we will need to reset the running reward sum to zero. This will result in a modified reward function.
# + id="iEZG2o50luLu"
### Pong reward function ###
# Compute normalized, discounted rewards for Pong (i.e., return)
# Arguments:
# rewards: reward at timesteps in episode
# gamma: discounting factor. Note increase to 0.99 -- rate of depreciation will be slower.
# Returns:
# normalized discounted reward
def discount_rewards(rewards, gamma=0.99):
    """Compute normalized, discounted cumulative rewards (the return) for Pong.

    Unlike the CartPole version, the running sum is reset whenever a nonzero
    reward is encountered, because in Pong a reward of +1 (win) or -1 (loss)
    marks the end of a game within the episode.

    Args:
        rewards: per-timestep rewards for the episode (mostly zeros, with
            +1/-1 at game boundaries).
        gamma: discount factor. Note the increase to 0.99 -- the rate of
            depreciation is slower than in CartPole.

    Returns:
        Normalized discounted rewards (via the externally defined
        `normalize` helper).
    """
    # Force a float dtype explicitly: np.zeros_like would inherit an integer
    # dtype if `rewards` holds ints, silently truncating the discounted
    # values when they are assigned below.
    discounted_rewards = np.zeros_like(rewards, dtype=np.float64)
    R = 0
    for t in reversed(range(0, len(rewards))):
        # Reset the sum if the reward is not 0 (a game has ended!)
        if rewards[t] != 0:
            R = 0
        # Standard discounted-return recursion: R_t = r_t + gamma * R_{t+1}
        R = R * gamma + rewards[t]
        discounted_rewards[t] = R
    return normalize(discounted_rewards)
# + [markdown] id="HopLpb4IoOqA"
# Additionally, we have to consider the nature of the observations in the Pong environment, and how they will be fed into our network. Our observations in this case are images. Before we input an image into our network, we'll do a bit of pre-processing to crop and scale, clean up the background colors to a single color, and set the important game elements to a single color. Let's use this function to visualize what a single observation might look like before and after pre-processing.
# + id="no5IIYtFm8pI"
### Visualize a raw vs. pre-processed Pong observation ###

# Advance the environment with a handful of random actions so the frame
# contains some game activity worth looking at.
observation = env.reset()
for _ in range(30):
    observation, _, _, _ = env.step(np.random.choice(n_actions))

observation_pp = mdl.lab3.preprocess_pong(observation)

# Side-by-side comparison: raw frame on the left, pre-processed on the right.
fig = plt.figure(figsize=(10, 3))
raw_ax = fig.add_subplot(121)
pp_ax = fig.add_subplot(122)
raw_ax.imshow(observation)
raw_ax.grid(False)
pp_ax.imshow(np.squeeze(observation_pp))
pp_ax.grid(False)
plt.title('Preprocessed Observation')
# + [markdown] id="YBLVfdpv7ajG"
# Let's also consider the fact that, unlike CartPole, the Pong environment has an additional element of uncertainty -- regardless of what action the agent takes, we don't know how the opponent will play. That is, the environment is changing over time, based on *both* the actions we take and the actions of the opponent, which result in motion of the ball and motion of the paddles.
#
# Therefore, to capture the dynamics, we also consider how the environment changes by looking at the difference between a previous observation (image frame) and the current observation (image frame). We've implemented a helper function, `pong_change`, that pre-processes two frames, calculates the change between the two, and then re-normalizes the values. Let's inspect this to visualize how the environment can change:
# + id="ItWrUwM87ZBw"
### Visualize the frame-to-frame change the model will consume ###

# Take one more random step so we have a "current" frame to compare against
# the "previous" observation from the cell above.
next_observation, _, _, _ = env.step(np.random.choice(n_actions))
diff = mdl.lab3.pong_change(observation, next_observation)

f, ax = plt.subplots(1, 3, figsize=(15, 15))
for panel in ax:
    panel.grid(False)
    panel.axis("off")

# Previous frame, current frame, and their pre-processed difference.
panel_contents = [
    (observation, 'Previous Frame'),
    (next_observation, 'Current Frame'),
    (np.squeeze(diff), 'Difference (Model Input)'),
]
for panel, (image, label) in zip(ax, panel_contents):
    panel.imshow(image)
    panel.set_title(label)
# + [markdown] id="bYwIWC-Cz8F2"
# What do you notice? How and why might these pre-processing changes be important for training our RL algorithm? How and why might consideration of the difference between frames be important for training and performance?
# + [markdown] id="YiJLu9SEAJu6"
# ### Rollout function
#
# We're now set up to define our key action algorithm for the game of Pong, which will ultimately be used to train our Pong agent. This function can be thought of as a "rollout", where the agent will 1) make an observation of the environment, 2) select an action based on its state in the environment, 3) execute a policy based on that action, resulting in some reward and a change to the environment, and 4) finally add memory of that action-reward to its `Memory` buffer. We will define this algorithm in the `collect_rollout` function below, and use it soon within a training block.
#
# Earlier you visually inspected the raw environment frames, the pre-processed frames, and the difference between previous and current frames. As you may have gathered, in a dynamic game like Pong, it can actually be helpful to consider the difference between two consecutive observations. This gives us information about the movement between frames -- how the game is changing. We will do this using the `pong_change` function we explored above (which also pre-processes frames for us).
#
# We will use differences between frames as the input on which actions will be selected. These observation changes will be forward propagated through our Pong agent, the CNN network model, which will then predict the next action to take based on this observation. The raw reward will be computed. The observation, action, and reward will be recorded into memory. This will loop until a particular game ends -- the rollout is completed.
#
# For now, we will define `collect_rollout` such that a batch of observations (i.e., from a batch of agent-environment worlds) can be processed serially (i.e., one at a time, in sequence). We will later utilize a parallelized version of this function that will parallelize batch processing to help speed up training! Let's get to it.
# + id="CH9C4JXUOyv-"
### Rollout function ###
# Key steps for agent's operation in the environment, until completion of a rollout.
# An observation is drawn; the agent (controlled by model) selects an action;
# the agent executes that action in the environment and collects rewards;
# information is added to memory.
# This is repeated until the completion of the rollout -- the Pong game ends.
# Processes multiple batches serially.
#
# Arguments:
# batch_size: number of batches, to be processed serially
# env: environment
# model: Pong agent model
# choose_action: choose_action function
# Returns:
# memories: array of Memory buffers, of length batch_size, corresponding to the
# episode executions from the rollout
def collect_rollout(batch_size, env, model, choose_action):
    """Execute full Pong episodes and record them into Memory buffers.

    For each of `batch_size` serially processed episodes: observe the
    environment, select an action from the model based on the *change*
    between the last two frames, execute it, and store the
    (observation-change, action, reward) triple -- until the game ends.

    Note: the original template left TODO placeholders here that were not
    valid Python; this version fills in the intended implementation.

    Args:
        batch_size: number of batches (episodes), to be processed serially.
        env: the Pong environment.
        model: the Pong agent model.
        choose_action: function mapping (model, observation) -> action.

    Returns:
        memories: array of Memory buffers, of length batch_size,
            corresponding to the episode executions from the rollout.
    """
    # Holder array for the Memory buffers
    memories = []

    # Process batches serially by iterating through them
    for b in range(batch_size):
        # Instantiate Memory buffer, restart the environment
        memory = Memory()
        next_observation = env.reset()
        previous_frame = next_observation
        done = False  # tracks whether the episode (game) is done or not

        while not done:
            current_frame = next_observation

            # Observation change: the pre-processed difference between the
            # past two frames -- this captures motion in the game.
            frame_diff = mdl.lab3.pong_change(previous_frame, current_frame)

            # Choose an action for the Pong model using the frame difference
            action = choose_action(model, frame_diff)

            # Take the chosen action
            next_observation, reward, done, info = env.step(action)

            # Save the observed frame difference, the action that was taken,
            # and the resulting reward
            memory.add_to_memory(frame_diff, action, reward)

            previous_frame = current_frame

        # Add the memory from this batch to the array of all Memory buffers
        memories.append(memory)

    return memories
# + [markdown] id="NGIJvFUHHbDi"
# To get a sense of what is encapsulated by `collect_rollout`, we will instantiate an *untrained* Pong model, run a single rollout using this model, save the memory, and play back the observations the model sees. Note that these will be frame *differences*.
# + id="msNBRcULHbrd"
### Rollout with untrained Pong model ###

# A freshly constructed, untrained agent -- its play is effectively random.
test_agent = create_pong_model()

# Run a single-episode rollout and save what the agent "saw" (the frame
# differences) as a video for playback.
rollout_memories = collect_rollout(1, env, test_agent, choose_action)
rollout_video = mdl.lab3.save_video_of_memory(
    rollout_memories[0], "Pong-Random-Agent.mp4")

# Replay the recorded observations.
mdl.lab3.play_video(rollout_video)
# + [markdown] id="mRqcaDQ1pm3x"
# ## 3.9 Training Pong
#
# We're now all set up to start training our RL algorithm and agent for the game of Pong! We've already defined the following:
#
# 1. Loss function, `compute_loss`, and backpropagation step, `train_step`. Our loss function employs policy gradient learning. `train_step` executes a single forward pass and backpropagation gradient update.
# 2. RL agent algorithm: `collect_rollout`. Serially processes batches of episodes, executing actions in the environment, collecting rewards, and saving these to `Memory`.
#
# We will use these functions to train the Pong agent.
#
# In the training block, episodes will be executed by agents in the environment via the RL algorithm defined in the `collect_rollout` function. Since RL agents start off with literally zero knowledge of their environment, it can often take a long time to train them and achieve stable behavior. To alleviate this, we have implemented a parallelized version of the RL algorithm, `parallelized_collect_rollout`, which you can use to accelerate training across multiple parallel batches.
#
# For training, information in the `Memory` buffer from all these batches will be aggregated (after all episodes, i.e., games, end). Discounted rewards will be computed, and this information will be used to execute a training step. Memory will be cleared, and we will do it all over again!
#
# Let's run the code block to train our Pong agent. Note that, even with parallelization, completing training and getting stable behavior will take quite a bit of time (estimated at least a couple of hours). We will again visualize the evolution of the total reward as a function of training to get a sense of how the agent is learning.
# + id="FaEHTMRVMRXP"
### Hyperparameters and setup for training ###
# Rerun this cell if you want to re-initialize the training process
# (i.e., create new model, reset loss, etc)

# Hyperparameters
learning_rate = 1e-3
MAX_ITERS = 1000 # increase the maximum to train longer
batch_size = 5 # number of batches to run

# Model, optimizer
pong_model = create_pong_model()
optimizer = tf.keras.optimizers.Adam(learning_rate)
iteration = 0 # counter for training steps

# Plotting: exponentially smoothed win percentage, replotted every 15 seconds.
smoothed_reward = mdl.util.LossHistory(smoothing_factor=0.9)
smoothed_reward.append(0) # start the reward at zero for baseline comparison
plotter = mdl.util.PeriodicPlotter(sec=15, xlabel='Iterations', ylabel='Win Percentage (%)')

# Batches and environment
# To parallelize batches, we need to make multiple copies of the environment
# (one independent environment per parallel batch).
envs = [create_pong_env() for _ in range(batch_size)] # For parallelization
# + id="xCwyQQrPnkZG"
### Training Pong ###
# You can run this cell and stop it anytime in the middle of training to save
# a progress video (see next codeblock). To continue training, simply run this
# cell again, your model will pick up right where it left off. To reset training,
# you need to run the cell above.
games_to_win_episode = 21 # this is set by OpenAI gym and cannot be changed.

# Main training loop: each iteration runs a batch of episodes, aggregates
# their memories, and performs one policy-gradient update.
while iteration < MAX_ITERS:

    plotter.plot(smoothed_reward.get())

    tic = time.time()
    # RL agent algorithm. By default, uses serial batch processing.
    # memories = collect_rollout(batch_size, env, pong_model, choose_action)

    # Parallelized version. Uncomment line below (and comment out line above) to parallelize
    memories = mdl.lab3.parallelized_collect_rollout(batch_size, envs, pong_model, choose_action)
    print(time.time()-tic)  # wall-clock time for this rollout batch

    # Aggregate memories from multiple batches
    batch_memory = aggregate_memories(memories)

    # Track performance based on win percentage (calculated from rewards).
    # Each nonzero reward (+1 or -1) corresponds to one completed game.
    total_wins = sum(np.array(batch_memory.rewards) == 1)
    total_games = sum(np.abs(np.array(batch_memory.rewards)))
    # NOTE(review): divides by zero if no game finished in the batch --
    # presumably every episode ends with at least one +/-1 reward; confirm.
    win_rate = total_wins / total_games
    smoothed_reward.append(100 * win_rate)

    # Training! One gradient step over all aggregated experience.
    train_step(
        pong_model,
        optimizer,
        observations = np.stack(batch_memory.observations, 0),
        actions = np.array(batch_memory.actions),
        discounted_rewards = discount_rewards(batch_memory.rewards)
    )

    # Save a video of progress -- this can be played back later
    if iteration % 100 == 0:
        mdl.lab3.save_video_of_model(pong_model, "Pong-v0",
                                     suffix="_"+str(iteration))

    iteration += 1 # Mark next episode
# + [markdown] id="8LiEY5Y_ts-Z"
# Finally we can put our trained agent to the test! It will play in a newly instantiated Pong environment against the "computer", a base AI system for Pong. Your agent plays as the green paddle. Let's watch the match instant replay!
# + id="TvHXbkL0tR6M"
### Watch the trained agent play ###

# Record a fresh match of the trained agent (green paddle) against the
# built-in computer opponent, then replay it inline.
final_match_video = mdl.lab3.save_video_of_model(pong_model, "Pong-v0", suffix="_latest")
mdl.lab3.play_video(final_match_video, width=400)
# + [markdown] id="TIlwIgBP3Js6"
# ## 3.10 Conclusion and submission information
#
# That's it! Congratulations on training two RL agents and putting them to the test! We encourage you to consider the following:
#
# * How does the agent perform?
# * Could you train it for shorter amounts of time and still perform well?
# * What are some limitations of the current representation i.e., difference of current and previous frames? How is this reflected in the agent's behavior? What could be done to generate an improved representation?
# * How does the complexity of Pong relative to CartPole alter the rate at which the agent learns and its performance?
# * What are some things you could change about the agent or the learning process to potentially improve performance?
#
# Try to optimize your **Pong** model and algorithm to achieve improved performance. **MIT students and affiliates will be eligible for prizes during the IAP offering.** To enter the competition, MIT students and affiliates should upload the following to the course Canvas:
#
# * Jupyter notebook with the code you used to generate your results, **with the Pong training executed**;
# * saved video of your Pong agent competing;
# * a description and/or diagram of the architecture, settings, and hyperparameters you used to generate your result -- if there are any additional or interesting modifications you made to the template code, please include these in your description;
# * discussion of why these modifications helped improve performance.
#
# Good luck!
|
lab3/RL.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: sfem3
# language: python
# name: sfem3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %autosave 0

# Verbose logging so the sdata/HDF5 operations below report their activity.
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')

import os
import sys
import numpy as np
import pandas as pd
import sdata
import uuid
import time
from sdata.io.hdf import FlatHDFDataStore

# Record which sdata version this notebook was executed against.
print("sdata v{}".format(sdata.__version__))
# -
# Open (and truncate, mode="w") a flat HDF5 store for sdata objects.
store = FlatHDFDataStore("/tmp/flatstore.h5", mode="w")
store
store.keys()

# Build a small sdata.Data object: deterministic UUID derived from the
# name, a three-row table, and a multi-line description.
data = sdata.Data(name="otto",
                  uuid=sdata.uuid_from_str("otto"),
                  table=pd.DataFrame({"a":[1,2,3]}),
                  description="Hallo\nSpencer")
data

# Persist it and inspect the store; the raw metadata lives under the
# object's uuid-derived group path.
store.put(data)
store.keys()
store.hdf.get('/21b83703d98e38a7be2e50e38326d0ce/metadata')
store.keys()

# Make a modified copy of `data` under a new name/uuid, with an extra
# derived column and two typed metadata entries.
datac = data.copy()
datac.name = "otto2"
datac.uuid = 'b8315be85d9945579cf8dc6a80a62524'
datac.df["b"] = datac.df["a"]**2
datac.df
datac.metadata.add("force", 1.23, dtype="float", description="a force", label="F")
datac.metadata.add("runid", 123, dtype="int", description="a int", label="r")
datac
store.put(datac)
store.keys()

# Round-trip: fetch the copy back out of the store by its uuid.
data2 = store.get_data_by_uuid('b8315be85d9945579cf8dc6a80a62524')
print(data2)
data2.metadata.df
print([data2.description])
(data2.describe())
store.keys()
store.hdf.keys()
store.get_dict()
store.get_all_metadata()

# Compare the original table against the round-tripped copy.
# Fix: `datata.df` was a typo (NameError); the original object is `data`.
data.df
data2.df
# Attribute-level metadata of the original object.
data.metadata.attributes
# +
#store.close()
# -
# Inspect the raw HDF5 layout underneath the store.
store.hdf.groups()
store.hdf.info()
# Plain DataFrames can also be written at arbitrary nested HDF paths.
store.hdf.put("/a/b/b", data.df)
store.hdf.put("/a/b/c/d", data.df)
store.close()
# `data` lives in memory, so describing it still works after the store closes.
data.describe()
|
ipynb/sdata_hdf5.ipynb
|