code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import sys
import numpy as np
import cv2
from PIL import Image

# +
# Load the sample image (Korean path: "fish example/fish.jpg")
src = cv2.imread('./openCV/grabcut/물고기 예제/물고기.jpg')
if src is None:
    print('image load failed')
    sys.exit()
# -

# Let the user draw a rectangle around the foreground object
rc = cv2.selectROI(src)

# Run GrabCut seeded by the rectangle; non-selected pixels become background
# NOTE(review): bgdModel/fgdModel are passed as None — most OpenCV examples
# pass np.zeros((1, 65), np.float64); confirm None is accepted on the
# OpenCV version in use
mask = np.zeros(src.shape[:2], np.uint8)
cv2.grabCut(src, mask, rc, None, None, 5, cv2.GC_INIT_WITH_RECT)

# Mask labels: 0 = cv2.GC_BGD, 2 = cv2.GC_PR_BGD (probable background),
# 1 = cv2.GC_FGD, 3 = cv2.GC_PR_FGD (probable foreground)
mask_fg = np.where( (mask == 0) | (mask == 2), 0, 1).astype('uint8')
mask_bg = np.where( (mask == 1) | (mask == 3), 0, 1).astype('uint8')

# Zero out background pixels (dst_fg) / foreground pixels (dst_bg)
dst_fg = src * mask_fg[:, :, np.newaxis]
dst_bg = src * mask_bg[:, :, np.newaxis]

# Display results (only the last expression renders in a notebook cell)
Image.fromarray(mask_fg*255)
Image.fromarray(mask_bg*255)
Image.fromarray(dst_fg)
Image.fromarray(dst_bg)
| opencv_grabcut_nemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install PyGithub pandas > /dev/null 2>&1
import os
import datetime
import pandas as pd
from github import Github

# Authenticate against the GitHub API with a token from the environment
github = Github(os.environ['GITHUB_TOKEN'])

# # Jupyter Community Stats

# +
# Organizations and the repositories to collect stats for
repositories = {}
repositories['jupyter'] = ['notebook','jupyter_client', 'nb2kg', 'enterprise_gateway', 'kernel_gateway']
repositories['jupyterlab'] = ['jupyterlab']
repositories['jupyterhub'] = ['jupyterhub']
repositories['ipython'] = ['ipython']
repositories['irkernel'] = ['irkernel']
repositories['apache'] = ['incubator-toree']

# Collect star/watcher/fork/contributor counts for every repo
# (one API round-trip per repo; totalCount forces a paginated count)
community_stats = {}
index = 0
for org, repos in repositories.items():
    github_org = github.get_organization(org)
    for repo in repos:
        github_repo = github_org.get_repo(repo)
        contributors = github_repo.get_contributors().totalCount
        community_stats[index] = {'org':org, 'repo':repo, 'full_name':github_repo.full_name, 'stars':github_repo.stargazers_count, 'watchers':github_repo.watchers_count, 'forks':github_repo.forks_count,'contributors':contributors}
        index = index+1
community_stats_df = pd.DataFrame.from_dict(community_stats, orient='index')
# -
community_stats_df.to_csv('community_stats.csv', index=False)
| pipelines/generate-stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lincs-complimentarity] *
# language: python
# name: conda-env-lincs-complimentarity-py
# ---
# ### Calculating Null Distribution
#
#
#
# Null distribution - is generated by getting the median correlation score of randomly combined replicates that do not come from the same compounds.
#
#
#
# ### The goal here:
#
# -- is to compute the **p-value** for each compound per dose by evaluating the probability of random combinations of replicates (from different compounds) having greater median correlation score than replicates that come from the same compound.
#
#
#
#
# - In our case, we generated 1000 median correlation scores from randomly combined replicates as the **null distribution** for each no_of_replicates/replicate class per DOSE i.e. for a no_of_replicates class for every DOSE (1-6) - we have 1000 median scores from randomly combined replicates of different compounds.
#
#
#
#
#
# **no_of_replicate** is the number of replicates in a specific compound and **no_of_replicate class** is a specific group of compounds that have the same amount of replicates e.g all compounds with 5 replicates in them are in the same no_of_replicates class.
# +
import os
import pathlib
import pandas as pd
import numpy as np
from collections import defaultdict
from pycytominer import feature_select
from statistics import median
import random
from scipy import stats
import pickle
import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE(review): np.warnings was removed in numpy >= 1.25; on modern numpy
# this needs warnings.filterwarnings(...) instead — confirm pinned version
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
# -

# Seed numpy's global RNG for reproducibility
np.random.seed(42)

# +
# Load common compounds (compounds measured in both assays)
common_file = pathlib.Path(
    "..", "..", "..", "6.paper_figures", "data", "significant_compounds_by_threshold_both_assays.tsv.gz"
)
common_df = pd.read_csv(common_file, sep="\t")

common_compounds = common_df.compound.unique()
print(len(common_compounds))
# -
# ### - Load in Level 4 Datasets generated from `calculate_median_scores_notebook`

cp_level4_path = "cellpainting_lvl4_cpd_replicate_datasets"

# +
# Level-4 profiles: one row per compound replicate
df_level4 = pd.read_csv(
    os.path.join(cp_level4_path, 'cp_level4_cpd_replicates.csv.gz'),
    compression='gzip',low_memory = False
)
print(df_level4.shape)
df_level4.head()
# +
# Per-compound median replicate-correlation scores, indexed by compound name
df_cpd_med_scores = pd.read_csv(os.path.join(cp_level4_path, 'cpd_replicate_median_scores.csv'))
df_cpd_med_scores = df_cpd_med_scores.set_index('cpd').rename_axis(None, axis=0).copy()

# Subset to common compound measurements
df_cpd_med_scores = df_cpd_med_scores.loc[df_cpd_med_scores.index.isin(common_compounds), :]

print(df_cpd_med_scores.shape)
df_cpd_med_scores.head()
# -
def get_cpds_replicates(df, df_lvl4):
    """
    Collect the replicate names belonging to every compound at each dose.

    Returns a tuple of:
      * one list per dose holding every replicate name observed at that dose
      * a dict mapping each compound to a list of per-dose replicate-name lists
    """
    # [1:7] drops the first (control) dose so only doses 1-6 remain
    doses = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]

    all_reps_per_dose = []
    per_cpd = {}
    for dose in doses:
        dose_df = df_lvl4[df_lvl4['Metadata_dose_recode'] == dose].copy()
        names_at_dose = []
        for cpd in df.index:
            names = dose_df.loc[dose_df['pert_iname'] == cpd, 'replicate_name'].values.tolist()
            names_at_dose.extend(names)
            per_cpd.setdefault(cpd, []).append(names)
        all_reps_per_dose.append(names_at_dose)
    return all_reps_per_dose, per_cpd
# Collect replicate names per compound and per dose (doses 1-6)
replicates_in_all, cpds_replicates = get_cpds_replicates(df_cpd_med_scores, df_level4)
def get_replicates_classes_per_dose(df, df_lvl4, cpds_replicates):
    """
    Group replicate ids by no_of_replicates class for every dose (1-6).

    Side effect: adds a 'replicate_id' column to `df` holding each compound's
    per-dose replicate-name lists.

    Returns a dict keyed by no_of_replicates class; each value is a list with
    one entry per dose containing all replicate ids of that class at that dose.
    """
    df['replicate_id'] = list(cpds_replicates.values())
    # [1:7] drops the first (control) dose so only doses 1-6 remain
    doses = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]

    class_dict = {}
    for dose in doses:
        for size in df['no_of_replicates'].unique():
            # all per-dose replicate-id lists for compounds of this class size
            ids_for_size = df.loc[df['no_of_replicates'] == size, 'replicate_id'].values.tolist()
            pooled = []
            for per_dose_lists in ids_for_size:
                pooled += per_dose_lists[dose - 1]
            class_dict.setdefault(size, []).append(pooled)
    return class_dict
# Group replicate ids by replicate-class size, per dose
cpd_replicate_class_dict = get_replicates_classes_per_dose(df_cpd_med_scores, df_level4, cpds_replicates)

cpd_replicate_class_dict.keys()
def check_similar_replicates(replicates, dose, cpd_dict):
    """Return True if any two of the given replicates belong to the same compound at this dose."""
    for idx, first in enumerate(replicates):
        for second in replicates[idx + 1:]:
            for per_dose_lists in cpd_dict.values():
                members = per_dose_lists[dose - 1]
                if first in members and second in members:
                    return True
    return False
def get_random_replicates(all_replicates, no_of_replicates, dose, replicates_ids, cpd_replicate_dict):
    """
    Draw `no_of_replicates` random replicate names from `all_replicates`,
    re-sampling until the draw is acceptable.

    A draw is rejected only when it BOTH overlaps `replicates_ids` AND
    contains two replicates of the same compound (behavior preserved from
    the original).

    NOTE(review): the original docstring said "not of the same compounds OR
    found in the current cpd's size list", which suggests the intended
    condition may be `or` rather than `and` — confirm before changing, as it
    would alter the published null distribution.
    """
    while True:
        candidates = random.sample(all_replicates, no_of_replicates)
        # `and` short-circuits; the original used bitwise `&`, which always
        # evaluated the costlier same-compound check even when the first
        # test was already False (same result, wasted work)
        overlaps = any(rep in replicates_ids for rep in candidates)
        if not (overlaps and check_similar_replicates(candidates, dose, cpd_replicate_dict)):
            return candidates
def get_null_distribution_replicates(
    cpd_replicate_class_dict,
    dose_list,
    replicates_lists,
    cpd_replicate_dict,
    rand_num = 1000
):
    """
    Build the null distribution of replicate combinations.

    For every no_of_replicates class and every dose, draw `rand_num` distinct
    random replicate combinations (via get_random_replicates).  Returns a
    dict keyed by replicate class whose values hold one list of combinations
    per dose.
    """
    random.seed(1903)
    null_reps = {}
    for dose in dose_list:
        for rep_class, per_dose_ids in cpd_replicate_class_dict.items():
            ids_this_dose = per_dose_ids[dose - 1]
            combos = []
            # keep drawing until rand_num distinct combinations are collected
            while len(combos) < rand_num:
                candidate = get_random_replicates(
                    replicates_lists[dose - 1],
                    rep_class,
                    dose,
                    ids_this_dose,
                    cpd_replicate_dict,
                )
                if candidate not in combos:
                    combos.append(candidate)
            null_reps.setdefault(rep_class, []).append(combos)
    return null_reps
# Number of distinct compounds
len(cpds_replicates.keys())

# +
# Doses 1-6 ([1:7] drops the recoded control dose)
dose_list = list(set(df_level4['Metadata_dose_recode'].unique().tolist()))[1:7]

null_distribution_replicates = get_null_distribution_replicates(
    cpd_replicate_class_dict, dose_list, replicates_in_all, cpds_replicates
)
# -
def save_to_pickle(null_distribution, path, file_name):
    """Serialize the null-distribution replicate ids to a pickle file under `path`."""
    if not os.path.exists(path):
        os.mkdir(path)
    out_file = os.path.join(path, file_name)
    with open(out_file, 'wb') as handle:
        pickle.dump(null_distribution, handle, protocol=pickle.HIGHEST_PROTOCOL)
#save the null_distribution_moa to pickle
save_to_pickle(null_distribution_replicates, cp_level4_path, 'null_distribution.pickle')

##load the null_distribution_moa from pickle
with open(os.path.join(cp_level4_path, 'null_distribution.pickle'), 'rb') as handle:
    null_distribution_replicates = pickle.load(handle)
def assert_null_distribution(null_distribution_reps, dose_list):
    """
    Verify that the random replicate combinations for every no_of_replicates
    class (per dose) are all distinct.

    Returns a dict of classes that contain duplicates — one `[combo]` entry
    per duplicated occurrence — empty when everything is distinct.
    """
    duplicates = {}
    for dose in dose_list:
        for rep_class, per_dose in null_distribution_reps.items():
            combos = per_dose[dose - 1]
            for combo in combos:
                # a combination appearing more than once is a duplicate
                if combos.count(combo) > 1:
                    duplicates.setdefault(rep_class, []).append([combo])
    return duplicates
# Sanity check: the random combinations should contain no duplicates
duplicate_replicates = assert_null_distribution(null_distribution_replicates, dose_list)

duplicate_replicates ##no duplicates
def calc_null_dist_median_scores(df, dose_num, replicate_lists):
    """
    For each random replicate combination, compute the median pairwise
    Pearson correlation of its profiles at the given dose.

    Returns a list of median scores, one per combination in `replicate_lists`.
    """
    meta_cols = ['Metadata_broad_sample', 'Metadata_pert_id', 'Metadata_dose_recode',
                 'Metadata_Plate', 'Metadata_Well', 'Metadata_broad_id', 'Metadata_moa',
                 'broad_id', 'pert_iname', 'moa']

    # keep only this dose, index rows by replicate name, drop metadata columns
    dose_df = df[df['Metadata_dose_recode'] == dose_num].copy()
    dose_df = dose_df.set_index('replicate_name').rename_axis(None, axis=0)
    dose_df.drop(meta_cols, axis=1, inplace=True)

    medians = []
    for combo in replicate_lists:
        profiles = dose_df.loc[combo].copy()
        corr = profiles.astype('float64').T.corr(method='pearson').values
        # median of the upper triangle (pairwise correlations, no diagonal)
        upper = corr[np.triu_indices(len(corr), k=1)]
        medians.append(median(list(upper)))
    return medians
def get_null_dist_median_scores(null_distribution_cpds, dose_list, df):
    """
    Compute the median correlation scores for every random combination in the
    null distribution, for each no_of_replicates class across all doses.

    Returns a dict keyed by replicate class; each value holds one list of
    median scores per dose.
    """
    medians = {}
    for rep_class, per_dose_combos in null_distribution_cpds.items():
        medians[rep_class] = [
            calc_null_dist_median_scores(df, dose, per_dose_combos[dose - 1])
            for dose in dose_list
        ]
    return medians
# Median correlation scores for every null combination, per class and dose
null_distribution_medians = get_null_dist_median_scores(null_distribution_replicates, dose_list, df_level4)
def compute_dose_median_scores(null_dist_medians, dose_list):
    """
    Re-group null median scores by dose.

    Returns a dict keyed by dose number whose values pool the null-median
    scores of every no_of_replicates class at that dose.
    """
    per_dose = {}
    for dose in dose_list:
        pooled = []
        # BUG FIX: the original iterated the module-level global
        # `null_distribution_medians` instead of the `null_dist_medians`
        # parameter; it only worked because the call site passed that global
        for rep_class in null_dist_medians:
            pooled += null_dist_medians[rep_class][dose - 1]
        per_dose[dose] = pooled
    return per_dose
# Pool the null medians across replicate classes, keyed by dose
dose_null_medians = compute_dose_median_scores(null_distribution_medians, dose_list)

#save the null_distribution_medians_per_dose to pickle
save_to_pickle(dose_null_medians, cp_level4_path, 'null_dist_medians_per_dose.pickle')

# **A P value can be computed nonparametrically by evaluating the probability of random replicates of different compounds having median similarity value greater than replicates of the same compounds.**
def get_p_value(median_scores_list, df, dose_name, cpd_name):
    """
    Non-parametric p-value for one compound at one dose: the fraction of
    null median scores that are >= the compound's observed median score.

    Parameters
    ----------
    median_scores_list : list of float
        Null-distribution median scores for this replicate class and dose.
    df : pd.DataFrame
        Median scores indexed by compound, with one column per dose.
    dose_name, cpd_name : str
        Column and row selecting the observed score.
    """
    actual_med = df.loc[cpd_name, dose_name]
    # ROBUSTNESS FIX: the original compared `list >= scalar`, which only
    # works when df.loc returns a numpy scalar (reflected broadcasting);
    # np.asarray makes the elementwise comparison explicit and safe for
    # plain Python floats too
    null_scores = np.asarray(median_scores_list)
    p_value = np.sum(null_scores >= actual_med) / len(median_scores_list)
    return p_value
def get_moa_p_vals(null_dist_median, dose_list, df_med_values):
    """
    Compute each compound's p-value at every dose.

    Returns a dict (sorted by compound name) mapping compound -> list of
    p-values, one per dose in `dose_list`.
    """
    p_vals = {}
    for rep_class in null_dist_median:
        class_df = df_med_values[df_med_values['no_of_replicates'] == rep_class]
        for cpd in class_df.index:
            p_vals[cpd] = [
                get_p_value(null_dist_median[rep_class][dose - 1], class_df, 'dose_' + str(dose), cpd)
                for dose in dose_list
            ]
    # return entries ordered alphabetically by compound name
    return dict(sorted(p_vals.items(), key=lambda item: item[0]))
# p-values per compound per dose, assembled into a dataframe
null_p_vals = get_moa_p_vals(null_distribution_medians, dose_list, df_cpd_med_scores)

df_null_p_vals = pd.DataFrame.from_dict(null_p_vals, orient='index', columns = ['dose_' + str(x) for x in dose_list])
df_null_p_vals['no_of_replicates'] = df_cpd_med_scores['no_of_replicates']

df_null_p_vals.head(10)
def save_to_csv(df, path, file_name):
    """Write `df` as CSV (without the index) into `path`, creating the directory if missing."""
    if not os.path.exists(path):
        os.mkdir(path)
    target = os.path.join(path, file_name)
    df.to_csv(target, index = False)
# Persist the per-compound p-values
save_to_csv(df_null_p_vals.reset_index().rename({'index':'cpd'}, axis = 1), cp_level4_path,
            'cpd_replicate_p_values.csv')

# +
# NOTE(review): this file is written tab-separated below despite the .csv suffix
cpd_summary_file = pathlib.Path(cp_level4_path, 'cpd_replicate_p_values_melted.csv')

# Map recoded dose numbers to real concentrations
dose_recode_info = {
    'dose_1': '0.04 uM', 'dose_2':'0.12 uM', 'dose_3':'0.37 uM',
    'dose_4': '1.11 uM', 'dose_5':'3.33 uM', 'dose_6':'10 uM'
}

# Melt the p values
cpd_score_summary_pval_df = (
    df_null_p_vals
    .reset_index()
    .rename(columns={"index": "compound"})
    .melt(
        id_vars=["compound", "no_of_replicates"],
        value_vars=["dose_1", "dose_2", "dose_3", "dose_4", "dose_5", "dose_6"],
        var_name="dose",
        value_name="p_value"
    )
)

cpd_score_summary_pval_df.dose = cpd_score_summary_pval_df.dose.replace(dose_recode_info)

# Melt the median matching scores
cpd_score_summary_df = (
    df_cpd_med_scores
    .reset_index()
    .rename(columns={"index": "compound"})
    .melt(
        id_vars=["compound", "no_of_replicates"],
        value_vars=["dose_1", "dose_2", "dose_3", "dose_4", "dose_5", "dose_6"],
        var_name="dose",
        value_name="matching_score"
    )
)

cpd_score_summary_df.dose = cpd_score_summary_df.dose.replace(dose_recode_info)

# Join p-values with matching scores and tag dataset provenance
summary_df = (
    cpd_score_summary_pval_df
    .merge(cpd_score_summary_df, on=["compound", "no_of_replicates", "dose"], how="inner")
    .assign(
        assay="Cell Painting",
        normalization="spherized",
        category="all_data"
    )
)

summary_df.to_csv(cpd_summary_file, sep="\t", index=False)

print(summary_df.shape)
summary_df.head()
| 1.Data-exploration/Profiles_level4/cell_painting/3.cellpainting_calculate_null_p_values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#import quad library
from scipy.integrate import quad
# +
#define function for integration of x
def integrateFunction(value):
    """Identity integrand f(x) = x, used with scipy.integrate.quad over [0, 1]."""
    return value
# +
#perform or pass the quad integration for function of x limit 0 to 1
# Returns (integral_value, abs_error_estimate); here 0.5 for f(x)=x on [0, 1]
quad(integrateFunction, 0,1)
# +
#lets create another example, define function for ax +b
def integrateFn(x, a, b):
    """Linear integrand f(x) = a*x + b for quad integration with extra args."""
    return a * x + b
# -
#declare value of a and b (coefficients of the linear integrand a*x + b)
a = 3
b = 2
# +
#perform quad integration and pass functions and arguments
# args=(a, b) supplies the extra parameters of integrateFn after x
quad(integrateFn, 0,1,args=(a,b))
# -
# Multiple Integration Example
#import integrate sub-package
import scipy.integrate as integrate
#define function for x + y
def f(x, y):
    """Integrand f(x, y) = x + y for the double integral."""
    total = x + y
    return total
# NOTE(review): scipy's dblquad integrates func(y, x) with x in [0, 1] and
# y in [gfun(x), hfun(x)] = [0, 2]; harmless here since f is symmetric — confirm
integrate.dblquad(f,0,1,lambda x:0, lambda x:2) #perform multiple integration using built in function
| SciPy_Integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Is Seattle Really Seeing an Uptick in Cycling?
# *This notebook originally appeared as a [post](http://jakevdp.github.io/blog/2014/06/10/is-seattle-really-seeing-an-uptick-in-cycling/) on the blog [Pythonic Perambulations](http://jakevdp.github.io). The content is BSD licensed.*
# *Update: as I was putting the finishing touches on this notebook, I noticed [this post](http://www.seattlebikeblog.com/2014/06/09/a-statistical-analysis-of-biking-on-the-fremont-bridge-part-1-overview/), the first in a series on Seattle Bike Blog which analyzes much of the same data used here. Apparently great minds think alike! (incidentally, to prove that I'm not just cribbing that content, check the [github commit log](http://github.com/jakevdp/SeattleBike/commits/master): I wrote the bulk of this post several days before the SBB blog series was posted. Version control priority FTW!)*
#
# *Update #2: I added error bars to the estimates in the final section (should have done that from the beginning, I know...)*
# <!-- PELICAN_BEGIN_SUMMARY -->
#
# Cycling in Seattle seems to be taking off. This can be seen qualitatively in the increased visibility of advocacy groups like [Seattle Neighborhood Greenways](http://seattlegreenways.org/) and [Cascade Bicycle Club](http://www.cascade.org/), the excellent reporting of sites like the [Seattle Bike Blog](http://www.seattlebikeblog.com/), and the investment by the city in high-profile traffic safety projects such as [Protected Bike Lanes](http://www.seattle.gov/transportation/PBL.htm), [Road diets/Rechannelizations](http://www.seattle.gov/transportation/pedestrian_masterplan/pedestrian_toolbox/tools_deua_diets.htm) and the [Seattle Bicycle Master Plan](http://www.seattle.gov/transportation/bikemaster.htm).
#
# But, qualitative arguments aside, there is also an increasing array of quantitative data available, primarily from the [Bicycle counters](http://www.seattle.gov/transportation/bikecounter.htm) installed at key locations around the city. The first was the [Fremont Bridge Bicycle Counter](http://www.seattle.gov/transportation/bikecounter_fremont.htm), installed in October 2012, which gives daily updates on the number of bicycles crossing the bridge: currently upwards of 5000-6000 per day during sunny commute days.
#
# Bicycle advocates have been [pointing out](http://www.seattlebikeblog.com/2014/05/14/fremont-bridge-smashes-bike-count-record-for-real-this-time-bike-use-rises-all-over-town/) the upward trend of the counter, and I must admit I've been excited as anyone else to see this surge in popularity of cycling (Most days, I bicycle 22 miles round trip, crossing both the Spokane St. and Fremont bridge each way).
#
# But anyone who looks closely at the data must admit: there is a large weekly and monthly swing in the bicycle counts, and people seem most willing to ride on dry, sunny summer days. Given the warm streak we've had in Seattle this spring, I wondered: **are we really seeing an increase in cycling, or can it just be attributed to good weather?**
#
# <!-- PELICAN_END_SUMMARY -->
#
# Here I've set-out to try and answer this question. Along the way, we'll try to deduce just how much the weather conditions affect Seattleites' transportation choices.
# ## A Quick Aside
# If anyone is landing on this page via the normal bicycle advocacy channels, I should warn you that this won't look like a typical Seattle-bike-advocate blog post. I currently work as a data scientist at the [University of Washington eScience Institute](http://data.washington.edu/), where I'm incredibly fortunate to have the flexibility to spend a couple hours each week on side-projects like this. Most of my [blog posts](http://jakevdp.github.io) are pretty technical in nature: I tend to focus on statistical methods and visualization using the [Python](http://www.python.org) programming language.
#
# This post is composed in an [IPython notebook](http://ipython.org/notebook.html), which is a fully executable document which combines text, data, code, and visualizations all in one place. The nice thing is that anyone with a bit of desire and initiative could install the (free) IPython software on their computer and open this document, re-running and checking my results, and perhaps modifying my assumptions to see what happens. In a way, this post is as much about **how** to work with data as it is about **what** we learn from the data.
#
# In other words, this is an **entirely reproducible analysis**. Every piece of data and software used here is open and freely available to anyone who wants to use it. It's an example of the direction I think data journalism should go as it starts to more and more emulate data-driven scientific research.
#
# That said, there's a lot of technical stuff below. If you're not familiar with Python or other data analysis frameworks, don't be afraid to skip over the code and look at the plots, which I'll do my best to explain.
# ## The Data
# This post will use two datasets, which you can easily access with an internet connection. You can find the exact data I used in the [GitHub repository](https://github.com/jakevdp/SeattleBike), or access it from the original sources below.
#
# First, I'll be using the Fremont Bridge Hourly Bicycle Counts. To download this data, go to the [fremont bridge](https://data.seattle.gov/Transportation/Fremont-Bridge-Hourly-Bicycle-Counts-by-Month-Octo/65db-xm6k) page, and do the following (I accessed this on June 6th, 2014):
#
# - click "export"
# - click "download as CSV"
#
# Second, I'll be using weather data available at the [National Climatic Data Center](http://www.ncdc.noaa.gov/). We'll use weather data from the SeaTac Airport weather station. To get this data, go to the [Climate Data Search](http://www.ncdc.noaa.gov/cdo-web/search?datasetid=GHCND) page and do the following (I accessed this on June 6th, 2014):
#
# - Choose "Daily Summaries"
# - Choose 2012/10/1 to the present date
# - Search for "Station", and type in "USW00024233" (ID for SeaTac Airport weather station)
# - Click the icon on the map and "Add to Cart"
# - go to "Shopping Cart"
#
# - make sure date range is 2012/10/1 to 2014/5/14
# - choose Custom GHCN-Daily CSV
# - click "Continue"
#
# - next page: click "select all"
# - click "continue"
# - enter email address and submit order
#
# When the data set is ready, you will get an email with a download link. It was about an hour wait when I did it.
# ## Examining the Fremont Bridge Data
# The first thing we're going to do is load and examine the data from the Fremont bike counter. We'll use the [pandas](http://pandas.pydata.org/) package, a free and open source set of data analysis tools for the Python language.
# +
# some necessary imports
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# +
# Load the data file, and create a column with total north/south traffic
hourly = pd.read_csv("FremontHourly.csv", index_col='Date', parse_dates=True)
hourly.columns = ['northbound', 'southbound']
hourly['total'] = hourly['northbound'] + hourly['southbound']
# +
# Resample the data into daily and weekly totals
# NOTE(review): the two-argument resample('d', 'sum') form is the old pandas
# API; modern pandas needs .resample('D').sum() — confirm pinned version
daily = hourly.resample('d', 'sum')
weekly = daily.resample('w', 'sum')
# -
# Now let's take a peek at our data and see what it looks like:
weekly[['northbound', 'southbound', 'total']].plot()
plt.ylabel('Weekly riders');
# The red line shows the total number of weekly crossings, which is the sum of the northbound and southbound crossings.
#
# At first glance, April and May 2014 include some spikes in the data: over 32,000 riders per week crossed the bridge one week in May! This trend might be a bit clearer if we use a **moving window average**: basically, for each day we'll take the average of the 30-day period around it:
# 30-day moving average of daily totals
# NOTE(review): pd.stats.moments.rolling_mean was removed long ago; modern
# spelling is daily['total'].rolling(30).mean() — confirm pinned version
pd.stats.moments.rolling_mean(daily['total'], 30).plot();
# This is the increased ridership that folks have been talking about. There is some seasonal variation, but the trend seems clear: 2014 has seen a lot of cyclists crossing the bridge.
#
# But it is clear that there is still some seasonal variation. What we're going to try to do below is to **model this variation** based on our intuition about what factors might come into play in people's decision about whether to ride.
#
# For simplicity, I'm going to stick with a **linear model** here. It would be possible to go deeper and use a more sophisticated model (I'd eventually like to try Random Forests), but a linear model should give us a good approximation of what's happening.
# ## Step 1: Accounting for hours of daylight
# The largest component of the variation we see is a seasonal swing. I'm going to hypothesize that that swing is at least partially due to the changing daylight hours. We'll compute the number of hours of daylight and use this to de-trend the data.
#
# Fortunately, my PhD is in Astronomy, so I once-upon-a-time learned how to compute this:
# +
# Define a function which returns the hours of daylight
# given the day of the year, from 0 to 365
def hours_of_daylight(date, axis=23.44, latitude=47.61):
    """Compute the hours of daylight for the given date.

    Parameters
    ----------
    date : datetime-like
        The day to evaluate.
    axis : float
        Axial tilt of the Earth in degrees (default 23.44).
    latitude : float
        Latitude in degrees (default 47.61, Seattle).

    Returns
    -------
    float
        Hours of daylight, clamped to [0, 24].
    """
    # days elapsed since the 2000-12-21 winter solstice
    # FIX: pd.Timestamp replaces pd.datetime, an alias removed in pandas 2.0
    diff = date - pd.Timestamp(2000, 12, 21)
    day = diff.total_seconds() / 24. / 3600
    day %= 365.25
    m = 1. - np.tan(np.radians(latitude)) * np.tan(np.radians(axis) * np.cos(day * np.pi / 182.625))
    m = max(0, min(m, 2))  # clamp for polar day / polar night
    return 24. * np.degrees(np.arccos(1 - m)) / 180.
# add this to our weekly data
# NOTE(review): written for Python 2, where map() returns a list; on
# Python 3 this assigns a map object — confirm, else wrap in list(...)
weekly['daylight'] = map(hours_of_daylight, weekly.index)
daily['daylight'] = map(hours_of_daylight, daily.index)
# +
# Plot the daylight curve
weekly['daylight'].plot()
plt.ylabel('hours of daylight (Seattle)');
# -
# This looks reasonable: just over 8 hours of daylight in December, and just under 16 hours in June.
#
# To get a feel for the trend, let's plot the daylight hours versus the weekly bicycle traffic:
# Daylight hours vs. weekly ridership: check for a visual correlation
plt.scatter(weekly['daylight'], weekly['total'])
plt.xlabel('daylight hours')
plt.ylabel('weekly bicycle traffic');
# We see a clear trend, though it's also apparent from the wide vertical scatter that other effects are at play.
#
# Let's apply a linear fit to this data. Basically, we'll draw a best-fit line to the points using some convenient tools in the [scikit-learn](http://scikit-learn.org) package, which I've been active in developing:
# +
from sklearn.linear_model import LinearRegression

# A plain DataFrame is a valid design matrix; the old ``.to_dense()`` call
# was removed from pandas.
X = weekly[['daylight']]
y = weekly['total']

# Fit a simple linear model: weekly riders as a function of daylight hours.
clf = LinearRegression(fit_intercept=True).fit(X, y)
weekly['daylight_trend'] = clf.predict(X)
# Remove the daylight trend but keep the overall mean level.
weekly['daylight_corrected_total'] = weekly['total'] - weekly['daylight_trend'] + weekly['daylight_trend'].mean()

# Evaluate the fitted line on a smooth grid of daylight hours for plotting.
xfit = np.linspace(7, 17)
yfit = clf.predict(xfit[:, None])

plt.scatter(weekly['daylight'], weekly['total'])
plt.plot(xfit, yfit, '-k')
plt.title("Bicycle traffic through the year")
plt.xlabel('daylight hours')
plt.ylabel('weekly bicycle traffic');
# -
# Once such a linear model is fit, we can look at the model coefficients to see, on average, how the change in one variable affects the change in another:
print(clf.coef_[0])
# This tells us that according to this model, each extra hour of daylight leads to about 2000 more riders per week across the bridge! Of course, in Seattle the length of the day also correlates highly with temperature and precipitation; we'll try to untangle those effects later.
# Now that we have fit this trend, let's subtract it off and replace it by the mean:
# ``.as_matrix()`` was removed from pandas; ``.to_numpy()`` is the replacement.
trend = clf.predict(weekly[['daylight']].to_numpy())
# Subtract the trend and add back its mean so the vertical scale is preserved.
plt.scatter(weekly['daylight'], weekly['total'] - trend + np.mean(trend))
plt.plot(xfit, np.mean(trend) + 0 * yfit, '-k')
plt.title("weekly traffic (detrended)")
plt.xlabel('daylight hours')
plt.ylabel('adjusted weekly count');
# This is what I mean by "de-trended" data. We've basically removed the component of the data which correlates with the number of hours in a day, so that what is left is in some way agnostic to this quantity. The "adjusted weekly count" plotted here can be thought of as the number of cyclists we'd expect to see if the hours of daylight were not a factor.
#
# Let's visualize this another way. Instead of plotting the number of riders vs daylight hours, we'll again plot the number of riders vs the day of the year, along with the trend:
weekly[['total', 'daylight_trend']].plot()
plt.ylabel("total weekly riders");
# We can similarly view the adjusted total number of riders over time by subtracting this green line from the blue line:
weekly['daylight_corrected_total'].plot()
rms = np.std(weekly['daylight_corrected_total'])
plt.ylabel("adjusted total weekly riders")
print("root-mean-square about trend: {0:.0f} riders".format(rms))
# With the data de-trended, we get a better idea of how bicycling in Seattle has changed over time, corrected for the seasonal variation.
# ## Accounting for Day of the Week
# Above we've been looking at weekly data. This is because daily data shows a clear swing as a function of the day of the week, which we'll show here.
days = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']
daily['dayofweek'] = daily['total'].index.dayofweek
# +
grouped = daily.groupby('dayofweek')['total'].mean()
grouped.index = days
grouped.plot()
plt.title("Average Traffic By Day")
plt.ylabel("Average Daily Crossings");
# -
# As you might expect in a city of bicycle commuters, there is roughly 2.5 times the amount of traffic on weekdays as there is on weekends. Bicycles are not just for entertainment! In Seattle, at least, they are a real means of commuting for thousands of people per day, and the data show this clearly.
#
# Let's de-trend the daily bike counts based on the daily totals. We'll add a variable for each day of the week, and use each of these within the trend (this is an example of what's sometimes known as "one-hot" encoding).
# +
# One indicator column per weekday: 1.0 on that day, 0.0 otherwise
# (a "one-hot" encoding of the day of the week).
for day_index, day_name in enumerate(days):
    daily[day_name] = (daily.index.dayofweek == day_index).astype(float)

# De-trend on the weekday indicators and the daylight hours together.
feature_columns = days + ['daylight']
X = daily[feature_columns]
y = daily['total']
clf = LinearRegression().fit(X, y)
daily['dayofweek_trend'] = clf.predict(X)
daily[['total', 'dayofweek_trend']].plot();
# -
# This shows all the daily bicycle counts (in blue) along with the best-fit trend based on the day of the week and the number of daylight hours per day. It's more clear if we plot the de-trended data:
daily['dayofweek_corrected'] = (daily['total'] - daily['dayofweek_trend'] + daily['dayofweek_trend'].mean())
print("rms = {0:.0f}".format(np.std(daily['dayofweek_corrected'])))
daily['dayofweek_corrected'].plot();
# Now we're getting somewhere! What we're seeing here is the number of bicycle crossings per day, corrected for the daily and annual trends. In other words, this is what we might expect the data to look like if the day of the week and the hours of light per day did not matter.
#
# Let's continue on this line of reasoning, and add some more information to the model.
# ## Accounting for Temperature and Precipitation
# Next we'll account for the NCDC climate data that was described above. The data includes the daily maximum and minimum temperatures, as well as the amount of recorded rainfall at SeaTac airport, about 15 miles to the south. Let's take a look at this data:
# +
# Read the weather file
weather = pd.read_csv('SeaTacWeather.csv', index_col='DATE', parse_dates=True, usecols=[2, 3, 6, 7])

# temperatures are in 1/10 deg C; convert to F (F = C * 9/5 + 32, C = value / 10)
weather['TMIN'] = 0.18 * weather['TMIN'] + 32
weather['TMAX'] = 0.18 * weather['TMAX'] + 32

# precip is in 1/10 mm; convert to inches (1 inch = 254 tenths of a mm)
weather['PRCP'] /= 254

# The ``resample(rule, how)`` form was removed from pandas; chain the
# aggregation method onto the resampler instead.
weather['TMIN'].resample('W').min().plot()
weather['TMAX'].resample('W').max().plot()
plt.ylabel('Weekly Temperature Extremes (F)');
plt.title("Temperature Extremes in Seattle");
# -
# This shows the maximum and minimum temperatures in Seattle. They vary in ways you might expect. For example, you can easily see the late 2013 freeze that killed my artichoke plants last year.
#
# Now let's look at precipitation:
# The ``resample(rule, how)`` form was removed from pandas; chain ``.sum()``.
weather['PRCP'].resample('W').sum().plot();
plt.ylabel('Weekly precipitation (in)')
plt.title("Precipitation in Seattle");
# The precipitation is also as you might expect. Almost none in the summer months, and up to about 4 inches per week in the rainiest parts of the year.
#
# What we do next will start to look very familiar: we'll add the climate information to our original daily dataset, and then use a linear model to de-trend the data with this new information.
# join the weather data to our daily data
daily = daily.join(weather)
# +
columns = days + ['daylight', 'TMIN', 'TMAX', 'PRCP']
X = daily[columns]
y = daily['total']
clf = LinearRegression().fit(X, y)
daily['overall_trend'] = clf.predict(X)
# Plot the overall trend
daily[['total', 'overall_trend']].plot()
plt.ylabel('Daily bicycle traffic');
# -
# This shows the data along with the overall trend, accounting for precipitation and temperature extremes. Let's de-trend the data with this model and see what's left over:
daily['overall_corrected'] = daily['total'] - daily['overall_trend'] + daily['overall_trend'].mean()
print("rms = {0:.0f}".format(np.std(daily['overall_corrected'])))
daily['overall_corrected'].plot()
plt.ylabel('corrected daily bicycle traffic');
# From the reduced RMS, we can see that our model does a fairly good job capturing the trends. But there's still some daily variation that it cannot account for. We could think about what this might be, and try to capture it with more inputs to the model. For example, a person's choice to ride might be affected not just by the weather today, but by the weather yesterday. We also could account for freezing temperatures, which may manifest as a nonlinear effect.
#
# In order to better see the remaining trend, let's plot a month-long moving-window average over the corrected data:
# ``pd.stats.moments.rolling_mean`` was removed from pandas;
# the ``.rolling()`` accessor is the modern equivalent.
daily['overall_corrected'].rolling(30).mean().plot()
plt.ylabel('Corrected daily bicycle traffic')
plt.title('1-month Moving Window Average');
# This is similar to the moving window we used above, except we are using de-trended data. What we see is that even after correcting for the length of day, the temperature, the precipitation, and the weekly trends, we still see a huge spike of riders in May 2014.
#
# I want to remark on one interesting feature: the detrended data hits a low-point in July 2013. This does not mean there were fewer riders in that month, but that there were **fewer riders than the model would expect** given the weather, hours of daylight, and day of the week. This points to the existence of other factors that we've not taken into account. Perhaps the average weather in a given time period affects people's decisions: that is, a warm day in January brings everyone out, while a warm day in July is so unexceptional that people leave their bike in the garage. It would be interesting to do a more detailed analysis and try to eke-out this type of information.
#
# Nevertheless, if you look at this plot and squint, you can also see what looks like a steady upward trend from left to right. Let's see if we can quantify this.
# ## Accounting for a Steady Increase or Decrease of Riders
# As a final model, we'll add-in the day index to allow our model to account for an overall increase or decrease in the number of riders with time:
# +
daily['daycount'] = np.arange(len(daily))
columns = days + ['daycount', 'daylight', 'TMIN', 'TMAX', 'PRCP']
X = daily[columns]
y = daily['total']
final_model = LinearRegression().fit(X, y)
daily['final_trend'] = final_model.predict(X)
daily['final_corrected'] = daily['total'] - daily['final_trend'] + daily['final_trend'].mean()
# -
daily['total'].plot()
daily['final_trend'].plot();
# And here is the final de-trended data:
daily['final_corrected'].plot()
plt.ylabel('corrected ridership')
print("rms = {0:.0f}".format(np.std(daily['final_corrected'])))
# ## What Can The Final Model Tell Us?
# Now that we have this model, we can use it to answer some questions about bike ridership in Seattle.
#
# First we'll compute the error covariance, which contains the error bars on each of the parameters:
# Residual variance of the fit (mean squared error about the trend).
residuals = y - daily['final_trend']
vy = np.sum(residuals ** 2) / len(y)
# Augment the design matrix with a column of ones for the intercept term.
X2 = np.hstack([X, np.ones((X.shape[0], 1))])
# Coefficient covariance matrix; its diagonal holds the parameter variances.
C = vy * np.linalg.inv(np.dot(X2.T, X2))
var = C.diagonal()
# Now we can answer some questions...
# ### How Does Rain Affect Ridership?
# Position of the precipitation feature in the design-matrix column list.
ind = columns.index('PRCP')
# Fitted coefficient: change in daily crossings per inch of rain.
slope = final_model.coef_[ind]
# 1-sigma uncertainty from the diagonal of the coefficient covariance matrix.
error = np.sqrt(var[ind])
# The slope is negative (rain reduces ridership), so negate it for display.
print("{0:.0f} +/- {1:.0f} daily crossings lost per inch of rain".format(-slope, error))
# Our model shows that once all other factors are accounted for, **every inch of rain translates, on average, to about 800 cyclists staying home**. Not a surprising result, but it's interesting to see it quantified!
# ### How Does Temperature Affect Ridership?
# For temperature, we have to be more careful. The minimum and maximum temperature will be highly correlated, so we should average the trends between the two:
ind1, ind2 = columns.index('TMIN'), columns.index('TMAX')
slope = final_model.coef_[ind1] + final_model.coef_[ind2]
error = np.sqrt(var[ind1] + var[ind2])
print('{0:.0f} +/- {1:.0f} riders per ten degrees Fahrenheit'.format(10 * slope, 10 * error))
# We see that for every increase of ten degrees, we add around 250 crossings on the Fremont bridge!
# ### How Does Daylight Affect Ridership?
ind = columns.index('daylight')
slope = final_model.coef_[ind]
error = np.sqrt(var[ind])
print("{0:.0f} +/- {1:.0f} daily crossings gained per hour of daylight".format(slope, error))
# We see that, once the effects of rain and temperature are removed, each hour of daylight results in about 125 more crossings at the Fremont Bridge. This is fewer than the ~2000/week (~300/day) that we saw above: this is because our first model did not include precipitation and temperature: apparently the weather is far more important than the darkness in affecting ridership!
# ### Is Ridership Increasing?
# Finally, let's try to answer the question we started out with: after correcting for all the above effects, is ridership increasing in Seattle?
ind = columns.index('daycount')
slope = final_model.coef_[ind]
error = np.sqrt(var[ind])
print("{0:.2f} +/- {1:.2f} new riders per day".format(slope, error))
print("{0:.1f} +/- {1:.1f} new riders per week".format(7 * slope, 7 * error))
print("annual change: ({0:.0f} +/- {1:.0f})%".format(100 * 365 * slope / daily['total'].mean(),
100 * 365 * error / daily['total'].mean()))
# By looking at the slope of the daily trend, we see that there are 4.4 new riders per week crossing the Fremont bridge: nearly one new rider per work day! This translates to about **10% growth in bike ridership over the past year, once we correct for all the other effects described above.**
#
# Good news for bicycling in Seattle!
# ## Conclusion
# Is Seattle really seeing an uptick in cycling? The answer appears to be yes: **corrected for seasonal and climate effects, Seattle's Fremont bridge is seeing, on average, about one new bicycle commuter each work day**, and that trend reflects the entire year-and-a-half that the city has been collecting data. Along the way, we saw how rain, temperature, and daylight affect the choices of Seattle's bicycling community.
#
# This has been an exploration of available cycling data using an incredibly simple linear model based on daily climate trends. This just scrapes the surface, and uses an extremely simplistic linear model to find some broad-brush results. It's clear from the ~500 rider RMS that there are still some factors I've left out: perhaps seasonal effects like Bike-to-work month (happening each May) and summer vacation times have some effect. My hunch is that the difference in weather from one day to the next might be more important than the weather isolated on any given day; we've not taken that into account. Most notably, nonlinear effects might play a role (for example, the trend for cold + precipitation might outweigh the trend for cold and the trend for rain considered separately) but nevertheless, I think these are some interesting results despite the shortcuts taken in the analysis.
#
# Thanks for reading!
#
# *This post was written entirely in the IPython notebook. You can
# [download](http://jakevdp.github.io/downloads/notebooks/SeattleCycling.ipynb)
# this notebook, or see a static view
# [here](http://nbviewer.ipython.org/url/jakevdp.github.io/downloads/notebooks/SeattleCycling.ipynb).*
| content/downloads/notebooks/SeattleCycling.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Ruby 2.2.2
# language: ruby
# name: ruby
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#テキスト" data-toc-modified-id="テキスト-1"><span class="toc-item-num">1 </span>テキスト</a></div><div class="lev1 toc-item"><a href="#文字出力" data-toc-modified-id="文字出力-2"><span class="toc-item-num">2 </span>文字出力</a></div><div class="lev1 toc-item"><a href="#数値" data-toc-modified-id="数値-3"><span class="toc-item-num">3 </span>数値</a></div><div class="lev1 toc-item"><a href="#変数" data-toc-modified-id="変数-4"><span class="toc-item-num">4 </span>変数</a></div><div class="lev1 toc-item"><a href="#コメント" data-toc-modified-id="コメント-5"><span class="toc-item-num">5 </span>コメント</a></div><div class="lev1 toc-item"><a href="#制御構造" data-toc-modified-id="制御構造-6"><span class="toc-item-num">6 </span>制御構造</a></div><div class="lev2 toc-item"><a href="#条件判断(if_then_else)" data-toc-modified-id="条件判断(if_then_else)-61"><span class="toc-item-num">6.1 </span>条件判断(if_then_else)</a></div><div class="lev2 toc-item"><a href="#繰り返し(loop)" data-toc-modified-id="繰り返し(loop)-62"><span class="toc-item-num">6.2 </span>繰り返し(loop)</a></div><div class="lev1 toc-item"><a href="#課題" data-toc-modified-id="課題-7"><span class="toc-item-num">7 </span>課題</a></div><div class="lev2 toc-item"><a href="#helloの変形(hello_name.rb)" data-toc-modified-id="helloの変形(hello_name.rb)-71"><span class="toc-item-num">7.1 </span>helloの変形(hello_name.rb)</a></div><div class="lev2 toc-item"><a href="#8回愛してる(hello_8.rb)" data-toc-modified-id="8回愛してる(hello_8.rb)-72"><span class="toc-item-num">7.2 </span>8回愛してる(hello_8.rb)</a></div><div class="lev2 toc-item"><a href="#今日の日付" data-toc-modified-id="今日の日付-73"><span class="toc-item-num">7.3 </span>今日の日付</a></div>
# -
# # テキスト
#
# * [たのしいruby c1](TanoshiiRuby_v5_c1.pdf)
# # 文字出力
#
# * print
# * ダブルコーテーションとシングル
# * puts, p
#
# print_hello.rb
print("Hello, Ruby.\n")
print("Hello, \nRuby\n!\n")
print("Hello, \"Ruby.\"\n")
print('Hello, \"Ruby.\"\n')
# puts_and_p.rb
puts "Hello, \n\tRuby."
p "Hello, \n\tRuby."
# # 数値
# arith.rb
print("1+1=",1+1,"\n")
include Math
print("sin(3.1415) = ", sin(3.1415),"\n")
print("sqrt(10000) = ", sqrt(10000),"\n")
# # 変数
# area_volume.rb
# Surface area and volume of a rectangular box with edge lengths x, y, z.
x = 10
y = 20
z = 30
# Surface area: twice the sum of the three distinct face areas.
area = (x*y + y*z + z*x)*2
volume = x*y*z
# Print with comma-separated arguments...
print("表面積=", area, "\n")
print("体積 =", volume, "\n")
# ...and again using string interpolation inside double-quoted literals.
print("表面積= #{area}\n")
print("体積 = #{volume}\n")
# # コメント
#
# * #以降がコメント
# * ブロックコメント(=begin_=end)
# # 制御構造
# ## 条件判断(if_then_else)
#
# bigger_smaller.rb
a = 20
if a >= 10
print("bigger\n")
else
print("smaller\n")
end
# ## 繰り返し(loop)
# * while
# * times
# * each
# while_loop.rb
# Print the integers 1 through 10, one per line.
i = 1
while i<=10
  print(i, "\n")
  i += 1
end
# times_loop.rb
100.times {
print "hello. "
}
# # 課題
# ## helloの変形(hello_name.rb)
# 自分の名前(例えば,name = 'bob')を変数に入れて
# > Hello, bob!
#
# と返すようにしなさい.
#
# ## 8回愛してる(hello_8.rb)
#
# > Hello, bob!
#
# を8回繰り返すようにmain loopを書き換えなさい.さらに,
# ```bash
# ruby hello_8.rb 28s Mon Sep 4 17:25:47 2017
# 0 Hello, bob.
# 1 Hello, bob.
# 2 Hello, bob.
# 3 Hello, bob.
# 4 Hello, bob.
# 5 Hello, bob.
# 6 Hello, bob.
# 7 Hello, bob.
# ```
#
# というように何回目かを付け足して表示するように書き換えなさい.
#
# ヒント:printfも試してみなさい.
# ## 今日の日付
#
# > require 'date'
#
# してから今日の日付表示させなさい.formatも,含めて,たとえば,
#
# ```bash
# Hello, bob!
# 2017年 09月 04日
# ```
# と返すようにしなさい.どうやるかはネットで調べよ.
| docs/happy_ruby/c2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# this is a list of features which are used in the LGTM score
features = [
    'If a project uses LGTM checks for commits',
    'The worst LGTM grade of a project'
]

# +
import commons

# load test vectors
# NOTE: the path is relative to this notebook's location in the repository.
file = '../../../../../src/main/resources/com/sap/sgs/phosphor/fosstars/model/score/oss/LgtmScoreTestVectors.yml'
test_vectors = commons.load_test_vectors_from_yaml(file, features)

# print out the test vectors
# (a bare trailing expression is rendered as cell output by the notebook)
test_vectors
# -

# Draw one histogram per feature over the loaded test vectors.
commons.draw_hists(features, test_vectors)
| src/main/jupyter/oss/security/LgtmScoreTestVectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbgrader={"grade": false, "grade_id": "cell-b324b951784c4b07", "locked": false, "schema_version": 1, "solution": true}
### BEGIN SOLUTION
import os
import pandas as pd
import numpy as np
### END SOLUTION
# + nbgrader={"grade": false, "grade_id": "cell-46cb9115d8541708", "locked": false, "schema_version": 1, "solution": true}
# Grab a reference to the current directory
### BEGIN SOLUTION
rootdir = os.getcwd()
### END SOLUTION
# + nbgrader={"grade": false, "grade_id": "cell-0c4fd9f73a3f7feb", "locked": false, "schema_version": 1, "solution": true}
# Use `os.scandir` to get a list of all files in the current directory
### BEGIN SOLUTION
csvs = os.scandir(rootdir)
### END SOLUTION
# + nbgrader={"grade": false, "grade_id": "cell-ccb478e7116e802a", "locked": false, "schema_version": 1, "solution": true}
# Iterate through the list and clean/process any CSV file using Pandas
### BEGIN SOLUTION
for csv in csvs:
    # Only open CSV file extensions
    if csv.name.endswith('.csv'):
        # Read the CSV file
        # NOTE(review): dtype=object loads every column as Python strings, so
        # the df.mean() call below finds no numeric columns to average —
        # confirm whether dtype=object is intentional here.
        df = pd.read_csv(csv.path, dtype=object)
        # Drop the location column since lat, lon, and elev already exist
        if 'location' in df.columns:
            df = df.drop(['location'], axis=1).reset_index(drop=True)
        # Use the mean to fill in any NaNs
        # (column-wise means; non-numeric columns are left untouched)
        df.fillna(df.mean(), inplace=True)
        # Save the cleaned files with a `clean_` prefix
        df.to_csv(os.path.join(rootdir, f"clean_{csv.name}"), index=False)
### END SOLUTION
# + nbgrader={"grade": false, "grade_id": "cell-684a376a90d9068b", "locked": true, "schema_version": 1, "solution": false}
# Verify that the cleaned files were created
# !ls
# -
| SQL_Alchemy_for_climate_analysis_and_exploration/data_engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('gtoc4_problem_data.txt', skiprows=2,delimiter= '\s+',header=None)
df.columns = ['Name','Epoch (MJD)','a (AU)','e','i (deg)','LAN (deg)','arg. periap. (deg)','mean anomaly (deg)']
df.head()
df.describe()
df.hist()
df
df['mean anomaly (deg)'][0]
def get_mean_anom(asteroid, epoch_mjd=None):
    """Return the mean anomaly (deg) of *asteroid*, optionally propagated.

    *asteroid* is a row of the elements table (anything supporting
    column-name access, e.g. a pandas Series or a dict).

    With ``epoch_mjd=None`` the tabulated mean anomaly is returned unchanged;
    otherwise it is propagated with the two-body mean motion
    n = 360 / (365.25 * a**1.5) degrees per day (Kepler's third law in
    AU / solar-mass units) and wrapped into [0, 360).
    """
    m0 = asteroid['mean anomaly (deg)']
    if epoch_mjd is None:
        return m0
    n = 360.0 / (365.25 * asteroid['a (AU)'] ** 1.5)
    return (m0 + n * (epoch_mjd - asteroid['Epoch (MJD)'])) % 360.0

def orb2cart(name, epoch, a, e, i, LAN, argPeri, meanAnom):
    """Convert classical orbital elements to a heliocentric Cartesian position.

    Parameters: ``a`` in AU, angles (``i``, ``LAN``, ``argPeri``,
    ``meanAnom``) in degrees. ``name`` and ``epoch`` are carried along for
    bookkeeping only.

    Returns the position ``(x, y, z)`` in AU, in the frame the elements are
    referenced to (ecliptic for the GTOC4 data).
    """
    import numpy as np  # the notebook does not import numpy at top level
    M = np.radians(meanAnom)
    # Solve Kepler's equation M = E - e*sin(E) for the eccentric anomaly E
    # by Newton's method; start at M (or pi for highly eccentric orbits).
    E = M if e < 0.8 else np.pi
    for _ in range(50):
        dE = (E - e * np.sin(E) - M) / (1 - e * np.cos(E))
        E -= dE
        if abs(dE) < 1e-12:
            break
    # True anomaly and radius from the eccentric anomaly
    # (r = a*(1-e^2)/(1+e*cos(theta)) in its numerically stable form).
    theta = 2 * np.arctan2(np.sqrt(1 + e) * np.sin(E / 2),
                           np.sqrt(1 - e) * np.cos(E / 2))
    r = a * (1 - e * np.cos(E))
    # Rotate from the perifocal frame to the reference frame.
    Om = np.radians(LAN)
    w = np.radians(argPeri)
    inc = np.radians(i)
    u = w + theta  # argument of latitude
    x = r * (np.cos(Om) * np.cos(u) - np.sin(Om) * np.sin(u) * np.cos(inc))
    y = r * (np.sin(Om) * np.cos(u) + np.cos(Om) * np.sin(u) * np.cos(inc))
    z = r * np.sin(u) * np.sin(inc)
    return x, y, z
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Objectives" data-toc-modified-id="Objectives-1"><span class="toc-item-num">1 </span>Objectives</a></span></li><li><span><a href="#Motivation" data-toc-modified-id="Motivation-2"><span class="toc-item-num">2 </span>Motivation</a></span></li><li><span><a href="#Scenario:-Identifying-Fraudulent-Credit-Card-Transactions" data-toc-modified-id="Scenario:-Identifying-Fraudulent-Credit-Card-Transactions-3"><span class="toc-item-num">3 </span>Scenario: Identifying Fraudulent Credit Card Transactions</a></span><ul class="toc-item"><li><span><a href="#EDA" data-toc-modified-id="EDA-3.1"><span class="toc-item-num">3.1 </span>EDA</a></span></li><li><span><a href="#Logistic-Regression" data-toc-modified-id="Logistic-Regression-3.2"><span class="toc-item-num">3.2 </span>Logistic Regression</a></span></li><li><span><a href="#Evaluation" data-toc-modified-id="Evaluation-3.3"><span class="toc-item-num">3.3 </span>Evaluation</a></span></li><li><span><a href="#Confusion-Matrix" data-toc-modified-id="Confusion-Matrix-3.4"><span class="toc-item-num">3.4 </span>Confusion Matrix</a></span></li><li><span><a href="#Classification-Metrics" data-toc-modified-id="Classification-Metrics-3.5"><span class="toc-item-num">3.5 </span>Classification Metrics</a></span></li><li><span><a href="#Accuracy" data-toc-modified-id="Accuracy-3.6"><span class="toc-item-num">3.6 </span>Accuracy</a></span></li><li><span><a href="#Recall" data-toc-modified-id="Recall-3.7"><span class="toc-item-num">3.7 </span>Recall</a></span></li><li><span><a href="#Precision" data-toc-modified-id="Precision-3.8"><span class="toc-item-num">3.8 </span>Precision</a></span></li><li><span><a href="#$F$-Scores" data-toc-modified-id="$F$-Scores-3.9"><span class="toc-item-num">3.9 </span>$F$-Scores</a></span></li><li><span><a href="#classification_report()" data-toc-modified-id="classification_report()-3.10"><span class="toc-item-num">3.10 
</span><code>classification_report()</code></a></span></li></ul></li><li><span><a href="#Exercise:-Breast-Cancer-Prediction" data-toc-modified-id="Exercise:-Breast-Cancer-Prediction-4"><span class="toc-item-num">4 </span>Exercise: Breast Cancer Prediction</a></span><ul class="toc-item"><li><span><a href="#Task" data-toc-modified-id="Task-4.1"><span class="toc-item-num">4.1 </span>Task</a></span></li></ul></li><li><span><a href="#Multiclass-Classification" data-toc-modified-id="Multiclass-Classification-5"><span class="toc-item-num">5 </span>Multiclass Classification</a></span></li><li><span><a href="#Summary:-Which-Metric-Should-I-Care-About?" data-toc-modified-id="Summary:-Which-Metric-Should-I-Care-About?-6"><span class="toc-item-num">6 </span>Summary: Which Metric Should I Care About?</a></span></li><li><span><a href="#Level-Up:-Cost-Matrix" data-toc-modified-id="Level-Up:-Cost-Matrix-7"><span class="toc-item-num">7 </span>Level Up: Cost Matrix</a></span></li><li><span><a href="#Level-Up:-Multiclass-Example" data-toc-modified-id="Level-Up:-Multiclass-Example-8"><span class="toc-item-num">8 </span>Level Up: Multiclass Example</a></span></li></ul></div>
# +
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

from sklearn.utils import resample
from sklearn.datasets import load_breast_cancer, load_iris, make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, plot_confusion_matrix,\
    ConfusionMatrixDisplay,\
    precision_score, recall_score, accuracy_score, f1_score, log_loss,\
    roc_curve, roc_auc_score, classification_report
# + [markdown] heading_collapsed=true
# # Objectives
# + [markdown] hidden=true
# - Calculate and interpret a confusion matrix
# - Calculate and interpret classification metrics such as accuracy, recall, and precision
# - Choose classification metrics appropriate to a business problem
# + [markdown] heading_collapsed=true
# # Motivation
# + [markdown] hidden=true
# There are many ways to evaluate a classification model, and your choice of evaluation metric can have a major impact on how well your model serves its intended goals. This lecture will review common classification metrics you might consider using, and considerations for how to make your choice.
# + [markdown] heading_collapsed=true
# # Scenario: Identifying Fraudulent Credit Card Transactions
# + [markdown] hidden=true
# Credit card companies often try to identify whether a transaction is fraudulent at the time when it occurs, in order to decide whether to approve it. Let's build a classification model to try to classify fraudulent transactions!
#
# The data for this example from from [this Kaggle dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud).
# + hidden=true
# Code to downsample from original dataset
#
# credit_data = pd.read_csv('creditcard.csv')
# credit_data_small = credit_data.iloc[0:10000]
# credit_data_small.describe()
# credit_data_small.to_csv('credit_fraud_small.csv', index=False)
# + hidden=true
credit_data = pd.read_csv('credit_fraud_small.csv')
# + [markdown] hidden=true
# The dataset contains features for the transaction amount, the relative time of the transaction, and 28 other features formed using PCA. The target 'Class' is a 1 if the transaction was fraudulent, 0 otherwise
# + hidden=true
credit_data.head()
# + [markdown] heading_collapsed=true hidden=true
# ## EDA
# + [markdown] hidden=true
# Let's see what we can learn from some summary statistics.
# + hidden=true
credit_data.describe()
# + [markdown] hidden=true
# **Question**: What can we learn from the mean of the target 'Class'?
#
# <details>
# <summary>Answer</summary>
# Fraudulent transactions are rare - only 0.4% of transactions were fraudulent
# </details>
# + [markdown] heading_collapsed=true hidden=true
# ## Logistic Regression
# + [markdown] hidden=true
# Let's run a logistic regression model using all of our features.
# + hidden=true
# Separate data into feature and target DataFrames
X = credit_data.drop('Class', axis = 1)
y = credit_data['Class']
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25,
random_state=1)
# Scale the data for modeling
cred_scaler = StandardScaler()
cred_scaler.fit(X_train)
X_train_sc = cred_scaler.transform(X_train)
X_test_sc = cred_scaler.transform(X_test)
# Train a logistic regresssion model with the train data
cred_model = LogisticRegression(random_state=42)
cred_model.fit(X_train_sc, y_train)
# + [markdown] heading_collapsed=true hidden=true
# ## Evaluation
# + [markdown] hidden=true
# Let's calculate the accuracy score for our model using the test set.
# + hidden=true
cred_model.score(X_test_sc, y_test)
# + [markdown] hidden=true
# We got 99.88% accuracy, meaning that 99.88% of our predictions were correct! That seems great, right? Maybe... too great? Let's dig in deeper.
# + [markdown] heading_collapsed=true hidden=true
# ## Confusion Matrix
# + [markdown] hidden=true
# Let's consider the four categories of predictions our model might have made:
#
# * Predicting that a transaction was fraudulent when it actually was (**true positive** or **TP**)
# * Predicting that a transaction was fraudulent when it actually wasn't (**false positive** or **FP**)
# * Predicting that a transaction wasn't fraudulent when it actually was (**false negative** or **FN**)
# * Predicting that a transaction wasn't fraudulent when it actually wasn't (**true negative** or **TN**)
# + [markdown] hidden=true
# <img src='img/precisionrecall.png' width=70%/>
# + [markdown] hidden=true
# The **confusion matrix** gives us all four of these values.
# + hidden=true
y_pred = cred_model.predict(X_test_sc)
cm_1 = confusion_matrix(y_test, y_pred)
cm_1
# + hidden=true
# More visual representation
# plot_confusion_matrix was removed in scikit-learn 1.2;
# ConfusionMatrixDisplay.from_estimator is the drop-in replacement.
ConfusionMatrixDisplay.from_estimator(cred_model, X_test_sc, y_test);
# + [markdown] hidden=true
# Notice the way that sklearn displays its confusion matrix: The rows are \['actually false', 'actually true'\]; the columns are \['predicted false', 'predicted true'\].
#
# So it displays:
#
# $\begin{bmatrix}
# TN & FP \\
# FN & TP
# \end{bmatrix}$
# + [markdown] hidden=true
# **Question:** Do you see anything surprising in the confusion matrix?
# + hidden=true
# + [markdown] heading_collapsed=true hidden=true
# ## Classification Metrics
# + [markdown] hidden=true
# Let's calculate some common classification metrics and consider which would be most useful for this scenario.
# + hidden=true
# Unpack the sklearn confusion matrix: rows are actual [0, 1], columns predicted [0, 1].
tn = cm_1[0, 0]  # true negatives: legitimate transactions predicted legitimate
fp = cm_1[0, 1]  # false positives: legitimate transactions flagged as fraud
fn = cm_1[1, 0]  # false negatives: fraudulent transactions missed
tp = cm_1[1, 1]  # true positives: fraudulent transactions correctly flagged
# + [markdown] heading_collapsed=true hidden=true
# ## Accuracy
# + [markdown] hidden=true
# **Accuracy** = $\frac{TP + TN}{TP + TN + FP + FN}$
#
# In words: How often did my model correctly identify transactions (fraudulent or not fraudulent)? This should give us the same value as we got from the `.score()` method.
# + hidden=true
acc = (tp + tn) / (tp + tn + fp + fn)
print(acc)
# + [markdown] heading_collapsed=true hidden=true
# ## Recall
# + [markdown] hidden=true
# **Recall** = **Sensitivity** = $\frac{TP}{TP + FN}$
#
# In words: How many of the actually fraudulent transactions did my model identify?
# + hidden=true
rec = tp / (tp + fn)
print(rec)
# + [markdown] hidden=true
# **Question:** Do you think a credit card company would consider recall to be an important metric? Why or why not?
# + [markdown] heading_collapsed=true hidden=true
# ## Precision
# + [markdown] hidden=true
# **Precision** = $\frac{TP}{TP + FP}$
#
# In words: How often was my model's prediction of 'fraudulent' correct?
# + hidden=true
prec = tp / (tp + fp)
print(prec)
# + [markdown] hidden=true
# **Question:** Do you think a credit card company would care more about recall or precision?
# + [markdown] heading_collapsed=true hidden=true
# ## $F$-Scores
# + [markdown] hidden=true
# The $F$-score is a combination of precision and recall, which can be useful when both are important for a business problem.
# + [markdown] hidden=true
# Most common is the **$F_1$ Score**, which is an equal balance of the two using a [harmonic mean](https://en.wikipedia.org/wiki/Harmonic_mean).
#
# $$F_1 = 2 \frac{Pr \cdot Rc}{Pr + Rc} = \frac{2TP}{2TP + FP + FN}$$
# + [markdown] hidden=true
# > _Recall: a **score** typically means higher is better._
# + hidden=true
f1_score = 2*prec*rec / (prec + rec)
print(f1_score)
# + [markdown] hidden=true
# **Question:** Which of these metrics do you think a credit card company would care most about when trying to flag fraudulent transactions to deny?
# + [markdown] hidden=true
# We can generalize this score to the **$F_\beta$ Score** where increasing $\beta$ puts more importance on _recall_:
#
# $$F_\beta = \frac{(1+\beta^2) \cdot Precision \cdot Recall}{\beta^2 \cdot Precision + Recall}$$
# + [markdown] heading_collapsed=true hidden=true
# ## `classification_report()`
# + [markdown] hidden=true
# You can get all of these metrics using the `classification_report()` function.
#
# - The top rows show statistics for if you treated each label as the "positive" class
# - **Support** shows the sample size in each class
# - The averages in the bottom two rows are across the rows in the class table above (useful when there are more than two classes)
# + hidden=true
print(classification_report(y_test, y_pred))
# + [markdown] heading_collapsed=true
# # Exercise: Breast Cancer Prediction
# + [markdown] hidden=true
# Let's evaluate a model using Scikit-Learn's breast cancer dataset:
# + hidden=true
# Load the data
preds, target = load_breast_cancer(return_X_y=True)
# Split into train and test
X_train, X_test, y_train, y_test = train_test_split(preds, target,
random_state=42)
# Scale the data
bc_scaler = StandardScaler()
bc_scaler.fit(X_train)
X_train_sc = bc_scaler.transform(X_train)
X_test_sc = bc_scaler.transform(X_test)
# Run the model
bc_model = LogisticRegression(solver='lbfgs', max_iter=10000,
random_state=42)
bc_model.fit(X_train_sc, y_train)
# + [markdown] heading_collapsed=true hidden=true
# ## Task
# + [markdown] hidden=true
# Calculate the following for this model:
#
# - Confusion Matrix
# - Accuracy
# - Precision
# - Recall
# - F1 Score
#
# Discuss: Which one would you choose to evaluate the model for use as a diagnostic tool to detect breast cancer? Why?
# + hidden=true
# Your work here
# + hidden=true
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# + [markdown] heading_collapsed=true
# # Multiclass Classification
# + [markdown] hidden=true
# What if our target has more than two classes?
#
# **Multiclass classification** problems have more than two possible values for the target. For example, your target would have 10 possible values if you were trying to [classify an image of a hand-written number as a digit from 0 to 9](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html).
# + [markdown] hidden=true
# In these cases, we can use the same methods to evaluate our models. Confusion matrices will no longer be 2x2, but will have a number of rows/columns equal to the number of classes.
#
# When calculating metrics like precision, we choose one class to be the "positive" class, and the rest are assigned to the "negative" class.
# + [markdown] hidden=true
# An example of comparing multiclass confusion matrices (letter recognition for two different models from [this repo](https://github.com/MrGeislinger/ASLTransalation)):
#
# 
# 
# + [markdown] heading_collapsed=true
# # Summary: Which Metric Should I Care About?
# + [markdown] hidden=true
# Well, it depends.
#
# Accuracy:
# - Pro: Takes into account both false positives and false negatives.
# - Con: Can be misleadingly high when there is a significant class imbalance. (A lottery-ticket predictor that *always* predicts a loser will be highly accurate.)
#
# Recall:
# - Pro: Highly sensitive to false negatives.
# - Con: No sensitivity to false positives.
#
# Precision:
# - Pro: Highly sensitive to false positives.
# - Con: No sensitivity to false negatives.
#
# F-1 Score:
# - Harmonic mean of recall and precision.
#
# The nature of your business problem will help you determine which metric matters.
#
# Sometimes false positives are much worse than false negatives: Arguably, a model that compares a sample of crime-scene DNA with the DNA in a city's database of its citizens presents one such case. Here a false positive would mean falsely identifying someone as having been present at a crime scene, whereas a false negative would mean only that we fail to identify someone who really was present at the crime scene as such.
#
# On the other hand, consider a model that inputs X-ray images and predicts the presence of cancer. Here false negatives are surely worse than false positives: A false positive means only that someone without cancer is misdiagnosed as having it, while a false negative means that someone with cancer is misdiagnosed as *not* having it.
# + [markdown] heading_collapsed=true
# # Level Up: Cost Matrix
# + [markdown] hidden=true
# One might assign different weights to the costs associated with false positives and false negatives. (We'll standardly assume that the costs associated with *true* positives and negatives are negligible.)
#
# **Example**. Suppose we are in the DNA prediction scenario above. Then we might construct the following cost matrix:
# + hidden=true
cost = np.array([[0, 10], [3, 0]])
cost
# + [markdown] hidden=true
# This cost matrix will allow us to compare models if we have access to those models' rates of false positives and false negatives, i.e. if we have access to the models' confusion matrices!
#
# **Problem**. Given the cost matrix above and the confusion matrices below, which model should we go with?
# + hidden=true
conf1, conf2 = np.array([[100, 10], [30, 300]]), np.array([[120, 20], [0, 300]])
print(conf1, 2*'\n', conf2)
# + [markdown] heading_collapsed=true
# # Level Up: Multiclass Example
# + hidden=true
flowers = load_iris()
# + hidden=true
print(flowers.DESCR)
# + hidden=true
dims_train, dims_test, spec_train, spec_test = train_test_split(flowers.data,
flowers.target,
test_size=0.5,
random_state=42)
# + hidden=true
spec_train[:5]
# + hidden=true
ss_f = StandardScaler()
ss_f.fit(dims_train)
dims_train_sc = ss_f.transform(dims_train)
dims_test_sc = ss_f.transform(dims_test)
# + hidden=true
logreg_f = LogisticRegression(multi_class='multinomial',
C=0.01, random_state=42)
logreg_f.fit(dims_train_sc, spec_train)
# + hidden=true
plot_confusion_matrix(estimator=logreg_f,
X=dims_test_sc,
y_true=spec_test,
display_labels=[
'setosa',
'versicolor',
'virginica'
]);
# + hidden=true
print(classification_report(spec_test,
logreg_f.predict(dims_test_sc)))
| Phase_3/ds-classification_metrics-main/classification_metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Mixture Density Networks
#
# Mixture density networks (MDN) (Bishop, 1994) are a class
# of models obtained by combining a conventional neural network with a
# mixture density model.
#
# We demonstrate with an example in Edward. A webpage version is available at
# http://edwardlib.org/tutorials/mixture-density-network.
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from edward.models import Categorical, Mixture, Normal
from tensorflow.contrib import slim
from scipy import stats
from sklearn.model_selection import train_test_split
# +
def plot_normal_mix(pis, mus, sigmas, ax, label='', comp=True):
    """Plot a mixture of Normal densities on the axis `ax`.

    Each (weight, mean, std) triple defines one weighted Normal component.
    When `comp` is True the individual components are drawn in addition to
    the summed mixture curve.
    """
    grid = np.linspace(-10.5, 10.5, 250)
    mixture_pdf = np.zeros_like(grid)
    for idx, (weight, mean, std) in enumerate(zip(pis, mus, sigmas)):
        component = weight * stats.norm.pdf(grid, mean, std)
        mixture_pdf = mixture_pdf + component
        if comp:
            ax.plot(grid, component, label='Normal ' + str(idx))
    ax.plot(grid, mixture_pdf, label='Mixture of Normals ' + label)
    ax.legend(fontsize=13)
def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):
    """Draw one sample per input point from a per-point mixture of Normals.

    Parameters
    ----------
    x : array-like
        Input locations; x[j] is stored next to the j-th sample.
    pred_weights, pred_means, pred_std : 2-D arrays, shape (n_points, n_mix)
        Mixture weights, component means and standard deviations per point.
    amount : int
        Number of rows to sample (only the first `amount` points are used).

    Returns
    -------
    np.ndarray of shape (amount, 2) with columns (x, sampled y).
    """
    samples = np.zeros((amount, 2))
    n_mix = len(pred_weights[0])
    to_choose_from = np.arange(n_mix)
    for j, (weights, means, std_devs) in enumerate(
        zip(pred_weights, pred_means, pred_std)):
      # Pick a mixture component according to its weight, then sample
      # from that component's Normal distribution.
      index = np.random.choice(to_choose_from, p=weights)
      # Draw a scalar (size=None) instead of `size=1`: assigning a
      # length-1 array into a scalar slot is deprecated in NumPy >= 1.25.
      samples[j, 1] = np.random.normal(means[index], std_devs[index])
      samples[j, 0] = x[j]
      if j == amount - 1:
        break
    return samples
# -
# ## Data
#
# We use the same toy data from
# [<NAME>'s blog post](http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/), where he explains MDNs. It is an inverse problem where
# for every input $x_n$ there are multiple outputs $y_n$.
# +
def build_toy_dataset(N):
    """Generate N points of the inverse sine toy data and split train/test.

    y is drawn uniformly and x is a noisy sinusoid of y, so a single x
    value maps to several plausible y values (an inverse problem).
    """
    y_data = np.random.uniform(-10.5, 10.5, N)
    noise = np.random.normal(size=N)  # additive Gaussian noise
    x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + noise * 1.0
    return train_test_split(x_data.reshape((N, 1)), y_data, random_state=42)
ed.set_seed(42)
N = 5000 # number of data points
D = 1 # number of features
K = 20 # number of mixture components
X_train, X_test, y_train, y_test = build_toy_dataset(N)
print("Size of features in training data: {}".format(X_train.shape))
print("Size of output in training data: {}".format(y_train.shape))
print("Size of features in test data: {}".format(X_test.shape))
print("Size of output in test data: {}".format(y_test.shape))
sns.regplot(X_train, y_train, fit_reg=False)
plt.show()
# -
# We define TensorFlow placeholders, which will be used to manually feed batches of data during inference. This is [one of many ways](http://edwardlib.org/api/data) to train models with data in Edward.
X_ph = tf.placeholder(tf.float32, [None, D])
y_ph = tf.placeholder(tf.float32, [None])
# ## Model
#
# We use a mixture of 20 normal distributions parameterized by a
# feedforward network. That is, the membership probabilities and
# per-component mean and standard deviation are given by the output of a
# feedforward network.
#
# We leverage TensorFlow Slim to construct neural networks. We specify
# a three-layer network with 15 hidden units for each hidden layer.
# +
def neural_network(X):
  """Map inputs to mixture parameters: loc, scale, logits = NN(x; theta).

  Two fully connected hidden layers of 15 units feed three parallel
  output heads of K units each, one head per mixture parameter.
  """
  net = slim.fully_connected(X, 15)
  net = slim.fully_connected(net, 15)
  # Means are unconstrained; scales pass through exp to stay positive;
  # logits are the unnormalized mixture weights.
  locs = slim.fully_connected(net, K, activation_fn=None)
  scales = slim.fully_connected(net, K, activation_fn=tf.exp)
  logits = slim.fully_connected(net, K, activation_fn=None)
  return locs, scales, logits
locs, scales, logits = neural_network(X_ph)
cat = Categorical(logits=logits)
components = [Normal(loc=loc, scale=scale) for loc, scale
in zip(tf.unstack(tf.transpose(locs)),
tf.unstack(tf.transpose(scales)))]
y = Mixture(cat=cat, components=components, value=tf.zeros_like(y_ph))
# Note: A bug exists in Mixture which prevents samples from it to have
# a shape of [None]. For now fix it using the value argument, as
# sampling is not necessary for MAP estimation anyways.
# -
# Note that we use the `Mixture` random variable. It collapses
# out the membership assignments for each data point and makes the model
# differentiable with respect to all its parameters. It takes a
# `Categorical` random variable as input—denoting the probability for each
# cluster assignment—as well as `components`, which is a list of
# individual distributions to mix over.
#
# For more background on MDNs, take a look at
# [<NAME>'s blog post](http://cbonnett.github.io/MDN.html) or at Bishop (1994).
# ## Inference
#
# We use MAP estimation, passing in the model and data set.
# See this extended tutorial about
# [MAP estimation in Edward](http://edwardlib.org/tutorials/map)
# There are no latent variables to infer. Thus inference is concerned
# with only training model parameters, which are baked into how we
# specify the neural networks.
# Set up MAP estimation; the model parameters live inside the neural
# network, so there are no latent variables to infer.
inference = ed.MAP(data={y: y_ph})
# Here, we will manually control the inference and how data is passed
# into it at each step.
# Initialize the algorithm and the TensorFlow variables.
# +
# Initialize exactly once: the original called inference.initialize()
# twice (once here and once in the previous cell), which adds duplicate
# optimizer ops to the TensorFlow graph.
inference.initialize(var_list=tf.trainable_variables())
sess = ed.get_session()
tf.global_variables_initializer().run()
# -
# Now we train the MDN by calling `inference.update()`, passing
# in the data. The quantity `inference.loss` is the
# loss function (negative log-likelihood) at that step of inference. We
# also report the loss function on test data by calling
# `inference.loss` and where we feed test data to the TensorFlow
# placeholders instead of training data.
# We keep track of the losses under `train_loss` and `test_loss`.
# Train with full-batch updates: every epoch feeds the entire training set.
n_epoch = 1000
train_loss = np.zeros(n_epoch)  # objective value per epoch on the train set
test_loss = np.zeros(n_epoch)   # objective value per epoch on the test set
for i in range(n_epoch):
  # One optimizer step on the training data; 'loss' is the objective value.
  info_dict = inference.update(feed_dict={X_ph: X_train, y_ph: y_train})
  train_loss[i] = info_dict['loss']
  # Evaluate (without updating) the same loss tensor on the held-out data.
  test_loss[i] = sess.run(inference.loss,
                          feed_dict={X_ph: X_test, y_ph: y_test})
  inference.print_progress(info_dict)
# Note a common failure mode when training MDNs is that an individual
# mixture distribution collapses to a point. This forces the standard
# deviation of the normal to be close to 0 and produces NaN values.
# We can prevent this by thresholding the standard deviation if desired.
#
# After training for a number of iterations, we get out the predictions
# we are interested in from the model: the predicted mixture weights,
# cluster means, and cluster standard deviations.
#
# To do this, we fetch their values from session, feeding test data
# `X_test` to the placeholder `X_ph`.
pred_weights, pred_means, pred_std = \
sess.run([tf.nn.softmax(logits), locs, scales], feed_dict={X_ph: X_test})
# Let's plot the log-likelihood of the training and test data as
# functions of the training epoch. The quantity `inference.loss`
# is the total log-likelihood, not the loss per data point. Below we
# plot the per-data point log-likelihood by dividing by the size of the
# train and test data respectively.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(16, 3.5))
plt.plot(np.arange(n_epoch), -test_loss / len(X_test), label='Test')
plt.plot(np.arange(n_epoch), -train_loss / len(X_train), label='Train')
plt.legend(fontsize=20)
plt.xlabel('Epoch', fontsize=15)
plt.ylabel('Log-likelihood', fontsize=15)
plt.show()
# We see that it converges after roughly 400 iterations.
# ## Criticism
#
# Let's look at how individual examples perform. Note that as this is an
# inverse problem we can't get the answer correct, but we can hope that
# the truth lies in area where the model has high probability.
#
# In this plot the truth is the vertical grey line while the blue line
# is the prediction of the mixture density network. As you can see, we
# didn't do too bad.
# +
obj = [0, 4, 6]
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(16, 6))
plot_normal_mix(pred_weights[obj][0], pred_means[obj][0],
pred_std[obj][0], axes[0], comp=False)
axes[0].axvline(x=y_test[obj][0], color='black', alpha=0.5)
plot_normal_mix(pred_weights[obj][2], pred_means[obj][2],
pred_std[obj][2], axes[1], comp=False)
axes[1].axvline(x=y_test[obj][2], color='black', alpha=0.5)
plot_normal_mix(pred_weights[obj][1], pred_means[obj][1],
pred_std[obj][1], axes[2], comp=False)
axes[2].axvline(x=y_test[obj][1], color='black', alpha=0.5)
plt.show()
# -
# We can check the ensemble by drawing samples of the prediction and
# plotting the density of those. The MDN has learned what we'd like it
# to learn.
a = sample_from_mixture(X_test, pred_weights, pred_means,
pred_std, amount=len(X_test))
sns.jointplot(a[:, 0], a[:, 1], kind="hex", color="#4CB391",
ylim=(-10, 10), xlim=(-14, 14))
plt.show()
# ## Acknowledgments
#
# We thank <NAME> for writing the initial version
# of this tutorial. More generally, we thank Chris for pushing forward
# momentum to have Edward tutorials be accessible and easy-to-learn.
| notebooks/mixture_density_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Scikit-Learn (sklearn)
#
# This notebook demonstrates some of the most useful functions of the beautiful Scikit-Learn library.
#
# What we're going to cover:
# Let's listify the contents
what_were_covering = [
"0. An end-to-end Scikit-Learn workflow",
"1. Getting the data ready",
"2. Choose the right estimator/algorithm for our problems",
"3. Fit the model/algorithm and use it to make predictions on our data",
"4. Evaluating a model",
"5. Improve a model",
"6. Save and load a trained model",
"7. Putting it all together!"]
what_were_covering
# Standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ## 0. An end-to-end Scikit-Learn workflow
# 1. Get the data ready
import pandas as pd
heart_disease = pd.read_csv("../data/heart-disease.csv")
heart_disease
# +
# Create X (features matrix)
X = heart_disease.drop("target", axis=1)
# Create y (labels)
y = heart_disease["target"]
# +
# 2. Choose the right model and hyperparameters
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100)
# We'll keep the default hyperparameters
clf.get_params()
# +
# 3. Fit the model to the training data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# -
clf.fit(X_train, y_train);
X_train
# make a prediction
# NOTE(review): this call fails — clf was fit on the multi-column
# heart-disease feature matrix, and predict() expects a 2-D array with the
# same number of features (a later duplicate of this line is annotated
# "this doesn't work...").
y_label = clf.predict(np.array([0, 2, 3, 4]))
y_preds = clf.predict(X_test)
y_preds
y_test
# 4. Evaluate the model on the training data and test data
clf.score(X_train, y_train)
clf.score(X_test, y_test)
# +
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print(classification_report(y_test, y_preds))
# -
confusion_matrix(y_test, y_preds)
accuracy_score(y_test, y_preds)
# 5. Improve a model
# Try different amount of n_estimators
np.random.seed(42)  # fix the seed so the sweep is reproducible
for i in range(10, 100, 10):
    print(f"Trying model with {i} estimators...")
    clf = RandomForestClassifier(n_estimators=i).fit(X_train, y_train)
    print(f"Model accuracy on test set: {clf.score(X_test, y_test) * 100:.2f}%")
    print("")
# NOTE: after the loop, clf holds the last model fit (n_estimators=90);
# that is the model pickled in the next cell.
# +
# 6. Save a model and load it
import pickle
pickle.dump(clf, open("random_forst_model_1.pkl", "wb"))
# -
loaded_model = pickle.load(open("random_forst_model_1.pkl", "rb"))
loaded_model.score(X_test, y_test)
# ## 1. Getting our data ready to be used with machine learning
#
# Three main things we have to do:
# 1. Split the data into features and labels (usually `X` & `y`)
# 2. Filling (also called imputing) or disregarding missing values
# 3. Converting non-numerical values to numerical values (also called feature encoding)
heart_disease.head()
X = heart_disease.drop("target", axis=1)
X.head()
y = heart_disease["target"]
y.head()
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.3)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
X.shape[0] * 0.8
242 + 61
len(heart_disease)
# ### 1.1 Make sure it's all numerical
car_sales = pd.read_csv("../data/car-sales-extended.csv")
car_sales.head()
car_sales["Doors"].value_counts()
len(car_sales)
car_sales.dtypes
# +
# Split into X/y
X = car_sales.drop("Price", axis=1)
y = car_sales["Price"]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2)
# +
# Build machine learning model
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X_train, y_train)
model.score(X_test, y_test)
# -
X.head()
# +
# Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
one_hot,
categorical_features)],
remainder="passthrough")
transformed_X = transformer.fit_transform(X)
transformed_X
# -
X.head()
pd.DataFrame(transformed_X)
# Another way to do it with pd.dummies...
dummies = pd.get_dummies(car_sales[["Make", "Colour", "Doors"]])
dummies
# +
# Let's refit the model
np.random.seed(42)
X_train, X_test, y_train, y_test = train_test_split(transformed_X,
y,
test_size=0.2)
model.fit(X_train, y_train)
# -
X.head()
model.score(X_test, y_test)
# ### 1.2 What if there were missing values?
#
# 1. Fill them with some value (also known as imputation).
# 2. Remove the samples with missing data altogether.
# Import car sales missing data
car_sales_missing = pd.read_csv("../data/car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing.isna().sum()
# Create X & y
X = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
# +
# Let's try and convert our data to numbers
# Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
one_hot,
categorical_features)],
remainder="passthrough")
transformed_X = transformer.fit_transform(X)
transformed_X
# -
car_sales_missing
car_sales_missing["Doors"].value_counts()
# #### Option 1: Fill missing data with Pandas
# +
# Fill missing values column by column. Assigning the .fillna() result back
# avoids chained-assignment `inplace=True`, which is deprecated and
# unreliable under pandas copy-on-write.

# Fill the "Make" column
car_sales_missing["Make"] = car_sales_missing["Make"].fillna("missing")

# Fill the "Colour" column
car_sales_missing["Colour"] = car_sales_missing["Colour"].fillna("missing")

# Fill the "Odometer (KM)" column (numeric -> use the column mean)
car_sales_missing["Odometer (KM)"] = car_sales_missing["Odometer (KM)"].fillna(
    car_sales_missing["Odometer (KM)"].mean())

# Fill the "Doors" column (assumes 4 doors is the typical value)
car_sales_missing["Doors"] = car_sales_missing["Doors"].fillna(4)
# -
# Check our dataframe again
car_sales_missing.isna().sum()
# Remove rows with missing Price value
car_sales_missing.dropna(inplace=True)
car_sales_missing.isna().sum()
len(car_sales_missing)
X = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
# +
# Let's try and convert our data to numbers
# Turn the categories into numbers
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer

categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
                                  one_hot,
                                  categorical_features)],
                                remainder="passthrough")

# Transform the feature matrix X, not the whole dataframe: passing
# car_sales_missing would let the "Price" target pass through into the
# features (target leakage). This also matches the identical cell earlier
# in the notebook, which uses X.
transformed_X = transformer.fit_transform(X)
transformed_X
# -
# ### Option 2: Filling missing data and transforming categorical data with Scikit-Learn
#
# **Note:** This section is different to the video. The video shows filling and transforming the entire dataset (`X`) and although the techniques are correct, it's best to fill and transform training and test sets separately (as shown in the code below).
#
# The main takeaways:
# - Split your data first (into train/test)
# - Fill/transform the training set and test sets separately
#
# Thank you Robert [for pointing this out](https://www.udemy.com/course/complete-machine-learning-and-data-science-zero-to-mastery/learn/#questions/9506426).
car_sales_missing = pd.read_csv("../data/car-sales-extended-missing-data.csv")
car_sales_missing.head()
car_sales_missing.isna().sum()
# Drop the rows with no labels
car_sales_missing.dropna(subset=["Price"], inplace=True)
car_sales_missing.isna().sum()
# +
# Split into X & y
X = car_sales_missing.drop("Price", axis=1)
y = car_sales_missing["Price"]
# Split data into train and test
np.random.seed(42)
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2)
# -
# Check missing values
X.isna().sum()
# +
# Fill missing values with Scikit-Learn
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
# Fill categorical values with 'missing' & numerical values with mean
cat_imputer = SimpleImputer(strategy="constant", fill_value="missing")
door_imputer = SimpleImputer(strategy="constant", fill_value=4)
num_imputer = SimpleImputer(strategy="mean")
# Define columns
cat_features = ["Make", "Colour"]
door_feature = ["Doors"]
num_features = ["Odometer (KM)"]
# Create an imputer (something that fills missing data)
imputer = ColumnTransformer([
("cat_imputer", cat_imputer, cat_features),
("door_imputer", door_imputer, door_feature),
("num_imputer", num_imputer, num_features)
])
# Fill train and test values separately
filled_X_train = imputer.fit_transform(X_train)
filled_X_test = imputer.transform(X_test)
# Check filled X_train
filled_X_train
# +
# Get our transformed data array's back into DataFrame's
car_sales_filled_train = pd.DataFrame(filled_X_train,
columns=["Make", "Colour", "Doors", "Odometer (KM)"])
car_sales_filled_test = pd.DataFrame(filled_X_test,
columns=["Make", "Colour", "Doors", "Odometer (KM)"])
# Check missing data in training set
car_sales_filled_train.isna().sum()
# -
# Check to see the original... still missing values
car_sales_missing.isna().sum()
# +
# Now let's one hot encode the features with the same code as before
categorical_features = ["Make", "Colour", "Doors"]
one_hot = OneHotEncoder()
transformer = ColumnTransformer([("one_hot",
one_hot,
categorical_features)],
remainder="passthrough")
# Fill train and test values separately
transformed_X_train = transformer.fit_transform(car_sales_filled_train)
transformed_X_test = transformer.transform(car_sales_filled_test)
# Check transformed and filled X_train
transformed_X_train.toarray()
# +
# Now we've transformed X, let's see if we can fit a model
np.random.seed(42)
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
# Make sure to use transformed (filled and one-hot encoded X data)
model.fit(transformed_X_train, y_train)
model.score(transformed_X_test, y_test)
# -
# Check length of transformed data (filled and one-hot encoded)
# vs. length of original data
len(transformed_X_train.toarray())+len(transformed_X_test.toarray()), len(car_sales)
# **Note:** The 50 less values in the transformed data is because we dropped the rows (50 total) with missing values in the Price column.
# ## 2. Choosing the right estimator/algorithm for our problem
#
# Scikit-Learn uses estimator as another term for machine learning model or algorithm.
#
# * Classification - predicting whether a sample is one thing or another
# * Regression - predicting a number
#
# Step 1 - Check the Scikit-Learn machine learning map... https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html
# ### 2.1 Picking a machine learning model for a regression problem
# Import Boston housing dataset
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2, so this cell only runs on scikit-learn < 1.2. Consider
# fetch_california_housing as a drop-in style replacement.
from sklearn.datasets import load_boston
boston = load_boston()
boston;  # trailing ";" suppresses the notebook's automatic display
boston_df = pd.DataFrame(boston["data"], columns=boston["feature_names"])
boston_df["target"] = pd.Series(boston["target"])
boston_df.head()
# How many samples?
len(boston_df)
# +
# Let's try the Ridge Regression model
from sklearn.linear_model import Ridge
# Setup random seed
np.random.seed(42)
# Create the data
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate Ridge model
model = Ridge()
model.fit(X_train, y_train)
# Check the score of the Ridge model on test data
model.score(X_test, y_test)
# -
# How do we improve this score?
#
# What if Ridge wasn't working?
#
# Let's refer back to the map... https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html
# +
# Let's try the Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
# Setup random seed
np.random.seed(42)
# Create the data
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split the data
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate Random Forest Regressor
rf = RandomForestRegressor(n_estimators=100)
rf.fit(X_train, y_train)
# Evaluate the Random Forest Regressor
rf.score(X_test, y_test)
# -
# Check the Ridge model again
model.score(X_test, y_test)
# ### 2.2 Choosing an estimator for a classification problem
#
# Let's go to the map... https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html
# Re-load the dataset. Use the same relative path as at the top of the
# notebook ("../data/..."); the original used "data/..." here, which
# points at a different location and fails when run from the same
# working directory as the earlier cells.
heart_disease = pd.read_csv("../data/heart-disease.csv")
heart_disease.head()
len(heart_disease)
# Consulting the map and it says to try `LinearSVC`.
# +
# Import the LinearSVC estimator class
from sklearn.svm import LinearSVC
# Setup random seed
np.random.seed(42)
# Make the data
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate LinearSVC
clf = LinearSVC(max_iter=10000)
clf.fit(X_train, y_train)
# Evaluate the LinearSVC
clf.score(X_test, y_test)
# -
heart_disease["target"].value_counts()
# +
# Import the RandomForestClassifier estimator class
from sklearn.ensemble import RandomForestClassifier
# Setup random seed
np.random.seed(42)
# Make the data
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate Random Forest Classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
# Evaluate the Random Forest Classifier
clf.score(X_test, y_test)
# -
# Tidbit:
#
# 1. If you have structured data, use ensemble methods
# 2. If you have unstructured data, use deep learning or transfer learning
heart_disease
# ## 3. Fit the model/algorithm on our data and use it to make predictions
#
# ### 3.1 Fitting the model to the data
#
# Different names for:
# * `X` = features, features variables, data
# * `y` = labels, targets, target variables
# +
# Import the RandomForestClassifier estimator class
from sklearn.ensemble import RandomForestClassifier
# Setup random seed
np.random.seed(42)
# Make the data
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate Random Forest Classifier
clf = RandomForestClassifier(n_estimators=100)
# Fit the model to the data (training the machine learning model)
clf.fit(X_train, y_train)
# Evaluate the Random Forest Classifier (use the patterns the model has learned)
clf.score(X_test, y_test)
# -
X.head()
y.tail()
# ### Random Forest model deep dive
#
# These resources will help you understand what's happening inside the Random Forest models we've been using.
#
# * [Random Forest Wikipedia](https://en.wikipedia.org/wiki/Random_forest)
# * [Random Forest Wikipedia (simple version)](https://simple.wikipedia.org/wiki/Random_forest)
# * [Random Forests in Python](http://blog.yhat.com/posts/random-forests-in-python.html) by yhat
# * [An Implementation and Explanation of the Random Forest in Python](https://towardsdatascience.com/an-implementation-and-explanation-of-the-random-forest-in-python-77bf308a9b76) by <NAME>
# ### 3.2 Make predictions using a machine learning model
#
# 2 ways to make predictions:
# 1. `predict()`
# 2. `predict_proba()`
# Use a trained model to make predictions
clf.predict(np.array([1, 7, 8, 3, 4])) # this doesn't work...
X_test.head()
clf.predict(X_test)
np.array(y_test)
# Compare predictions to truth labels to evaluate the model
y_preds = clf.predict(X_test)
np.mean(y_preds == y_test)
clf.score(X_test, y_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_preds)
# Make predictions with `predict_proba()` - use this if someone asks you "what's the probability your model is assigning to each prediction?"
# predict_proba() returns probabilities of a classification label
clf.predict_proba(X_test[:5])
# Let's predict() on the same data...
clf.predict(X_test[:5])
X_test[:5]
heart_disease["target"].value_counts()
# `predict()` can also be used for regression models.
boston_df.head()
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
# Create the data
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate and fit model
model = RandomForestRegressor(n_estimators=100).fit(X_train, y_train)
# Make predictions
y_preds = model.predict(X_test)
# -
y_preds[:10]
np.array(y_test[:10])
# Compare the predictions to the truth
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, y_preds)
# ## 4. Evaluating a machine learning model
#
# Three ways to evaluate Scikit-Learn models/estimators:
# 1. Estimator `score` method
# 2. The `scoring` parameter
# 3. Problem-specific metric functions.
#
# ### 4.1 Evaluating a model with the `score` method
# +
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
# -
clf.score(X_train, y_train)
clf.score(X_test, y_test)
# Let's do the same but for regression...
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
# Create the data
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate and fit model
model = RandomForestRegressor(n_estimators=100).fit(X_train, y_train)
# -
model.score(X_test, y_test)
# ### 4.2 Evaluating a model using the `scoring` parameter
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train);
# -
clf.score(X_test, y_test)
cross_val_score(clf, X, y, cv=5)
cross_val_score(clf, X, y, cv=10)
# +
np.random.seed(42)
# Single training and test split score
clf_single_score = clf.score(X_test, y_test)
# Take the mean of 5-fold cross-validation score
clf_cross_val_score = np.mean(cross_val_score(clf, X, y, cv=5))
# Compare the two
clf_single_score, clf_cross_val_score
# -
# Default scoring parameter of classifier = mean accuracy
clf.score()
# Scoring parameter set to None by default
cross_val_score(clf, X, y, cv=5, scoring=None)
# ### 4.2.1 Classification model evaluation metrics
#
# 1. Accuracy
# 2. Area under ROC curve
# 3. Confusion matrix
# 4. Classification report
#
# **Accuracy**
heart_disease.head()
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
clf = RandomForestClassifier(n_estimators=100)
# Bind the scores to their own name so the imported cross_val_score
# function is not shadowed by its return value (the original rebound
# `cross_val_score` to an array, breaking later calls in this session).
cv_scores = cross_val_score(clf, X, y, cv=5)
# -
np.mean(cv_scores)
print(f"Heart Disease Classifier Cross-Validated Accuracy: {np.mean(cv_scores) * 100:.2f}%")
# **Area under the receiver operating characteristic curve (AUC/ROC)**
#
# * Area under curve (AUC)
# * ROC curve
#
# ROC curves are a comparison of a model's true positive rate (tpr) versus a model's false positive rate (fpr).
#
# * True positive = model predicts 1 when truth is 1
# * False positive = model predicts 1 when truth is 0
# * True negative = model predicts 0 when truth is 0
# * False negative = model predicts 0 when truth is 1
#
# Create X_test... etc
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# +
from sklearn.metrics import roc_curve
# Fit the classifier
clf.fit(X_train, y_train)
# Make predictions with probabilities
y_probs = clf.predict_proba(X_test)
y_probs[:10], len(y_probs)
# -
y_probs_positive = y_probs[:, 1]
y_probs_positive[:10]
# +
# Caculate fpr, tpr and thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_probs_positive)
# Check the false positive rates
fpr
# +
# Create a function for plotting ROC curves
import matplotlib.pyplot as plt
def plot_roc_curve(fpr, tpr):
    """
    Plot a ROC curve given the false positive rate (fpr)
    and true positive rate (tpr) of a model.

    Parameters:
        fpr: Array-like of false positive rates, one value per threshold
            (as returned by sklearn.metrics.roc_curve).
        tpr: Array-like of true positive rates, one value per threshold.

    Displays the figure with plt.show(); returns None.
    """
    # Plot roc curve
    plt.plot(fpr, tpr, color="orange", label="ROC")
    # Plot line with no predictive power (baseline) — deliberately left disabled
    #plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--", label="Guessing")
    # Customize the plot: axis labels, title and legend
    plt.xlabel("False positive rate (fpr)")
    plt.ylabel("True positive rate (tpr)")
    plt.title("Receiver Operating Characteristic (ROC) Curve")
    plt.legend()
    plt.show()
plot_roc_curve(fpr, tpr)
# +
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test, y_probs_positive)
# -
# Plot perfect ROC curve and AUC score
fpr, tpr, thresholds = roc_curve(y_test, y_test)
plot_roc_curve(fpr, tpr)
# Perfect AUC score
roc_auc_score(y_test, y_test)
# **Confusion Matrix**
#
# A confusion matrix is a quick way to compare the labels a model predicts and the actual labels it was supposed to predict.
#
# In essence, giving you an idea of where the model is getting confused.
# +
from sklearn.metrics import confusion_matrix
y_preds = clf.predict(X_test)
confusion_matrix(y_test, y_preds)
# -
# Visualize confusion matrix with pd.crosstab()
pd.crosstab(y_test,
y_preds,
rownames=["Actual Labels"],
colnames=["Predicted Labels"])
22 + 7 + 8 + 24
len(X_test)
# +
# # How install a conda package into the current envrionment from a Jupyter Notebook
# import sys
# # !conda install --yes --prefix {sys.prefix} seaborn
# +
# Make our confusion matrix more visual with Seaborn's heatmap()
import seaborn as sns
# Set the font scale
sns.set(font_scale=1.5)
# Create a confusion matrix
conf_mat = confusion_matrix(y_test, y_preds)
# Plot it using Seaborn
sns.heatmap(conf_mat);
# -
# **Note:** In the original notebook, the function below had the `"True label"` as the x-axis label and the `"Predicted label"` as the y-axis label. But due to the way [`confusion_matrix()`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html) outputs values, these should be swapped around. The code below has been corrected.
# +
def plot_conf_mat(conf_mat):
    """
    Plot a confusion matrix using Seaborn's heatmap().

    Parameters:
        conf_mat: 2D array as returned by sklearn.metrics.confusion_matrix(),
            with rows = true labels and columns = predicted labels.
    """
    fig, ax = plt.subplots(figsize=(3,3))
    ax = sns.heatmap(conf_mat,
                     annot=True, # Annotate the boxes with conf_mat info
                     cbar=False)
    # Axis labels follow confusion_matrix() layout: x = predicted, y = true
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
    # Fix the broken annotations (this happened in Matplotlib 3.1.1):
    # widen the y-limits by half a cell so the edge rows are not clipped
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top-0.5);
plot_conf_mat(conf_mat)
# +
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(clf, X, y)
# -
# **Classification Report**
# +
from sklearn.metrics import classification_report
print(classification_report(y_test, y_preds))
# +
# Where precision and recall become valuable
disease_true = np.zeros(10000)
disease_true[0] = 1 # only one positive case
disease_preds = np.zeros(10000) # model predicts every case as 0
pd.DataFrame(classification_report(disease_true,
disease_preds,
output_dict=True))
# -
# To summarize classification metrics:
#
# * **Accuracy** is a good measure to start with if all classes are balanced (e.g. same amount of samples which are labelled with 0 or 1).
# * **Precision** and **recall** become more important when classes are imbalanced.
# * If false positive predictions are worse than false negatives, aim for higher precision.
# * If false negative predictions are worse than false positives, aim for higher recall.
# * **F1-score** is a combination of precision and recall.
# ### 4.2.2 Regression model evaluation metrics
#
# Model evaluation metrics documentation - https://scikit-learn.org/stable/modules/model_evaluation.html
#
# 1. R^2 (pronounced r-squared) or coefficient of determination.
# 2. Mean absolute error (MAE)
# 3. Mean squared error (MSE)
#
# **R^2**
#
# What R-squared does: Compares your models predictions to the mean of the targets. Values can range from negative infinity (a very poor model) to 1. For example, if all your model does is predict the mean of the targets, it's R^2 value would be 0. And if your model perfectly predicts a range of numbers it's R^2 value would be 1.
# +
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestRegressor(n_estimators=100)
model.fit(X_train, y_train);
# -
model.score(X_test, y_test)
# +
from sklearn.metrics import r2_score
# Fill an array with y_test mean
y_test_mean = np.full(len(y_test), y_test.mean())
# -
y_test.mean()
# Model only predicting the mean gets an R^2 score of 0
r2_score(y_test, y_test_mean)
# Model predicting perfectly the correct values gets an R^2 score of 1
r2_score(y_test, y_test)
# **Mean absolute error (MAE)**
#
# MAE is the average of the absolute differences between predictions and actual values. It gives you an idea of how wrong your model's predictions are.
# +
# Mean absolute error
from sklearn.metrics import mean_absolute_error
y_preds = model.predict(X_test)
mae = mean_absolute_error(y_test, y_preds)
mae
# -
df = pd.DataFrame(data={"actual values": y_test,
"predicted values": y_preds})
df["differences"] = df["predicted values"] - df["actual values"]
df
# **Mean squared error (MSE)**
# +
# Mean squared error
from sklearn.metrics import mean_squared_error
y_preds = model.predict(X_test)
mse = mean_squared_error(y_test, y_preds)
mse
# -
# Calculate MSE by hand
squared = np.square(df["differences"])
squared.mean()
# ### 4.2.3 Finally using the `scoring` parameter
#
#
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
clf = RandomForestClassifier(n_estimators=100)
# -
np.random.seed(42)
cv_acc = cross_val_score(clf, X, y, cv=5, scoring=None)
cv_acc
# Cross-validated accuracy
print(f'The cross-validated accuracy is: {np.mean(cv_acc)*100:.2f}%')
np.random.seed(42)
cv_acc = cross_val_score(clf, X, y, cv=5, scoring="accuracy")
print(f'The cross-validated accuracy is: {np.mean(cv_acc)*100:.2f}%')
# Precision
cv_precision = cross_val_score(clf, X, y, cv=5, scoring="precision")
np.mean(cv_precision)
# Recall
cv_recall = cross_val_score(clf, X, y, cv=5, scoring="recall")
np.mean(cv_recall)
cv_f1 = cross_val_score(clf, X, y, cv=5, scoring="f1")
np.mean(cv_f1)
# How about our regression model?
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
np.random.seed(42)
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
model = RandomForestRegressor(n_estimators=100)
# -
np.random.seed(42)
cv_r2 = cross_val_score(model, X, y, cv=5, scoring=None)
np.mean(cv_r2)
np.random.seed(42)
cv_r2 = cross_val_score(model, X, y, cv=5, scoring="r2")
cv_r2
# Mean absolute error
cv_mae = cross_val_score(model, X, y, cv=5, scoring="neg_mean_absolute_error")
cv_mae
# Mean squared error
cv_mse = cross_val_score(model, X, y, cv=5, scoring="neg_mean_squared_error")
np.mean(cv_mse)
# ### 4.3 Using different evaluation metrics as Scikit-Learn functions
#
# **Classification evaluation functions**
# +
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
np.random.seed(42)
X = heart_disease.drop("target", axis=1)
y = heart_disease["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
# Make some predictions
y_preds = clf.predict(X_test)
# Evaluate the classifier
print("Classifier metrics on the test set")
print(f"Accuracy: {accuracy_score(y_test, y_preds)*100:.2f}%")
print(f"Precision: {precision_score(y_test, y_preds)}")
print(f"Recall: {recall_score(y_test, y_preds)}")
print(f"F1: {f1_score(y_test, y_preds)}")
# -
# **Regression evaluation functions**
# +
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
np.random.seed(42)
X = boston_df.drop("target", axis=1)
y = boston_df["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = RandomForestRegressor(n_estimators=100)
model.fit(X_train, y_train)
# Make predictions using our regression model
y_preds = model.predict(X_test)
# Evaluate the regression model
print("Regression model metrics on the test set")
print(f"R^2: {r2_score(y_test, y_preds)}")
print(f"MAE: {mean_absolute_error(y_test, y_preds)}")
print(f"MSE: {mean_squared_error(y_test, y_preds)}")
# -
# ## 5. Improving a model
#
# First predictions = baseline predictions.
# First model = baseline model.
#
# From a data perspective:
# * Could we collect more data? (generally, the more data, the better)
# * Could we improve our data?
#
# From a model perspective:
# * Is there a better model we could use?
# * Could we improve the current model?
#
# Hyperparameters vs. Parameters
# * Parameters = model find these patterns in data
# * Hyperparameters = settings on a model you can adjust to (potentially) improve its ability to find patterns
#
# Three ways to adjust hyperparameters:
# 1. By hand
# 2. Randomly with RandomSearchCV
# 3. Exhaustively with GridSearchCV
# +
from sklearn.ensemble import RandomForestClassifier

# The keyword argument is n_estimators (plural); the original passed
# n_estimator=100, which raises a TypeError in scikit-learn.
clf = RandomForestClassifier(n_estimators=100)
# -
clf.get_params()
# ### 5.1 Tuning hyperparameters by hand
#
# Let's make 3 sets, training, validation and test.
clf.get_params()
# We're going to try and adjust:
#
# * `max_depth`
# * `max_features`
# * `min_samples_leaf`
# * `min_samples_split`
# * `n_estimators`
def evaluate_preds(y_true, y_preds):
    """
    Compare truth labels against predicted labels for a classification
    problem, print each metric, and return them rounded to 2 decimals.

    Parameters:
        y_true: Ground-truth class labels.
        y_preds: Predicted class labels.

    Returns:
        dict with keys "accuracy", "precision", "recall", "f1".
    """
    # Compute every metric up front in a single mapping
    scores = {
        "accuracy": accuracy_score(y_true, y_preds),
        "precision": precision_score(y_true, y_preds),
        "recall": recall_score(y_true, y_preds),
        "f1": f1_score(y_true, y_preds),
    }
    # Report each metric to stdout (accuracy as a percentage)
    print(f"Acc: {scores['accuracy'] * 100:.2f}%")
    print(f"Precision: {scores['precision']:.2f}")
    print(f"Recall: {scores['recall']:.2f}")
    print(f"F1 score: {scores['f1']:.2f}")
    # Return the rounded metrics, preserving insertion order
    return {name: round(value, 2) for name, value in scores.items()}
# +
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
# Shuffle the data
heart_disease_shuffled = heart_disease.sample(frac=1)
# Split into X & y
X = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
# Split the data into train, validation & test sets
train_split = round(0.7 * len(heart_disease_shuffled)) # 70% of data
valid_split = round(train_split + 0.15 * len(heart_disease_shuffled)) # 15% of data
X_train, y_train = X[:train_split], y[:train_split]
X_valid, y_valid = X[train_split:valid_split], y[train_split:valid_split]
# Bug fix: the test labels must come from the same tail slice as X_test.
# The original used y[:valid_split], which pairs X_test with the wrong
# (and wrongly sized) set of labels.
X_test, y_test = X[valid_split:], y[valid_split:]
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
# Make baseline predictions
y_preds = clf.predict(X_valid)
# Evaluate the classifier on validation set
baseline_metrics = evaluate_preds(y_valid, y_preds)
baseline_metrics
# +
np.random.seed(42)
# Create a second classifier with different hyperparameters
clf_2 = RandomForestClassifier(n_estimators=100)
clf_2.fit(X_train, y_train)
# Make predictions with different hyperparameters
y_preds_2 = clf_2.predict(X_valid)
# Evaluate the 2nd classifier
clf_2_metrics = evaluate_preds(y_valid, y_preds_2)
# -
# ### 5.2 Hyperparameter tuning with RandomizedSearchCV
# +
from sklearn.model_selection import RandomizedSearchCV
grid = {"n_estimators": [10, 100, 200, 500, 1000, 1200],
"max_depth": [None, 5, 10, 20, 30],
"max_features": ["auto", "sqrt"],
"min_samples_split": [2, 4, 6],
"min_samples_leaf": [1, 2, 4]}
np.random.seed(42)
# Split into X & y
X = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Instantiate RandomForestClassifier
clf = RandomForestClassifier(n_jobs=1)
# Setup RandomizedSearchCV
rs_clf = RandomizedSearchCV(estimator=clf,
param_distributions=grid,
n_iter=10, # number of models to try
cv=5,
verbose=2)
# Fit the RandomizedSearchCV version of clf
rs_clf.fit(X_train, y_train);
# -
rs_clf.best_params_
# +
# Make predictions with the best hyperparameters
rs_y_preds = rs_clf.predict(X_test)
# Evaluate the predictions
rs_metrics = evaluate_preds(y_test, rs_y_preds)
# -
# ### 5.3 Hyperparameter tuning with GridSearchCV
grid
grid_2 = {'n_estimators': [100, 200, 500],
'max_depth': [None],
'max_features': ['auto', 'sqrt'],
'min_samples_split': [6],
'min_samples_leaf': [1, 2]}
# +
from sklearn.model_selection import GridSearchCV, train_test_split
np.random.seed(42)
# Split into X & y
X = heart_disease_shuffled.drop("target", axis=1)
y = heart_disease_shuffled["target"]
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# # Instantiate RandomForestClassifier
# clf = RandomForestClassifier(n_jobs=1)
# # Setup GridSearchCV
# gs_clf = GridSearchCV(estimator=clf,
# param_grid=grid_2,
# cv=5,
# verbose=2)
# Fit the GridSearchCV version of clf
#gs_clf.fit(X_train, y_train);
# -
gs_clf.best_params_
# +
gs_y_preds = gs_clf.predict(X_test)
# evaluate the predictions
gs_metrics = evaluate_preds(y_test, gs_y_preds)
# -
# Let's compare our different models metrics.
# +
compare_metrics = pd.DataFrame({"baseline": baseline_metrics,
"clf_2": clf_2_metrics,
"random search": rs_metrics,
"grid search": gs_metrics})
compare_metrics.plot.bar(figsize=(10, 8));
# -
# ## 6. Saving and loading trained machine learning models
#
# Two ways to save and load machine learning models:
# 1. With Python's `pickle` module
# 2. With the `joblib` module
#
# **Pickle**
# +
import pickle
# Save an existing model to file
pickle.dump(gs_clf, open("gs_random_random_forest_model_1.pkl", "wb"))
# -
# Load a saved model
loaded_pickle_model = pickle.load(open("gs_random_random_forest_model_1.pkl", "rb"))
# Make some predictions
pickle_y_preds = loaded_pickle_model.predict(X_test)
evaluate_preds(y_test, pickle_y_preds)
# **Joblib**
# +
from joblib import dump, load
# Save model to file
dump(gs_clf, filename="gs_random_forest_model_1.joblib")
# -
# Import a saved joblib model
loaded_joblib_model = load(filename="gs_random_forest_model_1.joblib")
# Make and evaluate joblib predictions
joblib_y_preds = loaded_joblib_model.predict(X_test)
evaluate_preds(y_test, joblib_y_preds)
# ## 7. Putting it all together!
data = pd.read_csv("data/car-sales-extended-missing-data.csv")
data
data.dtypes
data.isna().sum()
# Steps we want to do (all in one cell):
# 1. Fill missing data
# 2. Convert data to numbers
# 3. Build a model on the data
# +
# Getting data ready
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
# Modelling
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
# Setup random seed
import numpy as np
np.random.seed(42)
# Import data and drop rows with missing labels
data = pd.read_csv("data/car-sales-extended-missing-data.csv")
data.dropna(subset=["Price"], inplace=True)
# Define different features and transformer pipeline
categorical_features = ["Make", "Colour"]
categorical_transformer = Pipeline(steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("onehot", OneHotEncoder(handle_unknown="ignore"))])
door_feature = ["Doors"]
door_transformer = Pipeline(steps=[
("imputer", SimpleImputer(strategy="constant", fill_value=4))
])
numeric_features = ["Odometer (KM)"]
numeric_transformer = Pipeline(steps=[
("imputer", SimpleImputer(strategy="mean"))
])
# Setup preprocessing steps (fill missing values, then convert to numbers)
preprocessor = ColumnTransformer(
transformers=[
("cat", categorical_transformer, categorical_features),
("door", door_transformer, door_feature),
("num", numeric_transformer, numeric_features)
])
# Creating a preprocessing and modelling pipeline
model = Pipeline(steps=[("preprocessor", preprocessor),
("model", RandomForestRegressor())])
# Split data
X = data.drop("Price", axis=1)
y = data["Price"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Fit and score the model
model.fit(X_train, y_train)
model.score(X_test, y_test)
# -
# It's also possible to use `GridSearchCV` or `RandomizedSearchCV` with our `Pipeline`.
# +
# Use GridSearchCV with our regression Pipeline
from sklearn.model_selection import GridSearchCV
pipe_grid = {
"preprocessor__num__imputer__strategy": ["mean", "median"],
"model__n_estimators": [100, 1000],
"model__max_depth": [None, 5],
"model__max_features": ["auto"],
"model__min_samples_split": [2, 4]
}
gs_model = GridSearchCV(model, pipe_grid, cv=5, verbose=2)
gs_model.fit(X_train, y_train)
# -
gs_model.score(X_test, y_test)
what_were_covering
| Data Science Resources/zero-to-mastery-ml-master/section-2-data-science-and-ml-tools/introduction-to-scikit-learn-video-OLD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 7장 시계열 데이터를 다뤄보자
# 데이터를 다룬다는 말 속에는 다양한 분야와 응용을 담고 있습니다. 그런데 데이터가 시간의 흐름에 따라 변화하는 추이가 있는 데이터, 대표적으로 웹 트래픽이나 주식 같은 데이터를 예측할 해야 할 때가 있습니다.<br>
# 시계열(Time Series)분석이라는 것은 통계적으로 어렵고 복잡한 작업입니다. 원 데이터의 안정성(stationary)을 판정하고, 안정한 형태로 변환하고, 예측 모델을 선정하고 검증하는 과정이 통계학의 깊은 지식을 요구합니다.<br>
# 그러나 통계 전문가에게 맡기기 전에 간단히 데이터 예측하는 가벼운 느낌이라면 이 책에서는 유용한 도구를 하나 소개할까 합니다. 페이스북에서 만든 fbprophet이라는 모듈입니다. Fbprophet을 사용하기 위해서는 몇 가지 절차를 거쳐야 합니다. 먼저 윈도우 유저들은 Visual C++ Build Tools를 설치해야 합니다. 맥유저는 이 절차가 필요 없습니다. 그리고 터미널에서 pip install pystan과 pip install prothet을 수행합니다. 이제 시계열 예측을 수행해 보도록 하겠습니다.
# ## 7-1 Numpy의 polyfit으로 회귀(regression) 분석하기
# +
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from fbprophet import Prophet
from datetime import datetime
# -
# 이번 장에서 사용할 모듈을 미리 import합니다. 특히 import pandas_datareader.data as wed에서 사용하는 pandas_datareader는 터미널에서 pip install pandas_datareader로 설치해야 합니다.
# +
import platform
from matplotlib import font_manager, rc
plt.rcParams['axes.unicode_minus'] = False
if platform.system() == 'Darwin':
rc('font', family='AppleGothic')
print('Mac version')
elif platform.system() == 'Windows':
path = "c:/Windows/Fonts/malgun.ttf"
font_name = font_manager.FontProperties(fname=path).get_name()
rc('font', family=font_name)
print('Windows version')
elif platform.system() == 'Linux':
path = "/usr/share/fonts/NanumFont/NanumGothicBold.ttf"
font_name = font_manager.FontProperties(fname=path).get_name()
plt.rc('font', family=font_name)
print('Linux version')
else:
print('Unknown system... sorry~~~~')
# -
# 매 장마다 등장하는 matplotlib에서 한글 폰트 문제에 대한 코드도 실행합니다.
pinkwink_wed = pd.read_csv('data/08. PinkWink Web Traffic.csv',
encoding='utf-8', thousands=',',
names = ['date','hit'], index_col=0)
pinkwink_wed = pinkwink_wed[pinkwink_wed['hit'].notnull()]
pinkwink_wed.head()
# https://goo.gl/5wWzLL에서 08. PinkWink Web Traffic.csv라는 데이터를 받아서 data 폴더에 저장합니다.<br>
# 책 전반에 걸쳐 나타나짐만 저는 블로그 하나를 아주 오래 전부터 운용하고 있습니다. 처음에는 당시 시간 강의하던 수업 자료를 올릴 곳이 없어서 시작한 것이었는데 벌써 10년이 넘었습니다. 비록 공학도의 블로그이고 워낙 기초나 튜토리얼과 같은 쉬운 주제만 다루는 데다, 이것저것 잡다하게 다루다보니 딱히 인기는 없지만 나름대로 제가 세상과 소통하는 하나의 도구였습니다. 이번 실습 재료는 제 블로그의 웹 트래픽을 대상으로 하고 있습니다. 이 책이 도움이 되었다면 제 블로그에 오셔서 인사말 한 마니 부탁드립니다.
pinkwink_wed['hit'].plot(figsize=(12,4), grid=True);
# 단순하게 2016년 7월1일부터 2017년 6월16일까지 유입량을 그려봅니다. 어떤 주기성이 있을 것 같은데 시간 순으로 되어 있는 데이터를 그냥 봐서는 정확하게 잘 모르겠습니다. 물론 유심히 관찰하면 2016년 11월28일부터 2017년4월27일 정도로 약 6개월의 주기성 정도는 육안으로 확인되기도 합니다. 그래도 좀 더 정확한 주기성이 있다면 확인하고 싶습니다.
# +
time = np.arange(0,len(pinkwink_wed))
traffic = pinkwink_wed['hit'].values
fx = np.linspace(0, time[-1], 1000)
# -
# 먼저 시간축(time)을 만들고 웹 트래픽의 자료를 traffic 변수에 저장합니다. 지금 우리는 코드[4]의 결과로 보이는 그래프를 설명할 간단한 함수를 찾으려고 합니다. 그게 직선일 수도 있고 혹은 다항식으로 표현되는 곳선일 수도 있습니다. 어쨌든 현재 데이터를 간단함 도델로 표현하고 싶다는 뜻입니다. 그런 작업을 보통 회귀(regression)라고 합니다. 그래서 모델읠 1차,2차,3차, 15차 다항식으로 표현하고 그 결과를 확인하려고 합니다.
def error(f, x, y):
    """
    Return the root-mean-square error between the model f evaluated
    at the sample points x and the observed targets y.
    """
    residuals = f(x) - y
    return np.sqrt(np.mean(residuals ** 2))
# 어떤 데이터를 어떤 모델로 표현하려고 하면 그 모델의 적합성을 확인하는 과정이 필요합니다. 그러기 위해서는 참 값과 비교해서 에러(error)를 계산해야 합니다. 그래서 코드[6]에서처럼 에러 함수를 정의했습니다.
# +
fp1 = np.polyfit(time, traffic, 1)
f1 = np.poly1d(fp1)
f2p = np.polyfit(time, traffic, 2)
f2 = np.poly1d(f2p)
f3p = np.polyfit(time, traffic, 3)
f3 = np.poly1d(f3p)
f15p = np.polyfit(time, traffic, 15)
f15 = np.poly1d(f15p)
print(error(f1, time, traffic))
print(error(f2, time, traffic))
print(error(f3, time, traffic))
print(error(f15, time, traffic))
# -
# 이제 책 초반 CCTV분석에서 사용했던 polyfit과 polyfit를 사용해서 함수로 표현할 수 있습니다. 그렇게 해서 1차,2차,3차,15차 함수로 표현해서 확인했더니 1,2,3차는 에러가 비슷합니다. 우선 그 결과를 그래프로 한번 확인해 보겠습니다.
# +
plt.figure(figsize=(10,6))
plt.scatter(time, traffic, s=10)
plt.plot(fx, f1(fx), lw=4, label='f1')
plt.plot(fx, f2(fx), lw=4, label='f2')
plt.plot(fx, f3(fx), lw=4, label='f3')
plt.plot(fx, f15(fx), lw=4, label='f15')
plt.grid(True, linestyle='-', color='0.75')
plt.legend(loc=2)
plt.show()
# -
# 위 결과에서 데이터를 1,2,3,15차 함수로 표현한 결과를 봅시다. 1차,2차,3차가 정의된 함수에서 에러를 확인해 보니 왜 큰 차이가 없는지 알겠습니다. 결국 2차나 3차로 표현하려면 그냥 1차로 표현하는 것이 차라리 나아 보입니다. 그렇다고 15차 함수를 사용해서 표현하는 것은 과적합(over-fitting)일 수 있습니다. 어떤 모델을 선택할지는 결국 분석하는 사람의 몫입니다. 그러나 위 그림만 가지고는 어떤 결론을 내려야 할지 잘 모르겠습니다. 우리는 그 뒷부분, 즉 앞으로의 데이터도 예측하고 싶으니까요.
# ## 7-2 Prophet 모듈을 이용한 forecast예측
# Prophet 모듈은 사용법이 아주 간단합니다. 코드[3]에서 받은 pinkwink_wed 변수에서 날짜(index)와 방문수(hit)만 따로 저장합니다.
# +
df = pd.DataFrame({'ds':pinkwink_wed.index, 'y':pinkwink_wed['hit']})
df.reset_index(inplace=True)
df['ds'] = pd.to_datetime(df['ds'], format="%y. %m. %d.")
del df['date']
m = Prophet(yearly_seasonality=True)
m.fit(df);
# -
future = m.make_future_dataframe(periods=60)
future.tail()
# 이제 이후 60일간의 데이터를 예측하고 싶다고 make_future_dataframe 명령하면 됩니다.
forecast = m.predict(future)
forecast[['ds','yhat','yhat_lower','yhat_upper']].tail()
# 그리고 예측한 데이터를 forecast 변수에 저장해둡니다.
m.plot(forecast);
# 우리가 받은 2017년 6월 말까지의 데이터 이후 약 2개월(60일)의 예측 결과가 코드[12]의 결과에 나타나 있습니다. 단순히 다항식으로 경향을 파악하는 것보다는 뭔가 나아 보입니다.
m.plot_components(forecast);
# 그리고 plot_components 명령으로 몇몇 재미난 결과를 얻을 수 있습니다.<br>
# 전체적인 경향은 직선으로 표현됩니다.<br>
# 그리고 제 블로그는 월,화,수요일 방문자가 많다는 것을 알 수 있습니다. 토요일과 일요일은 아주 낮습니다.
# 그리고 마지막으로 재미있는 것이 제 블로그는 3월부터 상승해서 7월쯤 내려가기 시작합니다. 그 중간 5월에 한 번 숨고르기를 하고 다시 9월경 상승해서 12월경 내려옵니다. 이는 대학교 개강 후 중간고사, 기말고사, 방학과 주기가 비슷합니다. 아무래도 공대에서 좋아하는 포스팅이 많아서인가 봅니다 <br>
# Prophet을 사용해서 간략하게 제 블로그를 대상으로 흥미로운 결과를 얻었습니다. 앞으로의 예측부터 주별,연간 데이터의 현황도 알 수 있게 되었습니다. 비록 빈약하긴 하지만 이러한 과정을 Seasonal 시계열 데이터 분석(Seasonal Time Series Data Anlysis)이라고 합니다.
# ## 7-3 Seasonal 시계열 분석으로 주식 데이터 분석하기
# Pandas는 구글이 제공하는 주가 정보를 받아올 수 있는 기능이 있습니다. 바로 DataReader 함수입니다. 종목 코드를 알면 kospi 주가 정보도 받아올 수 있습니다.
# ## 생략...
# ## 7-4 Growth Model
# 이번 절부터는 prophet의 튜토리얼에 나오는 예제입니다. 흐름상 도움될 듯하여 몇 줄 안 되지만 실행해보겠습니다.
df = pd.read_csv('data/08. example_wp_R.csv')
df['y'] = np.log(df['y'])
df['cap'] = 8.5
m = Prophet(growth='logistic')
m.fit(df)
# 튜토리얼이 배포하는 데이터입니다. 이데이터도 제일 마지막 줄을 삭제해야 잘 동작되기 때문에 [32]번 줄을 빼고 Github에 올렸습니다. 이 데이터는 주기성을 띠면서 점점 성장하는 모습의 데이터입니다. 그 모양새가 마치 성장(Growth)하면서 또 로그함수(logistic)의 모양과 같습니다.
future = m.make_future_dataframe(periods=1826)
future['cap'] = 8.5
fcst = m.predict(future)
m.plot(fcst);
# 이런 종류의 데이터를 예측하는 것도 가능하다는 것을 알 수 있습니다. 거기에 역시 components를 조사하도록 하겠습니다.
forecast = m.predict(future)
# A single call renders the components figure; the original accidentally
# duplicated the call on one line.
m.plot_components(forecast);
# 전체적인 경항(trend)이 나타납니다.<br>
# 그리고 주간 분석도 나타나고 있습니다<br>
# 그리고 연간 분석의 결과도 나타납니다.<br>
# 이번 장은 모듈을 소개하고 간단히 모듈의 사용만으로 에측(forecast)이라는 어려운 과정을 손쉽게 확인했습니다. 이 책에서 가장 작은 분량이지만 쓰임새는 작지 않을 겁니다.
# 출처 : "파이썬으로 데이터 주무르기"
| chapter-7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Catalysis-hub database query
#
# This notebook performs a query on the [Catalysis-hub](https://catalysis-hub.org) database to acquire a dataset of heterogeneous catalysis reactions. The data comprises keyword features, such as the names of the reactants and products, the chemical composition, reaction and activation energies, as well as the complete atomic structures of the reaction configurations.
# +
# Imports
import numpy as np
import requests
import json
import io
import os
import ase.io
from ase.io import read
# Define the Catalysis-hub API path and the project root directory
GRAPHQL = "http://api.catalysis-hub.org/graphql"
ROOT_DIR = os.path.join(os.getcwd(), os.pardir)
# +
# Define the keyvalues used in the query
KEY_VALUES = [
"chemicalComposition",
"surfaceComposition",
"facet",
"sites",
"coverages",
"reactants",
"products",
"Equation",
"reactionEnergy",
"activationEnergy",
"dftCode",
"dftFunctional",
"username",
"pubId",
"id",
]
# -
def query_reactions(endcursor):
    """
    Perform one batch query against the Catalysis-hub GraphQL API.

    The database is queried in batches of 50 reactions, and all reactions
    with an activation energy under 100 eV are selected.

    Parameters:
        endcursor: Pagination cursor identifying the batch to fetch
            ("" requests the first batch).

    Returns:
        data: The acquired data on each batch. On success this is the
            parsed "data" dictionary of the JSON response; if parsing
            fails, the raw requests.Response object is returned instead.
    """
    # Build the GraphQL query string piece by piece
    query_string = "{"
    query_string += f'reactions(first: 50, after: "{endcursor}"'
    query_string += ', activationEnergy: 100, op: "<"'
    query_string += """) {
    totalCount
    pageInfo {
      endCursor
    }
    edges {
      node {"""
    # Add the keywords into the query string, one per line
    for key_value in KEY_VALUES:
        query_string += str("\n" + " "*6 + key_value)
    query_string += """
        systems {
          id
          Trajdata
          energy
          InputFile(format: "xyz")
          keyValuePairs
        }
      }
    }
  }}"""
    data = requests.post(GRAPHQL, {"query": query_string})
    try:
        # Read the acquired data into a dictionary
        data = data.json()["data"]
    except Exception as e:
        # NOTE(review): broad catch — on any parse failure the raw
        # response object is returned; callers must be prepared for that.
        print(e)
        print("Error: Something went wrong. Please check your query string.")
    return data
def parse_reaction(reaction):
"""
The function parses a single reaction. All the missing keyvalues are
labeled as 'None', and the structural data is saved separately.
Parameters:
reaction: A dictionary containing the data for a single reaction.
Returns:
reaction_dict: A parsed data dictionary.
"""
reaction_dict = {}
key_value_pairs = {}
# Go through the keyvalues
for key_value in KEY_VALUES:
try:
key_value_pairs[key_value] = reaction[key_value]
except ValueError:
key_value_pairs[key_value] = "None"
if key_value_pairs["coverages"] is None:
key_value_pairs["coverages"] = "None"
if key_value_pairs["sites"] is None:
key_value_pairs["sites"] = "None"
reaction_dict["key_value_pairs"] = key_value_pairs
# Go through the structural data
structures = []
for structure in reaction["systems"]:
struct = {}
struct["energy"] = structure["energy"]
struct["InputFile"] = structure["InputFile"]
struct["keyValuePairs"] = structure["keyValuePairs"]
structures.append(struct)
reaction_dict["structures"] = structures
return reaction_dict
# +
# Run queries and save results to file
reaction_list = {}
N_fetched = 0
endcursor = ""
n = 0
totalcount = 100000  # placeholder until the first response reports the real count
while n * 50 + 1 < totalcount:
    data = query_reactions(endcursor)
    # Parse every reaction in this batch, keyed by its unique id
    for edge in data["reactions"]["edges"]:
        reaction = edge["node"]
        reaction_list[reaction["id"]] = parse_reaction(reaction)
        # Count individual reactions actually fetched (the original code
        # added the running total once per batch, overcounting massively)
        N_fetched += 1
    endcursor = data["reactions"]["pageInfo"]["endCursor"]
    totalcount = data["reactions"]["totalCount"]
    # Clamp the progress counter to the total on the final batch
    count = min(50 * (n + 1), totalcount)
    print(f"Fetched reactions {50*n+1}-{count}/{totalcount}")
    n += 1
print("Done!")
# Persist the full dataset as JSON under the project's data directory
with open(f"{ROOT_DIR}/data/reactions_cathub.json", "w") as outfile:
    json.dump(reaction_list, outfile)
# -
| catalysis_hub.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Intro to Deep Learning](https://www.kaggle.com/learn/intro-to-deep-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/dropout-and-batch-normalization).**
#
# ---
#
# # Introduction #
#
# In this exercise, you'll add dropout to the *Spotify* model from Exercise 4 and see how batch normalization can let you successfully train models on difficult datasets.
#
# Run the next cell to get started!
# +
# Setup plotting
import matplotlib.pyplot as plt
# NOTE(review): 'seaborn-whitegrid' was renamed in newer matplotlib
# releases ('seaborn-v0_8-whitegrid'); confirm the installed version.
plt.style.use('seaborn-whitegrid')
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
       titleweight='bold', titlesize=18, titlepad=10)
plt.rc('animation', html='html5')
# Setup feedback system: binder exposes the notebook globals so the
# q_* checkers imported below can inspect objects created here.
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning_intro.ex5 import *
# -
# First load the *Spotify* dataset.
# +
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.model_selection import GroupShuffleSplit
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import callbacks
# Load the Spotify tracks and drop rows with missing values
spotify = pd.read_csv('../input/dl-course-data/spotify.csv')
X = spotify.copy().dropna()
y = X.pop('track_popularity')
artists = X['track_artist']
# Numeric audio features are standardized; the genre is one-hot encoded
features_num = ['danceability', 'energy', 'key', 'loudness', 'mode',
                'speechiness', 'acousticness', 'instrumentalness',
                'liveness', 'valence', 'tempo', 'duration_ms']
features_cat = ['playlist_genre']
preprocessor = make_column_transformer(
    (StandardScaler(), features_num),
    (OneHotEncoder(), features_cat),
)
def group_split(X, y, group, train_size=0.75):
    """Split X/y so all rows of one group (artist) land on one side.

    NOTE(review): GroupShuffleSplit is not seeded here, so the split
    differs between runs -- confirm that is intended.
    """
    splitter = GroupShuffleSplit(train_size=train_size)
    train, test = next(splitter.split(X, y, groups=group))
    return (X.iloc[train], X.iloc[test], y.iloc[train], y.iloc[test])
X_train, X_valid, y_train, y_valid = group_split(X, y, artists)
X_train = preprocessor.fit_transform(X_train)
X_valid = preprocessor.transform(X_valid)
# Popularity is on a 0-100 scale; rescale the regression target to [0, 1]
y_train = y_train / 100
y_valid = y_valid / 100
input_shape = [X_train.shape[1]]
print("Input shape: {}".format(input_shape))
# -
# # 1) Add Dropout to Spotify Model
#
# Here is the last model from Exercise 4. Add two dropout layers, one after the `Dense` layer with 128 units, and one after the `Dense` layer with 64 units. Set the dropout rate on both to `0.3`.
# +
# YOUR CODE HERE: Add two 30% dropout layers, one after 128 and one after 64
# Dropout randomly zeroes 30% of the preceding layer's activations on
# each training step, discouraging co-adaptation and overfitting.
model = keras.Sequential([
    layers.Dense(128, activation='relu', input_shape=input_shape),
    layers.Dropout(0.3),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.3),
    layers.Dense(1)
])
# Check your answer
q_1.check()
# -
# Lines below will give you a hint or solution code
q_1.hint()
q_1.solution()
# Now run this next cell to train the model see the effect of adding dropout.
# Train with MAE loss; verbose=0 keeps the 50-epoch run quiet.
model.compile(
    optimizer='adam',
    loss='mae',
)
history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=512,
    epochs=50,
    verbose=0,
)
# Plot training vs. validation loss and report the best epoch's val loss
history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot()
print("Minimum Validation Loss: {:0.4f}".format(history_df['val_loss'].min()))
# # 2) Evaluate Dropout
#
# Recall from Exercise 4 that this model tended to overfit the data around epoch 5. Did adding dropout seem to help prevent overfitting this time?
# View the solution (Run this cell to receive credit!)
q_2.check()
# Now, we'll switch topics to explore how batch normalization can fix problems in training.
#
# Load the *Concrete* dataset. We won't do any standardization this time. This will make the effect of batch normalization much more apparent.
# +
import pandas as pd
concrete = pd.read_csv('../input/dl-course-data/concrete.csv')
df = concrete.copy()
# 70/30 train/validation split, seeded for reproducibility
df_train = df.sample(frac=0.7, random_state=0)
df_valid = df.drop(df_train.index)
# Target is CompressiveStrength; features are deliberately left
# unstandardized to demonstrate the effect of batch normalization later.
X_train = df_train.drop('CompressiveStrength', axis=1)
X_valid = df_valid.drop('CompressiveStrength', axis=1)
y_train = df_train['CompressiveStrength']
y_valid = df_valid['CompressiveStrength']
input_shape = [X_train.shape[1]]
# -
# Run the following cell to train the network on the unstandardized *Concrete* data.
# +
# Three wide hidden layers, no input standardization or normalization
model = keras.Sequential([
    layers.Dense(512, activation='relu', input_shape=input_shape),
    layers.Dense(512, activation='relu'),
    layers.Dense(512, activation='relu'),
    layers.Dense(1),
])
model.compile(
    optimizer='sgd', # SGD is more sensitive to differences of scale
    loss='mae',
    metrics=['mae'],
)
history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=64,
    epochs=100,
    verbose=0,
)
# With unscaled inputs this run usually diverges -- that is the point
# of the exercise, illustrated by the (often blank) loss plot.
history_df = pd.DataFrame(history.history)
history_df.loc[0:, ['loss', 'val_loss']].plot()
print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min()))
# -
# Did you end up with a blank graph? Trying to train this network on this dataset will usually fail. Even when it does converge (due to a lucky weight initialization), it tends to converge to a very large number.
#
# # 3) Add Batch Normalization Layers
#
# Batch normalization can help correct problems like this.
#
# Add four `BatchNormalization` layers, one before each of the dense layers. (Remember to move the `input_shape` argument to the new first layer.)
# +
# YOUR CODE HERE: Add a BatchNormalization layer before each Dense layer
# Batch normalization rescales each batch's activations, compensating
# for the unstandardized inputs.
model = keras.Sequential([
    layers.BatchNormalization(input_shape=input_shape),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(1),
])
# Check your answer
q_3.check()
# -
# Lines below will give you a hint or solution code
q_3.hint()
q_3.solution()
# Run the next cell to see if batch normalization will let us train the model.
# +
# Same SGD/MAE setup as before; batch normalization should now let the
# run converge on the unstandardized data.
model.compile(
    optimizer='sgd',
    loss='mae',
    metrics=['mae'],
)
EPOCHS = 100
history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=64,
    epochs=EPOCHS,
    verbose=0,
)
history_df = pd.DataFrame(history.history)
history_df.loc[0:, ['loss', 'val_loss']].plot()
print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min()))
# -
# # 4) Evaluate Batch Normalization
#
# Did adding batch normalization help?
# View the solution (Run this cell to receive credit!)
q_4.check()
# # Keep Going #
#
# [**Create neural networks**](https://www.kaggle.com/ryanholbrook/binary-classification) for binary classification.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/191966) to chat with other Learners.*
| Platforms/Kaggle/Courses/Intro_to_Deep_Learning/5.Dropout_and_Batch_Normalization/exercise-dropout-and-batch-normalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Traffic Signs Adversarial Detector with Kfserving
#
# 
#
# Prequisites:
#
# * Running cluster with kfserving installed and authenticated for use with `kubectl`
# * Istio with Istio Gateway exposed on a LoadBalancer
# * Knative eventing installed
# * Download the Traffic Signs model: run `make model_signs` (Requires `gsutils`)
# * Pip install the alibi-detect library.
# ## Setup Resources
# Enabled eventing on default namespace. This will activate a default Knative Broker.
# !kubectl label namespace default knative-eventing-injection=enabled
# Create a Knative service to dump events it receives. This will be the example final sink for adversarial events.
# !pygmentize message-dumper.yaml
# !kubectl apply -f message-dumper.yaml
# Create the Kfserving image classification model for Cifar10. We add in a `logger` for requests - the default destination is the namespace Knative Broker.
# !pygmentize signs.yaml
# !kubectl apply -f signs.yaml
# Create the pretrained Traffic Signs Adversarial detector. We forward replies to the message-dumper we started.
# !pygmentize signsad.yaml
# !kubectl apply -f signsad.yaml
# Create a Knative trigger to forward logging events to our Adversarial Detector.
# !pygmentize trigger.yaml
# !kubectl apply -f trigger.yaml
# Get the IP address of the Istio Ingress Gateway. This assumes you have installed istio with a LoadBalancer.
# IPython "var = !cmd" syntax captures shell output as a list of lines;
# element [0] is the first output line (the ingress gateway IP here).
CLUSTER_IPS=!(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
CLUSTER_IP=CLUSTER_IPS[0]
print(CLUSTER_IP)
# Hostnames sent as the HTTP Host header when calling through the gateway
SERVICE_HOSTNAMES=!(kubectl get inferenceservice signs -o jsonpath='{.status.url}' | cut -d "/" -f 3)
SERVICE_HOSTNAME_SIGNS=SERVICE_HOSTNAMES[0]
print(SERVICE_HOSTNAME_SIGNS)
SERVICE_HOSTNAMES=!(kubectl get ksvc ad-signs -o jsonpath='{.status.url}' | cut -d "/" -f 3)
SERVICE_HOSTNAME_SIGNSAD=SERVICE_HOSTNAMES[0]
print(SERVICE_HOSTNAME_SIGNSAD)
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
from PIL import Image
import os
def load_signs(train_folder="./traffic_data/train/"):
    """Load the traffic-signs training images from disk.

    Images are read from ``train_folder/<class_id>/``, resized to
    30x30 RGB, shuffled with a fixed seed, and split 80/20 into
    train/validation sets.

    Parameters:
        train_folder: Root directory with one sub-folder per class id.

    Returns:
        (train, test): Two (images, labels) tuples of numpy arrays.
    """
    data = []
    labels = []
    height = 30
    width = 30
    classes = 43
    for class_id in range(classes):
        path = train_folder + "{0}/".format(class_id)
        for filename in os.listdir(path):
            image = cv2.imread(path + filename)
            if image is None:
                # cv2.imread returns None for unreadable files; the old
                # code silently printed a blank line here -- report it.
                print("Skipping unreadable file: " + path + filename)
                continue
            image_from_array = Image.fromarray(image, 'RGB')
            size_image = image_from_array.resize((height, width))
            data.append(np.array(size_image))
            labels.append(class_id)
    cells = np.array(data)
    labels = np.array(labels)
    # Shuffle deterministically so the train/val split is reproducible
    order = np.arange(cells.shape[0])
    np.random.seed(43)
    np.random.shuffle(order)
    cells = cells[order]
    labels = labels[order]
    # First 20% of the shuffled data becomes the validation split
    split = int(0.2 * len(labels))
    X_train, X_val = cells[split:], cells[:split]
    y_train, y_val = labels[split:], labels[:split]
    return (X_train, y_train), (X_val, y_val)
# -
# Read the full traffic-sign dataset from disk (slow: decodes every image)
train, test = load_signs()
# +
import matplotlib.pyplot as plt
import tensorflow as tf
tf.keras.backend.clear_session()
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
import requests
X_train, y_train = train
X_test, y_test = test
# Normalize pixel values to [0, 1] and fix the shape to 30x30 RGB
X_train = X_train.reshape(-1, 30, 30, 3).astype('float32') / 255
X_test = X_test.reshape(-1, 30, 30, 3).astype('float32') / 255
# One-hot encode the 43 traffic-sign classes
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# NOTE(review): this 10-entry tuple looks like a CIFAR-10 leftover and
# appears unused below -- the signs dataset has 43 classes.
classes = ('0', '1', '2', '3',
           '4', '5', '6', '7', '8', '9')
img_shape = (30, 30, 3)
def show(Xs):
    """Render every image in Xs as its own matplotlib figure, axes hidden."""
    for img in Xs:
        squeezed = np.squeeze(img)
        plt.imshow(squeezed)
        plt.axis('off')
        plt.show()
meta_folder="./traffic_data/meta/"
def show_prediction(idxs):
    """Display the reference meta image for each predicted class index."""
    for class_idx in idxs.tolist():
        raw = cv2.imread(meta_folder+"{}.png".format(class_idx))
        meta_img = np.array(Image.fromarray(raw, 'RGB'))
        show([meta_img])
def predict(X):
    """Send X to the signs inference service and display predicted classes.

    On a non-200 response the status code is printed instead.
    """
    payload = {
        'instances': X.tolist()
    }
    request_headers = {"Host": SERVICE_HOSTNAME_SIGNS}
    res = requests.post('http://'+CLUSTER_IP+'/v1/models/signs:predict', json=payload, headers=request_headers)
    if res.status_code != 200:
        print("Failed with ",res.status_code)
        return
    show_prediction(np.array(res.json()["predictions"]))
def detect(X):
    """POST X to the adversarial detector and return its parsed response.

    The instance scores in the reply are converted to a numpy array.
    NOTE(review): on a non-200 response this returns [] while the
    success path returns a dict -- the inconsistent return types are
    worth unifying (callers such as plot_instance_score expect a dict).
    """
    formData = {
        'instances': X.tolist()
    }
    # Ask the detector to include per-instance scores in its reply
    headers = {"alibi-detect-return-instance-score":"true"}
    headers["Host"] = SERVICE_HOSTNAME_SIGNSAD
    res = requests.post('http://'+CLUSTER_IP+'/', json=formData, headers=headers)
    if res.status_code == 200:
        ad = res.json()
        ad["data"]["instance_score"] = np.array(ad["data"]["instance_score"])
        return ad
    else:
        print("Failed with ",res.status_code)
        return []
# -
# ## Normal Prediction
# Pick one training image, display it, and ask the model to classify it
idx = 2
X = X_train[idx:idx+1]
show(X)
predict(X)
# Show logs from message-dumper. The last cloud event should show a line like:
#
# ```JSON
# "{\"data\": {\"feature_score\": null, \"instance_score\": null, \"is_adversarial\": [0]}, \"meta\": {\"name\": \"AdversarialVAE\", \"detector_type\": \"offline\", \"data_type\": null}}"
# ```
#
# This shows the last event was not an adversarial attack.
# !kubectl logs $(kubectl get pod -l serving.knative.dev/configuration=message-dumper -o jsonpath='{.items[0].metadata.name}') user-container
# ## Generate adversarial instances
#
# The `cleverhans` adversarial attack methods assume that the model outputs logits, so we will create a modified model by simply removing the softmax output layer:
# Load the saved classifier and strip the softmax layer: cleverhans
# attack methods expect raw logits.
from alibi_detect.utils.saving import load_tf_model
filepath = './model_signs/'
model = load_tf_model(filepath)
model_logits = Model(inputs=model.inputs, outputs=model.layers[-2].output)
# Select observations for which we will create adversarial instances:
ids = np.arange(2,7)
X_to_adv = X_test[ids]
print(X_to_adv.shape)
# Launch adversarial attack. Follow the [Basic Iterative Method (Kurakin et al. 2016)](https://arxiv.org/pdf/1607.02533.pdf) when `rand_init` is set to 0 or the [Madry et al. (2017)](https://arxiv.org/pdf/1706.06083.pdf) method when `rand_minmax` is larger than 0:
# Adversarial attack method. The latest release of the `cleverhans` package does
# not support TensorFlow 2 yet, so we need to install from the master branch:
# pip install git+https://github.com/tensorflow/cleverhans.git#egg=cleverhans
from cleverhans.future.tf2.attacks import projected_gradient_descent
# L2-bounded PGD attack; rand_init=None with rand_minmax=.3 selects the
# Madry et al. random-start variant described above.
X_adv = projected_gradient_descent(model_logits,
                                   X_to_adv,
                                   eps=2.,
                                   eps_iter=1.,
                                   nb_iter=10,
                                   norm=2,
                                   clip_min=X_train.min(),
                                   clip_max=X_train.max(),
                                   rand_init=None,
                                   rand_minmax=.3,
                                   targeted=False,
                                   sanity_checks=False
                                   ).numpy()
# NOTE(review): predict() displays the classes but returns None, so both
# names below are bound to None; they are recomputed in the plotting cell.
y_pred = predict(X_to_adv)
y_pred_adv = predict(X_adv)
# We can look at the logs of the message dumper and see the last 2 cloud events which should show the results of our batch prediction of ordinary examples and our modified adversarial attacks:
#
# ```JSON
# "{\"data\": {\"feature_score\": null, \"instance_score\": null, \"is_adversarial\": [0, 0, 0, 0, 0]}, \"meta\": {\"name\": \"AdversarialVAE\", \"detector_type\": \"offline\", \"data_type\": null}}"
# ```
#
# and
#
# ```JSON
# "{\"data\": {\"feature_score\": null, \"instance_score\": null, \"is_adversarial\": [1, 1, 1, 1, 1]}, \"meta\": {\"name\": \"AdversarialVAE\", \"detector_type\": \"offline\", \"data_type\": null}}"
# ```
#
# This shows the first batch of 5 were not adversarial but the second 5 were:
#
# * `is_adversarial: [0, 0, 0, 0, 0]`
# * `is_adversarial: [1, 1, 1, 1, 1]`
# !kubectl logs $(kubectl get pod -l serving.knative.dev/configuration=message-dumper -o jsonpath='{.items[0].metadata.name}') user-container
# +
# Recompute predictions directly from the model as class indices
y_pred = np.argmax(model(X_to_adv).numpy(), axis=-1)
y_pred_adv = np.argmax(model(X_adv).numpy(), axis=-1)
n_rows = 5
n_cols = 4
figsize = (15, 20)
img_shape = (30, 30, 3)
# 5x4 grid: original | predicted sign | adversarial | predicted sign
fig5 = plt.figure(constrained_layout=False, figsize=figsize)
widths = [5, 1, 5, 1]
heights = [5, 5, 5, 5, 5]
spec5 = fig5.add_gridspec(ncols=4, nrows=5, width_ratios=widths,
                          height_ratios=heights)
for row in range(n_rows):
    # Column 0: the unmodified test image
    ax_0 = fig5.add_subplot(spec5[row, 0])
    ax_0.imshow(X_to_adv[row].reshape(img_shape))
    if row == 0:
        ax_0.title.set_text('Original')
    ax_0.axis('off')
    # Column 1: meta image of the class predicted for the original
    ax_1 = fig5.add_subplot(spec5[row, 1])
    class_image=cv2.imread(meta_folder+"{}.png".format(y_pred[row]))
    image = Image.fromarray(class_image, 'RGB')
    image = np.array(image)
    ax_1.imshow(image)
    ax_1.title.set_text('Pred original: {}'.format(y_pred[row]))
    ax_1.axis('off')
    # Column 2: the adversarially perturbed image
    ax_2 = fig5.add_subplot(spec5[row, 2])
    ax_2.imshow(X_adv[row].reshape(img_shape))
    if row == 0:
        ax_2.title.set_text('Adversarial')
    ax_2.axis('off')
    # Column 3: meta image of the class predicted for the adversarial input
    ax_3 = fig5.add_subplot(spec5[row, 3])
    class_image_adv=cv2.imread(meta_folder+"{}.png".format(y_pred_adv[row]))
    image_adv = Image.fromarray(class_image_adv, 'RGB')
    image_adv = np.array(image_adv)
    ax_3.imshow(image_adv)
    ax_3.title.set_text('Pred adversarial: {}'.format(y_pred_adv[row]))
    ax_3.axis('off')
# -
# ## Get Adversarial Scores
#
# We call the adversarial detector directly to get instance scores.
#
from alibi_detect.utils.visualize import plot_instance_score
# Score the 5 normal + 5 adversarial images in a single batch
X = np.concatenate([X_to_adv, X_adv], axis=0)
ad_preds = detect(X)
# Ground truth: first half normal (0), second half adversarial (1)
labels = ['Normal', 'Adversarial']
target = np.array([0 if i < X_to_adv.shape[0] else 1 for i in range(X.shape[0])])
plot_instance_score(ad_preds, target, labels, 0.5)
| integrations/samples/kfserving/ad-signs/signs_adversarial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np;
import matplotlib.pyplot as plt;
import matplotlib as mpl;
import math;
# Render figures at high resolution (600 dpi)
mpl.rcParams['figure.dpi'] = 600;
def printArray(list):
    """Pretty-print a 2-D array, one bracketed row per line."""
    for row in list:
        text = "["
        for value in row:
            text += "%7.1f " % value
        text += "]"
        print(text)
    # Blank line after the matrix
    print()
def getMinIndex(array):
    """Return the index of the smallest strictly positive entry of array.

    Non-positive entries are skipped (simplex ratio test); if nothing is
    strictly positive, 0 is returned.
    """
    best_index = 0
    best_value = math.inf
    for position, value in enumerate(array):
        if 0 < value < best_value:
            best_index = position
            best_value = value
    return best_index
def divideArrays(array1, array2): #divides array1 by array2
    """Elementwise quotient array1/array2 for the simplex ratio test.

    Positions where array2 is zero yield math.inf so they are never
    selected as pivot rows. The result has len(array1) entries.
    """
    quotients = [0] * len(array1)
    for position, denominator in enumerate(array2):
        if denominator == 0:
            quotients[position] = math.inf
        else:
            quotients[position] = array1[position] / denominator
    return quotients
def fillSlack(list):
    """Insert identity slack-variable columns before the b column.

    Given a tableau with one column per decision variable plus a final
    b column, this adds one slack column per constraint row (forming an
    identity block) and returns the widened array. The input array is
    not modified (np.insert returns a fresh array each call).
    """
    n_constraints = len(list) - 1
    n_vars = len(list[0]) - 1
    for _ in range(n_constraints):
        # Insert a zero column just before the (current) last column
        list = np.insert(list, len(list[0]) - 1, 0, axis=1)
    # Place the identity block: constraint row r gets a 1 in slack column r
    for r in range(n_constraints):
        list[r][n_vars + r] = 1
    return list
def maximize(list):
    """Run the simplex method on the tableau and return the final tableau.

    Layout: one row per constraint, the objective function in the last
    row, and the right-hand side (b) in the last column. Slack columns
    are added automatically via fillSlack.

    NOTE(review): if no ratio in the ratio test is strictly positive
    (unbounded problem), getMinIndex returns 0 and this loop may pivot
    incorrectly or fail to terminate -- confirm inputs are bounded.
    """
    list = fillSlack(list);
    # Negate the objective row so "minimum < 0" signals a possible improvement
    list[ len(list)-1 ] *= -1;
    objFun = len(list)-1;
    bCol = len(list[0])-1;
    while(min(list[objFun]) < 0):
        #print(min(list[objFun]));
        # Entering variable: column with the most negative objective coefficient
        entryColumn = np.argmin(list[objFun]);
        #print("Column " + str(entryColumn));
        #print(list[0:objFun, bCol]);
        #print(list[0:objFun, entryColumn]);
        # Ratio test: b column divided by the entering column (inf where zero)
        temp = divideArrays(list[0:objFun, bCol] , list[0:objFun, entryColumn]);
        #print(temp);
        entryRow = getMinIndex(temp);#np.argmin(temp) + 1; #1 is offset. See above
        #print("Row " + str(entryRow));
        print("Entry point is at: [" + str(entryRow) + ";" + str(entryColumn) + "]");
        # Normalize the pivot row so the pivot element becomes 1
        list[entryRow] = list[entryRow]/list[entryRow][entryColumn];
        i = 0;
        # Eliminate the pivot column from every other row
        while i < len(list):
            #print(inputArray[i]);
            if(i != entryRow):
                list[i] = list[i] - list[i][entryColumn]*list[entryRow]
            i += 1;
        print("Intermediate result: ")
        printArray(list)
        print();
    return list;
def minimize(list):
    """Solve a minimization problem through LP duality.

    The tableau is transposed (primal -> dual) and the dual problem is
    solved with maximize; the resulting final tableau is returned.
    """
    transposed = np.transpose(list)
    return maximize(transposed)
def fillInMatrix():
    """Interactively read a simplex tableau from stdin.

    Prompts for the number of inequalities and variables, then for each
    constraint row its coefficients followed by its right-hand side (b).
    The final row holds the objective-function coefficients; its b entry
    stays 0.

    Returns:
        A float32 numpy array of shape (inequalities + 1, variables + 1).
    """
    rows = int(input("Number of inequalities: ")) + 1  # +1 for the objective row
    nVar = int(input("Number of variables: "))
    columns = nVar + 1  # Plus result column
    print("Rows: " + str(rows) + " Columns: " + str(columns))
    inputArray = np.zeros([rows, columns], dtype=np.float32)
    for i in range(rows):
        if i == rows - 1:
            print("Enter values for Objective function: ")
        else:
            print("Enter values for Constraints")
        for j in range(nVar):
            inputArray[i][j] = float(input("Input coefficient of var №" + str(j+1) + ": "))
        if i != rows - 1:
            # Constraint rows get an explicit right-hand side
            inputArray[i][columns-1] = float(input("Input resulting variable (b): "))
        # Objective row's b entry stays 0 (the array is zero-initialized)
        print()
    return inputArray
# Interactive driver: read the tableau, solve, and print the result
print("Fill in initial matrix: ");
inputArray = fillInMatrix();
printArray(inputArray);
#maximize(np.copy(inputArray));
# NOTE(review): the name 'list' shadows the builtin from here on
list = minimize(np.copy(inputArray));
print("Final matrix: ");
printArray(list);
| Lab2/Maximization_v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="ur8xi4C7S06n"
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="eHLV0D7Y5jtU"
# # AI Platform (Unified) SDK: Training a custom image classification model using a training pipeline with a managed dataset input
#
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/notebooks/deepdive/custom/ucaip_customjob_image_pipeline.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/notebooks/deepdive/custom/ucaip_customjob_image_pipeline.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="tvgnzT1CKxrO"
# # Overview
#
#
# This tutorial demonstrates how to use the AI Platform (Unified) Python SDK to train a custom image classification model using a [training pipeline job](https://cloud.google.com/ai-platform-unified/docs/training/create-training-pipeline) with a managed dataset input.
#
# ### Dataset
#
# The dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, tulip.
#
# ### Objectives
#
#
# - Creating a AI Platform (Unified) dataset.
# - Creating a training pipeline with a custom job training step and an AI Platform dataset input.
# - Starting the training pipeline job.
# - Monitoring the training pipeline job.
# - Deploy the model to a serving endpoint.
# - Make a prediction(s).
# - Undeploy the model.
#
# ### Costs
#
# This tutorial uses billable components of Google Cloud Platform (GCP):
#
# * Cloud AI Platform
# * Cloud Storage
#
# Learn about [Cloud AI Platform
# pricing](https://cloud.google.com/ml-engine/docs/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
#
# + [markdown] id="i7EUnXsZhAGF"
# ## Setting up the environment
#
# This notebook was developed and tested using [AI Platform Notebooks](https://cloud.google.com/ai-platform-notebooks) using the TensorFlow 2.4 image.
#
# ### Setting up your notebook environment
#
# This notebook has been tested with [AI Platform Notebooks](https://cloud.google.com/ai-platform/notebooks/docs) configured with the standard TensorFlow 2.4 image.
#
# It may work on other environments as long as the similar hardware and software configuration is used.
#
# #### Provisioning an instance of AI Platform Notebooks
#
# To provision an instance of AI Platform Notebooks, follow the instructions in the [AI Platform Notebooks documentation](https://cloud.google.com/ai-platform/notebooks/docs/create-new). Configure your instance with multiple GPUs and use the TensorFlow 2.4 image.
#
# #### Installing software pre-requisities
#
# In addition to standard packages pre-installed in the TensorFlow 2.4 image you need the following additional packages:
# - [AI Platform Python client library](https://cloud.google.com/ai-platform-unified/docs/start/client-libraries)
#
# Use `pip` to install the libraries. You can run `pip` from a terminal window of your AI Platform Notebooks instance or execute the following cells. Make sure to restart the notebook after installation.
# -
# %pip install -U google-cloud-aiplatform --user
# + [markdown] id="BF1j6f9HApxa"
# ### Set up your GCP project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a GCP project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the AI Platform APIs, Compute Engine APIs and Container Registry API.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,containerregistry.googleapis.com)
#
# 4. Follow the instructions in the repos' [README file](https://github.com/jarokaz/ucaip/blob/main/README.md) to provision an instance of AI Platform Notebooks and install Cloud AI Platform (Unified) SDK.
#
#
# + [markdown] id="MnPgkOPYGKBA"
# #### Set your Project ID
#
# **If you don't know your project ID**, you might be able to get your project ID using `gcloud` command by executing the second cell below.
# + id="RZwz94fXGKBA"
# GCP project that owns all resources created by this notebook
PROJECT_ID = "jk-mlops-dev"
# + id="zRcWSNx5GKBA"
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="v5n9X7yJGKBA"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Make sure to [choose a region where Cloud
# AI Platform services are
# available](https://cloud.google.com/ml-engine/docs/tensorflow/regions). You can
# not use a Multi-Regional Storage bucket for training with AI Platform.
# + id="bgKerUMPGKBA"
# Region used for all AI Platform (Unified) operations in this notebook
REGION = 'us-central1'
# + [markdown] id="zgPO1eR3CYjk"
# ### Create a Cloud Storage bucket
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
# + id="MzGDU7TWdts_"
# Cloud Storage bucket used for staging; must be globally unique
BUCKET_NAME = "jk-ucaip-demos"
# + [markdown] id="-EcIXiGsCePi"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="NIq7R4HZCfIc"
# ! gsutil mb -l $REGION gs://$BUCKET_NAME
# + [markdown] id="ucvCsknMCims"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="vhOb7YnwClBb"
# ! gsutil ls -al gs://$BUCKET_NAME
# + [markdown] id="XoEqT2Y4DJmf"
# ### Import libraries and define constants
# -
# %load_ext autoreload
# + [markdown] id="Y9Uo3tifg1kx"
# #### Import AI Platform (Unified) SDK
#
# Import the AI Platform (Unified) SDK into our python environment.
# + id="pRUOFELefqf1"
import base64
import json
import os
import sys
import time
import google.auth
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_io as tfio
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
from datetime import datetime
# + [markdown] id="vqKm-raaGKBA"
# ### AI Platform (Unified) constants
#
# Let's now setup some constants for AI Platform (Unified):
#
# #### Endpoints
#
# - `API_ENDPOINT`: The AI Platform (Unified) API service endpoint for dataset, model, job, pipeline and endpoint services.
# - `API_PREDICT_ENDPOINT`: The AI Platform (Unified) API service endpoint for prediction.
# - `PARENT`: The AI Platform (Unified) location root path for dataset, model and endpoint resources.
# + id="py2v2Ig0GKBA"
# API Endpoint
API_ENDPOINT = "us-central1-aiplatform.googleapis.com"
API_PREDICT_ENDPOINT = "us-central1-prediction-aiplatform.googleapis.com"
# AI Platform (Unified) location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Default timeout for API calls
TIMEOUT = 60
# -
# #### Dataset schemas
#
# AI Platform supports four dataset types: tabular, text, image and video. The same dataset type can be used for multiple ML tasks. For example, the image dataset type can be used for single-label classification, multi-label classification or object detection. In this sample, you will create an image dataset for the single-label classification task.
#
# The dataset type and the ML task are specified by a set of pre-defined YAML based schemas provided by AI Platform.
IMAGE_SCHEMA = 'gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml'
IMPORT_SCHEMA_IMAGE_CLASSIFICATION = 'gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml'
# #### Training pipeline schemas
#
# When configuring a training pipeline you need to specify a type of a training task - a custom job or AutoML training. In addition, if your pipeline uses an AI Platform dataset as input, you have to specify a type of annotations to use for training - an AI Platform dataset can have multiple annotation types attached to the dataset.
#
# In our case, we will configure a pipeline with a custom training task and image dataset with image classification annotations.
ANNOTATION_SCHEMA = 'gs://google-cloud-aiplatform/schema/dataset/annotation/image_classification_1.0.0.yaml'
CUSTOM_TRAINING_TASK_DEFINITION = 'gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml'
# #### Training and deployment container images
#
# These constants define a base image for a custom training and a serving image.
TRAIN_BASE_IMAGE = 'gcr.io/deeplearning-platform-release/tf2-gpu.2-3:latest'
DEPLOY_IMAGE = 'us-docker.pkg.dev/cloud-aiplatform/prediction/tf2-cpu.2-3:latest'
# #### Machine configurations for training and deployment
# +
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
TRAIN_MACHINE_TYPE = 'n1-standard-4'
DEPLOY_MACHINE_TYPE = 'n1-standard-4'
# + [markdown] id="E6ppE7imft-y"
# # Tutorial
#
# ## Clients
#
# The AI Platform (Unified) SDK works as a client/server model. On your side, the python script, you will create a client that sends requests and receives responses from the server -- AI Platform.
#
# You will use several clients in this tutorial, so set them all up upfront.
#
# - Dataset Service for managed datasets.
# - Model Service for managed models.
# - Pipeline Service for training.
# - Endpoint Service for deployment.
# - Prediction Service for serving. *Note*, prediction has a different service endpoint.
#
# +
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
predict_client_options = {"api_endpoint": API_PREDICT_ENDPOINT}
dataset_client = aip.DatasetServiceClient(client_options=client_options)
model_client = aip.ModelServiceClient(client_options=client_options)
pipeline_client = aip.PipelineServiceClient(client_options=client_options)
endpoint_client = aip.EndpointServiceClient(client_options=client_options)
prediction_client = aip.PredictionServiceClient(client_options=predict_client_options)
# -
# ## Creating an AI Platform dataset
#
# Creating an AI Platform dataset is a two-step process. The first step is to create an empty dataset. During the first step you define the dataset type. The second step is to import the data to the dataset. This is when you specify the ML task supported by the imported data.
#
# ### Create an empty image dataset
#
# Both creating a dataset and importing data are long running operations in AI Platform. The long running operations use asynchronous calls. An asynchronous call does not block a caller and returns an `operation` object that can by subsequently used to monitor/control the operation by invoking methods exposed by the object:
#
#
# | Method | Description |
# | ----------- | ----------- |
# | result() | Waits for the operation to complete and returns a result object in JSON format. |
# | running() | Returns True/False on whether the operation is still running. |
# | done() | Returns True/False on whether the operation is completed. |
# | canceled() | Returns True/False on whether the operation was canceled. |
# | cancel() | Cancels the operation (this may take up to 30 seconds). |
#
#
# +
display_name = 'flowers-dataset'
dataset = aip.Dataset(
display_name=display_name,
metadata_schema_uri=IMAGE_SCHEMA,
labels=None
)
operation = dataset_client.create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
response = operation.result(timeout=TIMEOUT)
print(response)
dataset_name = response.name
# -
# ### Prepare data for import
#
# The data to be imported to an AI Platform image dataset must meet the following requirements:
#
# - Images must be stored in a Cloud Storage bucket.
# - Each image file must be in an image format (PNG, JPEG, BMP, ...).
# - There must be an index file stored in your Cloud Storage bucket that contains the path and annotations for each image.
# - The index file must be either CSV or JSONL.
#
# #### CSV
#
# For image classification, the CSV index file must have the following format:
#
# - No heading
# - First column is the Cloud Storage path to the image.
# - Second column is the label.
#
# #### JSONL
#
# The format of the JSONL index must be as follows:
#
# - Each data item is a separate JSON object, on a separate line.
# - The key/value pair 'image_gcs_uri' is the Cloud Storage path to the image.
# - The key/value pair 'classification_annotation' is the label field.
# - The key/value pair 'display_name' is the label
#
# { 'image_gcs_uri': image, 'classification_annotation': { 'display_name': label } }
#
# *Note*: The dictionary key fields may alternatively be in camelCase. For example, 'image_gcs_uri' can also be 'imageGcsUri'.
#
# #### Dataset splitting
#
# The index files may include information about data splitting.
#
# ##### CSV
#
# Each row entry in a CSV index file can be preceded by a first column that indicates whether the data is part of the training (TRAINING), test (TEST) or validation (VALIDATION) data. Alternatively, AI Platform (Unified) supports the CAIP (pre-AI Platform (Unified)) version of the tags: TRAIN, TEST and VALIDATE. For example:
#
# TRAINING, "this is the data item", "this is the label"
# TEST, "this is the data item", "this is the label"
# VALIDATION, "this is the data item", "this is the label"
#
# ##### JSONL
#
# Each object entry in a JSONL index file can have a 'ml_use' key/value pair that indicates whether the data is part of the training (training), test (test) or validation (validation) data.
#
# { 'image_gcs_uri': image, 'classification_annotation': { 'display_name': label }, 'data_item_resource_labels':{'aiplatform.googleapis.com/ml_use':'training'} }
#
# If the index does not contain data splitting information, AI Platform will automatically split the dataset for you.
# ### Import data
#
# We have preprocessed the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) to the import format required by AI Platform.
#
IMPORT_FILE = 'gs://cloud-samples-data/vision/automl_classification/flowers/flowers.jsonl'
# !gsutil cat -r 0-441 {IMPORT_FILE}
# To import the data, call the `import_data` method exposed by the dataset client.
# +
config = [{
'gcs_source': {'uris': [IMPORT_FILE]},
'import_schema_uri': IMPORT_SCHEMA_IMAGE_CLASSIFICATION
}]
operation = dataset_client.import_data(name=dataset_name, import_configs=config)
print("Long running operation:", operation.operation.name)
response = operation.result()
# -
# ### Get dataset information
#
# You can list all datasets in your project and retrieve detailed information about a specific dataset using the `list_datasets` and `get_datasets` methods.
response = dataset_client.list_datasets(parent=PARENT)
for dataset in response:
print(dataset.display_name, ' ', dataset.name)
dataset_name = dataset.name
response = dataset_client.get_dataset(name=dataset_name)
print(response)
# ### List data items
#
# To retrieve the dataset's data items you can use the `list_data_items` method.
# +
count = 0
response = dataset_client.list_data_items(parent=dataset_name)
for data_item in response:
count += 1
print('Number of images: {}'.format(count))
print('An example of item specification:')
print(data_item)
# + [markdown] id="NTTuoxOfGKBA"
# ## Training a model
#
# The dataset is ready so we can move on to configuring a training job. There are three methods of training a custom model in AI Platform:
# * [Custom jobs](https://cloud.google.com/ai-platform-unified/docs/training/create-custom-job)
# * [Hyperparameter tuning jobs](https://cloud.google.com/ai-platform-unified/docs/training/using-hyperparameter-tuning)
# * [Training pipelines](https://cloud.google.com/ai-platform-unified/docs/training/create-training-pipeline)
#
# A training pipeline encapsulates additional steps in addition to a training step, specifically: accessing data from an AI Platform dataset and uploading the trained model to AI Platform. The training step of a training pipeline can be either a Custom job or a Hyperparameter tuning job.
#
# In this sample, we utilize a training pipeline with a custom job training step.
#
# There are two ways you can configure a custom training job:
#
# - **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
#
# - **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model.
#
# You will use the second method.
# -
# ### Create a training container image
# #### Create a folder for training images source code and configs
# ! rm -fr trainer; mkdir trainer
# ! touch trainer/__init__.py
# #### Define the training script.
#
# In the next cell, you will write the contents of the training script to the `task.py` file.
#
# Review the script. In summary, the script uses transfer learning to train an image classification model. The model uses the pre-trained ResNet50 as a base and a simple FCNN classification top. The script builds a `tf.data` data ingestion pipeline using the AI Platform dataset as a source. Notice how the AI Platform dataset is passed to the script.
#
# At runtime, AI Platform passes metadata about your dataset to your training application by setting the following environment variables in your training container.
#
# * AIP_DATA_FORMAT: The format that your dataset is exported in. Possible values include: jsonl, csv, or bigquery.
# * AIP_TRAINING_DATA_URI: The location that your training data is stored at.
# * AIP_VALIDATION_DATA_URI: The location that your validation data is stored at.
# * AIP_TEST_DATA_URI: The location that your test data is stored at.
#
# If the AIP_DATA_FORMAT of your dataset is jsonl or csv, the data URI values refer to Cloud Storage URIs, like `gs://bucket_name/path/training-*`. To keep the size of each data file relatively small, AI Platform splits your dataset into multiple files. Because your training, validation, or test data may be split into multiple files, the URIs are provided in wildcard format.
#
# Image datasets are passed to your training application in JSONL format. The schema is the same as the schema of the index used to import the image data.
#
# Refer to [AI Platform (Unified)](https://cloud.google.com/ai-platform-unified/docs/training/using-managed-datasets) for more information about using managed datasets in a custom training application.
# +
# %%writefile trainer/task.py
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import argparse
import os
import time
#import hypertune
import numpy as np
import pandas as pd
import tensorflow as tf
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_SHAPE = (IMG_HEIGHT, IMG_WIDTH, 3)
AUTOTUNE = tf.data.experimental.AUTOTUNE
LOCAL_LOG_DIR = '/tmp/logs'
PROBABILITIES_KEY = 'probabilities'
LABELS_KEY = 'labels'
NUM_LABELS = 3
def build_model(num_layers, dropout_ratio, num_classes):
    """
    Creates a custom image classification model using ResNet50
    as a base model (transfer learning: only the head trains).

    Args:
        num_layers: Width (units) of the hidden dense layer in the
            classification head. (The CLI flag is named --num-layers,
            but the value is used as a unit count.)
        dropout_ratio: Dropout rate applied before the output layer.
        num_classes: Number of output classes. The output layer emits
            raw logits (no softmax).

    Returns:
        A compiled `tf.keras.Model`.
    """
    # Create the base model; pooling='avg' yields a flat feature vector.
    base_model = tf.keras.applications.ResNet50(input_shape=IMG_SHAPE,
                                                include_top=False,
                                                weights='imagenet',
                                                pooling='avg')
    # Freeze the pre-trained weights; only the new head is trainable.
    base_model.trainable = False

    # Add preprocessing and classification head. Inputs arrive as raw
    # uint8 images; ResNet50's preprocess_input expects float32.
    inputs = tf.keras.Input(shape=IMG_SHAPE, dtype=tf.uint8)
    x = tf.cast(inputs, tf.float32)
    x = tf.keras.applications.resnet50.preprocess_input(x)
    x = base_model(x)
    x = tf.keras.layers.Dense(num_layers, activation='relu')(x)
    x = tf.keras.layers.Dropout(dropout_ratio)(x)
    outputs = tf.keras.layers.Dense(num_classes)(x)

    # Assemble the model
    model = tf.keras.Model(inputs, outputs)

    # Compile the model. Use `learning_rate` rather than the deprecated
    # `lr` alias; `from_logits=True` because the head has no softmax.
    base_learning_rate = 0.0001
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    return model
def image_dataset_from_aip_jsonl(pattern, class_names=None, img_height=224, img_width=224):
    """
    Generates a `tf.data.Dataset` from a set of JSONL files
    in the AI Platform image dataset index format.

    Arguments:
        pattern: A wildcard pattern for a list of JSONL files.
            E.g. gs://bucket/folder/training-*.
        class_names: the list of class names that are expected
            in the passed index. If given and the names found in the
            index differ (as sets), a ValueError is raised.
        img_height: The height of a generated image
        img_width: The width of a generated image

    Returns:
        A shuffled dataset of (uint8 image, integer label) pairs, with a
        `class_names` attribute holding the sorted unique label names.
    """

    def _get_label(class_name):
        """
        Converts a string class name to an integer label.
        """
        # NOTE: `class_names` resolves at call time to the tf.constant
        # assigned near the end of the enclosing function, so labels are
        # indices into the sorted unique names.
        one_hot = class_name == class_names
        return tf.argmax(one_hot)

    def _decode_img(file_path):
        """
        Loads an image and converts it to a resized 3D tensor.
        """
        img = tf.io.read_file(file_path)
        # expand_animations=False forces a 3D tensor even for animated formats.
        img = tf.io.decode_image(img,
                                 expand_animations=False)
        img = tf.image.resize(img, [img_height, img_width])
        img = tf.cast(img, tf.uint8)
        return img

    def _process_example(file_path, class_name):
        """
        Creates a converted image and a class label from
        an image path and class name.
        """
        label = _get_label(class_name)
        img = _decode_img(file_path)
        return img, label

    # Read the JSONL index to a pandas DataFrame
    df = pd.concat(
        [pd.read_json(path, lines=True) for path in tf.io.gfile.glob(pattern)],
        ignore_index=True
    )
    # Parse the classificationAnnotations field: each row holds a list of
    # annotation dicts; take the first ([0]) and flatten it into columns.
    df = pd.concat(
        [df, pd.json_normalize(df['classificationAnnotations'].apply(pd.Series)[0])], axis=1)
    paths = df['imageGcsUri'].values
    labels = df['displayName'].values

    # np.unique returns the names sorted, fixing the name -> label mapping.
    inferred_class_names = np.unique(labels)
    if class_names is not None:
        class_names = np.array(class_names).astype(str)
        if set(inferred_class_names) != set(class_names):
            raise ValueError(
                'The `class_names` passed do not match the '
                'names in the image index '
                'Expected: %s, received %s' %
                (inferred_class_names, class_names))
    # The inferred (sorted) names are used even when class_names was passed.
    class_names = tf.constant(inferred_class_names)

    dataset = tf.data.Dataset.from_tensor_slices((paths, labels))
    # reshuffle_each_iteration=False keeps the shuffle order stable across epochs.
    dataset = dataset.shuffle(len(labels), reshuffle_each_iteration=False)
    dataset = dataset.map(_process_example, num_parallel_calls=AUTOTUNE)
    dataset.class_names = class_names

    return dataset
def get_datasets(batch_size, img_height, img_width):
    """
    Builds training and validation `tf.data` datasets from the
    AI Platform managed dataset passed to the training container via
    the AIP_* environment variables.

    Args:
        batch_size: batch size for both splits.
        img_height: target image height in pixels.
        img_width: target image width in pixels.

    Returns:
        A (train_ds, valid_ds, class_names) tuple, where class_names is
        the array of label names inferred from the training split.
    """

    def _optimize(ds):
        """Applies caching and prefetching to a batched dataset."""
        return ds.cache().prefetch(buffer_size=AUTOTUNE)

    data_format = os.environ['AIP_DATA_FORMAT']
    if data_format != 'jsonl':
        raise RuntimeError('Wrong dataset format: {}. Expecting - jsonl'.format(
            data_format))

    # The training split defines the label set; the validation split is
    # checked against it inside image_dataset_from_aip_jsonl.
    train_ds = image_dataset_from_aip_jsonl(
        pattern=os.environ['AIP_TRAINING_DATA_URI'],
        img_height=img_height,
        img_width=img_width)
    class_names = train_ds.class_names.numpy()

    valid_ds = image_dataset_from_aip_jsonl(
        pattern=os.environ['AIP_VALIDATION_DATA_URI'],
        class_names=class_names,
        img_height=img_height,
        img_width=img_width)

    train_batched = _optimize(train_ds.batch(batch_size))
    valid_batched = _optimize(valid_ds.batch(batch_size))
    return train_batched, valid_batched, class_names
class ServingModule(tf.Module):
    """
    A custom tf.Module that adds a serving signature with image preprocessing
    and prediction postprocessing to the trained model.

    The exported signature accepts a batch of raw (undecoded) image bytes
    and returns the NUM_LABELS most probable labels and probabilities.
    """

    def __init__(self, base_model, output_labels):
        # base_model: the trained Keras model returning logits.
        # output_labels: ordered class names matching the logit indices.
        super(ServingModule, self).__init__()
        self._model = base_model
        self._output_labels = tf.constant(output_labels, dtype=tf.string)

    def _decode_and_scale(self, raw_image):
        """
        Decodes, and resizes a single raw image.
        """
        # expand_animations=False forces a 3D tensor even for animated formats.
        image = tf.image.decode_image(raw_image, expand_animations=False)
        image = tf.image.resize(image, [IMG_HEIGHT, IMG_WIDTH])
        image = tf.cast(image, tf.uint8)
        return image

    def _preprocess(self, raw_images):
        """
        Preprocesses raw inputs as sent by the client.
        """
        # A mitigation for https://github.com/tensorflow/tensorflow/issues/28007
        with tf.device('/cpu:0'):
            images = tf.map_fn(self._decode_and_scale, raw_images,
                               dtype=tf.uint8, back_prop=False)
        return images

    def _postprocess(self, model_outputs):
        """
        Postprocesses outputs returned by the base model.

        Converts logits to probabilities and returns the top NUM_LABELS
        label names and probabilities, sorted descending.
        """
        probabilities = tf.nn.softmax(model_outputs)
        indices = tf.argsort(probabilities, axis=1, direction='DESCENDING')
        return {
            LABELS_KEY: tf.gather(self._output_labels, indices, axis=-1)[:,:NUM_LABELS],
            PROBABILITIES_KEY: tf.sort(probabilities, direction='DESCENDING')[:,:NUM_LABELS]
        }

    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def __call__(self, bytes_inputs):
        """
        Preprocesses inputs, calls the base model
        and postprocesses outputs from the base model.
        """
        images = self._preprocess(bytes_inputs)
        logits = self._model(images)
        outputs = self._postprocess(logits)
        return outputs
def get_args():
    """
    Returns parsed command line arguments.

    Uses parse_known_args so that extra flags injected by the training
    service are ignored rather than causing a failure.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=20,
        help='number of times to go through the data, default=20')
    parser.add_argument(
        '--batch-size',
        default=32,
        type=int,
        help='number of records to read during each training step, default=32')
    parser.add_argument(
        '--num-layers',
        default=64,
        type=int,
        # Despite the flag name, this is used as the unit count of the
        # hidden dense layer in build_model.
        help='number of units in the hidden layer of the classification head, default=64')
    parser.add_argument(
        '--dropout-ratio',
        default=0.5,
        type=float,
        help='dropout ratio in the classification head, default=0.5')
    parser.add_argument(
        '--model-dir',
        type=str,
        default='/tmp/saved_model',
        help='model dir, default=/tmp/saved_model')
    args, _ = parser.parse_known_args()
    return args
def copy_tensorboard_logs(local_path: str, gcs_path: str):
    """Copies Tensorboard logs from a local dir to a GCS location.

    Batch-copying event files after training avoids the bottleneck of
    streaming per-batch event records to GCS during the run.

    Args:
        local_path: local filesystem directory uri.
        gcs_path: cloud filesystem directory uri.

    Returns:
        None.
    """
    event_files = tf.io.gfile.glob('{}/*/events.out.tfevents.*'.format(local_path))
    for src_file in event_files:
        # Mirror the local directory layout under the GCS prefix.
        dst_file = src_file.replace(local_path, gcs_path)
        tf.io.gfile.copy(src_file, dst_file)
# Training entry point. AI Platform injects the AIP_* environment
# variables into the container when the training pipeline is configured
# with a managed dataset.
if __name__ == '__main__':
    if 'AIP_DATA_FORMAT' not in os.environ:
        raise RuntimeError('No dataset information available.')

    # Configure TensorBoard callback (logs locally, copied to GCS at the end)
    callbacks = [tf.keras.callbacks.TensorBoard(log_dir=LOCAL_LOG_DIR, update_freq='batch')]

    args = get_args()

    # Create the datasets and the model
    train_ds, valid_ds, class_names = get_datasets(args.batch_size, IMG_HEIGHT, IMG_WIDTH)
    model = build_model(args.num_layers, args.dropout_ratio, len(class_names))
    print(model.summary())

    # Start training
    history = model.fit(x=train_ds,
                        validation_data=valid_ds,
                        epochs=args.num_epochs,
                        callbacks=callbacks)

    # Configure locations for SavedModel and TB logs. AIP_MODEL_DIR is
    # set by AI Platform from the pipeline's baseOutputDirectory; fall
    # back to the CLI flag for local runs.
    if 'AIP_MODEL_DIR' in os.environ:
        model_dir = os.environ['AIP_MODEL_DIR']
    else:
        model_dir = args.model_dir
    tb_dir = f'{model_dir}/logs'

    # Save the serving SavedModel with the bytes-in / top-k-out signature
    print('Saving the model to: {}'.format(model_dir))
    serving_module = ServingModule(model, class_names)
    signatures = {
        'serving_default': serving_module.__call__.get_concrete_function()
    }
    tf.saved_model.save(serving_module, model_dir, signatures=signatures)

    # Copy Tensorboard logs to GCS
    copy_tensorboard_logs(LOCAL_LOG_DIR, tb_dir)
# -
# Create a Dockerfile
# +
dockerfile = f'''
FROM {TRAIN_BASE_IMAGE}
RUN pip install -U 'h5py<3.0.0'
WORKDIR /
# Copies the trainer code to the docker image.
COPY trainer /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
'''
with open('Dockerfile', 'w') as f:
f.write(dockerfile)
# -
# Build the container image
train_image = f'gcr.io/{PROJECT_ID}/image_classifier'
# ! docker build -t {train_image} .
# Push the image to Container Registry
# ! docker push {train_image}
# ### Define the training pipeline
#
# You will define a training pipeline that uses the Flowers dataset as input, runs training as a custom training job, and uploads the trained model to AI Platform.
#
# Let's start by assembling the custom training job specification.
#
# #### Define container and worker pool specs
#
# Your custom training job will use a custom training container created in the previous step. Recall that the training scripts can be configured through command line parameters. For example you can set a number of training epochs.
#
# In this sample, we will run training on a single GPU equipped node.
#
# +
epochs = 10
container_spec = {
"image_uri": train_image,
"args": [
"--num-epochs=" + str(epochs),
],
}
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": {
"machine_type": TRAIN_MACHINE_TYPE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU
},
"container_spec": container_spec,
}
]
# -
# #### Assemble the job spec
#
# Let's now assemble the custom job specification.
#
# After the custom job completes, the training pipeline finds the model artifacts that your training application creates in the output directory you specified for your Cloud Storage bucket. It uses these artifacts to create a model resource, which sets you up for model deployment.
#
# There are two different ways to set the location for your model artifacts:
#
# * If you set a `baseOutputDirectory` for your training job, make sure your training code saves your model artifacts to that location, using the `AIP_MODEL_DIR` environment variable set by AI Platform. After the training job is completed, AI Platform searches for the resulting model artifacts in `gs://BASE_OUTPUT_DIRECTORY/model`.
# * If you set the `modelToUpload.artifactUri` field, the training pipeline uploads the model artifacts from that URI. You must set this field if you didn't set `baseOutputDirectory`.
#
# If you specify both `baseOutputDirectory` and `modelToUpload.artifactUri`, AI Platform uses `modelToUpload.artifactUri`.
pipeline_display_name = 'image-classifier-pipeline'
base_output_dir = 'gs://{}/{}/'.format(BUCKET_NAME, pipeline_display_name)
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {
"output_uri_prefix": base_output_dir
}
}
job_spec
# #### Assemble the pipeline specification
#
# You can now assemble the pipeline specification.
# +
model_display_name = pipeline_display_name + '-model'
dataset_id = dataset_name.split('/')[-1]
training_task_inputs = json_format.ParseDict(job_spec,Value())
training_pipeline_spec = {
'display_name': pipeline_display_name,
'input_data_config': {
'dataset_id': dataset_id,
'annotation_schema_uri': ANNOTATION_SCHEMA,
'gcs_destination': {
'output_uri_prefix': base_output_dir
},
'fraction_split': {
'training_fraction': 0.5,
'validation_fraction': 0.2
},
},
'training_task_definition': CUSTOM_TRAINING_TASK_DEFINITION,
'training_task_inputs': training_task_inputs,
'model_to_upload': {
'display_name': model_display_name,
'container_spec': {
'image_uri': DEPLOY_IMAGE
}
}
}
training_pipeline_spec
# -
# ### Start the training pipeline
#
pipeline = pipeline_client.create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline_spec)
# Get pipeline info.
response = pipeline_client.get_training_pipeline(name=pipeline.name)
print(response)
# The pipeline will take a few minutes to complete. You can check the status of the pipeline by inspecting the `state` property returned by a call to the `get_training_pipeline` method.
#
response = pipeline_client.get_training_pipeline(name=pipeline.name)
model_name = None
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training pipeline has not completed yet")
else:
print("Training pipeline completed")
print("Model ready for deployment:")
print(response.model_to_upload)
model_name = response.model_to_upload.name
# Before proceeding wait till the training pipeline completes.
# + [markdown] id="P9NsMP_5GKBA"
# ## Deploying a model
#
# Deploying an uploaded model is a two step process. First you need to create or select an AI Platform Prediction endpoint. After the endpoint is ready, you deploy a model to the endpoint.
#
# Refer to AI Platform (Unified) [documentation](https://cloud.google.com/ai-platform-unified/docs/general/deployment) for more information about deploying models.
#
# + [markdown] id="r-uJp-FnGKBB"
# ### Get model information
#
# You can list uploaded models and get detailed information about a specific model using the `list_model` and `get_model` methods.
# +
request = {
'parent': PARENT,
'filter': 'display_name="{}"'.format(model_display_name)
}
response = model_client.list_models(request)
for model in response:
print(model.name)
# + id="-wOE_l4FGKBB"
model_client.get_model(name=model_name)
# + [markdown] id="_0GV5MwiGKBB"
# ### Create an endpoint
#
#
# Creating an endpoint returns a long running operation, since it may take a few moments to provision the endpoint for serving. You call `response.result()`, which is a synchronous call and will return when the endpoint is ready. The helper function will return the AI Platform (Unified) fully qualified identifier for the endpoint -- `response.name`.
#
# +
endpoint_display_name = 'flower_classifier_endpoint'
endpoint = {
'display_name': endpoint_display_name
}
# -
response = endpoint_client.create_endpoint(parent=PARENT, endpoint=endpoint)
print('Long running operation: ', response.operation.name)
result = response.result(timeout=300)
endpoint_name = result.name
print(endpoint_name)
# ### List endpoints
for endpoint in endpoint_client.list_endpoints(parent=PARENT):
print(endpoint)
# + [markdown] id="XmlxD_iXGKBB"
# ### Deploy model to the endpoint
#
# -
# #### Define model deployment specification
# +
display_name = 'flower_classifier'
machine_spec = {
'machine_type': DEPLOY_MACHINE_TYPE,
'accelerator_count': 0,
}
deployed_model_spec = {
'model': model_name,
'display_name': display_name,
'dedicated_resources': {
'min_replica_count': 1,
'machine_spec': machine_spec
},
}
traffic_split={'0': 100}
# -
# #### Deploy the model
# +
response = endpoint_client.deploy_model(endpoint=endpoint_name,
deployed_model=deployed_model_spec,
traffic_split=traffic_split)
print("Long running operation:", response.operation.name)
result = response.result()
deployed_model_id = result.deployed_model.id
print(result)
# -
# #### Inspect the endpoint after deployment
response = endpoint_client.get_endpoint(name=endpoint_name)
print(response)
# ## Invoking a deployed model
# ### Inspect the model
#
# Recall that the training script saved the model with the `serving_default` signature that expects a batch of unprocessed images in their native format as an input. Image preprocessing is embedded in the serving graph.
response = model_client.get_model(name=model_name)
model_artifacts_uri = response.artifact_uri
# !saved_model_cli show --dir {model_artifacts_uri} --tag_set serve --signature_def serving_default
# ### Prepare the prediction request inputs
#
# The `sample_images` folder contains a couple of images of roses and daises.
# +
images_folder = 'sample_images'
raw_images = [tf.io.read_file(os.path.join(images_folder, image_path))
for image_path in os.listdir(images_folder)]
fig, axes = plt.subplots(nrows=1, ncols=len(raw_images), figsize=(10,10))
for axis, image in zip(axes.flat[0:], raw_images):
decoded_image = tf.image.decode_image(image)
axis.set_title(decoded_image.shape)
axis.imshow(decoded_image.numpy())
# -
# The prediction client encapsulates gRPC interface to the [Prediction Service](https://cloud.google.com/ai-platform-unified/docs/reference/rpc/google.cloud.aiplatform.v1beta1#google.cloud.aiplatform.v1beta1.PredictionService.Predict).
#
# To pass a batch of images to the service, the batch needs to be packaged as a [PredictRequest](https://cloud.google.com/ai-platform-unified/docs/reference/rpc/google.cloud.aiplatform.v1beta1#google.cloud.aiplatform.v1beta1.PredictRequest) protocol buffer. The referenced link provides a detailed description of the schema of the PredictRequest message. On a high level, the batch is represented as a list of instances, where each instance is a map of the form:
# ```
# {input_name: value}
# ```
#
# In our case the *input name* - as embedded in the `serving_default` signature - is `bytes_inputs`. Since we are sending raw binary images the *value* is a base 64 encoded representation of an image. So each instance (image) is represented by the following map
#
# ```
# {'bytes_inputs': {'b64': b64_image}}
# ```
#
#
input_name = 'bytes_inputs'
encoded_images = [base64.b64encode(raw_image.numpy()).decode('utf-8') for raw_image in raw_images]
instances_list = [{input_name: {'b64': encoded_image}} for encoded_image in encoded_images]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
# ### Invoking the endpoint
#
# You can now invoke the endpoint using the `predict` method.
response = prediction_client.predict(endpoint=endpoint_name, instances=instances, parameters=None)
# Let's inspect the response.
for prediction in response.predictions:
print('**************')
for item in prediction.items():
print(item)
# Since we sent a batch of two images to the service, we received two predictions. The output of the serving method are 3 most probable labels together with the associated probabilities.
# + [markdown] id="TpV-iwP9qw9c"
# ## Cleaning up
#
# To clean up all GCP resources used in this project, you can [delete the GCP
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# - Dataset
# - Model
# - Endpoint
# - Cloud Storage Bucket
# -
print(dataset_name)
print(model_name)
print(endpoint_name)
print(deployed_model_id)
# + id="sx_vKniMq9ZX"
delete_dataset = True
delete_model = True
undeploy_model = True
delete_endpoint = True
delete_bucket = True
# Undeploy the model
try:
if undeploy_model:
endpoint_client.undeploy_model(endpoint=endpoint_name,
deployed_model_id=deployed_model_id,
traffic_split={})
except Exception as e:
print(e)
# Delete the endpoint using the AI Platform (Unified) fully qualified identifier for the endpoint
try:
if delete_endpoint:
endpoint_client.delete_endpoint(name=endpoint_name)
except Exception as e:
print(e)
# Delete the model using the AI Platform (Unified) fully qualified identifier for the model
try:
if delete_model:
model_client.delete_model(name=model_name)
except Exception as e:
print(e)
# Delete the dataset using the AI Platform (Unified) fully qualified identifier for the dataset
try:
if delete_dataset:
dataset_client.delete_dataset(name=dataset_name)
except Exception as e:
print(e)
if delete_bucket and 'BUCKET_NAME' in globals():
# ! gsutil rm -r gs://$BUCKET_NAME
# -
# ## Code snippets
# + active=""
# display_name = 'test-model'
# model_uri = 'gs://jk-ucaip-demos/image-classifier-pipeline/model'
# image_uri = DEPLOY_IMAGE
# image_uri = 'gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest'
#
# model_spec = {
# "display_name": display_name,
# "metadata_schema_uri": "",
# "artifact_uri": model_uri,
# "container_spec": {
# "image_uri": image_uri
# },
# }
# model_spec
# + active=""
# response = clients['model'].upload_model(parent=PARENT, model=model_spec)
# print("Long running operation:", response.operation.name)
# response = response.result(timeout=180)
# print(response)
# model_name = response.model
| 01-caip-training/01-training_pipeline_custom_job_image_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib
def group_plot(self, results, n, name=""):
    # Preprocess experiment results, group them, and save a plot to *name*.
    data = self.preprocess_results(results)
    # NOTE(review): the comprehension variables shadow the tuple targets
    # `x`/`y` being assigned; x collects first elements and y second
    # elements of each result pair.
    x, y = [x[0] for x in data], [y[1] for y in data]
    # NOTE(review): `npix` is not defined anywhere in this chunk and
    # `x_unique`/`y_mean` are never used afterwards - this line looks like
    # leftover exploration and would raise a NameError if executed; confirm.
    [x_unique, y_mean] = npix.group_by(x).mean(y)
    data = zip(x, y)
    #print data
    self.plot_save(data, n, name);
# -
# NOTE(review): `resiliences` is not imported in this chunk - presumably
# defined elsewhere in the full notebook; confirm before running.
e = resiliences.experiment()
results = e.get_results_internal()
# +
# %matplotlib inline
# Render and save the grouped plot of the experiment results.
e.group_plot(results, 100, "plot.svg");
| workshop/src/.ipynb_checkpoints/Graphing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Using Variational Autoencoder to Generate Digital Numbers
# Variational Autoencoders (VAEs) are very popular approaches to unsupervised learning of complicated distributions. In this example, we are going to use VAE to generate digital numbers.
# 
# In a standard Autoencoder, we have an encoder network that takes in the original image and encodes it into a vector of latent variables, and a decoder network that takes in the latent vector and outputs a generated image that we hope looks similar to the original image.
# 
# In VAE, we constrain the latent variable to be unit gaussian, so that we can sample latent variables from a unit gaussian distribution, then use the decoder network to generate images.
#
# So, we get the architecture above. Instead of generating the latent variables directly, the encoder network outputs a mean vector and a variance (or log-variance) vector, and the decoder takes the sampled latent vector to generate the output image. We also add a penalty on the latent distribution's KL divergence from a unit gaussian distribution.
# ## Define the Model
# +
# a bit of setup
import numpy as np
from bigdl.dataset import mnist
from zoo.pipeline.api.keras.layers import *
from zoo.pipeline.api.keras.models import Model
from zoo.pipeline.api.keras.utils import *
import datetime as dt
# Image and latent-space dimensions for the MNIST VAE.
IMAGE_SIZE = 784  # 28 * 28 pixels, flattened
IMAGE_ROWS = 28
IMAGE_COLS = 28
IMAGE_CHANNELS = 1
latent_size = 2  # 2-D latent space so the manifold can be visualised later
# -
# We are going to use a simple cnn network as our encoder and decoder. In decoder, we use SpatialFullConvolution (aka deconvolution or convolution transpose) layer to upsample the image to the original resolution.
def get_encoder(latent_size):
    """Build the encoder network.

    Two stride-2 5x5 convolutions downsample the 28x28 input to a 32x7x7
    feature map, which is flattened and fed to two dense heads producing
    the latent mean and log-variance vectors.

    :param latent_size: dimensionality of the latent space
    :return: Model mapping an image to [z_mean, z_log_var]
    """
    input0 = Input(shape=(IMAGE_CHANNELS, IMAGE_COLS, IMAGE_ROWS))
    # CONV: 28x28 -> 14x14 -> 7x7 via stride-2 convolutions
    conv1 = Convolution2D(16, 5, 5, input_shape=(IMAGE_CHANNELS, IMAGE_ROWS, IMAGE_COLS), border_mode='same',
                          subsample=(2, 2))(input0)
    relu1 = LeakyReLU()(conv1)
    conv2 = Convolution2D(32, 5, 5, input_shape=(16, 14, 14), border_mode='same', subsample=(2, 2))(relu1)
    relu2 = LeakyReLU()(conv2)  # 32,7,7
    # flatten the feature map to a vector (a dead `Flatten()` call that was
    # immediately overwritten by this Reshape has been removed)
    reshape = Reshape([7*7*32])(relu2)
    # fully connected heads output the mean vector and log-variance vector
    z_mean = Dense(latent_size)(reshape)
    z_log_var = Dense(latent_size)(reshape)
    model = Model([input0], [z_mean, z_log_var])
    return model
def get_decoder(latent_size):
    """Build the decoder network.

    A dense layer expands the latent vector to a 32x7x7 feature map, which
    is upsampled back to 28x28 using bilinear resizing followed by
    convolutions (instead of deconvolution layers), ending in a sigmoid
    so outputs are valid pixel probabilities.
    """
    latent_in = Input(shape=(latent_size,))
    expanded = Dense(1568)(latent_in)
    feature_map = Reshape((32, 7, 7))(expanded)
    activated = Activation('relu')(feature_map)
    # upsample 7x7 -> 14x14 -> 28x28 with resize + conv instead of deconv
    upsampled_14 = ResizeBilinear(14, 14)(activated)
    conv_14 = Convolution2D(16, 5, 5, subsample=(1, 1), activation='relu', border_mode='same', input_shape=(32, 14, 14))(upsampled_14)
    upsampled_28 = ResizeBilinear(28, 28)(conv_14)
    conv_28 = Convolution2D(1, 5, 5, subsample=(1, 1), input_shape=(16, 28, 28), border_mode='same')(upsampled_28)
    image_out = Activation('sigmoid')(conv_28)
    return Model([latent_in], [image_out])
def get_autoencoder(latent_size):
    """Assemble the full VAE: encoder -> gaussian sampling -> decoder.

    Returns a tuple of (end-to-end model whose outputs are the encoder
    statistics and the reconstruction, standalone decoder model for
    generating images from latent samples).
    """
    image_in = Input(shape=(IMAGE_CHANNELS, IMAGE_COLS, IMAGE_ROWS))
    stats = get_encoder(latent_size)(image_in)
    latent_sample = GaussianSampler()(stats)
    decoder_model = get_decoder(latent_size)
    reconstruction = decoder_model(latent_sample)
    return Model([image_in], [stats, reconstruction]), decoder_model
init_engine()
autoencoder,decoder_model = get_autoencoder(2)
# ## Get the MNIST Dataset
def get_mnist(sc, mnist_path):
    """Load MNIST training images and return an RDD of Samples.

    Each image is binarised at pixel value 128; the binary image serves as
    both the feature and both targets (reconstruction + KLD branches).
    """
    (train_images, train_labels) = mnist.read_data_sets(mnist_path, "train")
    images = np.reshape(train_images, (60000, 1, 28, 28))
    return sc.parallelize(images).map(
        lambda img: Sample.from_ndarray((img > 128) * 1.0,
                                        [(img > 128) * 1.0, (img > 128) * 1.0]))
# +
mnist_path = "datasets/mnist" # please replace this
from pyspark import SparkContext
sc =SparkContext.getOrCreate()
train_data = get_mnist(sc, mnist_path)
# (train_images, train_labels) = mnist.read_data_sets(mnist_path, "train")
# -
# ## Define our Training Objective
# The `size_average` parameter in BCECriterion should be False, because when `size_average` is True, the negative log-likelihood computed in BCECriterion is averaged over each observation **as well as dimensions**, while in the `KLDCriterion` the KL-divergence is **summed over each observation** - so the loss would be wrong.
# size_average=False is required: KLDCriterion sums over observations, so
# BCE must also be per-observation (scaled by 1/batch_size) or the two loss
# terms would be on different scales.
batch_size = 100
criterion = ParallelCriterion()
criterion.add(KLDCriterion(), 1.0)
criterion.add(BCECriterion(size_average=False), 1.0/batch_size)
# ## Compile the Model
# +
autoencoder.compile(optimizer=Adam(0.001), loss=criterion)
import os
if not os.path.exists("./log"):
os.makedirs("./log")
app_name='vae-digits-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
autoencoder.set_tensorboard(log_dir='./log/',app_name=app_name)
print("Saving logs to ", app_name)
# -
# ## Start Training
# This step may take a while depending on your system.
autoencoder.fit(x=train_data,
batch_size=batch_size,
nb_epoch = 6)
# Let's show the learning curve.
# +
import matplotlib
matplotlib.use('Agg')
# %pylab inline
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import datetime as dt
# -
train_summary = TrainSummary('./log/', app_name)
loss = np.array(train_summary.read_scalar("Loss"))
plt.figure(figsize = (12,12))
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
# You can also open tensorboard to see this curve.
# ## Sample Some Images from the Decoder
# +
from matplotlib.pyplot import imshow
img = np.column_stack([decoder_model.forward(np.random.randn(1,2)).reshape(28,28) for s in range(8)])
imshow(img, cmap='gray')
# -
# ## Explore the Latent Space
# +
# This code snippet references this keras example (https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py)
from scipy.stats import norm
# display a 2D manifold of the digits
n = 15  # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        # decode each latent grid point into a digit and paste it into the mosaic
        z_sample = np.array([[xi, yi]])
        x_decoded = decoder_model.forward(z_sample)
        digit = x_decoded.reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
| apps/variational-autoencoder/using_variational_autoencoder_to_generate_digital_numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Forest structure using PDAL + Python
#
# Dr <NAME>, November 2019.
#
# This work is a set of python modules to replace MATLAB code for generating TERN forest metrics from airborne LIDAR.
#
# ## Fundamental ideas
#
# Existing code uses a series of nested loops, meaning we can't take advantage of array operations or easily reformat or parallelise functionality
#
# The approach used here defines a transportable function for each TERN product. These are applied to the data using a single loop (which could be chunked and parallelised).
#
# A simple process step-through looks like:
#
# 1. Read LAS tile using PDAL. This removes an uncompression step. It also removes low outliers and computes normalised height for each point on the fly
# 2. Read numpy labelled arrays from PDAL output into a GeoPandas dataframe, and apply a 2D spatial index
# 3. From LAS file metadata, produce a fishnet grid with cells of size 'output resolution X output resolution'
# 4. Iterate over grid cells, select valid points and generate TERN products for each grid cell
# 5. Assemble an output array for each TERN product and write to GeoTIFF
#
# This set of functions operates per-las-tile. An additional layer may be added to merge multiple raster outputs into larger datasets
#
# ## to do:
#
# - check calculations
# - make final product set
# - test per-product generation...
# - module-ify
NODATA_VALUE = -9999
LCF_HEIGHTS = [0, 0.5, 1, 2, 3]
# +
#imports
import pdal
import numpy as np
import json
from shapely.geometry import Point
from shapely.geometry import MultiPolygon
from shapely.geometry import box
#from shapely.strtree import STRtree
import geopandas as gpd
import pandas as pd
import osmnx as ox
import os
# not using this, using geopandas instead
from rtree import index
# this is needed to create a raster from the output array
from osgeo import gdal
import osgeo.osr as osr
# +
def write_product_geotiff(griddedpoints, outfile, parameters):
    """
    writes out a geotiff from a numpy array of forest metric
    results.
    inputs:
    - a numpy array of metrics [griddedpoints]
    - an outfile name [outfile]
    - a dictionary of parameters for the raster (width, height, WKT
      projection, upper-left corner coordinates, resolution)
    outputs:
    - [outfile] written to disk; returns an empty tuple (the GDAL
      dataset is flushed and released before returning)
    """
    width = parameters["width"]
    height = parameters["height"]
    wktcrs = parameters["projection"]
    srs = osr.SpatialReference()
    srs.ImportFromWkt(wktcrs)
    drv = gdal.GetDriverByName("GTiff")
    ds = drv.Create(outfile, width, height, 1, gdal.GDT_Float32 )
    # geotransform: origin at the tile's upper-left corner, square
    # north-up pixels (negative y resolution)
    ds.SetGeoTransform([parameters["upperleft_x"],
                        parameters["resolution"],
                        0,
                        parameters["upperleft_y"],
                        0,
                        -parameters["resolution"]])
    ds.SetProjection(srs.ExportToWkt())
    # rot90: product arrays are indexed [x, y] but GDAL expects row-major
    ds.GetRasterBand(1).WriteArray(np.rot90(griddedpoints))
    ds.FlushCache()
    ds = None
    return()
def pdal2df(points):
    """
    Convert a PDAL pipeline return array into a GeoPandas dataframe,
    adding a 3-D Point geometry column built from the X/Y/Z fields.
    """
    point_array = points[0]
    column_names = [name for name, __ in point_array.dtype.descr]
    frame = gpd.GeoDataFrame({name: point_array[name] for name in column_names})
    frame.name = 'nodes'
    frame['geometry'] = frame.apply(lambda row: Point((row['X'], row['Y'], row['Z'])), axis=1)
    return(frame)
def spatialindex(dataframe):
    """Return the (lazily built) rtree spatial index of a GeoPandas dataframe."""
    return(dataframe.sindex)
#get a pointview from PDAL
def readlasfile(lasfile):
"""
Run a PDAL pipeline. Input is a JSON declaration to
deliver to PDAL. Output is a labelled numpy array.
Data are filtered to:
- label local minima as noise
- compute height above ground using nearest ground point
neighbours (TIN method arriving soon)
- sort using a morton order (space filling curve) to
speed indexing later.
"""
pipeline = {
"pipeline": [
{
"type": "readers.las",
"filename": lasfile
},
{
"type": "filters.hag"
}
]
}
pipeline = pdal.Pipeline(json.dumps(pipeline))
count = pipeline.execute()
#read points into labelled arrays
arrays = pipeline.arrays
#return a numpy array to operate on
return(arrays)
def readlasmetadata(lasfile):
    """
    Read only the first point of *lasfile* so PDAL exposes the tile
    metadata (extent, CRS) without loading the whole point cloud.
    Returns the metadata as a parsed JSON blob (dict).
    """
    pipeline_spec = {
        "pipeline": [
            {"type": "readers.las", "filename": lasfile, "count": 1},
            {"type": "filters.info"}
        ]
    }
    pdal_pipeline = pdal.Pipeline(json.dumps(pipeline_spec))
    pdal_pipeline.execute()
    # extract metadata into a JSON blob
    return(json.loads(pdal_pipeline.metadata))
def gen_raster_cells(metadata, resolution):
    """
    Generate fishnet cells of 'resolution x resolution' for point querying.
    input:
    - PDAL metadata for the tile
    output:
    - shapely geometry containing square polygons covering the LAS tile extent
    """
    las_meta = metadata["metadata"]["readers.las"]
    tile_bbox = box(las_meta["minx"], las_meta["miny"],
                    las_meta["maxx"], las_meta["maxy"])
    return(ox.quadrat_cut_geometry(tile_bbox, quadrat_width=resolution))
def get_cell_points(poly, df, sindex):
    """
    Select the points from *df* that intersect *poly*: a fast rtree
    candidate pass over bounds, then an exact intersects() test.
    The tiny buffer cleans up degenerate polygon edges before querying.
    """
    cleaned = poly.buffer(1e-14).buffer(0)
    candidates = df.iloc[list(sindex.intersection(cleaned.bounds))]
    return(candidates[candidates.intersects(cleaned)])
# -
# Vegetation cover fraction: (Nfirst - Nsingle) / Nfirst
def comp_vcf(points):
    """
    Vegetation cover fraction per the TERN product manual:
        (Nfirst - Nsingle) / Nfirst
    where Nfirst is the count of first returns and Nsingle the count of
    single returns in the cell. Returns NODATA_VALUE when the cell has
    no first returns at all.
    """
    num_first = np.size(np.where(points["ReturnNumber"].values == 1))
    num_single = np.size(np.where(points["NumberOfReturns"].values == 1))
    if num_first <= 0:
        print('no first returns, set vcf to {}'.format(NODATA_VALUE))
        return(NODATA_VALUE)
    return((num_first - num_single) / num_first)
# +
# Canopy layering index:
# (sum(weight * MaxNumberOfreturns * numberofpointsatreturn) /
# sum(weight * MaxNumberofReturns)) - 1
def comp_cli(points):
    """
    Compute the canopy layering index for one grid cell.

    NOTE(review): as written, each numerator term is
    `weight * Nr * maxreturns` and the matching denominator term is
    `weight * Nr`, so the ratio always reduces to `maxreturns` and the
    function returns `maxreturns - 1` regardless of how the points are
    distributed over return numbers. This is unlikely to be the intended
    TERN formula (see the product-manual comment above) - confirm before
    relying on the CLI raster.
    """
    maxreturns = np.max(points["NumberOfReturns"])
    minreturns = np.min(points["NumberOfReturns"])
    cli_numerator = []
    cli_denominator = []
    for nreturn in np.arange(minreturns, maxreturns+1, 1):
        # I don't really understand the fixed values in the weighting function - we can record
        # a lot more returns... I get that it's supposed to give higher return numbers a lower
        # weight based on likelihood of being recorded..
        weight = 1/min([5,nreturn])
        points_at_return = np.where(points["ReturnNumber"] == nreturn)
        cli_numerator.append(weight * np.size(points_at_return) * maxreturns )
        cli_denominator.append( weight * np.size(points_at_return) )
    cli = (np.sum(cli_numerator) / np.sum(cli_denominator)) - 1
    return(cli)
# +
# vegetation layer cover fraction: LCF
def comp_lcf(points, heights, vcf):
"""
Compute LCF as per the TERN product manual:
LCF = VCF * (((veg returns below H2) - (veg returns below H1)) / (veg returns below H2))
Inputs:
- a set of points to compute LCF over
- a height threshold pair, containing H1 and H2 as an array [h1, h2]
- a precomputed VCF
Outputs:
- a floating point number denoting LCF
Conditions:
The LCF *must* be computed over the same set of points as the VCF used as input.
Albert's MATLAB code computes a raster for each height in LCF_HEIGHTS
...and uses those to compute LCF later.
"""
#find veg returns - ASPRS classes 3,4,5
veg_returns = np.where(np.logical_or(points["Classification"].values == 3,
points["Classification"].values == 4,
points["Classification"].values == 5))
# how many veg returns have height below the first threshold?
vegbelowh1 = np.size(np.where(points["HeightAboveGround"][vegreturns] < h1))
# how many veg returns have height below the second threshold?
vegbelowh2 = np.size(np.where(points["HeightAboveGround"][vegreturns] < h2))
# compute the LCF for this height pair
lcf = vcf * ( (vegbelowh2 - vegbelowh1) / vegbelowh2)
return(lcf)
# +
#CTH - TERN product manual says 'highest vegetation point', MATLAB code says '0.95 quantile of
# vegetation returns above 2m'. Below 2m is ignored
def comp_cth(points):
    """
    Canopy top height: the 0.95 quantile of vegetation returns (ASPRS
    classes 3, 4, 5) with normalised height above 2 m; points at or
    below 2 m are ignored. Returns NODATA_VALUE when no vegetation
    points qualify.
    Fix: class selection uses np.isin - the previous 3-argument
    np.logical_or passed the class-5 condition as numpy's `out`
    parameter, so class-5 (high vegetation) points were dropped.
    """
    veg_returns = np.where(np.isin(points["Classification"].values, (3, 4, 5)))
    try:
        vegpoints = points["HeightAboveGround"].values[veg_returns]
        canopy_index = np.where(vegpoints > 2.0)
        if (np.size(canopy_index) > 0):
            cth = np.quantile(vegpoints[canopy_index], 0.95)
        else:
            cth = NODATA_VALUE
    except ValueError:
        # no vegetation returns were present
        cth = NODATA_VALUE
    return(cth)
# -
#CBH - ambiguous in TERN docs, will pull from MATLAB code
# there, it states that CBH is the 0.1th percentile of vegetation with normalised height
# above 2m
def comp_cbh(points):
    """
    Canopy base height (per the MATLAB reference): the 0.10 quantile of
    vegetation returns (ASPRS classes 3, 4, 5) with normalised height
    above 2 m. Returns NODATA_VALUE when no vegetation points qualify.
    Fix: class selection uses np.isin - the previous 3-argument
    np.logical_or passed the class-5 condition as numpy's `out`
    parameter, so class-5 (high vegetation) points were dropped.
    """
    veg_returns = np.where(np.isin(points["Classification"].values, (3, 4, 5)))
    try:
        # normalised heights of the vegetation points
        vegpoints = points["HeightAboveGround"].values[veg_returns]
        canopy_index = np.where(vegpoints > 2.0)
        if (np.size(canopy_index) > 0):
            # the 0.1 quantile of the above-2m canopy points
            cbh = np.quantile(vegpoints[canopy_index], 0.10)
        else:
            cbh = NODATA_VALUE
    except ValueError:
        # no vegetation returns were present
        cbh = NODATA_VALUE
    return(cbh)
def comp_dem(points):
    # TODO: interpolate ground returns into a grid and output a raster.
    # Not implemented - likely to be handled by PDAL or GDAL directly;
    # currently returns an empty tuple as a placeholder.
    return()
def comp_fbf(points):
    """
    Fractional cover of building returns (ASPRS class 6) in this cell:
    building points / total points. Returns 0 when the cell holds no
    building returns.
    """
    n_total = np.size(points["Classification"])
    n_building = np.size(np.where(points["Classification"].values == 6))
    if n_building == 0:
        return(0)
    return(n_building / n_total)
def comp_density(points, resolution):
    """Mean point density for the cell: point count divided by cell area."""
    return(np.size(points["Classification"]) / resolution ** 2)
def read_data(lasfile):
    """
    Convenience wrapper: read LAS metadata and points, build a GeoPandas
    dataframe and its 2-D spatial index.
    Returns (metadata, dataframe, spatial_index).
    """
    meta = readlasmetadata(lasfile)
    frame = pdal2df(readlasfile(lasfile))
    return(meta, frame, spatialindex(frame))
def make_file_rootname(lasfile):
    """
    Strip directories and the 4-character extension (e.g. '.las', '.laz')
    from a path, returning the bare tile name for output file naming.
    """
    return(lasfile.split("/")[-1][:-4])
def compute_tern_products(metadata, points, sindex, resolution, lasfile, outpath):
    """
    Wrapper to iterate over the input data and generate rasters for each product.
    *note this part could be parallelised - maybe per-product, or per-cell
    Each grid square processed in this loop corresponds to one pixel in an output raster.
    inputs:
    - PDAL metadata blob for the tile [metadata]
    - GeoPandas dataframe of points [points] and its spatial index [sindex]
    - output pixel size in CRS units [resolution]
    - source LAS path [lasfile], used to build output file names
    - directory under which per-product folders are created [outpath]
    outputs:
    - a dict mapping product name -> numpy raster array; one GeoTIFF per
      product is also written under outpath
    """
    #set up an 'output resolution' sized grid - like a fishnet grid.
    # each polygon in the resulting set covers an area of 'resolution X resolution'
    pixel_grid = gen_raster_cells(metadata, resolution)
    #set up output rasters
    # get tile width and height
    tile_width = metadata["metadata"]["readers.las"]["maxx"] - metadata["metadata"]["readers.las"]["minx"]
    tile_height = metadata["metadata"]["readers.las"]["maxy"] - metadata["metadata"]["readers.las"]["miny"]
    # NOTE(review): np.ceil is applied before the division - presumably
    # int(np.ceil(tile_width / resolution)) was intended; confirm sizing.
    raster_xsize = int(np.ceil(tile_width) / resolution)
    raster_ysize = int(np.ceil(tile_height) / resolution)
    #replicate for all products...
    vcf_raster = np.zeros((raster_xsize, raster_ysize))
    cth_raster = np.zeros((raster_xsize, raster_ysize))
    cbh_raster = np.zeros((raster_xsize, raster_ysize))
    fbf_raster = np.zeros((raster_xsize, raster_ysize))
    cli_raster = np.zeros((raster_xsize, raster_ysize))
    #internal loop around grid squares covering the LAS tile.
    # this is another point for parallelisation - since we can set up a list of geometries
    # and cast that at multiple processes, setting up one process per grid square
    # another way to do this would be to recast this loop block into a function which can
    # be called by one process per product
    # the second strategy seems easier, then only one process is trying to write into each
    # output array.
    for pixel in pixel_grid:
        #compute output array index for this cell:
        poly_x, poly_y = pixel.centroid.xy
        poly_base_x = poly_x[0] - metadata["metadata"]["readers.las"]["minx"]
        poly_base_y = poly_y[0] - metadata["metadata"]["readers.las"]["miny"]
        array_x = int(np.floor((poly_base_x / (resolution)) ))
        array_y = int(np.floor((poly_base_y / (resolution)) ))
        #get points for this cell
        matches = get_cell_points(pixel, points, sindex)
        #compute in order
        #VCF
        vcf_raster[array_x, array_y] = comp_vcf(matches)
        #LCF - need stuff about levels here...
        #lcf_raster[array_x, array_y] = comp_lcf(points)
        #CTH
        cth_raster[array_x, array_y] = comp_cth(matches)
        #CBH
        cbh_raster[array_x, array_y] = comp_cbh(matches)
        #FBF
        fbf_raster[array_x, array_y] = comp_fbf(matches)
        #CLI
        cli_raster[array_x, array_y] = comp_cli(matches)
    #end of computing stuff, time to make outputs...
    #set up GDAL parameters
    wktcrs = metadata["metadata"]["readers.las"]["comp_spatialreference"]
    raster_parameters = {}
    raster_parameters["width"] = np.shape(vcf_raster)[0]
    raster_parameters["height"] = np.shape(vcf_raster)[1]
    raster_parameters["upperleft_x"] = metadata["metadata"]["readers.las"]["minx"]
    raster_parameters["upperleft_y"] = metadata["metadata"]["readers.las"]["maxy"]
    raster_parameters["resolution"] = resolution
    raster_parameters["projection"] = wktcrs
    # create one output folder per product if it doesn't exist yet
    if (not os.path.isdir(outpath + "/vcf")):
        os.mkdir(outpath + "/vcf")
    if (not os.path.isdir(outpath + "/cth")):
        os.mkdir(outpath + "/cth")
    if (not os.path.isdir(outpath + "/cbh")):
        os.mkdir(outpath + "/cbh")
    if (not os.path.isdir(outpath + "/fbf")):
        os.mkdir(outpath + "/fbf")
    if (not os.path.isdir(outpath + "/cli")):
        os.mkdir(outpath + "/cli")
    fileroot = make_file_rootname(lasfile)
    #set output filenames
    vcf_raster_path = outpath + "/vcf/" + fileroot + "-VCF-" + str(resolution) + "m.tiff"
    cth_raster_path = outpath + "/cth/" + fileroot + "-CTH-" + str(resolution) + "m.tiff"
    cbh_raster_path = outpath + "/cbh/" + fileroot + "-CBH-" + str(resolution) + "m.tiff"
    fbf_raster_path = outpath + "/fbf/" + fileroot + "-FBF-" + str(resolution) + "m.tiff"
    cli_raster_path = outpath + "/cli/" + fileroot + "-CLI-" + str(resolution) + "m.tiff"
    #write geotiffs and return arrays for inspection...
    write_product_geotiff(vcf_raster, vcf_raster_path, raster_parameters)
    write_product_geotiff(cth_raster, cth_raster_path, raster_parameters)
    write_product_geotiff(cbh_raster, cbh_raster_path, raster_parameters)
    write_product_geotiff(fbf_raster, fbf_raster_path, raster_parameters)
    write_product_geotiff(cli_raster, cli_raster_path, raster_parameters)
    tern_products = {}
    tern_products["vcf"] = vcf_raster
    tern_products["cth"] = cth_raster
    tern_products["cbh"] = cbh_raster
    tern_products["fbf"] = fbf_raster
    tern_products["cli"] = cli_raster
    return(tern_products)
# ## Testing functionality using a local file
# The following section generates metrics from a local LAZ file. Plugging in download mechanics from ELVIS will be added later
# +
#lidar test file - <NAME>, chosen for varied vegetation cover and topography
# this is pretty big, try it out if you've got more resources than my macbook pro!
# thinking ahead, there will probably end up being a file splitting pre-process for
# tiles like these... capping at say, 20 mill points. Sorting data before splitting
# will be essential.
#lasfile = "/Volumes/Antares/ACT-lidar/8ppm/callingelvis-testdata/ACT2015_8ppm-C3-AHD_6966094_55.laz"
# +
#lasfile = "/Volumes/Antares/fire-test/NSW Government - Spatial Services-2/Point Clouds/AHD/StAlbans201709-LID2-C3-AHD_2866308_56_0002_0002/StAlbans201709-LID2-C3-AHD_2866308_56_0002_0002.las"
# -
lasfile = "../../callingelvis-sampledata/Berridale201802-LID2-C3-AHD_6585974_55_0002_0002.las"
# dump everything from memory
points = None
df = None
vcf_raster = None
cth_raster = None
fbf_raster = None
metadata = readlasmetadata(lasfile)
# +
# %%time
# this part of the process is simply reading from the source file. No analysis yet.
points = readlasfile(lasfile)
# +
# uncomment to examine LAS metadata
#metadata
# +
# %%time
#here we read points into a GeoDataFrame and dump the labelled array.
# this is a pretty expensive step RAM wise, we're duplicating all the points...
df = pdal2df(points)
# set the points structured array to None, it isn't used anymore
points = None
# +
# %%time
# here we generate an RTree index on the dataframe using GeoPandas.
# also pretty expensive...
sindex = spatialindex(df)
# +
# set an output resolution
resolution = 25
# -
metadata["metadata"]["readers.las"]["minx"]
# +
# %%time
#produce some rasters!
tern_products = compute_tern_products(metadata, df, sindex, resolution, lasfile, "../../")
# -
this = np.array([1,2,3,4,6])
that = np.where(this > 7.0)
np.size(that)
# +
from matplotlib import pyplot as plt
# %matplotlib inline
# -
plt.imshow(np.rot90(tern_products["vcf"]))
plt.colorbar()
plt.title("Vegetation cover fraction (VCF)")
plt.imshow(np.rot90(tern_products["cth"]), vmin = 0)
plt.colorbar()
plt.title("Canopy top height (CTH)")
plt.imshow(np.rot90(tern_products["cbh"]), vmin = 0)
plt.colorbar()
plt.title("Canopy base height (CBH)")
plt.imshow(np.rot90(tern_products["cth"] -tern_products["cbh"]))
plt.colorbar()
plt.title("Canopy top and base height difference")
plt.imshow(np.rot90(tern_products["fbf"]))
plt.colorbar()
plt.title("Building fraction (FBF)")
plt.imshow(np.rot90(tern_products["cli"]))
plt.colorbar()
plt.title("Canopy Layering Index (CLI)")
polygons = gen_raster_cells(metadata, resolution)
polygons[40]
matches = get_cell_points(polygons[55], df, sindex)
comp_cli(matches)
# +
# Canopy layering index:
# (sum(weight * MaxNumberOfreturns * numberofpointsatreturn) / sum(weight * MaxNumberofReturns)) - 1
def comp_cli(points):
    # Debug/exploration variant of the comp_cli defined earlier in this
    # notebook; redefining it here shadows the original for the cells below.
    # NOTE(review): the final expression is
    # (weight * sum(npoints) * maxreturns) / (weight * sum(npoints)) - 1,
    # which always reduces to maxreturns - 1; the prints exist to inspect
    # exactly that behaviour.
    maxreturns = np.max(points["NumberOfReturns"])
    print(maxreturns)
    minreturns = np.min(points["NumberOfReturns"])
    print(minreturns)
    npoints = []
    weight = 1/min([5,maxreturns])
    print("weight: {}".format(weight))
    for nreturn in np.arange(minreturns, maxreturns+1, 1):
        # I don't really understand the fixed values in the weighting function - we can record
        # a lot more returns... I get that it's supposed to give higher return numbers a lower
        # weight based on likelihood of being recorded..
        points_at_return = np.where(points["ReturnNumber"] == nreturn)
        print("Nr: {}".format(np.size(points_at_return)))
        npoints.append( np.size(points_at_return) )
    print(np.sum(npoints))
    cli = ((weight * np.sum(npoints) * maxreturns) / (weight * np.sum(npoints))) - 1
    return(cli)
# -
def cli_alt(points):
    # Alternative CLI sketch using only the maximum return number.
    # NOTE(review): the weight and points_at_return factors cancel, so this
    # always evaluates to maxreturns (as a float) when any points have the
    # maximum return number - presumably still exploratory; confirm.
    maxreturns = np.max(points["NumberOfReturns"])
    print(maxreturns)
    weight = 1/min([5,maxreturns])
    points_at_return = np.size(np.where(points["ReturnNumber"] == maxreturns))
    print("Nr: {}".format(np.size(points_at_return)))
    cli = ( weight * points_at_return * maxreturns ) / (weight * points_at_return)
    return(cli)
comp_cli(matches)
np.divide(854.0, 284.6666666666667)
cli_alt(matches)
comp_density(matches, resolution)
# ## code purgatory
# +
# an attempt at building an index without pandas. Pandas was far easier
"""
%%time
## rtree index building straight from the point dataset... which also duplicates the point set...
idx = index.Index()
for pid, point in enumerate(points[0]):
idx.insert(pid, (point[0], point[1],point[0], point[1]), point)
"""
# -
| notebooks/.ipynb_checkpoints/forest-structure-by-product-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# Notebook to test a **boosting** model in the context of traceability between features and bug reports.
# # Load Libraries and Datasets
# +
from mod_finder_util import mod_finder_util
mod_finder_util.add_modules_origin_search_path()
import pandas as pd
import numpy as np
from modules.models_runner.feat_br_models_runner import Feat_BR_Models_Runner
from modules.utils import firefox_dataset_p2 as fd
from modules.utils import aux_functions
from modules.utils import model_evaluator as m_eval
from modules.utils import similarity_measures as sm
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score, precision_score, f1_score
from sklearn.linear_model import LogisticRegressionCV
from imblearn.over_sampling import SMOTE, ADASYN
from enum import Enum
from collections import Counter
import warnings; warnings.simplefilter('ignore')
# -
# # Run All Models
# ## Volunteers Only Strategy
# Build and run the four base traceability models (volunteers-only strategy).
models_runner_4 = Feat_BR_Models_Runner()
lsi_model_4 = models_runner_4.run_lsi_model()
lda_model_4 = models_runner_4.run_lda_model()
bm25_model_4 = models_runner_4.run_bm25_model()
w2v_model_4 = models_runner_4.run_word2vec_model()
# # Ensemble Model
# ## Tranform Results Matrices to Vectors
# +
def transform_sim_matrix_to_sim_vec(sim_matrix_df, model_name):
    """
    Flatten a (rows x cols) similarity matrix into a single-column
    DataFrame: the index is '<row>_<col>' and the only column, named
    *model_name*, holds the corresponding similarity value. Rows are
    emitted column-major (all rows of the first column first).
    """
    flat_df = pd.DataFrame(columns=[model_name])
    for column in sim_matrix_df.columns:
        col_series = sim_matrix_df[column]
        for row_label in sim_matrix_df.index:
            flat_df.at['{}_{}'.format(row_label, column), model_name] = col_series[row_label]
    return flat_df
sim_vec_lsi = transform_sim_matrix_to_sim_vec(lsi_model_4.get_sim_matrix(), 'lsi')
sim_vec_lda = transform_sim_matrix_to_sim_vec(lda_model_4.get_sim_matrix(), 'lda')
sim_vec_bm25 = transform_sim_matrix_to_sim_vec(bm25_model_4.get_sim_matrix(), 'bm25')
sim_vec_wv = transform_sim_matrix_to_sim_vec(w2v_model_4.get_sim_matrix(), 'wv')
# -
# ## Transform Vectors to DataFrame
# +
ensemble_input_df = pd.DataFrame(columns=['pred'], index=sim_vec_lsi.index)
out_df = pd.merge(ensemble_input_df, sim_vec_lsi, left_index=True, right_index=True)
out_df = pd.merge(out_df, sim_vec_lda, left_index=True, right_index=True)
out_df = pd.merge(out_df, sim_vec_bm25, left_index=True, right_index=True)
out_df = pd.merge(out_df, sim_vec_wv, left_index=True, right_index=True)
new_order = [1,2,3,4,0]
out_df = out_df[out_df.columns[new_order]]
ensemble_input_df = out_df.copy()
ensemble_input_df.head()
# -
# ## Insert Oracle Data
# +
orc_vec_df = transform_sim_matrix_to_sim_vec(fd.Feat_BR_Oracles.read_feat_br_volunteers_df().T, 'oracle')
ensemble_input_df = pd.merge(ensemble_input_df, orc_vec_df, left_index=True, right_index=True)
new_order = [0,1,2,3,5,4]
ensemble_input_df = ensemble_input_df[ensemble_input_df.columns[new_order]]
ensemble_input_df.head(15)
# -
# ## Balancing Dataset and Split Data on Train and Test
# +
# Feature matrix = four base-model similarities; target = oracle label.
ensemble_input_df = ensemble_input_df.infer_objects()
X = ensemble_input_df[['lsi','lda','bm25','wv']]
y = ensemble_input_df['oracle']
# 80/20 split first, then oversample ONLY the training fold with SMOTE so
# the test distribution stays untouched.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, shuffle=True)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
X_train, y_train = SMOTE().fit_resample(X_train, y_train)
print("Train SMOTE: {}".format(sorted(Counter(y_train).items())))
# fit_resample returns plain arrays; restore DataFrames with column names
X_train = pd.DataFrame(X_train, columns=['lsi','lda','bm25','wv'])
y_train = pd.DataFrame(y_train)
# -
# ## Discretizer Function
def discretizer(x):
    """Threshold a probability at 0.5: values below map to 0, otherwise 1."""
    return int(x >= 0.5)
# ## Logistic Regressor
# +
# Ensemble: logistic regression (3-fold CV, model selection by recall)
# over the four base-model similarity features.
ensemb_model = LogisticRegressionCV(cv=3, scoring='recall').fit(X_train, y_train)
preds = ensemb_model.predict_proba(X_test)[:,1]
preds = list(map(discretizer, preds))
precision = precision_score(y_true=y_test, y_pred=preds)
recall = recall_score(y_true=y_test, y_pred=preds)
fscore = f1_score(y_true=y_test, y_pred=preds)
print('Recall - Test Data: {:2.3%}'.format(recall))
print('Precision - Test Data: {:2.3%}'.format(precision))
print('F-Score - Test Data: {:2.3%}'.format(fscore))
# learned coefficients: each base model's contribution to the ensemble
print(ensemb_model.coef_)
# -
# # Test with Other Model Types
# ## XGBoost
# +
from xgboost import XGBClassifier

# Fit an XGBoost classifier on the similarity features with a fixed seed,
# then report thresholded (0.5) precision/recall/F1 on the test split.
xgb = XGBClassifier(seed=42).fit(X_train, y_train)
preds = xgb.predict_proba(X_test)[:,1]
preds = list(map(discretizer, preds))
precision = precision_score(y_true=y_test, y_pred=preds)
recall = recall_score(y_true=y_test, y_pred=preds)
fscore = f1_score(y_true=y_test, y_pred=preds)
print('Recall - Test Data: {:2.3%}'.format(recall))
print('Precision - Test Data: {:2.3%}'.format(precision))
print('F-Score - Test Data: {:2.3%}'.format(fscore))
# per-feature importances: contribution of lsi/lda/bm25/wv
print(xgb.feature_importances_)
# -
# ## Extra Trees Classifier
# +
from sklearn.ensemble import ExtraTreesClassifier
# Same evaluation protocol as the previous cells, with an Extra-Trees classifier.
et = ExtraTreesClassifier(random_state=42).fit(X_train, y_train)
preds = et.predict_proba(X_test)[:,1]
preds = list(map(discretizer, preds))
precision = precision_score(y_true=y_test, y_pred=preds)
recall = recall_score(y_true=y_test, y_pred=preds)
fscore = f1_score(y_true=y_test, y_pred=preds)
# FIX: added the missing ': ' so this message matches the sibling cells above.
print('Recall - Test Data: {:2.3%}'.format(recall))
print('Precision - Test Data: {:2.3%}'.format(precision))
print('F-Score - Test Data: {:2.3%}'.format(fscore))
print(et.feature_importances_)  # per-feature importances
# -
| notebooks/firefox_p2/feat_br_tracing/boosting-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import argparse
from typing import Dict
from tempfile import gettempdir
import numpy as np
import torch
from torch import nn, optim
from torchvision.models.resnet import resnet50
from tqdm import tqdm
from avkit.configs import load_config_data
from avkit.data import LocalDataManager
from avkit.dataset import AgentDataset, EgoDataset
from avkit.dataset.utilities import build_dataloader
from avkit.rasterization import build_rasterizer
from avkit.evaluation import write_coords_as_csv, compute_mse_error_csv
from avkit.geometry import transform_points
from avkit.visualization import PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR, draw_trajectory
from matplotlib import pyplot as plt
import os
# -
# ## Prepare Data path and load cfg
#
# By setting the `AVKIT_DATA_FOLDER` variable, we can point the script to the folder where the data lie.
#
# Then, we load our config file with relative paths and other configurations (rasterer, training params...).
# set env variable for data
# NOTE(review): hard-coded per-user path — adjust before running on another machine.
os.environ["AVKIT_DATA_FOLDER"] = "/Users/pondruska/prediction-dataset"
# get config (rasterizer, dataloader and training parameters)
cfg = load_config_data("./prediction_config.yaml")
print(cfg)
# ## Model
#
# Our baseline is a simple `resnet50` pretrained on `imagenet`. We must replace the input and the final layer to address our requirements.
def build_model(cfg: Dict) -> torch.nn.Module:
    """Build the baseline: an ImageNet-pretrained ResNet-50 adapted to the
    rasterized input channels and to a flat (x, y)-per-future-frame output."""
    backbone = resnet50(pretrained=True)
    # each history frame contributes one (x, y) channel pair; +1 for the current frame
    history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2
    in_channels = 3 + history_channels  # RGB raster + history channels
    first_conv = backbone.conv1
    backbone.conv1 = nn.Conv2d(
        in_channels,
        first_conv.out_channels,
        kernel_size=first_conv.kernel_size,
        stride=first_conv.stride,
        padding=first_conv.padding,
        bias=False,
    )
    # regress X, Y for every future frame
    out_features = 2 * cfg["model_params"]["future_num_frames"]
    backbone.fc = nn.Linear(in_features=2048, out_features=out_features)
    return backbone
def forward(data, model, device, criterion):
    """Run one forward pass and return (mean loss, raw network outputs)."""
    batch = data["image"].to(device)
    gt = data["target_positions"]
    # flatten the per-frame (x, y) targets to match the network's flat output
    flat_targets = gt.to(device).reshape(len(gt), -1)
    predictions = model(batch)
    per_element_loss = criterion(predictions, flat_targets)
    # weighted average over all elements
    return per_element_loss.mean(), predictions
# ## Load some stuff
dm = LocalDataManager(None)  # resolves relative paths via AVKIT_DATA_FOLDER
# ===== INIT DATASETS
rasterizer = build_rasterizer(cfg, dm)
train_dataloader = build_dataloader(cfg, "train", dm, AgentDataset, rasterizer)
eval_dataloader = build_dataloader(cfg, "val", dm, AgentDataset, rasterizer)
# ==== INIT MODEL
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = build_model(cfg).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# per-element loss so forward() can average it explicitly
criterion = nn.MSELoss(reduction="none")
# # Training
# ==== TRAIN LOOP
tr_it = iter(train_dataloader)
progress_bar = tqdm(range(cfg["train_params"]["max_num_steps"]))
losses_train = []
for _ in progress_bar:
    try:
        data = next(tr_it)
    except StopIteration:
        # dataloader exhausted — restart from the beginning
        tr_it = iter(train_dataloader)
        data = next(tr_it)
    model.train()
    torch.set_grad_enabled(True)
    loss, _ = forward(data, model, device, criterion)
    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses_train.append(loss.item())
    progress_bar.set_description(f"loss: {loss.item()} loss(avg): {np.mean(losses_train)}")
# # Evaluation
# we can now run inference and store predicted and annotated trajectories.
#
# In this example we run it on a single scene from the eval dataset due to computational constraints.
# +
# ==== EVAL LOOP
model.eval()
torch.set_grad_enabled(False)
losses_eval = []
# store information for evaluation
future_coords_offsets_pd = []  # predicted (x, y) offsets per agent
future_coords_offsets_gt = []  # ground-truth (x, y) offsets per agent
timestamps = []
agent_ids = []
progress_bar = tqdm(eval_dataloader)
for data in progress_bar:
    # NOTE(review): `ouputs` is a (consistently used) misspelling of `outputs`.
    loss, ouputs = forward(data, model, device, criterion)
    losses_eval.append(loss.item())
    progress_bar.set_description(f"Running EVAL, loss: {loss.item()} loss(avg): {np.mean(losses_eval)}")
    # un-flatten the network output back to (batch, future_frames, 2)
    future_coords_offsets_pd.append(ouputs.reshape(len(ouputs), -1, 2).cpu().numpy())
    future_coords_offsets_gt.append(data["target_positions"].reshape(len(ouputs), -1, 2).cpu().numpy())
    timestamps.append(data["timestamp"].numpy())
    agent_ids.append(data["track_id"].numpy())
# -
# ### Save results in the competition format and perform evaluation
# After the model has predicted trajectories for our evaluation set, we can save them in a `csv` file in the competition format. To simulate a complete evaluation session we can also save the GT in another `csv` and get the score.
# +
# ==== COMPUTE CSV
# Write predictions and ground truth in the competition CSV format, then score them.
pred_path = f"{gettempdir()}/pred.csv"
gt_path = f"{gettempdir()}/gt.csv"
write_coords_as_csv(pred_path, future_num_frames=cfg["model_params"]["future_num_frames"],
                    future_coords_offsets=np.concatenate(future_coords_offsets_pd),
                    timestamps=np.concatenate(timestamps),
                    agent_ids=np.concatenate(agent_ids))
write_coords_as_csv(gt_path, future_num_frames=cfg["model_params"]["future_num_frames"],
                    future_coords_offsets=np.concatenate(future_coords_offsets_gt),
                    timestamps=np.concatenate(timestamps),
                    agent_ids=np.concatenate(agent_ids))
print(f"current error is {compute_mse_error_csv(gt_path, pred_path)}")
# -
# ### Visualise results
# We can also visualise some result from the ego(AV) point of view. Let's have a look at the frame number `5198`
# +
# Unwrap the underlying AgentDataset from the (concatenated, subsampled) eval dataloader.
eval_agent_dataset = eval_dataloader.dataset.datasets[0].dataset
eval_ego_dataset = EgoDataset(cfg, eval_agent_dataset.dataset, rasterizer)
frame_number = 5198  # the single frame we visualise
model.eval()
torch.set_grad_enabled(False)
# get AV point-of-view frame
data_ego = eval_ego_dataset[frame_number]
im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0))  # CHW -> HWC for drawing
center = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"]
agent_indices = eval_agent_dataset.get_frame_indices(frame_number)
predicted_positions = []
target_positions = []
for v_index in agent_indices:
    data_agent = eval_agent_dataset[v_index]
    # single-sample forward pass for this agent's raster
    out_net = model(torch.from_numpy(data_agent["image"]).unsqueeze(0).to(device))
    out_pos = out_net[0].reshape(-1, 2).detach().cpu().numpy()
    # store absolute world coordinates
    image_to_world = np.linalg.inv(data_agent["world_to_image"])
    predicted_positions.append(transform_points(out_pos + center, image_to_world))
    target_positions.append(transform_points(data_agent["target_positions"] + center, image_to_world))
# convert coordinates to AV point-of-view so we can draw them
predicted_positions = transform_points(np.concatenate(predicted_positions), data_ego["world_to_image"]) - center
target_positions = transform_points(np.concatenate(target_positions), data_ego["world_to_image"]) - center
yaws = np.zeros((len(predicted_positions), 1))
draw_trajectory(im_ego, center, predicted_positions, yaws, PREDICTED_POINTS_COLOR)
draw_trajectory(im_ego, center, target_positions, yaws, TARGET_POINTS_COLOR)
plt.imshow(im_ego[::-1])  # flip vertically for display
# -
| examples/prediction/.ipynb_checkpoints/agent_prediction_train-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost import XGBClassifier, XGBRegressor
from xgboost import plot_importance
from catboost import CatBoostRegressor
from matplotlib import pyplot
import shap
import os
# List every file available under the Kaggle input mount.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from time import time
from tqdm import tqdm_notebook as tqdm
from collections import Counter
from scipy import stats
import lightgbm as lgb
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
import gc
import json
# Show wide feature frames in full when displayed in the notebook.
pd.set_option('display.max_columns', 1000)
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# # Objective
#
# * In the last notebook we created our baseline model, including a feature selection step.
# * Cohen kappa score of 0.456 (lb) with a local cv score of 0.529
# * In this notebook we are going to add more features and remove others that I think overfit the train set, then check whether our local cv score improves.
# * Next, we will check if this improvement aligns with the lb.
# -
# # Notes
# * Check the distribution of the target variable of the out of folds score and the prediction distribution. A good model should more or less have the same distribution.
def eval_qwk_lgb_regr(y_true, y_pred):
    """
    Fast cappa eval function for lgb.

    Discretizes continuous regression scores into the 4 accuracy groups by
    matching the train-set label distribution, then returns the quadratic-
    weighted Cohen kappa in LightGBM's (name, value, is_higher_better) format.

    NOTE(review): reads the module-level `reduce_train` rather than a
    parameter, and draws a histogram on every call — both side effects of the
    original notebook; confirm before reusing outside it.
    """
    # empirical proportion of each accuracy group in the train labels
    dist = Counter(reduce_train['accuracy_group'])
    for k in dist:
        dist[k] /= len(reduce_train)
    reduce_train['accuracy_group'].hist()  # plotting side effect on every call
    acum = 0
    bound = {}
    # cut points so predicted class proportions match the train distribution
    for i in range(3):
        acum += dist[i]
        bound[i] = np.percentile(y_pred, acum * 100)
    def classify(x):
        # map one continuous score into accuracy group 0..3
        if x <= bound[0]:
            return 0
        elif x <= bound[1]:
            return 1
        elif x <= bound[2]:
            return 2
        else:
            return 3
    y_pred = np.array(list(map(classify, y_pred))).reshape(y_true.shape)
    return 'cappa', cohen_kappa_score(y_true, y_pred, weights='quadratic'), True
def cohenkappa(ypred, y):
    """Quadratic-weighted Cohen kappa eval for LightGBM-style multiclass callbacks.

    `ypred` is the flat (4 * n,) score array; `y` is the lgb Dataset carrying labels.
    Returns LightGBM's (name, value, is_higher_better) tuple.

    FIX: the original referenced the undefined names `cohenkappascore` and
    `y_pred`; both would raise NameError on first call. Uses the
    `cohen_kappa_score` imported at the top of the file and the local `ypred`.
    """
    y = y.get_label().astype("int")
    # rows are the 4 classes; pick the arg-max class per sample
    ypred = ypred.reshape((4, -1)).argmax(axis=0)
    loss = cohen_kappa_score(y, ypred, weights='quadratic')
    return "cappa", loss, True
def read_data():
    """Load all competition csv files, echoing progress and shapes, and return
    (train, test, train_labels, specs, sample_submission)."""
    base = '/kaggle/input/data-science-bowl-2019/'
    def _load(fname, label):
        # one helper, identical messages to the original per-file cells
        print('Reading {} file....'.format(fname))
        df = pd.read_csv(base + fname)
        print('{} file have {} rows and {} columns'.format(label, df.shape[0], df.shape[1]))
        return df
    train = _load('train.csv', 'Training.csv')
    test = _load('test.csv', 'Test.csv')
    train_labels = _load('train_labels.csv', 'Train_labels.csv')
    specs = _load('specs.csv', 'Specs.csv')
    sample_submission = _load('sample_submission.csv', 'Sample_submission.csv')
    return train, test, train_labels, specs, sample_submission
def encode_title(train, test, train_labels):
    """Label-encode titles and worlds in place, build the lookup tables shared
    by the feature-extraction step, and parse timestamps.

    Returns the three frames plus: win_code (title -> attempt event_code),
    the unique-value lists, the id<->title maps, and the assessment titles.
    """
    # encode title
    train['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), train['title'], train['event_code']))
    test['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), test['title'], test['event_code']))
    all_title_event_code = list(set(train["title_event_code"].unique()).union(test["title_event_code"].unique()))
    # make a list with all the unique 'titles' from the train and test set
    list_of_user_activities = list(set(train['title'].unique()).union(set(test['title'].unique())))
    # make a list with all the unique 'event_code' from the train and test set
    list_of_event_code = list(set(train['event_code'].unique()).union(set(test['event_code'].unique())))
    list_of_event_id = list(set(train['event_id'].unique()).union(set(test['event_id'].unique())))
    # make a list with all the unique worlds from the train and test set
    list_of_worlds = list(set(train['world'].unique()).union(set(test['world'].unique())))
    # create a dictionary numerating the titles (and its inverse for decoding)
    activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))
    activities_labels = dict(zip(np.arange(len(list_of_user_activities)), list_of_user_activities))
    activities_world = dict(zip(list_of_worlds, np.arange(len(list_of_worlds))))
    assess_titles = list(set(train[train['type'] == 'Assessment']['title'].value_counts().index).union(set(test[test['type'] == 'Assessment']['title'].value_counts().index)))
    # replace the text titles with the number titles from the dict
    train['title'] = train['title'].map(activities_map)
    test['title'] = test['title'].map(activities_map)
    train['world'] = train['world'].map(activities_world)
    test['world'] = test['world'].map(activities_world)
    train_labels['title'] = train_labels['title'].map(activities_map)
    # every title's assessment-attempt event code defaults to 4100...
    win_code = dict(zip(activities_map.values(), (4100*np.ones(len(activities_map))).astype('int')))
    # then, it set one element, the 'Bird Measurer (Assessment)' as 4110, 10 more than the rest
    win_code[activities_map['Bird Measurer (Assessment)']] = 4110
    # convert text into datetime
    train['timestamp'] = pd.to_datetime(train['timestamp'])
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    return train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code
# this is the function that convert the raw data into processed features
def get_data(user_sample, test_set=False):
    '''
    The user_sample is a DataFrame from train or test where the only one
    installation_id is filtered
    And the test_set parameter is related with the labels processing, that is only requered
    if test_set=False

    Walks the installation's game sessions in order and, for every assessment,
    emits a feature dict summarising the history accumulated so far.
    Returns the list of feature dicts (train) or only the last one (test).

    FIX: the original wrote `last_activitiy = session_type` (typo), so
    `last_activity` stayed 0 forever and `user_activities_count` incremented on
    every single session instead of only on session-type changes.
    '''
    # Constants and parameters declaration
    last_activity = 0
    user_activities_count = {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
    # new features: time spent in each activity
    last_session_time_sec = 0
    accuracy_groups = {0:0, 1:0, 2:0, 3:0}
    all_assessments = []
    accumulated_accuracy_group = 0
    accumulated_accuracy = 0
    accumulated_correct_attempts = 0
    accumulated_uncorrect_attempts = 0
    accumulated_actions = 0
    counter = 0
    time_first_activity = float(user_sample['timestamp'].values[0])
    durations = []
    last_accuracy_title = {'acc_' + title: -1 for title in assess_titles}
    event_code_count: Dict[str, int] = {ev: 0 for ev in list_of_event_code}
    event_id_count: Dict[str, int] = {eve: 0 for eve in list_of_event_id}
    title_count: Dict[str, int] = {eve: 0 for eve in activities_labels.values()}
    title_event_code_count: Dict[str, int] = {t_eve: 0 for t_eve in all_title_event_code}
    # last features
    sessions_count = 0
    # itarates through each session of one instalation_id
    for i, session in user_sample.groupby('game_session', sort=False):
        # i = game_session_id
        # session is a DataFrame that contain only one game_session
        # get some sessions information
        session_type = session['type'].iloc[0]
        session_title = session['title'].iloc[0]
        session_title_text = activities_labels[session_title]
        # for each assessment, and only this kind off session, the features below are processed
        # and a register are generated
        if (session_type == 'Assessment') & (test_set or len(session) > 1):
            # search for event_code 4100 (4110 for Bird Measurer), the assessment trial
            all_attempts = session.query(f'event_code == {win_code[session_title]}')
            # then, check the numbers of wins and the number of losses
            true_attempts = all_attempts['event_data'].str.contains('true').sum()
            false_attempts = all_attempts['event_data'].str.contains('false').sum()
            # copy a dict to use as feature template, it's initialized with some itens:
            # {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
            features = user_activities_count.copy()
            features.update(last_accuracy_title.copy())
            features.update(event_code_count.copy())
            features.update(event_id_count.copy())
            features.update(title_count.copy())
            features.update(title_event_code_count.copy())
            features.update(last_accuracy_title.copy())
            features['installation_session_count'] = sessions_count
            # how many distinct codes/ids/titles were seen so far ("variety")
            variety_features = [('var_event_code', event_code_count),
                                ('var_event_id', event_id_count),
                                ('var_title', title_count),
                                ('var_title_event_code', title_event_code_count)]
            for name, dict_counts in variety_features:
                arr = np.array(list(dict_counts.values()))
                features[name] = np.count_nonzero(arr)
            # get installation_id for aggregated features
            features['installation_id'] = session['installation_id'].iloc[-1]
            # add title as feature, remembering that title represents the name of the game
            features['session_title'] = session['title'].iloc[0]
            # the 4 lines below add the feature of the history of the trials of this player
            # this is based on the all time attempts so far, at the moment of this assessment
            features['accumulated_correct_attempts'] = accumulated_correct_attempts
            features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts
            accumulated_correct_attempts += true_attempts
            accumulated_uncorrect_attempts += false_attempts
            # the time spent in the app so far
            if durations == []:
                features['duration_mean'] = 0
                features['duration_std'] = 0
            else:
                features['duration_mean'] = np.mean(durations)
                features['duration_std'] = np.std(durations)
            durations.append((session.iloc[-1, 2] - session.iloc[0, 2]).seconds)
            # the accurace is the all time wins divided by the all time attempts
            features['accumulated_accuracy'] = accumulated_accuracy/counter if counter > 0 else 0
            accuracy = true_attempts/(true_attempts+false_attempts) if (true_attempts+false_attempts) != 0 else 0
            accumulated_accuracy += accuracy
            last_accuracy_title['acc_' + session_title_text] = accuracy
            # a feature of the current accuracy categorized
            # it is a counter of how many times this player was in each accuracy group
            if accuracy == 0:
                features['accuracy_group'] = 0
            elif accuracy == 1:
                features['accuracy_group'] = 3
            elif accuracy == 0.5:
                features['accuracy_group'] = 2
            else:
                features['accuracy_group'] = 1
            features.update(accuracy_groups)
            accuracy_groups[features['accuracy_group']] += 1
            # mean of the all accuracy groups of this player
            features['accumulated_accuracy_group'] = accumulated_accuracy_group/counter if counter > 0 else 0
            accumulated_accuracy_group += features['accuracy_group']
            # how many actions the player has done so far, it is initialized as 0 and updated some lines below
            features['accumulated_actions'] = accumulated_actions
            # there are some conditions to allow this features to be inserted in the datasets
            # if it's a test set, all sessions belong to the final dataset
            # it it's a train, needs to be passed throught this clausule: session.query(f'event_code == {win_code[session_title]}')
            # that means, must exist an event_code 4100 or 4110
            if test_set:
                all_assessments.append(features)
            elif true_attempts+false_attempts > 0:
                all_assessments.append(features)
            counter += 1
        sessions_count += 1
        # this piece counts how many actions was made in each event_code so far
        def update_counters(counter: dict, col: str):
            num_of_session_count = Counter(session[col])
            for k in num_of_session_count.keys():
                x = k
                if col == 'title':
                    x = activities_labels[k]
                counter[x] += num_of_session_count[k]
            return counter
        event_code_count = update_counters(event_code_count, "event_code")
        event_id_count = update_counters(event_id_count, "event_id")
        title_count = update_counters(title_count, 'title')
        title_event_code_count = update_counters(title_event_code_count, 'title_event_code')
        # counts how many actions the player has done so far, used in the feature of the same name
        accumulated_actions += len(session)
        if last_activity != session_type:
            user_activities_count[session_type] += 1
            last_activity = session_type  # FIX: was `last_activitiy` (never read)
    # if it't the test_set, only the last assessment must be predicted, the previous are scraped
    if test_set:
        return all_assessments[-1]
    # in the train_set, all assessments goes to the dataset
    return all_assessments
def get_train_and_test(train, test):
    """Turn raw per-event logs into one feature row per assessment (train) and
    one row per installation (test); returns (train_df, test_df, categoricals)."""
    compiled_train = []
    # every assessment of every installation becomes a training row
    for i, (ins_id, user_sample) in tqdm(enumerate(train.groupby('installation_id', sort=False)), total=17000):
        compiled_train.extend(get_data(user_sample))
    # for test, only the final assessment of each installation is kept
    compiled_test = [get_data(user_sample, test_set=True)
                     for ins_id, user_sample in tqdm(test.groupby('installation_id', sort=False), total=1000)]
    reduce_train = pd.DataFrame(compiled_train)
    reduce_test = pd.DataFrame(compiled_test)
    return reduce_train, reduce_test, ['session_title']
class Base_Model(object):
    """Abstract k-fold CV regressor wrapper.

    Subclasses implement get_params / convert_dataset / train_model (and
    optionally convert_x). Instantiating runs the whole CV fit immediately and
    stores: self.y_pred (test predictions averaged over folds), self.score
    (out-of-fold cohen kappa) and self.model (model from the last fold).
    """
    def __init__(self, train_df, test_df, features, categoricals=[], n_splits=5, verbose=True):
        # NOTE: the mutable default `categoricals=[]` is tolerated because it is never mutated.
        self.train_df = train_df
        self.test_df = test_df
        self.features = features
        self.n_splits = n_splits
        self.categoricals = categoricals
        self.target = 'accuracy_group'
        self.cv = self.get_cv()
        self.verbose = verbose
        self.params = self.get_params()
        self.y_pred, self.score, self.model = self.fit()
    def train_model(self, train_set, val_set):
        """Train one fold and return the fitted model; subclass responsibility."""
        raise NotImplementedError
    def get_cv(self):
        # stratify on the target so every fold sees the same label mix
        cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
        return cv.split(self.train_df, self.train_df[self.target])
    def get_params(self):
        """Hyper-parameters for the concrete model; subclass responsibility."""
        raise NotImplementedError
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        """Wrap raw frames into whatever the library's train API expects."""
        raise NotImplementedError
    def convert_x(self, x):
        # identity by default; override when predict() needs a wrapper (e.g. DMatrix)
        return x
    def fit(self):
        # FIX: size the prediction buffers from the instance's own frames instead
        # of the module-level globals `reduce_train` / `reduce_test` used before.
        oof_pred = np.zeros((len(self.train_df), ))
        y_pred = np.zeros((len(self.test_df), ))
        for fold, (train_idx, val_idx) in enumerate(self.cv):
            x_train, x_val = self.train_df[self.features].iloc[train_idx], self.train_df[self.features].iloc[val_idx]
            y_train, y_val = self.train_df[self.target][train_idx], self.train_df[self.target][val_idx]
            train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
            model = self.train_model(train_set, val_set)
            conv_x_val = self.convert_x(x_val)
            oof_pred[val_idx] = model.predict(conv_x_val).reshape(oof_pred[val_idx].shape)
            x_test = self.convert_x(self.test_df[self.features])
            # average test predictions over all folds
            y_pred += model.predict(x_test).reshape(y_pred.shape) / self.n_splits
            print('Partial score of fold {} is: {}'.format(fold, eval_qwk_lgb_regr(y_val, oof_pred[val_idx])[1]))
        _, loss_score, _ = eval_qwk_lgb_regr(self.train_df[self.target], oof_pred)
        if self.verbose:
            print('Our oof cohen kappa score is: ', loss_score)
        return y_pred, loss_score, model
class Lgb_Model(Base_Model):
    """LightGBM regressor specialization of Base_Model."""
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        return lgb.train(self.params, train_set, valid_sets=[train_set, val_set], verbose_eval=verbosity)
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # lgb.Dataset handles categorical columns natively
        train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
        val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
        return train_set, val_set
    def get_params(self):
        # RMSE-optimised GBDT with feature/row subsampling and L1/L2 regularisation;
        # stops if the validation metric fails to improve for 100 rounds.
        params = {'n_estimators':5000,
                  'boosting_type': 'gbdt',
                  'objective': 'regression',
                  'metric': 'rmse',
                  'subsample': 0.75,
                  'subsample_freq': 1,
                  'learning_rate': 0.01,
                  'feature_fraction': 0.9,
                  'max_depth': 15,
                  'lambda_l1': 1,
                  'lambda_l2': 1,
                  'early_stopping_rounds': 100
                  }
        return params
class Xgb_Model(Base_Model):
    """XGBoost regressor specialization of Base_Model (DMatrix-based API)."""
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        return xgb.train(self.params, train_set,
                         num_boost_round=5000, evals=[(train_set, 'train'), (val_set, 'val')],
                         verbose_eval=verbosity, early_stopping_rounds=100)
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # xgb.train consumes DMatrix objects rather than raw frames
        train_set = xgb.DMatrix(x_train, y_train)
        val_set = xgb.DMatrix(x_val, y_val)
        return train_set, val_set
    def convert_x(self, x):
        # prediction inputs need the DMatrix wrapper too
        return xgb.DMatrix(x)
    def get_params(self):
        params = {'colsample_bytree': 0.8,
                  'learning_rate': 0.01,
                  'max_depth': 10,
                  'subsample': 1,
                  'objective':'reg:squarederror',
                  #'eval_metric':'rmse',
                  'min_child_weight':3,
                  'gamma':0.25,
                  'n_estimators':5000}
        return params
class Catb_Model(Base_Model):
    """CatBoost regressor specialization of Base_Model."""
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        clf = CatBoostRegressor(**self.params)
        clf.fit(train_set['X'],
                train_set['y'],
                eval_set=(val_set['X'], val_set['y']),
                verbose=verbosity,
                cat_features=self.categoricals)
        return clf
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        # CatBoost takes plain frames; keep them in small dicts
        train_set = {'X': x_train, 'y': y_train}
        val_set = {'X': x_val, 'y': y_val}
        return train_set, val_set
    def get_params(self):
        # RMSE objective, iteration-based overfitting detector, best-iteration model kept
        params = {'loss_function': 'RMSE',
                  'task_type': "CPU",
                  'iterations': 5000,
                  'od_type': "Iter",
                  'depth': 10,
                  'colsample_bylevel': 0.5,
                  'early_stopping_rounds': 300,
                  'l2_leaf_reg': 18,
                  'random_seed': 42,
                  'use_best_model': True
                  }
        return params
# +
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
class Nn_Model(Base_Model):
    """Feed-forward Keras regressor: one-hot encodes categoricals, min-max
    scales every feature, then trains a dense net with early stopping."""
    def __init__(self, train_df, test_df, features, categoricals=[], n_splits=5, verbose=True):
        features = features.copy()
        if len(categoricals) > 0:
            for cat in categoricals:
                enc = OneHotEncoder()
                train_cats = enc.fit_transform(train_df[[cat]])
                test_cats = enc.transform(test_df[[cat]])
                # NOTE(review): `active_features_` only exists in scikit-learn < 0.24 —
                # confirm the pinned version before reusing this class.
                cat_cols = ['{}_{}'.format(cat, str(col)) for col in enc.active_features_]
                features += cat_cols
                train_cats = pd.DataFrame(train_cats.toarray(), columns=cat_cols)
                test_cats = pd.DataFrame(test_cats.toarray(), columns=cat_cols)
                train_df = pd.concat([train_df, train_cats], axis=1)
                test_df = pd.concat([test_df, test_cats], axis=1)
        # scale features to [0, 1] using train statistics only
        scalar = MinMaxScaler()
        train_df[features] = scalar.fit_transform(train_df[features])
        test_df[features] = scalar.transform(test_df[features])
        print(train_df[features].shape)
        super().__init__(train_df, test_df, features, categoricals, n_splits, verbose)
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        # shrinking dense stack (200->100->50->25->1), LayerNorm + dropout between layers
        model = tf.keras.models.Sequential([
            tf.keras.layers.Input(shape=(train_set['X'].shape[1],)),
            tf.keras.layers.Dense(200, activation='relu'),
            tf.keras.layers.LayerNormalization(),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.Dense(100, activation='relu'),
            tf.keras.layers.LayerNormalization(),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.Dense(50, activation='relu'),
            tf.keras.layers.LayerNormalization(),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.Dense(25, activation='relu'),
            tf.keras.layers.LayerNormalization(),
            tf.keras.layers.Dropout(0.3),
            tf.keras.layers.Dense(1, activation='relu')
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=4e-4), loss='mse')
        print(model.summary())
        # checkpoint the best validation weights, stop after 20 stagnant epochs,
        # then reload the best checkpoint before returning
        save_best = tf.keras.callbacks.ModelCheckpoint('nn_model.w8', save_weights_only=True, save_best_only=True, verbose=1)
        early_stop = tf.keras.callbacks.EarlyStopping(patience=20)
        model.fit(train_set['X'],
                  train_set['y'],
                  validation_data=(val_set['X'], val_set['y']),
                  epochs=100,
                  callbacks=[save_best, early_stop])
        model.load_weights('nn_model.w8')
        return model
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = {'X': x_train, 'y': y_train}
        val_set = {'X': x_val, 'y': y_val}
        return train_set, val_set
    def get_params(self):
        # Keras model builds its own config; Base_Model params are unused
        return None
# +
from random import choice
class Cnn_Model(Base_Model):
    """Experimental CNN regressor: stacks random permutations of the feature
    vector into a 2-D image and applies a Conv2D over it."""
    def __init__(self, train_df, test_df, features, categoricals=[], n_splits=5, verbose=True):
        features = features.copy()
        if len(categoricals) > 0:
            for cat in categoricals:
                enc = OneHotEncoder()
                train_cats = enc.fit_transform(train_df[[cat]])
                test_cats = enc.transform(test_df[[cat]])
                # NOTE(review): `active_features_` only exists in scikit-learn < 0.24 — verify version.
                cat_cols = ['{}_{}'.format(cat, str(col)) for col in enc.active_features_]
                features += cat_cols
                train_cats = pd.DataFrame(train_cats.toarray(), columns=cat_cols)
                test_cats = pd.DataFrame(test_cats.toarray(), columns=cat_cols)
                train_df = pd.concat([train_df, train_cats], axis=1)
                test_df = pd.concat([test_df, test_cats], axis=1)
        scalar = MinMaxScaler()
        train_df[features] = scalar.fit_transform(train_df[features])
        test_df[features] = scalar.transform(test_df[features])
        self.create_feat_2d(features)
        super().__init__(train_df, test_df, features, categoricals, n_splits, verbose)
    def create_feat_2d(self, features, n_feats_repeat=50):
        """Build a (n_feats_repeat, n_feats) index mask; each row is an
        independent random permutation of the feature indices."""
        self.n_feats = len(features)
        self.n_feats_repeat = n_feats_repeat
        self.mask = np.zeros((self.n_feats_repeat, self.n_feats), dtype=np.int32)
        for i in range(self.n_feats_repeat):
            l = list(range(self.n_feats))
            for j in range(self.n_feats):
                # draw without replacement -> row i is a permutation
                c = l.pop(choice(range(len(l))))
                self.mask[i, j] = c
        self.mask = tf.convert_to_tensor(self.mask)
        print(self.mask.shape)
    def train_model(self, train_set, val_set):
        verbosity = 100 if self.verbose else 0
        inp = tf.keras.layers.Input(shape=(self.n_feats))
        # gather the permuted copies of the features and reshape into an "image"
        x = tf.keras.layers.Lambda(lambda x: tf.gather(x, self.mask, axis=1))(inp)
        x = tf.keras.layers.Reshape((self.n_feats_repeat, self.n_feats, 1))(x)
        x = tf.keras.layers.Conv2D(18, (50, 50), strides=50, activation='relu')(x)
        x = tf.keras.layers.Flatten()(x)
        #x = tf.keras.layers.Dense(200, activation='relu')(x)
        #x = tf.keras.layers.LayerNormalization()(x)
        #x = tf.keras.layers.Dropout(0.3)(x)
        x = tf.keras.layers.Dense(100, activation='relu')(x)
        x = tf.keras.layers.LayerNormalization()(x)
        x = tf.keras.layers.Dropout(0.3)(x)
        x = tf.keras.layers.Dense(50, activation='relu')(x)
        x = tf.keras.layers.LayerNormalization()(x)
        x = tf.keras.layers.Dropout(0.3)(x)
        out = tf.keras.layers.Dense(1)(x)
        model = tf.keras.Model(inp, out)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss='mse')
        print(model.summary())
        # same checkpoint/early-stop/reload protocol as Nn_Model
        save_best = tf.keras.callbacks.ModelCheckpoint('nn_model.w8', save_weights_only=True, save_best_only=True, verbose=1)
        early_stop = tf.keras.callbacks.EarlyStopping(patience=20)
        model.fit(train_set['X'],
                  train_set['y'],
                  validation_data=(val_set['X'], val_set['y']),
                  epochs=100,
                  callbacks=[save_best, early_stop])
        model.load_weights('nn_model.w8')
        return model
    def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = {'X': x_train, 'y': y_train}
        val_set = {'X': x_val, 'y': y_val}
        return train_set, val_set
    def get_params(self):
        # Keras model builds its own config; Base_Model params are unused
        return None
# -
# read data
train, test, train_labels, specs, sample_submission = read_data()
# build the encoding maps and label-encode titles/worlds in place
train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code = encode_title(train, test, train_labels)
# transform raw event logs into the train and test feature sets
reduce_train, reduce_test, categoricals = get_train_and_test(train, test)
def stract_hists(feature, train=reduce_train, test=reduce_test, adjust=False, plot=False):
    """Compare the train/test distributions of one feature.

    Clips both at the train 95th percentile, bins into 10 normalized
    histograms, and returns the mean squared error between them (optionally
    plotting both). With adjust=True the test values are rescaled in place so
    their mean matches the train mean.
    """
    n_bins = 10
    train_data = train[feature]
    test_data = test[feature]
    if adjust:
        # in-place rescale so means match (mirrors the original's *= semantics)
        test_data *= train_data.mean() / test_data.mean()
    # NOTE: despite the original variable name perc_90, this is the 95th percentile
    cap = np.percentile(train_data, 95)
    train_data = np.clip(train_data, 0, cap)
    test_data = np.clip(test_data, 0, cap)
    train_hist, test_hist = (
        np.histogram(values, bins=n_bins)[0] / len(values)
        for values in (train_data, test_data)
    )
    msre = mean_squared_error(train_hist, test_hist)
    if plot:
        print(msre)
        plt.bar(range(n_bins), train_hist, color='blue', alpha=0.5)
        plt.bar(range(n_bins), test_hist, color='red', alpha=0.5)
        plt.show()
    return msre
# sanity-check one feature's train/test distributions
stract_hists('Magma Peak - Level 1_2000', adjust=False, plot=True)
# call feature engineering function
features = reduce_train.loc[(reduce_train.sum(axis=1) != 0), (reduce_train.sum(axis=0) != 0)].columns # delete useless columns
features = [x for x in features if x not in ['accuracy_group', 'installation_id']]
counter = 0
to_remove = []
# drop the second member of every near-duplicate feature pair (|corr| > 0.995)
# NOTE(review): O(n^2) pairwise scan that also recomputes each pair twice — slow on wide frames.
for feat_a in features:
    for feat_b in features:
        if feat_a != feat_b and feat_a not in to_remove and feat_b not in to_remove:
            c = np.corrcoef(reduce_train[feat_a], reduce_train[feat_b])[0][1]
            if c > 0.995:
                counter += 1
                to_remove.append(feat_b)
                print('{}: FEAT_A: {} FEAT_B: {} - Correlation: {}'.format(counter, feat_a, feat_b, c))
# Rescale test features toward the train means; drop features whose scale factor
# is extreme (>10x either way) or whose statistics cannot be computed.
to_exclude = []
ajusted_test = reduce_test.copy()
for feature in ajusted_test.columns:
    # NOTE(review): 'accuracy_group' appears twice in this exclusion list (harmless duplicate)
    if feature not in ['accuracy_group', 'installation_id', 'accuracy_group', 'session_title']:
        data = reduce_train[feature]
        train_mean = data.mean()
        data = ajusted_test[feature]
        test_mean = data.mean()
        try:
            error = stract_hists(feature, adjust=True)
            ajust_factor = train_mean / test_mean
            if ajust_factor > 10 or ajust_factor < 0.1:# or error > 0.01:
                to_exclude.append(feature)
                print(feature, train_mean, test_mean, error)
            else:
                ajusted_test[feature] *= ajust_factor
        except:
            # NOTE(review): bare except silently drops any feature whose stats raise (e.g. zero mean)
            to_exclude.append(feature)
            print(feature, train_mean, test_mean)
features = [x for x in features if x not in (to_exclude + to_remove)]
reduce_train[features].shape
# Train each ensemble member (instantiating a Base_Model subclass runs the full CV fit)
#cat_model = Catb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
lgb_model = Lgb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
xgb_model = Xgb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
#cnn_model = Cnn_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
nn_model = Nn_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
# +
# Weighted blend of the per-model regression predictions.
# NOTE(review): the key 'lbg' looks like a typo for 'lgb', but it is used
# consistently here, so behaviour is unaffected.
weights = {'lbg': 0.60, 'cat': 0, 'xgb': 0.20, 'nn': 0.20}
final_pred = (lgb_model.y_pred * weights['lbg']) + (xgb_model.y_pred * weights['xgb']) + (nn_model.y_pred * weights['nn'])
#final_pred = cnn_model.y_pred
print(final_pred.shape)
# +
#pd.DataFrame([(round(a, 2), round(b, 2), round(c, 2), round(d, 2)) for a, b, c, d in zip(lgb_model.y_pred, cat_model.y_pred, xgb_model.y_pred, nn_model.y_pred)], columns=['lgb', 'cat', 'xgb', 'nn']).head(50)
# +
# Empirical class frequencies of the training target. The decision
# thresholds on the regression output are placed so that the predicted
# class frequencies match these training frequencies.
dist = Counter(reduce_train['accuracy_group'])
for k in dist:
    dist[k] /= len(reduce_train)
reduce_train['accuracy_group'].hist()
acum = 0
bound = {}
for i in range(3):
    acum += dist[i]
    # Threshold for class i sits at the cumulative-frequency percentile.
    bound[i] = np.percentile(final_pred, acum * 100)
print(bound)
def classify(x):
    """Map a blended regression score to an accuracy-group label (0-3).

    Uses the percentile thresholds stored in the module-level `bound`
    dict; scores above every threshold fall into class 3.
    """
    for grade in range(3):
        if x <= bound[grade]:
            return grade
    return 3
# Discretise the blended scores and write the Kaggle submission file.
final_pred = np.array(list(map(classify, final_pred)))
sample_submission['accuracy_group'] = final_pred.astype(int)
sample_submission.to_csv('submission.csv', index=False)
# Show the resulting class distribution as a final sanity check.
sample_submission['accuracy_group'].value_counts(normalize=True)
# -
| ChungHyunhee/6_2019 Data Science Bowl/convert-to-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import glob
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers.experimental.preprocessing import Resizing
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from IPython.display import SVG
from tensorflow.keras.utils import plot_model
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.optimizers import SGD
from matplotlib.pyplot import imshow
import tensorflow.keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.metrics import roc_curve, auc
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
# +
########### insert hyperparameters ################
train_batch_size = 32
val_batch_size = 32
###################################################
# Absolute dataset locations on the author's machine.
base_dir = r'C:\Users\20153761\Documents\TUe\4e jaar\3e kwartiel\BIA'
test_dir = r'C:\Users\20153761\Documents\TUe\4e jaar\3e kwartiel\BIA\test\test'
# dataset parameters
TRAIN_PATH = os.path.join(base_dir, 'train+val', 'train')
VALID_PATH = os.path.join(base_dir, 'train+val', 'valid')
TEST_FILES = glob.glob(test_dir + '\*.tif')
RESCALING_FACTOR = 1./255  # map 8-bit pixel values into [0, 1]
IMAGE_SIZE = 96
# instantiate data generators
datagen = ImageDataGenerator(rescale=RESCALING_FACTOR)
train_gen = datagen.flow_from_directory(TRAIN_PATH,
                                        target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                        batch_size=train_batch_size,
                                        class_mode='binary')
# Validation generator is not shuffled so predictions line up with labels.
val_gen = datagen.flow_from_directory(VALID_PATH,
                                      target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                      batch_size=val_batch_size,
                                      class_mode='binary',
                                      shuffle=False)
# form steps (number of whole batches per epoch)
train_steps = train_gen.n//train_gen.batch_size
val_steps = val_gen.n//val_gen.batch_size
# -
def identity_block_s(X, f, filters, stage, block):
    """Simplified ResNet identity block: two f x f convolutions on the main
    path plus a skip connection that adds the unchanged input back.

    NOTE(review): the original author marked this block "DOES NOT WORK YET".
    """
    # Layer-name prefixes follow the usual ResNet naming scheme.
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters  # F2 is unpacked but unused in this simplified block

    shortcut = X  # saved for the residual addition below

    # Main path: first conv -> batch-norm -> ReLU.
    X = Conv2D(filters=F1, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_prefix + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_prefix + '2a')(X)
    X = Activation('relu')(X)

    # Main path: second conv -> batch-norm -> ReLU.
    X = Conv2D(filters=F3, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_prefix + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_prefix + '2b')(X)
    X = Activation('relu')(X)

    # Residual connection followed by the final activation.
    X = Add()([X, shortcut])
    X = Activation('relu')(X)
    return X
def convolutional_block_s(X, f, filters, stage, block, s = 2):
    """Simplified ResNet convolutional block.

    Main path: 1x1 strided conv + f x f conv; shortcut: 1x1 strided conv.
    The stride `s` lets the block change the spatial resolution.
    """
    # DOES NOT WORK YET (original author's note)
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2, F3 = filters  # NOTE(review): F2 is never used in this simplified block
    # Save the input value
    X_shortcut = X
    ##### MAIN PATH #####
    # First component of main path
    X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    # Second component of main path
    X = Conv2D(filters = F3, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    ##### SHORTCUT PATH ####
    # The shortcut is convolved too, so its shape matches the main path.
    X_shortcut = Conv2D(filters = F3, kernel_size = (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '1',
                        kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
# Notebook inspection: preview the labels cast to uint8 (not stored)
# and show the labels' current dtype.
train_gen.labels.astype('uint8')
train_gen.labels.dtype
# +
input_shape = (96, 96, 3)
classes = 1

def build_model(hp):
    """Build a small ResNet-style Keras model for the keras-tuner search.

    hp: kerastuner HyperParameters; tunes the number of identity blocks
    in stage 3 ('n_IDblocks', 1-5).
    Returns a compiled Model with a single sigmoid output for binary
    classification.
    """
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-padding before the stem convolution.
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1: stem conv -> batch-norm -> ReLU -> max-pool.
    X = Conv2D(64, (7, 7), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2: one convolutional block plus two identity blocks.
    X = convolutional_block_s(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    X = identity_block_s(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block_s(X, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3: tunable depth. Each identity block gets a unique name suffix;
    # reusing block='b' for every iteration (as before) made Keras raise a
    # duplicate-layer-name error whenever n_IDblocks > 1.
    X = convolutional_block_s(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    for i in range(hp.Int('n_IDblocks', 1, 5)):
        X = identity_block_s(X, 3, [128, 128, 512], stage=3, block='b{}'.format(i))

    # AVGPOOL
    X = AveragePooling2D((2, 2), name="avg_pool")(X)

    # Output layer: a single unit. Use sigmoid, not softmax -- softmax over
    # one unit always outputs 1.0, which breaks binary_crossentropy training.
    X = Flatten()(X)
    X = Dense(classes, activation='sigmoid', name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    # Create and compile the model.
    model = Model(inputs=X_input, outputs=X, name='ResNet12')
    model.compile(SGD(lr=0.01, momentum=0.95), loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Random-search tuner over build_model's hyperparameters.
tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=1, # how many model variations to test?
    executions_per_trial=1, # how many trials per variation? (same model could perform differently)
    directory='First_Resnet',
    project_name='Resnet_Check'
)
# NOTE(review): the generator passed as `x` already yields labels, yet `y`
# is supplied separately -- confirm against the keras-tuner search() API.
tuner.search(x=train_gen,
             y=train_gen.labels.astype('uint8'),
             verbose=2, # just slapping this here bc jupyter notebook. The console out was getting messy.
             epochs=1,
             batch_size=64,
             #callbacks=[tensorboard], # if you have callbacks like tensorboard, they go here.
             validation_data=(val_gen, val_gen.labels.astype('uint8')))
# +
# save the model and weights
# NOTE(review): `model` is not defined in this cell -- the tuner above only
# builds models internally. Retrieve one (e.g. via tuner.get_best_models())
# before running this cell; confirm against the surrounding notebook.
model_name = 'ResNet12'
model_filepath = model_name + '.json'
weights_filepath = model_name + '_weights.hdf5'
# serialize model to JSON
model_json = model.to_json()
with open(model_filepath, 'w') as json_file:
    json_file.write(model_json)
# define the model checkpoint and Tensorboard callbacks
checkpoint = ModelCheckpoint(weights_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
tensorboard = TensorBoard(os.path.join('logs', model_name))
callbacks_list = [checkpoint, tensorboard]
| Final project/Resnet_Tuner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] cell_style="center"
# <img src="../images/demos/FIUM.png" width="350px" class="pull-right" style="display: inline-block">
#
# # Visión Artificial
#
# ### 4º de Grado en Ingeniería Informática
#
# Curso 2021-2022<br>
# Prof. [*<NAME>*](http://dis.um.es/profesores/alberto)
# -
# 
# ## Recursos
# - [libro de Szeliski](https://szeliski.org/Book/)
#
#
# - [OpenCV](https://opencv.org/), [tutoriales en Python](https://docs.opencv.org/4.1.0/d6/d00/tutorial_py_root.html), [documentación](https://docs.opencv.org/4.1.0/)
#
# - [libro](https://books.google.es/books?id=seAgiOfu2EIC&printsec=frontcover)
#
# - [libro1](https://books.google.es/books?id=9uVOCwAAQBAJ&printsec=frontcover), [libro2](https://books.google.es/books?id=iNlOCwAAQBAJ&printsec=frontcover)
#
#
# - [Bishop](https://www.microsoft.com/en-us/research/uploads/prod/2006/01/Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf)
#
#
# - [scikit-image](http://scikit-image.org/), [scikit-learn](http://scikit-learn.org)
#
#
# - [datasets](https://en.wikipedia.org/wiki/List_of_datasets_for_machine_learning_research#Image_data)
#
#
# - [Python](https://docs.python.org/3.8/)
#
# - [numpy](http://www.numpy.org/), [scipy](http://docs.scipy.org/doc/scipy/reference/)
#
# - [matplotlib](http://matplotlib.org/index.html)
# ## Prácticas
# - [Preguntas frecuentes](FAQ.ipynb)
#
#
# - [Guión de las sesiones](guionpracticas.ipynb)
# ## Clases
# ### 0. Presentación (24/1/22)
#
# [introducción](intro.ipynb), [instalación](install.ipynb), [Python](python.ipynb)
#
# - Introducción a la asignatura
#
# - Repaso de Python, numpy y matplotib
# ### 1. Introducción a la imagen digital (31/1/22)
# [imagen](imagen.ipynb), [gráficas](graphs.ipynb), [indexado/stacks](stacks.ipynb), [dispositivos de captura](captura.ipynb)
# - Modelo pinhole. Campo de visión (FOV, *field of view*, parámetro $f$)
#
# - Imagen digital: rows, cols, depth, step. Planar or pixel order. Tipo de pixel: byte vs float
#
# - Color encoding: RGB vs YUV vs HSV
#
# - Coordenadas de pixel, coordenadas normalizadas (indep de resolución), coordenadas calibradas (independiente del FOV).
#
# - Aspect ratio. Resize.
#
# - Manipulación: slice regions, "stack" de imágenes
#
# - primitivas gráficas
#
# - captura: webcams, cameras ip, archivos de vídeo, v4l2-ctl, etc. Load / save.
#
# - entornos de conda, pyqtgraph, pycharm, spyder
#
# - Herramientas: formatos de imagen, imagemagick, gimp, mplayer/mencoder/ffmpeg, mpv, gstreamer, etc.
# ### 2. Segmentación por color (7/2/22)
# [canales de color](color.ipynb), [histograma](histogram.ipynb), [efecto chroma](chroma.ipynb), [segmentación por color](colorseg.ipynb)
# <br>
# [cuantización de color](codebook.ipynb)
# - Teoría del color
#
# - ROIs, masks, probability map, label map
#
# - Componentes conexas vs contornos.
#
# - inRange
#
# - Chroma key
#
# - Histograma, transformaciones de valor (brillo, contraste), ecualización
#
# - Histograma nD
#
# - Distancia entre histogramas. Reproyección de histograma
#
# - background subtraction
#
# - activity detection
# ### 3. Filtros digitales (14/2/22, 21/2/22)
# [filtros de imagen](filtros.ipynb)
# - lineal
#
# - convolution
# - máscaras para paso alto, bajo, etc.
# - separabilidad
# - integral image, box filter
# - dominio frecuencial
# - filtrado inverso
#
#
# - no lineal
#
# - mediana
# - min, max
# - algoritmos generales
#
#
# - Gaussian filter
#
# - separabilidad
# - cascading
# - Fourier
# - scale space
#
#
#
# - [morphological operations](http://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html#gsc.tab=0)
#
# - structuring element
# - dilate, erode
# - open, close
# - gradient
# - fill holes
#
# ### 3b. Análisis frecuencial
# [análisis frecuencial](fourier.ipynb), [filtrado inverso](inversefilt.ipynb)
# ### 4. Detección de bordes (28/2/22)
# [detección de bordes](bordes.ipynb), [Canny nms en C](cannyC.ipynb)
# - gradiente: visualización como *vector field*
#
# - operador de Canny
#
# - transformada de Hough
#
# - Histograma de orientaciones del gradiente (HOG)
#
# - implementación simple de HOG
#
# - detección de *pedestrians*
#
# - face landmarks (dlib)
# ### 5a. Flujo óptico
# [elipse de incertidumbre](covarianza.ipynb), [optical flow](harris.ipynb)
#
# - elipse de incertidumbre
#
# - cross-correlation
#
# - corners (Harris)
#
# - Lucas-Kanade
# ### 5b. *Keypoints*
# [keypoints](keypoints.ipynb), [bag of visual words](bag-of-words.ipynb)
# - modelo cuadrático
#
# - blobs / saddle points (Hessian)
#
# - SIFT
# ### 6. Reconocimiento de formas
# [shapes](shapes.ipynb)
# - umbralización
#
# - análisis de regiones (componentes conexas, transformada de distancia)
#
# - manipulación de contornos
#
# - invariantes frecuenciales de forma
# ### 7. Otras técnicas
# [textura](textura.ipynb), [varios](varios.ipynb), [transformada de distancia](transf-dist.ipynb)
# - Clasificación de texturas mediante *LBP* (Wang and He, 1990, [wiki](https://en.wikipedia.org/wiki/Local_binary_patterns))
#
# - Detección de caras mediante *adaboost* ([Viola & Jones, 2001](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.6807), [wiki](https://en.wikipedia.org/wiki/Viola%E2%80%93Jones_object_detection_framework))
#
# - Herramientas para OCR (*[tesseract](https://github.com/tesseract-ocr)*)
#
# - Herramientas para códigos de barras y QR (*[zbar](http://zbar.sourceforge.net/)*)
#
# - Segmentación de objetos mediante *GrabCut* ([Rother et al. 2004](https://cvg.ethz.ch/teaching/cvl/2012/grabcut-siggraph04.pdf), [tutorial](http://docs.opencv.org/3.2.0/d8/d83/tutorial_py_grabcut.html))
#
# - Transformada de distancia
#
# - Detección de elipses
# ### 8a. *Machine learning*
# [machine learning](machine-learning.ipynb)
#
# - Repaso de *Machine Learning* y *Pattern Recognition*
#
# - Repaso de computación neuronal
#
# - Introducción a la redes convolucionales
# ### 8b. *Deep learning* en visión artificial
# [modelos avanzados](deep.ipynb)
# - Modelos preentrenados
#
# - YOLO
#
# - face recognition
#
# - openpose (body landmarks)
#
# - Transfer learning
#
# - Data augmentation
#
# - UNET
# ### 9. Coordenadas homogéneas
# Comenzamos el estudio de la geometría visual.
# [perspectiva](geovis.ipynb), [coordenadas homogéneas](coordhomog.ipynb)
# Transformaciones lineales
#
# - espacios lineales, vectores
#
# - transformaciones lineales, matrices
#
# - producto escalar (**dot** product)
#
# - producto vectorial (**cross** product)
#
# - puntos, rectas, planos, meet & join
# Geometría del plano
#
# - coordenadas homogéneas
#
# - interpretación como rayos
#
# - puntos y rectas del plano
#
# - incidencia e intersección, dualidad
#
# - puntos del infinito, recta del infinito
#
# - manejo natural de puntos del infinito
#
# - horizonte de un plano
# ### 13. Transformaciones del plano
# [transformaciones del plano](transf2D.ipynb), [sistemas de ecuaciones](sistecs.ipynb), [transformaciones de dominio](lookup.ipynb)
# - Desplazamientos, rotaciones, escalado uniforme, escalado general, proyectividad.
#
# - Grupos euclídeo, similar, afín, proyectivo.
#
# - Propiedades invariantes de cada grupo.
#
# - Representación como matriz homogénea $3\times 3$ y tipos de matriz de cada grupo.
#
# - *Cross ratio* de 4 puntos en una recta. De 5 rectas.
#
# - Estimación de transformaciones a partir de correspondencias.
#
# - Aplicaciones: rectificación de planos, mosaico de imágenes.
#
# - Transformaciones de dominio (deformaciones), lookup table.
#
# Avanzado
#
# - Transformación de rectas. Covarianza y contravarianza.
#
# - Cónicas: incidencia, tangencia, (pole-polar), cónica dual, transformación.
#
# - Objetos invariantes en cada grupo de transformaciones.
# ### 14. Modelo de cámara
# [modelo de la cámara](camera.ipynb)
# - Espacio proyectivo: puntos y líneas 3D, planos, grados de libertad, plano del infinito, analogía con 2D.
#
# - Grupos de transformaciones 3D: y sus invariantes.
#
# - Modelo pinhole (proyección), cámara oscura, lente.
#
# - Transformación de perspectiva: proyección $\mathcal P^3 \rightarrow\mathcal P ^2$.
#
# - cámara calibrada C=PRT, 6 dof, parámetros extrínsecos o pose.
#
# - calibración, distorsión radial.
#
# - Matriz de cámara estándar $M=K[R|t]$.
#
# - Matriz de calibración $K$ y campo visual.
#
# - PnP (*pose from n points*).
#
# - Realidad aumentada.
#
# - Anatomía de la cámara
#
# - Rotaciones sintéticas
# ### 15. Visión estéreo
# [stereo](stereo.ipynb), [stereo-challenge](stereo-challenge.ipynb)
# - Triangulación
#
# - Geometría epipolar
#
# - Extracción de cámaras
#
# - Rectificación estéreo
#
# - Mapas de profundidad
#
#
# Experimentos
#
# - Reproduce los experimentos con un par estéreo tomado con tu propia cámara usando el *tracker* de puntos estudiado en una clase anterior.
#
# - Intenta poner en marcha el sistema [VisualSFM](http://ccwu.me/vsfm/).
# ## Notebooks
# 1. [introducción](intro.ipynb)
# 1. [instalación](install.ipynb)
# 1. [Python](python.ipynb)
#
# 1. [dispositivos de captura](captura.ipynb)
#
# 1. [imagen](imagen.ipynb)
# 1. [gráficas](graphs.ipynb)
# 1. [canales de color](color.ipynb)
#
# 1. [indexado, stacks](stacks.ipynb)
# 1. [histograma](histogram.ipynb)
# 1. [efecto chroma](chroma.ipynb)
# 1. [segmentación por color](colorseg.ipynb)
# 1. [cuantización de color](codebook.ipynb)
#
# 1. [transformaciones de dominio](lookup.ipynb)
#
# 1. [filtros de imagen](filtros.ipynb)
# 1. [análisis frecuencial](fourier.ipynb)
# 1. [filtrado inverso](inversefilt.ipynb)
#
# 1. [transformada de distancia](transf-dist.ipynb)
#
# 1. [detección de bordes](bordes.ipynb)
#
# 1. [técnicas auxiliares](ipmisc.ipynb)
# 1. [Canny nms en C](cannyC.ipynb)
#
# 1. [elipse de incertidumbre](covarianza.ipynb)
# 1. [optical flow](harris.ipynb)
#
# 1. [keypoints](keypoints.ipynb)
# 1. [bag of visual words](bag-of-words.ipynb)
#
# 1. [machine learning](machine-learning.ipynb)
# 1. [deep learning](deep.ipynb)
# 1. [tensorflow](tensorflow.ipynb)
#
# 1. [sistemas de ecuaciones](sistecs.ipynb)
#
# 1. [textura](textura.ipynb)
#
# 1. [shapes](shapes.ipynb)
#
# 1. [varios](varios.ipynb)
#
# 1. [perspectiva](geovis.ipynb)
# 1. [coordenadas homogéneas](coordhomog.ipynb)
# 1. [transformaciones del plano](transf2D.ipynb)
# 1. [DLT](DLT.ipynb)
#
# 1. [modelo de cámara](camera.ipynb)
#
# 1. [visión stereo](stereo.ipynb)
# 1. [stereo-challenge](stereo-challenge.ipynb)
#
# ## Ejemplos de código
# Comprobación inicial
#
# 1. [`hello.py`](../code/hello.py): lee imagen de archivo, la reescala, muestra y sobreescribe un texto.
#
# 1. [`webcam.py`](../code/webcam.py): muestra la secuencia de imágenes capturadas por una webcam.
#
# 1. [`2cams.py`](../code/2cams.py): combina las imágenes tomadas por dos cámaras.
#
# 1. [`stream.py`](../code/stream.py): ejemplo de uso de la fuente genérica de imágenes.
#
# 1. [`surface.py`](../code/surface.py): superficie 3D de niveles de gris en vivo usando pyqtgraph.
#
# 1. [`facemesh.py`](../code/facemesh.py): malla de una cara usando mediapipe.
# Utilidades
#
# 1. [`save_video.py`](../code/save_video.py): ejemplo de uso de la utilidad de grabación de vídeo.
#
# 1. [`mouse.py`](../code/mouse.py), [`medidor.py`](../code/medidor.py): ejemplo de captura de eventos de ratón.
#
# 1. [`roi.py`](../code/roi.py): ejemplo de selección de región rectangular.
#
# 1. [`trackbar.py`](../code/trackbar.py): ejemplo de parámetro interactivo.
#
# 1. [`help_window.py`](../code/help_window.py): ejemplo de ventana de ayuda.
#
# 1. [`wzoom.py`](../code/wzoom.py): ejemplo de ventana con zoom.
# Actividad
#
# 1. [`deque.py`](../code/deque.py): procesamiento de las $n$ imágenes más recientes.
#
# 1. [`backsub0.py`](../code/backsub0.py), [`backsub.py`](../code/backsub.py): eliminación de fondo mediante MOG2.
# Color
#
# 1. [`histogram.py`](../code/histogram.py): histograma en vivo con opencv.
#
# 1. [`histogram2.py`](../code/histogram2.py): histograma en vivo con matplotlib.
#
# 1. [`inrange0.py`](../code/inrange0.py), [`inrange.py`](../code/inrange.py): umbralización de color, máscaras, componentes conexas y contornos.
#
# 1. [`reprohist.py`](../code/reprohist.py), [`mean-shift.py`](../code/mean-shift.py), [`camshift.py`](../code/camshift.py): reproyección de histograma y tracking.
# 1. [`surface2.py`](../code/surface2.py): superficie 3D de niveles de gris suavizada y manejo de teclado con pyqtgraph y opengl.
# 1. [`server.py`](../code/server.py): ejemplo de servidor web de imágenes capturadas con la webcam.
#
# 1. [`mjpegserver.py`](../code/mjpegserver.py): servidor de secuencias de video en formato mjpeg.
#
# 1. [`bot`](../code/bot): bots de [Telegram](https://python-telegram-bot.org/).
# 1. [`grabcut.py`](../code/grabcut.py): segmentación de objetos interactiva mediante GrabCut.
#
# 1. [`spectral.py`](../code/spectral.py): FFT en vivo.
#
# 1. [`thread`](../code/thread): captura y procesamiento concurrente.
#
# 1. [`testC.py`](../code/testC.py), [`inC`](../code/inC): Interfaz C-numpy.
# 1. [`hog/pedestrian.py`](../code/hog/pedestrian.py): detector de peatones de opencv.
#
# 1. [`hog/facelandmarks.py`](../code/hog/facelandmarks.py): detector de caras y landmarks de dlib.
#
# 1. [`hog/hog0.py`](../code/hog/hog0.py): experimentos con hog.
#
# 1. [`regressor.py`](../code/regressor.py): predictor directo de la posición de una región.
#
# 1. [`crosscorr.py`](../code/crosscorr.py): ejemplo de match template.
#
# 1. [`LK/*.py`](../code/LK): seguimiento de puntos con el método de Lucas-Kanade.
#
# 1. [`SIFT/*.py`](../code/sift.py): demostración de la detección de keypoints y búsqueda de coincidencias en imágenes en vivo.
#
# 1. [`shape/*.py`](../code/shape): reconocimiento de formas mediante descriptores frecuenciales.
#
# 1. [`ocr.py`](../code/ocr.py): reconocimiento de caracteres impresos con tesseract/tesserocr sobre imagen en vivo.
#
# 1. [`zbardemo.py`](../code/zbardemo.py): detección de códigos de barras y QR sobre imagen en vivo.
#
# 1. [`code/DL`](../code/DL): Modelos avanzados de deep learning para visión artificial (inception, YOLO, FaceDeep, openpose).
#
# 1. [`code/polygons`](../code/polygons) y [`code/elipses`](../code/elipses): Rectificación de planos en base a marcadores artificiales.
#
# 1. [`stitcher.py`](../code/stitcher.py): construcción automática de panoramas.
#
# 1. [`code/pose`](../code/pose): estimación de la matriz de cámara y realidad aumentada.
# ## Ejercicios
# La entrega de los ejercicios se hará en una tarea del aula virtual dentro de un archivo comprimido.
#
# Debe incluir el **código** completo .py de todos los ejercicios, los ficheros auxiliares (siempre que no sean muy pesados), y una **memoria** con una **explicación** detallada de las soluciones propuestas, las funciones o trozos de código más importantes, y **resultados** de funcionamiento con imágenes de evaluación **originales** en forma de pantallazos o videos de demostración. También es conveniente incluir información sobre tiempos de cómputo, limitaciones de las soluciones propuestas y casos de fallo.
#
# La memoria se presentará en un formato **pdf** o **jupyter** (en este caso se debe adjuntar también una versión **html** del notebook completamente evaluado).
#
# Lo importante, además de la evaluación de la asignatura, es que os quede un buen documento de referencia para el futuro.
#
# Ejercicios propuestos hasta este momento:
# ### Obligatorios
# **CALIBRACIÓN**. a) Realiza una calibración precisa de tu cámara mediante múltiples imágenes de un *chessboard*. b) Haz una calibración aproximada con un objeto de tamaño conocido y compara con el resultado anterior. c) Determina a qué altura hay que poner la cámara para obtener una vista cenital completa de un campo de baloncesto. d) Haz una aplicación para medir el ángulo que definen dos puntos marcados con el ratón en el imagen. e) Opcional: determina la posición aproximada desde la que se ha tomado una foto a partir ángulos observados respecto a puntos de referencia conocidos. [Más informacion](imagen.ipynb#Calibración).
# **ACTIVIDAD**. Construye un detector de movimiento en una región de interés de la imagen marcada manualmente. Guarda 2 ó 3 segundos de la secuencia detectada en un archivo de vídeo. Opcional: muestra el objeto seleccionado anulando el fondo.
# **COLOR**. Construye un clasificador de objetos en base a la similitud de los histogramas de color del ROI (de los 3 canales por separado). [Más información](FAQ.ipynb#Ejercicio-COLOR). Opcional: Segmentación densa por reproyección de histograma.
# ### Opcionales
# **FILTROS**. Amplía el código de la práctica 4 para mostrar en vivo el efecto de diferentes filtros, seleccionando con el teclado el filtro deseado y modificando sus parámetros (p.ej. el nivel de suavizado) con trackbars. **a)** Aplica el filtro en un ROI para comparar el resultado con el resto de la imagen ([ejemplo](../images/demos/ej-c4.png)). **b)** Comprueba la propiedad de "cascading" del filtro gaussiano. **c)** Comprueba la propiedad de "separabilidad" del filtro gaussiano. **d)** Implementa en Python desde cero (usando bucles) el algoritmo de convolución con una máscara general y compara su eficiencia con la versión de OpenCV. **e)** Impleméntalo en C y haz un wrapper para utilizarlo desde Python (consulta al profesor). **f)** Implementa el box filter con la imagen integral.
# El resto se irá añadiendo a lo largo de las siguientes semanas.
#
# Como orientación aquí están los [ejercicios propuestos el curso anterior](ejercicios-curso-anterior.ipynb):
| notebooks/VIA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## crnn ocr 模型训练
# + pycharm={"is_executing": true}
import os
import numpy as np
import torch
from PIL import Image
import numpy as np
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import numpy as np
from warpctc_pytorch import CTCLoss
# -
# ## 创建数据软连接
# !ln -s /home/lywen/data/ocr ../data/ocr/1
# ## 加载数据集
# +
import os
# Move to the repository root so the relative data/import paths resolve.
os.chdir('../../')
from train.ocr.dataset import PathDataset, randomSequentialSampler, alignCollate
from glob import glob
from sklearn.model_selection import train_test_split
# Collect every training image path (one directory per data source).
roots = glob('./train/data/ocr/*/*.jpg')
# -
# ## 训练字符集
# Character set for the CTC model. NOTE(review): despite the name
# `alphabetChinese`, this set contains only digits and Latin letters.
alphabetChinese = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# +
# 90/10 train/test split of the image paths.
trainP, testP = train_test_split(roots, test_size=0.1)  ## the split does not balance character frequencies
traindataset = PathDataset(trainP, alphabetChinese)
testdataset = PathDataset(testP, alphabetChinese)

# Training hyper-parameters.
batchSize = 32
workers = 1
imgH = 32   # image height passed to alignCollate (matches CRNN height below)
imgW = 280  # image width passed to alignCollate
keep_ratio = True
cuda = True
ngpu = 1
nh = 256
sampler = randomSequentialSampler(traindataset, batchSize)
train_loader = torch.utils.data.DataLoader(
    traindataset, batch_size=batchSize,
    shuffle=False, sampler=None,  # NOTE(review): `sampler` is built above but not passed here -- confirm intent
    num_workers=int(workers),
    collate_fn=alignCollate(imgH=imgH, imgW=imgW, keep_ratio=keep_ratio))
train_iter = iter(train_loader)
# -
# ## 加载预训练模型权重
# +
def weights_init(m):
    """Initialise a module in-place by layer type.

    Conv layers get weights ~ N(0, 0.02); batch-norm layers get weights
    ~ N(1, 0.02) and zero biases. Other layer types are left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
from crnn.models.crnn import CRNN
from config import ocrModel, LSTMFLAG, GPU
# Build a CRNN: height 32, 1 input channel, classes = charset + 1 (CTC blank).
model = CRNN(32, 1, len(alphabetChinese) + 1, 256, 1, lstmFlag=LSTMFLAG)
model.apply(weights_init)
# Load the project's pre-trained weights onto CPU.
preWeightDict = torch.load(ocrModel, map_location=lambda storage, loc: storage)  ## weights trained by the project
modelWeightDict = model.state_dict()
for k, v in preWeightDict.items():
    name = k.replace('module.', '')  # remove the DataParallel `module.` prefix
    if 'rnn.1.embedding' not in name:  ## skip the final layer: its size depends on the charset
        modelWeightDict[name] = v
model.load_state_dict(modelWeightDict)
# -
# Display the model architecture in the notebook output.
model
## optimizer
from crnn.util import strLabelConverter
lr = 0.1
optimizer = optim.Adadelta(model.parameters(), lr=lr)
# Converts strings <-> integer label sequences for CTC encoding/decoding.
converter = strLabelConverter(''.join(alphabetChinese))
criterion = CTCLoss()
# +
from train.ocr.dataset import resizeNormalize
from crnn.util import loadData
# Pre-allocated buffers refilled each batch by trainBatch via loadData.
image = torch.FloatTensor(batchSize, 3, imgH, imgH)
text = torch.IntTensor(batchSize * 5)
length = torch.IntTensor(batchSize)
if torch.cuda.is_available():
    model.cuda()
    model = torch.nn.DataParallel(model, device_ids=[0])  ## wrap for (multi-)GPU training
    image = image.cuda()
    criterion = criterion.cuda()
# -
# +
def trainBatch(net, criterion, optimizer, cpu_images, cpu_texts):
    """Run one CTC training step on a batch and return the mean loss."""
    #data = train_iter.next()
    #cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    # Copy the batch into the shared `image` buffer (already on GPU if available).
    loadData(image, cpu_images)
    # Encode target strings into label indices plus per-sample lengths.
    t, l = converter.encode(cpu_texts)
    loadData(text, t)
    loadData(length, l)
    preds = net(image)
    # CTC needs the time-step count of every prediction sequence.
    preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    net.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
def predict(im):
    """Predict the text in a PIL image using the global CRNN model."""
    image = im.convert('L')
    # Scale so the height becomes 32 pixels, keeping the aspect ratio.
    scale = image.size[1] * 1.0 / 32
    w = image.size[0] / scale
    w = int(w)
    transformer = resizeNormalize((w, 32))
    image = transformer(image)
    if torch.cuda.is_available():
        image = image.cuda()
    # Add the batch dimension expected by the network.
    image = image.view(1, *image.size())
    image = Variable(image)
    preds = model(image)
    # Greedy decoding: take the arg-max class at every time step.
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    # raw=False collapses repeats and removes CTC blanks.
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
    return sim_pred
def val(net, dataset, max_iter=100):
    """Estimate exact-match accuracy of `net` on random samples of `dataset`.

    Images wider than 1024 px are skipped. Accuracy is now computed over the
    samples actually evaluated; previously, skipped samples stayed in the
    denominator and silently deflated the reported accuracy.
    """
    for p in net.parameters():
        p.requires_grad = False  # inference only
    net.eval()
    n_correct = 0
    n_evaluated = 0
    N = len(dataset)
    max_iter = min(max_iter, N)
    for _ in range(max_iter):
        im, label = dataset[np.random.randint(0, N)]
        if im.size[0] > 1024:
            continue  # skip overly wide images
        n_evaluated += 1
        pred = predict(im)
        if pred.strip() == label:
            n_correct += 1
    # Guard against the (unlikely) case where every sampled image was skipped.
    return n_correct / float(n_evaluated) if n_evaluated else 0.0
# -
from train.ocr.generic_utils import Progbar
##进度条参考 https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py
# ## 模型训练
# ## 冻结预训练模型层参数
# +
# Phase 1: fine-tune with the backbone frozen -- only the final embedding
# layer learns.
nepochs = 10
acc = 0
interval = len(train_loader) // 2  ## evaluate twice per epoch
for i in range(nepochs):
    print('epoch:{}/{}'.format(i, nepochs))
    n = len(train_loader)
    pbar = Progbar(target=n)
    train_iter = iter(train_loader)
    loss = 0
    for j in range(n):
        # Train only the final embedding layer; freeze everything else.
        # (The previous code first set requires_grad = True unconditionally,
        # which was immediately overwritten -- dead code, removed.)
        for name, param in model.named_parameters():
            param.requires_grad = 'rnn.1.embedding' in name
        model.train()
        # `next(train_iter)` works on both Python 2 and 3; the previous
        # `train_iter.next()` is Python-2-only and fails under the
        # notebook's Python 3 kernel.
        cpu_images, cpu_texts = next(train_iter)
        cost = trainBatch(model, criterion, optimizer, cpu_images, cpu_texts)
        loss += cost.data.numpy()  # NOTE(review): assumes cost is on CPU -- confirm
        if (j + 1) % interval == 0:
            curAcc = val(model, testdataset, max_iter=1024)
            if curAcc > acc:
                acc = curAcc
                torch.save(model.state_dict(), 'train/ocr/modellstm.pth')
        pbar.update(j + 1, values=[('loss', loss / ((j + 1) * batchSize)), ('acc', acc)])
# -
# ## 释放模型层参数
# +
# Phase 2: unfreeze all parameters and continue training, keeping the best
# accuracy (`acc`) reached in the frozen phase as the checkpoint threshold.
nepochs = 10
#acc = 0
interval = len(train_loader) // 2  ## evaluate twice per epoch
for i in range(10, 10 + nepochs):
    print('epoch:{}/{}'.format(i, nepochs))
    n = len(train_loader)
    pbar = Progbar(target=n)
    train_iter = iter(train_loader)
    loss = 0
    for j in range(n):
        # Release every layer for training.
        for name, param in model.named_parameters():
            param.requires_grad = True
        model.train()
        # Python-3-safe iteration (was the Python-2-only `train_iter.next()`).
        cpu_images, cpu_texts = next(train_iter)
        cost = trainBatch(model, criterion, optimizer, cpu_images, cpu_texts)
        loss += cost.data.numpy()
        if (j + 1) % interval == 0:
            curAcc = val(model, testdataset, max_iter=1024)
            if curAcc > acc:
                acc = curAcc
                torch.save(model.state_dict(), 'train/ocr/modellstm.pth')
        pbar.update(j + 1, values=[('loss', loss / ((j + 1) * batchSize)), ('acc', acc)])
# -
# ## 预测demo
# Demo: pick one random test sample, run the recognizer on it and show
# the ground-truth label next to the prediction (the notebook then
# displays the image itself via the trailing bare expression).
model.eval()
N = len(testdataset)
im,label = testdataset[np.random.randint(0,N)]
pred = predict(im)
print('true:{},pred:{}'.format(label,pred))
im
| train/ocr/train-ocr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib notebook
# %load_ext autoreload
# %autoreload 2
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display,Markdown
import numpy as np
import math
from scipy.integrate import solve_ivp
from hjb import *
# reverse tree works quite badly without integration
INTEGRATE = True                    # if False, nextState uses a single Euler step
INTEGRATION_REL_TOLERANCE = 1e-2    # solve_ivp rtol
INTEGRATION_ABS_TOLERANCE = 1e-4    # solve_ivp atol
class PendulumDynamics(DynamicSystem):
    """Torque-controlled pendulum.

    The state space is (theta, theta') and the control is the torque.
    theta is kept normalized to [0, 2*pi).
    """
    def __init__(self,m=1,L=1,g=1):
        self.m = m   # bob mass
        self.L = L   # rod length
        self.g = g   # gravitational acceleration
    def dynamics(self,state,control):
        """Return d/dt (theta, theta') under the given torque."""
        return np.array([state[1],-self.g/self.L*np.cos(state[0]) + control/(self.m*self.L)])
    def nextState(self,state,control,dt):
        """Integrate the dynamics over dt; a negative dt runs time backwards."""
        if INTEGRATE:
            # Reverse time is handled by integrating the negated vector
            # field forward over |dt|; this collapses the two duplicated
            # forward/backward branches of the original code.
            sign = -1.0 if dt < 0 else 1.0
            res = solve_ivp((lambda t,y: sign*self.dynamics(y,control)),
                            [0, sign*dt], state,
                            rtol=INTEGRATION_REL_TOLERANCE,
                            atol=INTEGRATION_ABS_TOLERANCE)
            x = res.y[:,-1]
        else:
            # Single explicit Euler step (valid for either sign of dt).
            x = state + dt*self.dynamics(state,control)
        # Normalize the angle into [0, 2*pi).  Python's % with a positive
        # modulus never returns a negative value, so the original
        # "if x[0] < 0: x[0] += 2*pi" correction was dead code; removed.
        x[0] = x[0] % (2.0*math.pi)
        return x
    def validState(self,state):
        # The whole state space is admissible.
        return True
    def validControl(self,state,control):
        # Torque limits are enforced by the control sampler, not here.
        return True
class PendulumControlSampler(ControlSampler):
    """Candidate torques tried at each state: both extremes plus zero
    (bang-off-bang sampling)."""
    def __init__(self,umin,umax):
        self.umin = umin  # minimum (most negative) torque
        self.umax = umax  # maximum torque
    def sample(self,state):
        # State-independent: always the same three candidates.
        return [self.umin,0,self.umax]
class TimeObjectiveFunction(ObjectiveFunction):
    """Minimum-time objective: each edge costs its duration."""
    def edgeCost(self,state,control,dt,nextState):
        # abs() so reverse-time edges (dt < 0) still cost positive time.
        return abs(dt)
class EffortObjectiveFunction(ObjectiveFunction):
    """Control-effort objective: ||u||^2 integrated over the edge duration."""
    def edgeCost(self,state,control,dt,nextState):
        # NOTE(review): unlike TimeObjectiveFunction this does not take
        # abs(dt), so reverse-time edges (dt < 0) would get negative
        # cost — confirm before using this with backward search.
        return np.linalg.norm(control)**2*dt
# -
# create the dynamics function, terminal conditions, and control bounds
dynamics = PendulumDynamics()
umin = -0.25   # weak torque limits: swing-up behavior is required
umax = 0.25
start = np.array([math.pi*3/2,0])   # hanging straight down, at rest
right = np.array([0,0])             # pointing right, at rest (debug state)
goal = np.array([math.pi/2,0])      # upright, at rest
bounds = [(0,math.pi*2),(-5,5)]     # (theta, theta') state-space box
controlSampler = PendulumControlSampler(umin,umax)
objective = TimeObjectiveFunction()
# some debugging of the dynamics function: sanity-check forward, Euler,
# and reverse integration, and that forward/reverse are near-inverses
print "NextState from right, 0.1s",dynamics.nextState(right,0,0.1)
print "NextState from right, 0.5s",dynamics.nextState(right,0,0.5)
x = right
t = 0
while t < 0.5:
    x = dynamics.nextState(x,0,0.01)
    t += 0.01
print "Euler integration 0.5s, 0.01s substep",x
print "Reverse 0.1s",dynamics.nextState(right,0,-0.1)
print "Forward then reverse 0.1s",dynamics.nextState(dynamics.nextState(right,0,0.1),0,-0.1)
print "Reverse then forward 0.1s",dynamics.nextState(dynamics.nextState(right,0,-0.1),0,0.1)
print "Forward then reverse 0.1s, umin",dynamics.nextState(dynamics.nextState(right,umin,0.1),umin,-0.1)
print "Reverse then forward 0.1s, umin",dynamics.nextState(dynamics.nextState(right,umin,-0.1),umin,0.1)
# testing the RobustRegularGridInterpolator: inf cells should not poison
# interpolation in neighboring finite cells
xdivs = np.array([0,1,2])
ydivs = np.array([0,1,2])
values = np.arange(9,dtype=float).reshape((3,3))
values[0,0] = float('inf')
values[0,1] = float('inf')
print values
grid = RobustRegularGridInterpolator((xdivs,ydivs),values)
print grid([0.1,0.5]),"should be inf"
print grid([0.6,0.5]),"should be 3.5"
# build an HJB solver on a fine 200x200 grid and sanity-check the
# state <-> cell <-> interpolator index round trips
hjb = HJBSolver(dynamics,controlSampler,0.1,objective,
                bounds,[200,200],
                goal=goal)
scell = hjb.stateToCell(start)
print bounds
print "Start cell",scell
print "Start cell center",hjb.cellToCenterState(scell)
print "cell of Start cell center",hjb.stateToCell(hjb.cellToCenterState(scell))
hjb.value[scell] = 0   # seed the value function at the start cell
print "Start interpolator index",hjb.valueInterpolator.getCell(start)
print "Start interpolator value",hjb.valueInterpolator(start)
print "Start cell center interpolator index",hjb.valueInterpolator.getCell(hjb.cellToCenterState(scell))
print "Start cell center interpolator value",hjb.valueInterpolator(hjb.cellToCenterState(scell))
# +
# need to set dt large enough to have a chance to jump cells
dt = 0.3
hjb = HJBSolver(dynamics,controlSampler,dt,objective,
                bounds,[50,60],
                goal=goal)
display(Markdown("# HJB Solver"))
hjbdisplay = GridCostFunctionDisplay(hjb,hjb.value,hjb.policy,policyDims=None)
hjbdisplay.show()
def do_value_iteration(i):
    # Run i sweeps of value iteration, then redraw the value/policy grid.
    print "Running",i,"HJB iterations"
    hjb.valueIteration(iters=i)
    hjbdisplay.refresh(hjb.value,hjb.policy)
    if hjb.getPolicy(start) is not None:
        # show the HJB policy rolled out from the start (red), and a
        # one-step lookahead policy on the same value function (yellow)
        xs,us = rolloutPolicy(dynamics,start,(lambda x:hjb.getPolicy(x)),dt*0.5,200)
        hjbdisplay.plotTrajectory(xs,color='r',zorder=3)
        la_policy = LookaheadPolicy(dynamics,dt,controlSampler,objective,(lambda x:False),hjb.interpolateValue)
        xs,us = rolloutPolicy(dynamics,start,la_policy,dt,200)
        hjbdisplay.plotTrajectory(xs,color='y',zorder=4)
        hjbdisplay.plotFlow(lambda x:hjb.getPolicy(x))
interact_manual(do_value_iteration,i=widgets.IntSlider(min=1, max=101, step=10, value=11));
# +
# this does backward search with a TreeSolver, growing a tree of
# reverse-time trajectories out of the goal
bwtree = OptimalControlTreeSolver(dynamics,controlSampler,-dt*0.5,objective,
                                  bounds,[50,60],
                                  start=goal,goal=(lambda x:False),terminalAsStartCost=True)
bwtree.maxVisitedPerCell = 10
display(Markdown("# Backward Solver"))
bwtreedisplay = GridCostFunctionDisplay(bwtree,bwtree.costToCome(),bwtree.reversePolicy(),policyDims=None)
bwtreedisplay.show()
def do_bw_search(N):
    # Run N expansion steps, then redraw and, if the start state has a
    # reverse policy, roll it out (red) plus a lookahead policy (yellow).
    for i in range(N):
        bwtree.search_step()
    #bwtree.search()
    print "Generated",bwtree.num_nodes(),"nodes"
    bwtreedisplay.refresh(bwtree.costToCome(),bwtree.reversePolicy())
    if bwtree.getReversePolicy(start) is not None:
        xs,us = rolloutPolicy(dynamics,start,(lambda x:bwtree.getReversePolicy(x)),-bwtree.dt,200)
        bwtreedisplay.plotTrajectory(xs,color='r',zorder=3)
        ctc_interp = bwtree.costToComeInterpolator()
        #la_policy = LookaheadPolicy(dynamics,-bwtree.dt,bwtree.controlSampler,bwtree.objective,(lambda x:False),bwtree.getCostToCome)
        la_policy = LookaheadPolicy(dynamics,-bwtree.dt,bwtree.controlSampler,bwtree.objective,(lambda x:False),ctc_interp)
        xs,us = rolloutPolicy(dynamics,start,la_policy,-bwtree.dt,200)
        bwtreedisplay.plotTrajectory(xs,color='y',zorder=4)
    else:
        #bwtreedisplay.plotGraph(color='r',lw='0.5')
        pass
interact_manual(do_bw_search,N=widgets.IntSlider(min=1, max=10001, step=100, value=1001));
# +
# this does forward search: grow a tree from the start toward the goal
tree = OptimalControlTreeSolver(dynamics,controlSampler,dt,objective,
                                bounds,[50,60],
                                start=start,goal=goal)
tree.maxVisitedPerCell = 15
display(Markdown("# Forward Solver"))
treedisplay = GridCostFunctionDisplay(tree,tree.costToCome(),tree.reversePolicy(),policyDims=None)
treedisplay.show()
def do_fw_search(N):
    # Run N search steps, redraw the cost-to-come grid, and overlay the
    # current best path (if any).
    for i in range(N):
        tree.search_step()
    treedisplay.refresh(tree.costToCome(),tree.reversePolicy())
    path = tree.result_path()
    if tree.goal is not None:
        assert len(path) > 0
    if len(path) > 0:
        # trim sentinel endpoints that carry no real state
        if len(path[0].state)==0:
            path = path[1:]
        # Fixed: the original compared with `== None`; states are numpy
        # arrays, and `array == None` is an elementwise comparison whose
        # truth value raises ValueError.  Identity test is the intent.
        if path[-1].state is None:
            path = path[:-1]
        xs = np.array([n.state for n in path])
        treedisplay.plotTrajectory(xs,color='r',zorder=3)
interact_manual(do_fw_search,N=widgets.IntSlider(min=1, max=10001, step=100, value=1001));
# -
| code/HJBTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from testplan import Testplan
from testplan.common.utils.logger import TEST_INFO, DEBUG
from my_tests.mtest import make_multitest
# Initialize a plan with interactive mode flag set.
plan = Testplan(name='MyPlan',
                interactive_port=0,   # port 0 = pick any free port
                parse_cmdline=False,
                logger_level=TEST_INFO)
# Interactive mode serving interactive requests.
ihandler = plan.run()
# Adding a test.
test1_uid = plan.add(make_multitest(idx='1'))
print('Test uid: {}'.format(test1_uid))
# Fixed: the format string was missing its {} placeholder, so plan.i
# was never actually printed.
print('Testplan interactive handler: {}'.format(plan.i))
test = plan.i.test(test1_uid)
print('Test added: {}'.format(test))
# NOTE(review): this prints the test object again — presumably the
# intent was to print its drivers (e.g. test.resources); confirm.
print('Test environment drivers: {}'.format(test))
# Start the test environment resources (Server & client).
plan.i.start_test_resources(test_uid='Test1')
print('Server status: {}'.format(test.resources.server.status.tag))
print('Client status: {}'.format(test.resources.client.status.tag))
# Run a test case.
plan.i.run_test_case(test_uid=test1_uid, suite_uid='TCPSuite', case_uid='send_and_receive_msg')
# Stop test resources.
plan.i.stop_test_resources(test_uid='Test1')
print('Server status: {}'.format(test.resources.server.status.tag))
print('Client status: {}'.format(test.resources.client.status.tag))
# +
# Add an independent environment.
from testplan.environment import LocalEnvironment
from testplan.testing.multitest.driver.tcp import TCPServer, TCPClient
from testplan.common.utils.context import context
# The client's host/port are resolved at runtime from the server's
# actual bound address via context() placeholders.
plan.add_environment(
    LocalEnvironment(
        'my_env1',
        [TCPServer(name='server'),
         TCPClient(name='client',
                   host=context('server', '{{host}}'),
                   port=context('server', '{{port}}'))]))
# -
# Operate my_env1: start the drivers, exchange one message, stop them.
env1 = plan.i.get_environment('my_env1')
env1.start()
env1.server.accept_connection()
print('Client sends msg of length in bytes: {}'.format(
    env1.client.send_text('Hello server!')))
print('Server receives: {}'.format(
    env1.server.receive_text()))
env1.stop()
# +
# You can add an environment using HTTP requests from another tool i.e a UI.
# To demonstrate that:
import requests
addr = 'http://{}:{}'.format(*plan.i.http_handler_info)
print('HTTP listener: {}'.format(addr))
# Build env "my_env2" piece by piece over the sync HTTP API: create it,
# add a TCP server and a TCP client (context placeholders are expressed
# via the _ctx_* fields), then register the assembled environment.
response = requests.post('{}/sync/create_new_environment'.format(addr),
                         json={'env_uid': 'my_env2'})
response = requests.post('{}/sync/add_environment_resource'.format(addr),
                         json={'env_uid': 'my_env2',
                               'target_class_name': 'TCPServer',
                               'name': 'server'})
response = requests.post('{}/sync/add_environment_resource'.format(addr),
                         json={'env_uid': 'my_env2',
                               'target_class_name': 'TCPClient',
                               'name': 'client',
                               '_ctx_host_ctx_driver': 'server',
                               '_ctx_host_ctx_value': '{{host}}',
                               '_ctx_port_ctx_driver': 'server',
                               '_ctx_port_ctx_value': '{{port}}'})
response = requests.post('{}/sync/add_created_environment'.format(addr),
                         json={'env_uid': 'my_env2'})
# +
# Operate my_env2 using HTTP.
# Start and initialize connection.
response = requests.post('{}/sync/start_environment'.format(addr),
                         json={'env_uid': 'my_env2'})
response = requests.post('{}/sync/environment_resource_operation'.format(addr),
                         json={'env_uid': 'my_env2',
                               'resource_uid': 'server',
                               'operation': 'accept_connection'})
# Drivers operations example - send and receive a message.
msg = 'Hello world'
response = requests.post('{}/sync/environment_resource_operation'.format(addr),
                         json={'env_uid': 'my_env2',
                               'resource_uid': 'client',
                               'operation': 'send_text',
                               'msg': msg})
response = requests.post('{}/sync/environment_resource_operation'.format(addr),
                         json={'env_uid': 'my_env2',
                               'resource_uid': 'server',
                               'operation': 'receive_text'})
print('Servers receives: {}'.format(response.json()['result']))
# Stop the environment.
response = requests.post('{}/sync/stop_environment'.format(addr),
                         json={'env_uid': 'my_env2'})
# -
print('Independent environments added: {}'.format(
    list(plan.resources.environments.envs.keys())))
# Abort the plan.
plan.abort()
| examples/Interactive/Environments/test_plan_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Intro to Artificial Neural Networks with Keras
#
# ANNs are the core of **Deep Learning**
#
# ### Why this wave of interest in ANN's is unlike to die out like died the 1960s and 1980s
# * ANN's frequently outperform other ML techniques on very large and complex problems;
# * The increase in computer power since 1990s and cloud platforms have made training large neural networks accessible;
# * The training algorithms have been improved since 1990s;
# * ANNs seem to have entered a virtuous circle of funding and progress, as new products based on ANNs are launched more attention towards them are pulled.
#
# ## Logical Computations with Neurons
#
# A simple model of a artificial neuron has on or more binary inputs and one binary output. The AN activates its output when more than a certain number of its inputs are active.
#
# *Assumption: a neuron is activated when at least two inputs are active*
#
# ### Identity function
# $C = A$
#
# $A \Rightarrow C$
#
# *if* A is activated *then* C is activated as well (since it receives two inputs signal)
#
# ### AND
# $C = A \land B$
#
# $A \rightarrow C \leftarrow B$
#
# Neuron C is activated *if and only if* both A *and* B are activated.
#
# ### OR
# $C = A \lor B$
#
# $A \Rightarrow C \Leftarrow B$
#
# Neuron C gets activated *if at least* neuron A *or* B is activated.
#
# ### When a input connection can inhibit the neuron's activity
# $C = A \land \neg B$
#
# $A \Rightarrow C \leftarrow \neg B$
#
# Neuron C is activated *only if* A is activated *and* B is deactivated.
#
# ## The Perceptron
# One of the simplest ANN architectures and it is based on a slightly different artificial neuron called *threshold logic unit* (TLU) or *linear threshold unit* (LTU). The inputs and outputs are numbers (instead of binary) and each input is associated with a weight. The TLU computes a weighted sum of its inputs
# $$z = w_1x_1+w_2x_2+\cdots+w_nx_n = \mathbf{X}^{\top}\mathbf{W}$$
# then applies a step function to that sum and outputs the result
# $$h_{\mathbf{W}}(\mathbf{X}) = step(z)$$
#
# Most common step function used in Perceptrons
#
# $$ Heaviside (z) =
# \begin{cases}
# 0 & \quad \text{if } z < t\\
# 1 & \quad \text{if } z \geq t
# \end{cases}
# $$
#
#
# $$
# sgn(z)=
# \begin{cases}
# -1 & \quad \text{if} z < t\\
# 0 & \quad \text{if} z = t\\
# +1 &\quad \text{if} z> t
# \end{cases}
# $$
#
#
# $$
# \text{t: threshold}
# $$
#
# A single TLU would be used for simple linear classification like Logistic Regression or SVM classifier. Training a TLU in this case means finding the right values for $\mathbf{W}$
#
# ### Composition
#
# A **Perceptron** is composed of a single layer of TLUs with each TLU connected to all inputs (when all neurons in a layer are connected to every single in the previous layer, the layer is called a *fully connected layer* or *dense layer*)
#
# The inputs of the Perceptron are fed to special passthrough neurons called input neurons: they output whatever input they are fed. In addition, an extra bias feature is generally added ($x_0=1$); it is represented using a neuron called the *bias neuron*, which outputs 1 all the time.
#
# $$h_{\mathbf{W, b}}=\phi(\mathbf{XW}+b)$$
# Where:
# $\mathbf{X}$: matrix($m\times n$) of input features.
# $\mathbf{W}$: matrix($n\times j$) of connection weights one column ($j$) per artificial neuron in the layer.
# $\mathbf{b}$: bias terms vector ($j$) contains all the connection weights between the bias neuron and the artificial neurons. It has one bias term per artificial neuron.$
#
# The function $\phi$ is called activation function
#
# ### How is a Perceptron trained?
# Hebb's rule: The connection weight between two neurons tends to increase when they fire simultaneously
#
# A variant of the rule takes into account the error made by the network when making a prediction. **The Perceptron learning rule reinforces connections that help reduce the error**.
#
# $$W_{i, j}^{\text{next step}}=W_{i, j}+\eta(y_j-\hat{y}_j)x_i$$
#
# Where:
# $w_{i, j}$ is the connection weight between the $i^{th}$ input neuron and the $j^{th}$ output neuron.
# $x_i$ is the $i^{th}$ input value of the current training instance.
# $\hat{y}_j$ is the output of the $j^{th}$ output neuron for the current training instance.
# $y_j$ is the target output of the $j^{th}$ output neuron for the current training instance.
# $\eta$ is the learning rate.
#
# The decision boundary of each output neuron is linear, so Perceptron are incapable of learning complex patterns. However, if the training instances are linearly separables the algorithm would converge to a solution (*Perceptron convergence theorem*)
#
# +
# Imports
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import numpy as np
import os
# -
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# +
iris = load_iris()
X = iris.data[:, (2, 3)] # petal length and petal width
y = (iris.target == 0).astype('int')
per_clf = Perceptron()
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
# -
# `Perceptron` in scikit-learn is equivalent to using an `SGDClassifier` with the following hyperparameters:
# `loss='perceptron'`
# `learning_rate='constant'`
# `eta0='1'`
# `penalty=None`
#
# *Contrary to Logistic Regression classifier, Perceptrons do not output a class probability, rather they make predictions based on hard threshold. This is one reason to **prefer** Logistic Regression over Perceptrons*
#
# **Perceptrons are incapable of solving some trivial problems, like the *Exclusive OR (XOR)* classification problem. However, some of the limitations of perceptrons can be overcome by stacking multiple Perceptrons (forming a Multilayer Perceptron, MLP).**
#
# ## The Multilayer Perceptron and Backpropagation
# An MLP is composed of one input layer, one or more layers of TLUs (hidden layers) plus a final TLUs' layer called the output layer.
#
# The layers close the input are called *lower layers* and those close to the output *upper layers*. Every layer except the output one includes a bias neuron and is fully connected to the next layer.
#
# **Note**: The signal flows only in one direction (from the inputs to outputs), this architecture is an example of *feedforward neural network (FNN)*.
#
# **The backpropagation** training algorithm is, in short, Gradient Descent using an efficient technique for computing the gradients automatically. In just two passes through the network (one forward and one backward), the backpropagation algorithm is able to compute the gradient of the network's error with regard to every single model parameter. In other words, it can find out how each connection weight and each bias term should be tweaked in order to reduce the error. Once it has these gradients, it just performs a regular gradient descent step, and the whole process is repeated until the network converges to a solution.
#
# **Note**: Automatically computing gradients is called *automatic differentiation*, or *autodiff*. There are various techniques. the one used by backpropagation is called *reverse-mode autodiff*
#
# ### The algorithm
# * handles one mini-batch at a time. It goes through the training set multiple times (**Epochs**).
# * The weights must be randomly initiated.
# * The algorithm computes the output of all neurons in each layer until the last layer (**forward pass**) and all intermediates results are preserved.
# * The algorithm computes the network's output error (using a loss function).
# * Compute how much each output connection contributed to the error (chain rule) and how much of these error contributions come from each connection in the layer below and so on until reaches the input layer. This measures the error gradient across all connection weights in the network by propagating the error backward (**backward pass**).
# * Finally the algorithm performs a Gradient Descent step to tweak all connection weights in the network using error gradient computed.
#
# **Gradient Descent needs a well-defined non-zero derivative function to make progress at every step. Initially this function was the sigmoid function**
# $$\sigma(z)=\frac{1}{1+e^{-z}}$$
# **Other choices:**
# $$tanh(z)=2\sigma(2z)-1$$
# Unlike the sigmoid its output range from $-1$ to $1$ (instead of $0$ to $1$), and the range tends to make each layer's output centered around $0$ at the beginning of training speeding up convergence.
# $$ReLU(z)=max(0,z)$$
# Not differentiable at $z=0$ and the derivative is $0$ for $z<0$, but in practice it works well and is fast to compute (has become the default).
#
# **A large enough DNN with nonlinear activations can theoretically approximate any continuous function**
#
# ## Regression MLPs
#
# When building an MLP for regression, one don't want use any activation function for the output neurons and they can output any value. To Guarantee positive outputs use *ReLU* activation function or *softplus* ($log(1+exp(z))$).
#
# **TIP:** The Huber loss is quadratic when the error is smaller than a threshold $\delta$ (tipically 1) but linear when larger than $\delta$.
#
# ### Typical regression MLP architecture
#
# |**Hyperparameter**|**Typical value**|
# |-|-|
# |input neurons|One per feature|
# |hidden layers| Typically 1 to 5|
# |neurons per hidden layer|Typically 10 to 100|
# |output neurons|1 per prediction dimension|
# |Hidden activation|ReLU or SELU|
# |Output Activation|None, Or ReLU/softplus(if positive) or logistic/tanh (if bounded)|
# |loss function| MSE or MAE/Huber|
#
# ## Classification MLPs
# * For binary classification problem: Single output neuron using the logistic activation function: the output will be a number between 0 and 1 (probability estimation of the positive class).
# * For multilabel binary classification: One neuron per positive class.
# * For multiclass classification: One neuron per class and a softmax activation function.
#
# Regarding the loss function, cross-entropy (log loss) is usually good. as the objective is to predict probability distributions.
#
# ### Typical classification MLP architecture
#
# |**Hyperparameter**|**Binary**|**Multilabel Binary**|**Multiclass**|
# |-|-|-|-|
# |input neurons and hidden layers|Same as regression|Same as regression|Same as regression|
# |output neurons|1|1 per label|1 per class|
# |Output Activation|logistic|logistic|softmax|
# |loss function|Cross entropy|Cross entropy|Cross entropy|
#
# ## Implementing MLPs with Keras
#
# Docs: [Keras](https://keras.io/)
#
#
import tensorflow as tf
from tensorflow import keras
tf.__version__
keras.__version__
# ### Building an Image Classifier Using the sequencial API
#
# +
# Load Fashion-MNIST: 60k training and 10k test 28x28 grayscale images.
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
# -
X_train_full.shape
X_train_full.dtype
# +
# Create validation set and scale pixel intensities into [0, 1].
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.0
# Class names, indexed by label id.  Fixed: the original misspelled
# 'sandall'; the list now uses the canonical Fashion-MNIST class names.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress',
               'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# -
class_names[y_train[0]]
# +
# Create the model incrementally with add().
model = keras.models.Sequential()  # Simplest keras model for neural nets:
                                   # a single stack of sequentially connected layers.
model.add(keras.layers.Flatten(input_shape=[28, 28]))  # First layer: flatten each image
                                                       # into a 1D array (X.reshape(-1, 28*28)).
model.add(keras.layers.Dense(300, activation='relu'))  # Dense hidden layer, 300 ReLU neurons.
                                                       # Each layer owns its weight matrix and
                                                       # a bias vector (one bias per neuron).
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))  # Output layer: 10 neurons (one per class),
                                                         # softmax because classes are exclusive.
# -
# Reset the backend graph and seed all RNGs so the rebuilt model below
# is reproducible.
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
# Same architecture, declared in one shot by passing the layer list.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation='relu'),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.summary()
keras.utils.plot_model(model, show_shapes=True)
model.layers              # list of the model's layer objects
model.layers[1].name      # auto-generated name, e.g. 'dense'
model.get_layer('dense')  # look a layer up by name
# ## Activation Functions
# (ageron notebook)
#
# +
def sigmoid(z):
    """Logistic activation: 1 / (1 + e^(-z)), squashing z into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))
def relu(z):
    """Rectified linear unit: elementwise max(z, 0)."""
    return np.maximum(z, 0)
def derivative(f, z, eps=0.000001):
    """Central-difference approximation of f'(z) with half-width eps."""
    upper = f(z + eps)
    lower = f(z - eps)
    return (upper - lower) / (2 * eps)
# +
import numpy as np
import matplotlib.pyplot as plt
# Plot the four classic activation functions (left) and their numerical
# derivatives (right) over z in [-5, 5].
z = np.linspace(-5, 5, 200)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=1, label="Step")
plt.plot(z, sigmoid(z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=1, label="Step")
# mark the step function's undefined derivative at z = 0
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(sigmoid, z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
plt.show()
# -
# Parameters from a layer
# Weights
model.layers[1].get_weights()[0].shape  # (784, 300): one column per neuron
# Biases
model.layers[1].get_weights()[1].shape  # (300,): one bias per neuron
# ### Compiling the model
# Call the `compile()` to specify the loss function and the optimizer to use.
model.compile(loss='sparse_categorical_crossentropy',  # labels are sparse class indices
              optimizer=keras.optimizers.SGD(learning_rate=0.01),
              metrics=['accuracy'])
# **TIP** to convert sparse labels (class indices) to one-hot vector labels, use `keras.utils.to_categorical()` function. other way, `np.argmax()` function with `axis=1`
# ### Training and evaluating the model
# Train for 30 epochs, tracking loss/accuracy on the held-out validation set.
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_valid, y_valid))
# **TIP** instead of using the argument `validation_data` one can use `validation_split` to the ratio of the training set.
#
# `class_weight` argument handles unbalanced classes.
#
# `sample_weight` argument is useful when some instances are labeled by experts and others by another source. This can also be used inside `validation_data` as a third item in a tuple.
#
# `fit()`returns a `History` object with the following attributes.
# `.params`, `.epoch`, `.history`
history.params
history.epoch
import pandas as pd
pd.DataFrame(data=history.history).plot()
# Overfitting can be observed after roughly the tenth epoch: the training loss keeps steadily decreasing while the validation loss has plateaued.
# The first hyperparameter to tune is the learning rate (if you later try other hyperparameters, re-tune the learning rate as well).
#
# If the performance is still poor, one can try to change the number of layers, neurons per layers and activation function used in each hidden layer.
#
# Batch size can also be tuned in `fit(batch_size=32)`.
#
# To evaluate the model `evaluate()`
model.evaluate(X_test, y_test)
# ### Making predictions
#
model.predict(X_test[:3]).round(2)
# ### Regression MLP with Sequential API
# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load the data, split train/valid/test, and standardize features using
# statistics computed on the training split only (no leakage).
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
    housing.data, housing.target)
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train_full, y_train_full)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
# -
# Regression MLP: one hidden layer, a single linear output neuron.
model = keras.models.Sequential([
    keras.layers.Dense(30, activation='relu',
                       input_shape=X_train.shape[1:]),  # input shape must be iterable
    keras.layers.Dense(1)
])
# +
model.compile(loss='mean_squared_error',
              optimizer=keras.optimizers.SGD(learning_rate=0.01))
history = model.fit(X_train, y_train, epochs=20,
                    validation_data=(X_valid, y_valid))
# -
pd.DataFrame(data=history.history).plot()
mse_test = model.evaluate(X_test, y_test)
X_new = X_test[:3]   # pretend these are new, unseen instances
y_pred = model.predict(X_new)
# ## Functional API
#
# ### Wide & Deep Neural Net (<NAME> et al 2016)
#
# It connects all or part of the inputs directly to the output layer. This architecture makes it possible for neural networks to learn both deep patterns and simple rules.
# Seeds for reproducibility.
np.random.seed(42)
tf.random.set_seed(42)
# Functional API Wide & Deep: the input flows through two hidden layers
# (deep path) and is also routed straight to the output through the
# Concatenate layer (wide path).
input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden_1 = keras.layers.Dense(30, activation='relu')(input_)
hidden_2 = keras.layers.Dense(30, activation='relu')(hidden_1)
concat = keras.layers.Concatenate()([input_, hidden_2])  # join raw inputs with deep features
output = keras.layers.Dense(1)(concat)
model = keras.Model(inputs=[input_], outputs=[output])
model.summary()
model.compile(loss="mean_squared_error",
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))
history = model.fit(X_train, y_train, epochs=20,
                    validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
y_pred = model.predict(X_new)
# One can also send some features directly through the wide path (overlapping allowed) and a diferent subset through the deep path.
#
#
# +
# Two-input variant: features 0-4 feed the wide path and features 2-7
# feed the deep path (the column subsets may overlap).
input_a = keras.layers.Input(shape=[5],
                             name='wide')
input_b = keras.layers.Input(shape=[6],
                             name='deep')
hidden_1 = keras.layers.Dense(30, activation='relu')(input_b)
hidden_2 = keras.layers.Dense(30, activation='relu')(hidden_1)
concat = keras.layers.concatenate([input_a, hidden_2])
output = keras.layers.Dense(1,
                            name='output')(concat)
model = keras.Model(inputs=[input_a, input_b], outputs=[output])
# -
model.summary()
keras.utils.plot_model(model, show_shapes=True)
model.compile(loss='mse',
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# Split the feature columns to match the two model inputs.
X_train_a, X_train_b = X_train[:, :5], X_train[:, 2:]
X_valid_a, X_valid_b = X_valid[:, :5], X_valid[:, 2:]
X_test_a, X_test_b = X_test[:, :5], X_test[:, 2:]
history = model.fit((X_train_a, X_train_b), y_train, epochs=20,
                    validation_data=((X_valid_a, X_valid_b), y_valid))
# +
mse_test = model.evaluate((X_test_a, X_test_b), y_test)
X_new_A, X_new_B = X_test_a[:3], X_test_b[:3]
y_pred = model.predict((X_new_A, X_new_B))
# -
# ### There are cases in which one may want multiple outputs
#
# **Examples**
# * Locate and classify the main object in a picture
# * Multiple independent tasks but based on same data
# * Regularization technique
#
np.random.seed(42)
tf.random.set_seed(42)
# Multi-output variant: a main head on the concatenated paths plus an
# auxiliary head straight off the deep path (used as a regularizer).
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.models.Model(inputs=[input_A, input_B],
                           outputs=[output, aux_output])
# Each output needs its own loss; weight the losses because we care more
# about the main head (the auxiliary one is just regularization).
model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1],
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# Both heads are trained against the same target here.
history = model.fit((X_train_a, X_train_b), [y_train, y_train],
                    epochs=20,
                    validation_data=((X_valid_a, X_valid_b), (y_valid, y_valid)))
total_loss, main_loss, aux_loss = model.evaluate([X_test_a, X_test_b],
                                                 [y_test, y_test])
model.summary()
keras.utils.plot_model(model, show_shapes=True)
# ## Subclassing API to Build Dynamic Models
#
# Until now we've been working on static models, which have a lot of pros, but some models involves loops, varying shapes, conditional branching and dynamic behaviors.
class WideAndDeepModel(keras.Model):
    """Wide & Deep network with a main head and an auxiliary head.

    `call` expects `inputs` to be a pair (wide_input, deep_input); the deep
    branch is two Dense layers, concatenated with the wide input before the
    main output. The auxiliary output reads only the deep branch.
    """

    def __init__(self, units=30, activation='relu', **kwargs):
        super().__init__(**kwargs)
        # Deep branch: two identically sized hidden layers.
        self.hidden1 = keras.layers.Dense(units, activation=activation)
        self.hidden2 = keras.layers.Dense(units, activation=activation)
        # Two scalar regression heads.
        self.main_output = keras.layers.Dense(1)
        self.aux_output = keras.layers.Dense(1)

    def call(self, inputs):
        wide_in, deep_in = inputs
        deep = self.hidden2(self.hidden1(deep_in))
        merged = keras.layers.concatenate([wide_in, deep])
        return self.main_output(merged), self.aux_output(deep)
model = WideAndDeepModel()
# A single 'mse' loss is applied to both outputs, weighted 90/10 in favour
# of the main head.
model.compile(loss='mse', loss_weights=[0.9, 0.1],
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))
model.fit([X_train_a, X_train_b], [y_train, y_train], epochs=20,
          validation_data=([X_valid_a, X_valid_b], [y_valid, y_valid]))
model.summary()
# ## Saving and Restoring a Model
#
# ### Functional API
#
# `model.save('xxx.h5')`
#
# Keras uses the HDF5 format to save the model's architecture, all model parameters and the optimizer.
#
# `keras.models.load_model('xxxx.h5')`
#
# **This works only with Sequential and Functional models, but not with model subclasses. `save_weights()` and `load_weights()` can be used to save and load the model parameters**
#
# ### Using Callbacks
#
# The `fit()` method has a `callbacks` argument that lets one specify a list of objects that Keras will call at the start and end of the training, start and end of each epoch or before and after processing each batch.
#
# `ModelCheckpoint` callback saves checkpoints of the model at regular intervals during training, by default at the end of each epoch.
#
# When using a validation set during training, one can set `save_best_only=True`. This is a simple way to implement *early stopping*
#
# **There are many more callbacks available in the package**
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])
# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# NOTE(review): X_test is never scaled, yet it is evaluated below —
# it should probably also get scaler.transform(); confirm intent.
X_valid = scaler.transform(X_valid)
# -
model.compile(loss='mse',
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# Save the best-so-far model (by validation loss) after every epoch.
checkpoint_cb = keras.callbacks.ModelCheckpoint('my_model.h5',
                                                save_best_only=True)
history = model.fit(X_train, y_train,
                    epochs=10,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb])
model = keras.models.load_model("my_model.h5") # rollback to best model
mse_test = model.evaluate(X_test, y_test)
# Another way to implement early stop
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
early_stopping_cb = keras.callbacks.EarlyStopping(patience=5,
                                                  restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=100,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb, early_stopping_cb])
mse_test = model.evaluate(X_test, y_test)
# Custom callback to detect overfitting
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    """Print the val/train loss ratio after each epoch (overfitting signal)."""
    def on_epoch_end(self, epoch, logs):
        ratio = logs['val_loss'] / logs['loss']
        print(f"\nval/train:{ratio:.2f}")
# Could be `on_train_begin()`, `on_train_end()`, `on_epoch_begin()`,
# `on_epoch_end()`, `on_batch_begin()` and `on_batch_end()`
val_train_ratio_cb = PrintValTrainRatioCallback()
# Short training run just to demonstrate the custom callback.
history = model.fit(X_train, y_train, epochs=3,
                    validation_data=(X_valid, y_valid),
                    callbacks=[val_train_ratio_cb])
# Callbacks can also be used during evaluation and predictions.
# ## TensorBoard
import os
# Bug fix: os.makedirs() returns None, so the previous
# `root_logdir = os.makedirs('my_logs', exist_ok=True)` left root_logdir
# bound to None instead of the directory path.
os.makedirs('my_logs', exist_ok=True)
root_logdir = 'my_logs'
def get_run_logdir():
    """Return a fresh, timestamped run directory path under 'my_logs'."""
    import time
    return os.path.join('my_logs', time.strftime('run_%Y_%m_%d-%H_%M_%S'))
run_logdir = get_run_logdir()
run_logdir
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# The TensorBoard callback writes event files for this run under run_logdir.
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb, tensorboard_cb])
# `$ tensorboard --logdir=<log dir> --port=<6006>`
# ## Fine-Tuning Neural Nets Hyperparameters
#
#
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3,
                input_shape=[8]):
    """Build and compile an MLP regressor (factory for the sklearn wrapper).

    n_hidden hidden Dense layers of n_neurons each, a single linear output
    unit, MSE loss and plain SGD.
    """
    net = keras.models.Sequential()
    net.add(keras.layers.InputLayer(input_shape=input_shape))
    for _ in range(n_hidden):
        net.add(keras.layers.Dense(n_neurons, activation='relu'))
    net.add(keras.layers.Dense(1))
    net.compile(loss='mse',
                optimizer=keras.optimizers.SGD(learning_rate=learning_rate))
    return net
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
# `keras_reg` works as a regular scikit-learn regressor.
# + tags=[]
keras_reg.fit(X_train, y_train, epochs=100,
              validation_data=(X_valid, y_valid),
              callbacks=[keras.callbacks.EarlyStopping(patience=10)])
mse_test = keras_reg.score(X_test, y_test)
# NOTE(review): X_new is not defined in this notebook — presumably
# X_test[:3] as in the earlier cells; confirm before running.
y_pred = keras_reg.predict(X_new)
# + tags=[]
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
# Random-search space: depth, width, and a log-uniform learning rate.
param_distribs = {
    "n_hidden": [0, 1, 2, 3],
    "n_neurons": np.arange(1, 100) .tolist(),
    "learning_rate": reciprocal(3e-4, 3e-2) .rvs(1000).tolist(),
}
rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(X_train, y_train, epochs=100,
                  validation_data=(X_valid, y_valid),
                  callbacks=[keras.callbacks.EarlyStopping(patience=10)])
# -
rnd_search_cv.best_params_
rnd_search_cv.best_score_
model = rnd_search_cv.best_estimator_.model
# ## Number of Hidden Layers
#
# Neural Nets take advantage of the hierarchical way of real-world data are usually structured. Therefore, lower hidden layers model low-level structure (e.g., line, segments of various shapes and orientations), intermediate hidden layers combine these low-level structures to model intermediate-level structure (e.g., squares, circles) and the highest hidden layers and the output layer combine these intermediate structures to model high-level structure (e.g., faces).
#
#
# ## Number of neurons per Hidden Layer
#
# For the hidden layers, it used to be common to size them to form a pyramid - the rationale is that many low-level features can coalesce into far fewer high-level features. This practice is now not that popular, and it seems that with the same number of neurons per layer it is possible to hit as good a result as with the pyramid approach but with fewer parameters to tune.
#
# **Stretch pants approach**
#
# In general, adding more layers pays off more than adding more neurons per layer.
#
# ## Learning rate
# Usually the optimal learning rate is about half of the maximum learning rate.
#
# ## Optimizer
#
# ## Batch size
# whether small batch size (<= 32) or Large with learning rate warmup
#
# ## Activation function
# Generally ReLU is a good fit for most of the problems in hidden layers and for the output one, depends on the objective of the model
#
#
#
# # Exercises
#
# 10.
# MNIST: 28x28 grayscale digit images with integer labels 0-9.
train, test = keras.datasets.mnist.load_data()
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
train[0].shape
# NOTE(review): count_nonzero over the unique labels yields 9 because the
# label 0 is excluded; len(np.unique(train[1])) would give the intended 10.
np.count_nonzero(np.unique(train[1]))
# +
# Hold out the last 10k training images as a validation set.
x_train = train[0][:50_000]
y_train = train[1][:50_000]
x_valid = train[0][50_000:]
y_valid = train[1][50_000:]
x_test = test[0]
y_test = test[1]
# -
def build_model(n_hidden=2, n_neurons=30, learning_rate=3e-2, input_shape=[28, 28]):
    """Build and compile an MLP classifier for the 10 MNIST digit classes.

    Args:
        n_hidden: number of hidden Dense layers.
        n_neurons: units per hidden layer.
        learning_rate: SGD learning rate.
        input_shape: shape of one input image (flattened internally).
    Returns:
        A compiled keras Sequential model with a 10-way softmax output.
    """
    model = keras.models.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    # Bug fix: Dense layers need a flat feature vector; without Flatten a
    # [28, 28] input yields a (28, 10) output that cannot match the labels.
    model.add(keras.layers.Flatten())
    for layer in range(n_hidden):
        model.add(keras.layers.Dense(n_neurons, activation='relu'))
    model.add(keras.layers.Dense(10, activation='softmax'))
    optimizer = keras.optimizers.SGD(learning_rate=learning_rate)
    # Bug fix: MNIST labels are integer class ids (not one-hot), so the
    # sparse variant of the loss is required.
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)
    return model
# NOTE(review): KerasRegressor is used for a classification task; a
# KerasClassifier would report more meaningful scores — confirm intent.
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
# Search space for the MNIST model (narrower widths, wider learning rates).
param_distrib = {
    "n_hidden": [0, 1, 2, 3],
    "n_neurons": np.arange(10, 50).tolist(),
    "learning_rate": reciprocal(3e-4, 3e-1).rvs(1000).tolist(),
}
# Bug fix: search over this cell's `param_distrib`, not the stale
# `param_distribs` dictionary defined for the earlier housing experiment.
rnd_search_cv = RandomizedSearchCV(keras_reg, param_distrib, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(x_train, y_train, epochs=100,
                  validation_data=(x_valid, y_valid),
                  callbacks=[keras.callbacks.EarlyStopping(patience=10)])
| ann_w_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Algorithm training, testing, validation, and experiment tracking
# import libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set() # Apply seaborn's default plotting theme
plt.rcParams['figure.figsize'] = (16, 12)
# ### Model training and testing
# import libraries for algorithm training, and metrics to judge performance
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# +
# load clean dataset; 'Daily_radiation' is the regression target throughout.
data_load_path = '../data/clean/'
# training data
train_df = pd.read_csv(data_load_path + 'train.csv')
X_train = train_df.drop(['Daily_radiation'], axis = 1)
y_train = train_df['Daily_radiation']
# test data
test_df = pd.read_csv(data_load_path + 'test.csv')
X_test = test_df.drop(['Daily_radiation'], axis = 1)
y_test = test_df['Daily_radiation']
# -
# ### Linear Regression
# +
# Setup the pipeline steps for linear regression (scaling + model).
steps = [
    ('scaler', StandardScaler()),
    ('lr', LinearRegression())
]
# Create the pipeline
pipeline_lr = Pipeline(steps)
# Fit the pipeline to the train set
pipeline_lr.fit(X_train, y_train)
# Predict the labels of the test set
y_pred_lr = pipeline_lr.predict(X_test)
# -
# Evaluating linear-regression performance on the held-out test set.
# Note: squared=False makes sklearn return the ROOT mean squared error, so
# the printed label now says so (it previously claimed plain MSE).
mse = mean_squared_error(y_test, y_pred_lr, squared = False)
print('r2_score: ', r2_score(y_test, y_pred_lr))
print('Root Mean Squared Error: %.2f' % (mse))
# +
# Run the model against the test data presented through a plot
fig, pX = plt.subplots()
pX.scatter(y_test, y_pred_lr, edgecolors = (0, 0, 0))
# Identity line: perfect predictions would fall exactly on it.
pX.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'm--', lw = 3)
pX.set_xlabel('Actual solar irradiation')
pX.set_ylabel('Predicted solar irradiation')
pX.set_title('Linear regression: Verified vs Predicted solar irradiation')
plt.savefig('../notebooks_figures/lr_line_of_fit.jpg')
plt.show()
# -
sns.jointplot(y_test, y_pred_lr, kind = 'reg')
plt.savefig('../notebooks_figures/lr_jointplot.jpg')
plt.show()
# ### Random Forest Regressor
# +
# Setup the random forest model: rfr (fit on raw, unscaled features)
rfr = RandomForestRegressor()
# Fit the model to the train set
rfr.fit(X_train, y_train)
# Predict the labels of the test set
y_pred_rfr = rfr.predict(X_test)
# -
# Evaluating random-forest performance on the held-out test set.
# Note: squared=False returns the ROOT mean squared error — label fixed.
mse_rf = mean_squared_error(y_test, y_pred_rfr, squared = False)
print('r2_score: ', r2_score(y_test, y_pred_rfr))
print('Root Mean Squared Error: %.2f' % (mse_rf))
# +
# Run the model against the test data presented through a plot
fig, pX = plt.subplots()
pX.scatter(y_test, y_pred_rfr, edgecolors = (0, 0, 0))
# Identity line: perfect predictions would fall exactly on it.
pX.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'm--', lw = 3)
pX.set_xlabel('Actual solar irradiation')
pX.set_ylabel('Predicted solar irradiation')
pX.set_title(' Random Forest: Verified vs Predicted solar irradiation')
plt.savefig('../notebooks_figures/rfr_line_of_fit.jpg')
plt.show()
# -
sns.jointplot(y_test, y_pred_rfr, kind = 'reg')
plt.savefig('../notebooks_figures/rfr_jointplot.jpg')
plt.show()
# ### GradientBoosting Regressor
# +
# Setup the gradient boosting model: gbr (fit on raw, unscaled features)
gbr = GradientBoostingRegressor()
# Fit the model to the train set
gbr.fit(X_train, y_train)
# Predict the labels of the test set
y_pred_gbr = gbr.predict(X_test)
# -
# Evaluating gradient-boosting performance on the held-out test set.
# Note: squared=False returns the ROOT mean squared error — label fixed.
mse_gr = mean_squared_error(y_test, y_pred_gbr, squared = False)
print('r2_score: ', r2_score(y_test, y_pred_gbr))
print('Root Mean Squared Error: %.2f' % (mse_gr))
# +
# Run the model against the test data presented through a plot
fig, pX = plt.subplots()
pX.scatter(y_test, y_pred_gbr, edgecolors = (0, 0, 0))
# Identity line: perfect predictions would fall exactly on it.
pX.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'm--', lw = 3)
pX.set_xlabel('Actual solar irradiation')
pX.set_ylabel('Predicted solar irradiation')
pX.set_title('Gradient Boost: Verified vs Predicted solar irradiation')
plt.savefig('../notebooks_figures/gbr_line_of_fit.jpg')
plt.show()
# -
sns.jointplot(y_test, y_pred_gbr, kind = 'reg')
plt.savefig('../notebooks_figures/gbr_jointplot.jpg')
plt.show()
# ### Model Validation
# Held-out validation split with the same target column as train/test.
# validation data
val_df = pd.read_csv(data_load_path + 'val.csv')
X_val = val_df.drop(['Daily_radiation'], axis=1)
y_val = val_df['Daily_radiation']
# validate models
y_val_lr = pipeline_lr.predict(X_val)
y_val_rfr = rfr.predict(X_val)
y_val_gbr = gbr.predict(X_val)
# Evaluate each model on the validation split.
# Note: squared=False makes sklearn return the ROOT mean squared error, so
# the printed labels now say so (they previously claimed plain MSE).
# Evaluating algorithm performance for linear regression
mse_lr_val = mean_squared_error(y_val, y_val_lr, squared = False)
print('r2_score: ', r2_score(y_val, y_val_lr))
print('Linear Regression - Root Mean Squared Error: %.2f' % (mse_lr_val))
# Evaluating algorithm performance for random forest regression
mse_rf_val = mean_squared_error(y_val, y_val_rfr, squared = False)
print('r2_score: ', r2_score(y_val, y_val_rfr))
print('Random Forest - Root Mean Squared Error: %.2f' % (mse_rf_val))
# Evaluating algorithm performance for gradient boost regression
mse_gbr_val = mean_squared_error(y_val, y_val_gbr, squared = False)
print('r2_score: ', r2_score(y_val, y_val_gbr))
print('Gradient Boost - Root Mean Squared Error: %.2f' % (mse_gbr_val))
# +
## Export model and hook it with api
# -
# ## Experiment tracking with neptune
# import neptune libraries and sklearn integration
import neptune.new as neptune
import neptune.new.integrations.sklearn as npt_utils
# +
nept_project = "maximilien-vicari/solarcast"
# Bug fix: the token literal was missing its closing quote, which made this
# whole cell a syntax error. The placeholder stands in for the real secret —
# prefer reading it from an environment variable rather than source control.
nept_api_token = "<KEY>"
run = neptune.init(api_token=nept_api_token,
                   project=nept_project,
                   name='solar-irradiation',
                   source_files=["model_dev_sr.ipynb"])
# +
# Track files, models, and folders
# File
path_to_raw_data = '../data/raw/'
run["raw_datasets"].track_files(path_to_raw_data)
path_to_clean_data = '../data/clean/'
run["cleaned_datasets"].track_files(path_to_clean_data)
# Folder
path_to_notebooks = '../notebooks/'
run["notebooks"].track_files(path_to_notebooks)
# -
# Log a full regressor summary (params, metrics, plots) for each model.
run['lr_summary'] = npt_utils.create_regressor_summary(pipeline_lr, X_train, X_test, y_train, y_test)
run['gbr_summary'] = npt_utils.create_regressor_summary(gbr, X_train, X_test, y_train, y_test)
run['rfr_summary'] = npt_utils.create_regressor_summary(rfr, X_train, X_test, y_train, y_test)
# Bug fix: removed a stray incomplete statement (`a =`) that made this cell
# a syntax error.
# stop experiment
run.stop()
# ## Experiment tracking with mlflow
# import libraries
import mlflow
import mlflow.sklearn
# +
# Test-set metrics for each model.
# Bug fix: mean_squared_error(..., squared=False) already returns the RMSE;
# the previous code wrapped it in np.sqrt and thus computed sqrt(RMSE).
# (mean_absolute_error is imported with the other sklearn metrics above.)
# model metrics for linear regression
rmse_lr_pred = mean_squared_error(y_test, y_pred_lr, squared = False)
rmae_lr_pred = np.sqrt(mean_absolute_error(y_test, y_pred_lr))
R2_score_lr = r2_score(y_test, y_pred_lr)
# model metrics for random forest
rmse_rfr_pred = mean_squared_error(y_test, y_pred_rfr, squared = False)
rmae_rfr_pred = np.sqrt(mean_absolute_error(y_test, y_pred_rfr))
R2_score_rfr = r2_score(y_test, y_pred_rfr)
# model metrics for gradient boost
rmse_gbr_pred = mean_squared_error(y_test, y_pred_gbr, squared = False)
rmae_gbr_pred = np.sqrt(mean_absolute_error(y_test, y_pred_gbr))
R2_score_gbr = r2_score(y_test, y_pred_gbr)
# -
# #### manually log experiment
# set the tracking uri (assumes a local mlflow server is already running)
mlflow.set_tracking_uri("http://127.0.0.1:5000/")
experiment = mlflow.get_experiment('0')
print("Name of experiment: {}".format(experiment.name))
print("Location of Artifact: {}".format(experiment.artifact_location))
print("Life cycle phase: {}".format(experiment.lifecycle_stage))
print("Experiment_ID: {}".format(experiment.experiment_id))
# start experiment tracking
# NOTE: rebinds `run` (previously the neptune run, which was stopped above).
run = mlflow.start_run(run_name = 'solar_irradiation')
run_id = run.info.run_id
# +
# Log mlflow attributes for mlflow UI (one metric per model/statistic).
# log metrics for linear regression
mlflow.log_metric("rmse_lr", rmse_lr_pred)
mlflow.log_metric("rmae_lr", rmae_lr_pred)
mlflow.log_metric("r2_score_lr", R2_score_lr)
# log metrics for random forest
mlflow.log_metric("rmse_rfr", rmse_rfr_pred)
mlflow.log_metric("rmae_rfr", rmae_rfr_pred)
mlflow.log_metric("r2_score_rfr", R2_score_rfr)
# log metrics for gradientboost
mlflow.log_metric("rmse_gbr", rmse_gbr_pred)
mlflow.log_metric("rmae_gbr", rmae_gbr_pred)
mlflow.log_metric("r2_score_gbr", R2_score_gbr)
# -
# Log and register each trained model with the mlflow model registry.
artifact_path = "default"
mlflow.sklearn.log_model(pipeline_lr, artifact_path = artifact_path, registered_model_name = "linear-regression-model")
# Bug fix: the forest/boosting models are bare estimators named `rfr` and
# `gbr`; `pipeline_rfr`/`pipeline_gbr` were never defined in this notebook.
mlflow.sklearn.log_model(rfr, artifact_path = artifact_path, registered_model_name = "sklearn-random-forest-model")
mlflow.sklearn.log_model(gbr, artifact_path = artifact_path, registered_model_name = "sklearn-gradientboost-model")
# +
# Log artifacts (output files)
# NOTE(review): the figures were saved under '../notebooks_figures/' and the
# features under '../data/clean/'; these bare relative paths only resolve if
# copies exist in the current working directory — confirm.
mlflow.log_artifact("lr_line_of_fit.jpg", artifact_path = 'features')
# Log Features
mlflow.log_artifact('train.csv', artifact_path = 'features')
# -
# End tracking
mlflow.end_run()
# #### Auto log experiment
# enable auto logging (mlflow records params/metrics/models automatically)
mlflow.sklearn.autolog(log_models = True)
with mlflow.start_run(experiment_id = experiment.experiment_id, run_name = 'auto_lr_model') as run:
    pipeline_lr.fit(X_train,
                    y_train)
    y_pred_lr = pipeline_lr.predict(X_test)
    # Bug fix: the random-forest / gradient-boost models are the bare
    # estimators `rfr` and `gbr`; `pipeline_rfr`/`pipeline_gbr` do not exist.
    rfr.fit(X_train,
            y_train)
    y_pred_rfr = rfr.predict(X_test)
    gbr.fit(X_train,
            y_train)
    y_pred_gbr = gbr.predict(X_test)
# End tracking
mlflow.end_run()
# ### Export models
# +
# check sklearn version (pickled models must be loaded with the same version)
from sklearn import __version__
print(__version__)
# +
# loading libraries
import joblib
# Save Models to file in the model directory
# NOTE: the linear model is saved as the full preprocessing+model pipeline,
# while rfr/gbr are saved as bare estimators (they were fit on raw features).
# save linear regression model
joblib_lr = "../ml_models/model_lr.pkl"
joblib.dump(pipeline_lr, joblib_lr)
# save random forest model
joblib_rfr = "../ml_models/model_rfr.pkl"
joblib.dump(rfr, joblib_rfr)
# save gradientboost model
joblib_gbr = "../ml_models/model_gbr.pkl"
joblib.dump(gbr, joblib_gbr)
| solar-app-project/notebooks/.ipynb_checkpoints/model_val_sr-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:TFG]
# language: python
# name: conda-env-TFG-py
# ---
# # Mask R-CNN - Test on Shapes Dataset
#
# Run the Mask R-CNN net in inference mode, with the additional PCILayer that generates the context-based tensor
# +
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import pprint
import keras.backend as KB
sys.path.append('../')
import mrcnn.model as modellib
import mrcnn.visualize as visualize
# import mrcnn.new_shapes as shapes
import mrcnn.new_shapes as new_shapes
# from mrcnn.new_shapes import NewShapesDataset, NewShapesConfig
from mrcnn.config import Config
from mrcnn.model import log
from mrcnn.dataset import Dataset
# from mrcnn.pc_prototype import PCTensor
# from mrcnn.pcn_layer import PCNLayer, PCILayer
from mrcnn.datagen import data_generator, load_image_gt
# Root directory of the project
ROOT_DIR = os.getcwd()
# NOTE(review): '\M' in a non-raw string only works because it is not a
# recognised escape; prefer r'E:\Models' — confirm the intended path.
MODEL_PATH = 'E:\Models'
# Directory to save logs and trained model
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_logs")
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=100)
# Build configuration object -----------------------------------------------
config = new_shapes.NewShapesConfig()
config.BATCH_SIZE = 2 #Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = 2
config.STEPS_PER_EPOCH = 7
# NOTE(review): IMAGES_PER_GPU is set to 2 above and immediately overwritten
# to 1 here, which no longer matches BATCH_SIZE = 2 — confirm intent.
config.IMAGES_PER_GPU = 1
config.display()
# Build shape dataset -----------------------------------------------
# from mrcnn.datagen import data_generator, load_image_gt
# Training dataset generate 500 shapes
dataset_test = new_shapes.NewShapesDataset()
dataset_test.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_test.prepare()
# Validation dataset
# dataset_val = shapes.NewShapesDataset()
# dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
# dataset_val.prepare()
# Load and display random samples
# image_ids = np.random.choice(dataset_train.image_ids, 3)
# for image_id in [3]:
#     image = dataset_train.load_image(image_id)
#     mask, class_ids = dataset_train.load_mask(image_id)
#     visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
import pprint
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=130 , precision=4, threshold=2000)
# -
def get_ax(rows=1, cols=1, size=8):
    """Create a Matplotlib Axes grid for the notebook's visualizations.

    Centralizing figure creation gives a single knob (`size`) to control
    how large rendered images appear.
    """
    fig_w, fig_h = size * cols, size * rows
    _, ax = plt.subplots(rows, cols, figsize=(fig_w, fig_h))
    return ax
# ## Detection
# +
class InferenceConfig(new_shapes.NewShapesConfig):
    # Run inference on a single image at a time on one GPU (batch size 1).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
inference_config.display()
# -
# ### Build the model
# +
# Recreate the model in inference mode
try :
    del model
    # NOTE(review): `gc` is never imported in this notebook; the NameError
    # is silently swallowed by the bare except below — add `import gc` or
    # drop this call.
    gc.collect()
except:
    pass
KB.clear_session()
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# -
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]
print(' last weights file found:', model_path )
# NOTE(review): the auto-discovered path above is immediately overridden by
# this hard-coded checkpoint.
model_path = 'E:\\Models\\mrcnn_logs\\shapes20180509T1928\\mask_rcnn_shapes_2192.h5'
print(' last weights to be used:', model_path )
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# ### Print some information about the model
# model.compile_only(learning_rate=config.LEARNING_RATE, layers='heads')
# print('\n Metrics (_get_deduped_metrics_names():) ')
# pp.pprint(mm._get_deduped_metrics_names())
# print('\n Losses (model.metrics_names): ')
# pp.pprint(mm.metrics_names)
# Learning phase 0 = inference-only (disables dropout / BN training mode).
KB.set_learning_phase(0)
print(' Learning phase values is L ' ,KB.learning_phase())
print('\n Inputs: ')
pp.pprint(model.keras_model.inputs)
print('\n Outputs: ')
pp.pprint(model.keras_model.outputs)
# print('\Layers')
# pp.pprint(model.keras_model.layers)
# weights = model.keras_model.get_weights()
# print(' Number of weights arrays: ',len(weights))
print('\n Weights: ')
print('length of model.keras_model.weights', len(model.keras_model.weights))
pp.pprint(model.keras_model.weights)
# pp.pprint(dir(model.keras_model))
print(model.keras_model.weights[1])
# ### Test on a random image
# +
# Test on a random image
# Validation dataset
# dataset_val = shapes.ShapesDataset()
# dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
# dataset_val.prepare()
# Pick one random test image and load it with its ground-truth annotations.
image_id = random.choice(dataset_test.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    load_image_gt(dataset_test, inference_config, image_id, use_mini_mask=False)
# +
print('Image Id :', image_id)
shape_list = dataset_test.image_info[image_id]['shapes']
pp.pprint(shape_list)
log("original_image", original_image)
log("image_meta", image_meta)
print(image_meta)
# NOTE(review): the label says gt_class_id but gt_bbox is what gets logged
# here — probably a copy/paste slip.
log("gt_class_id", gt_bbox)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
print(" 1: person 2: car 3: sun 4: building 5: tree 6: cloud ")
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset_test.class_names, figsize=(8, 8))
# -
# Run detection on the single image and display the predictions.
results = model.detect([original_image], verbose=1)
r = results[0]
print(' rois : ', r['rois'])
print(' masks : ', r['masks'].shape)
print(' class ids : ', r['class_ids'])
print(' class names: ', dataset_test.class_names)
print(' scores : ', r['scores'])
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset_test.class_names, r['scores'], ax=get_ax())
# ## Evaluation
# +
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
from mrcnn.datagen import data_generator, load_image_gt
import mrcnn.utils as utils
image_ids = np.random.choice(dataset_test.image_ids, 100)
APs = []
for image_id in image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        load_image_gt(dataset_test, inference_config,
                      image_id, use_mini_mask=False)
    # NOTE(review): molded_images is computed but never used below.
    molded_images = np.expand_dims(utils.mold_image(image, inference_config), 0)
    # Run object detection
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP
    # NOTE(review): compute_ap is called without mask arguments here —
    # confirm this matches the signature of this project's utils version.
    AP, precisions, recalls, overlaps =\
        utils.compute_ap(gt_bbox, gt_class_id,
                         r["rois"], r["class_ids"], r["scores"])
    APs.append(AP)
print("mAP: ", np.mean(APs))
# -
# ### Get next shapes from generator and display loaded shapes
# ## Save
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
model.keras_model.save_weights(model_path)
# ## Notebook Preferences
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array for notebook visualizations.

    All figures are created through this single helper so the default
    `size` attribute controls how large rendered images are.
    """
    figure_size = (size * cols, size * rows)
    _, ax = plt.subplots(rows, cols, figsize=figure_size)
    return ax
# ## Configurations
# +
# from keras import backend as KB
# if 'tensorflow' == KB.backend():
#     import tensorflow as tf
#     from keras.backend.tensorflow_backend import set_session
# #     tfconfig = tf.ConfigProto(
# #             gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
# #             device_count = {'GPU': 1}
# #            )
#     tfconfig = tf.ConfigProto()
#     tfconfig.gpu_options.allow_growth=True
#     tfconfig.gpu_options.visible_device_list = "0"
#     tfconfig.gpu_options.per_process_gpu_memory_fraction=0.5
#     tf_sess = tf.Session(config=tfconfig)
#     set_session(tf_sess)
# -
# ### Simulation of `detect()` routine
# +
# Step-by-step replay of model.detect() to inspect intermediate outputs.
# print('>>> model detect()')
verbose = 1
images = [original_image]
assert model.mode == "inference", "Create model in inference mode."
assert len(images) == model.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
    log("Processing {} images".format(len(images)))
    for image in images:
        log("image", image)
# Mold inputs to format expected by the neural network (resize/normalize).
molded_images, image_metas, windows = model.mold_inputs(images)
if verbose:
    log("molded_images", molded_images)
    log("image_metas" , image_metas)
## Run object detection pipeline
# print(' call predict()')
detections, rpn_rois, rpn_class, rpn_bbox,\
    mrcnn_class, mrcnn_bbox, mrcnn_mask \
    = model.keras_model.predict([molded_images, image_metas], verbose=0)
print(' return from predict()')
print(' Length of detections : ', len(detections))
print(' Length of rpn_rois : ', len(rpn_rois ))
print(' Length of rpn_class : ', len(rpn_class ))
print(' Length of rpn_bbox : ', len(rpn_bbox ))
print(' Length of mrcnn_class: ', len(mrcnn_class))
print(' Length of mrcnn_bbox : ', len(mrcnn_bbox ))
print(' Length of mrcnn_mask : ', len(mrcnn_mask ))
#### detection array layout is `[ y1, x1, y2, x2, class, score]`
detections[0].shape
print(detections[0])
## Process detections
# Convert the network-space detections back to image coordinates.
results = []
for i, image in enumerate(images):
    final_rois, final_class_ids, final_scores, final_masks =\
        model.unmold_detections(detections[i],
                                mrcnn_mask[i],
                                image.shape ,
                                windows[i])
    results.append({
        "rois" : final_rois,
        "class_ids": final_class_ids,
        "scores" : final_scores,
        "masks" : final_masks,
    })
| notebooks/Shapes_NewShapes/TEST on NewShapes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# #!/usr/bin/python3
# -
from collections import Counter
#import re
#import os
#import time
from collections import defaultdict
#from collections import deque
# Puzzle configuration: day number, debug flag, which part(s) to run,
# and whether to use the sample input instead of the real one.
date = 9
dev = 0 # extra prints
part = 3 # 1,2, or 3 for both
# 0 or 1:
samp = 0
# + [markdown] tags=[]
# ## Read the input data
# +
#time0 = time.time()
if samp == 1:
    filename = "/sample.txt"
else:
    filename = "/input.txt"
# Look for the input inside the day's folder first, then in the cwd.
try:
    with open(str(date) + filename,"r") as f:
        t = f.readlines()
except FileNotFoundError:
    with open("." + filename,"r") as f:
        t = f.readlines()
# Strip newlines; each line of `t` is one row of single-digit heights.
t = [(x.strip().replace(' ',' ')) for x in t]
#t = [int(x) for x in t]
# +
## Run the program
# Disabled driver; the parts are invoked directly further down instead.
if 0:
    if part == 1:
        print("Part 1: ", day(t))
    elif part == 2:
        print("Part 2: ", day2(t))
    elif part == 3:
        pass
        #run both
        #print("Part 1: ", day(t))
        #print("Part 2: ", day2(t))
#tdif = time.time() - time0
#print("Elapsed time: {:.4f} s".format(tdif))
# -
# ## Part one
# return a set of neighbor coordinates:
# (3,3) -> [(3,4), (3,2), (2,3), (4,3)]
def giveneighbors(cx,cy):
nb = set()
for (i,j) in [(-1,0), (1,0), (0,1), (0,-1)]:
if (cx+i >= width) or (cy+j >= height):
continue
elif (cx+i < 0) or (cy+j < 0):
continue
else:
nb.add((cx+i, cy+j))
return nb
# + tags=[]
# Grid dimensions; module-level globals read by giveneighbors() by default.
width = len(t[0])
height = len(t)
def day(te):
    """Part 1: sum of risk levels (1 + height) over all low points."""
    rows = len(te)
    cols = len(te[0])
    # Integer height map, kept only for the optional debug dump below.
    grid = {r: [int(ch) for ch in te[r]] for r in range(rows)}
    risk = 0
    low_points = []
    for r in range(rows):
        for c in range(cols):
            here = int(te[r][c])
            # A low point is strictly lower than every 4-neighbor.
            # (giveneighbors falls back to the module-level width/height.)
            if all(int(te[y][x]) > here for (x, y) in giveneighbors(c, r)):
                risk += 1 + here
                low_points.append((c, r))
    if 0:  # debug: dump the parsed height map
        for r in grid:
            print(grid[r])
    return risk
day(t)
# -
# ## Part two
def step(checked, data, x, y):
    """Flood-fill the basin containing (x, y); return its size in cells.

    `checked` is shared across calls so no cell is counted in two basins;
    cells of height 9 act as basin borders and are excluded.
    """
    checked.add((x, y))
    frontier = [(x, y)]
    size = 0
    while frontier:
        size += 1
        cx, cy = frontier.pop()
        for nx, ny in giveneighbors(cx, cy):
            if data[ny][nx] != 9 and (nx, ny) not in checked:
                checked.add((nx, ny))
                frontier.append((nx, ny))
    return size
# + tags=[]
def day2(te):
    """Part 2: product of the sizes of the three largest basins."""
    # Height map as a dict of row -> list of ints, passed to step().
    hm = {}#defaultdict(lambda:9)
    for r in range(height):
        hm[r] = []
        for c in range(width):
            hm[r].append(int(te[r][c]))
    # Find the low points exactly as in part 1.
    lows = []
    for r in range(height):
        for c in range(width):
            val = int(te[r][c])
            ismin = 1
            n = giveneighbors(c, r)
            #print(n)
            for [x,y] in n:
                if int(te[y][x]) <= val:
                    ismin = 0
            if ismin:
                lows.append((c,r))
            #print("Risky: {},{}: {}. N: {}".format(c,r, val, n))
    #print("Lows: ", lows)
    # Flood-fill one basin per low point; `checked` is shared across calls
    # so no cell is ever assigned to two basins.
    basins = defaultdict(list)
    bid = 0
    checked = set()
    for [x,y] in lows:
        basins[bid] = step(checked, hm, x, y)
        bid += 1
    #print(checked)
    if(0):
        for k in hm.keys():
            print(hm[k])
    # Take the three largest basin sizes and multiply them.
    bc = (basins.values())
    (a,b,c) = (sorted(bc)[-3:])
    print(a,b,c)
    if(0):
        print("Basins:")
        for k in basins.keys():
            print(k, basins[k])
    return a*b*c
day2(t)
| 9/notebook.ipynb |
# ---
# title: "Array Operations"
# author: "Sanjay"
# date: 2020-09-04
# description: "-"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kagglevil
# language: python
# name: kagglevil
# ---
# Importing numpy library
import numpy as np
# Defining Array A
# Defining Array A (2x2 integer matrix)
A = np.array([[17, 23],
              [34, 46]])
# Defining Array B (2x2 integer matrix)
B = np.array([[48, 39],
              [25, 14]])
# Adding 5 to every element in Array A (scalar is broadcast over the array)
print("Adding 5 to every element in Array A:", A + 5)
# Subtracting 2 from each element (broadcast over Array B)
print("\nSubtracting 2 from each element of Array B:", B - 2)
# Sum of Array A elements performing Unary operations (reduces to a scalar)
print ("\nSum of all array A elements: ", A.sum())
# Adding two arrays A and B performing Binary operations (element-wise)
print ("\nArray sum:\n", A + B)
| docs/python/numpy/Array-Operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp loader
# -
#hide
from nbdev.showdoc import *
# # Loader
# > Functions for finding and loading image files and saved embeddings
#
# ## File manipulation
#export
from pathlib import Path
from PIL import Image
from tqdm import tqdm
# **NB: A lot of this implementation is too specific, especially the slugified filenames being used for dictionary IDs. Should be replaced with a better database implementation. Right now if the length of the list of files is different than the length of the list of files in the database, we have to rebuild the entire tree. This means that if we index a folder with corrupted files, it rebuilds the tree every time, as the length of the file list with skipped files is always different than the db without them.**
# +
#export
def slugify(filepath):
    """Build a cache key from the file's stem and its whole-second mtime."""
    mtime_whole = str(filepath.stat().st_mtime).split('.')[0]
    return f'{filepath.stem}_{mtime_whole}'
def get_image_files(path):
    """Recursively collect (Path, slug) pairs for image files under `path`.

    Matches by file extension; the suffix is lower-cased so files such as
    `photo.JPG` are no longer silently skipped.
    """
    img_extensions = {'.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp'}
    return [(f, slugify(f)) for f in tqdm(path.rglob('*')) if f.suffix.lower() in img_extensions]
# This returns boolean and should be called is_valid_image or something like that
def verify_image(f):
    """Return True if `f` is a readable, structurally valid image, else False.

    Bug fix: the failure path previously fell through and returned None
    implicitly; it now returns False explicitly (still falsy, so existing
    callers behave the same but the contract is a real boolean).
    """
    try:
        img = Image.open(f)
        img.verify()
        return True
    except Exception as e:
        print(f'Skipping bad file: {f}\ndue to {type(e)}')
        return False
# -
# Demonstrating the usage here, not a great test though:
# +
root = Path('./images')
filepaths = get_image_files(root)
len(filepaths)
# -
filepaths[:3]
# ## Loaders
#
# So we have a list of paths and slugified filenames from the folder. We want to see if there's an archive, so that we don't have to recalculate tensors for images we've seen before. Then we want to pass that directly to the indexer, but send the new images through the crafter and encoder first.
#
#
#export
import torch
import torchvision
# We want to use the GPU, if possible, for all the pyTorch functions. But if we can't get access to it we need to fallback to CPU. Either way we call it `device` and pass it to each function in the executors that use torch.
#export
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# The `archive_loader` is only called in `indexFlow`. It takes the list of image files and the folder they're in (and the torch device), opens an archive if there is one
#export
def archive_loader(filepaths, root, device):
    """Split files into already-embedded archive entries and files to embed.

    Loads the saved database at root/'memery.pt', keeps entries whose slug
    still corresponds to a file on disk, and returns the remaining valid
    image files as (str(path), slug) pairs.
    """
    db = db_loader(root / 'memery.pt', device)
    slugs_on_disk = [slug for _, slug in filepaths]

    # keep archived entries whose file is still present; keys are the
    # entry's position in the stored db, as before
    archive_db = {}
    for position, (_, record) in enumerate(db.items()):
        if record['slug'] in slugs_on_disk:
            archive_db[position] = record

    known_slugs = [record['slug'] for record in archive_db.values()]
    new_files = [
        (str(path), slug)
        for path, slug in filepaths
        if slug not in known_slugs and verify_image(path)
    ]
    return (archive_db, new_files)
# The `db_loader` takes a location and returns either the archive dictionary or an empty dictionary. Decomposed to its own function so it can be called separately from `archive_loader` or `queryFlow`.
#export
def db_loader(dbpath, device):
    """Return the saved embedding database at `dbpath`, or {} when absent.

    `device` is passed to torch.load as the map_location.
    """
    if not Path(dbpath).exists():
        return {}
    return torch.load(dbpath, device)
# The library `annoy`, [Approximate Nearest Neighbors Oh Yeah!](https://github.com/spotify/annoy) allows us to search through vector space for approximate matches instead of exact best-similarity matches. We sacrifice accuracy for speed, so we can search through tens of thousands of images in less than a thousand times the time it would take to search through tens of images. There's got to be a better way to put that.
#export
from annoy import AnnoyIndex
#export
def treemap_loader(treepath):
    """Load the Annoy index at `treepath`, or return None if it is missing.

    The index stores 512-dimensional vectors under the angular metric,
    matching the embeddings written by the indexer.
    """
    if not treepath.exists():
        return None
    treemap = AnnoyIndex(512, 'angular')
    treemap.load(str(treepath))
    return treemap
# Demo cell: inline copy of treemap_loader() -- loads the saved Annoy index
# of 512-d angular vectors, or leaves `treemap` as None when absent.
treepath = Path('images/memery.ann')
treemap = AnnoyIndex(512, 'angular')
if treepath.exists():
    treemap.load(str(treepath))
else:
    treemap = None
# Here we just test on the local image folder
archive_db, new_files = archive_loader(get_image_files(root), root, device)
len(archive_db), len(new_files), treemap.get_n_items()
# +
dbpath = root/'memery.pt'
# dbpath_backup = root/'memery.pt'
db = db_loader(dbpath, device)
current_slugs = [slug for path, slug in filepaths]
# -
archive_db = {i:db[item[0]] for i, item in enumerate(db.items()) if item[1]['slug'] in current_slugs}
len(archive_db)
| 01_loader.ipynb |
# +
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the zebra problem as invented by <NAME>.
There are five houses.
The Englishman lives in the red house.
The Spaniard owns the dog.
Coffee is drunk in the green house.
The Ukrainian drinks tea.
The green house is immediately to the right of the ivory house.
The Old Gold smoker owns snails.
Kools are smoked in the yellow house.
Milk is drunk in the middle house.
The Norwegian lives in the first house.
The man who smokes Chesterfields lives in the house next to the man
with the fox.
Kools are smoked in the house next to the house where the horse is kept.
The Lucky Strike smoker drinks orange juice.
The Japanese smokes Parliaments.
The Norwegian lives next to the blue house.
Who owns a zebra and who drinks water?
"""
from __future__ import print_function
from ortools.sat.python import cp_model
# pylint: disable=too-many-statements
def solve_zebra():
    """Solve the zebra puzzle with CP-SAT and print the answers.

    Builds one 1..5 house-position variable per attribute, posts the puzzle
    constraints, and prints who drinks water and who owns the zebra.

    Bug fix: for pure satisfiability models, recent OR-Tools releases report
    OPTIMAL (not FEASIBLE) when a solution is found, so the status check now
    accepts both.  The repeated "adjacent houses" pattern is factored into a
    helper.
    """
    model = cp_model.CpModel()

    # House positions (1..5) for each colour, nationality, pet, drink, brand.
    red = model.NewIntVar(1, 5, 'red')
    green = model.NewIntVar(1, 5, 'green')
    yellow = model.NewIntVar(1, 5, 'yellow')
    blue = model.NewIntVar(1, 5, 'blue')
    ivory = model.NewIntVar(1, 5, 'ivory')

    englishman = model.NewIntVar(1, 5, 'englishman')
    spaniard = model.NewIntVar(1, 5, 'spaniard')
    japanese = model.NewIntVar(1, 5, 'japanese')
    ukrainian = model.NewIntVar(1, 5, 'ukrainian')
    norwegian = model.NewIntVar(1, 5, 'norwegian')

    dog = model.NewIntVar(1, 5, 'dog')
    snails = model.NewIntVar(1, 5, 'snails')
    fox = model.NewIntVar(1, 5, 'fox')
    zebra = model.NewIntVar(1, 5, 'zebra')
    horse = model.NewIntVar(1, 5, 'horse')

    tea = model.NewIntVar(1, 5, 'tea')
    coffee = model.NewIntVar(1, 5, 'coffee')
    water = model.NewIntVar(1, 5, 'water')
    milk = model.NewIntVar(1, 5, 'milk')
    fruit_juice = model.NewIntVar(1, 5, 'fruit juice')

    old_gold = model.NewIntVar(1, 5, 'old gold')
    kools = model.NewIntVar(1, 5, 'kools')
    chesterfields = model.NewIntVar(1, 5, 'chesterfields')
    lucky_strike = model.NewIntVar(1, 5, 'lucky strike')
    parliaments = model.NewIntVar(1, 5, 'parliaments')

    # Each attribute group occupies the five distinct houses.
    model.AddAllDifferent([red, green, yellow, blue, ivory])
    model.AddAllDifferent(
        [englishman, spaniard, japanese, ukrainian, norwegian])
    model.AddAllDifferent([dog, snails, fox, zebra, horse])
    model.AddAllDifferent([tea, coffee, water, milk, fruit_juice])
    model.AddAllDifferent(
        [parliaments, kools, chesterfields, lucky_strike, old_gold])

    def add_adjacent(a, b, name):
        """Constrain |a - b| == 1: the two attributes are in adjacent houses."""
        diff = model.NewIntVar(-4, 4, name)
        model.Add(diff == a - b)
        model.AddAbsEquality(1, diff)

    model.Add(englishman == red)
    model.Add(spaniard == dog)
    model.Add(coffee == green)
    model.Add(ukrainian == tea)
    model.Add(green == ivory + 1)
    model.Add(old_gold == snails)
    model.Add(kools == yellow)
    model.Add(milk == 3)
    model.Add(norwegian == 1)
    add_adjacent(fox, chesterfields, 'diff_fox_chesterfields')
    add_adjacent(horse, kools, 'diff_horse_kools')
    model.Add(lucky_strike == fruit_juice)
    model.Add(japanese == parliaments)
    add_adjacent(norwegian, blue, 'diff_norwegian_blue')

    # Solve and print out the solution.
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # Satisfiability models return OPTIMAL when fully solved; accept both.
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        people = [englishman, spaniard, japanese, ukrainian, norwegian]
        water_drinker = [
            p for p in people if solver.Value(p) == solver.Value(water)
        ][0]
        zebra_owner = [
            p for p in people if solver.Value(p) == solver.Value(zebra)
        ][0]
        print('The', water_drinker.Name(), 'drinks water.')
        print('The', zebra_owner.Name(), 'owns the zebra.')
    else:
        print('No solutions to the zebra problem, this is unusual!')
solve_zebra()
| examples/notebook/examples/zebra_sat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Big Mart Sales Prediction
# +
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from scipy.stats import mode
import matplotlib.pyplot as plt
# %matplotlib inline
#Read files:
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# -
# ## Data Exploration
train.shape, test.shape
train.columns
test.columns
# + active=""
# Let's merge train and test dataset into a dataframe 'data' and then do some feature engineering on the combined dataset, it saves us from performing the same steps twice on test and train.
# -
train['source'] = 'train'
test['source'] = 'test'
test['Item_Outlet_Sales'] = 0
data = pd.concat([train, test], sort = False)
print(train.shape, test.shape, data.shape)
data.apply(lambda x: sum(x.isnull()))
# ##### Only Item_Weight and Outlet_Size have missing values. These can be imputed during the Data Cleaning process.
data.describe()
# #### Some observations which could be made:
#
# #Item_Visibility has a min value of zero. This makes no practical sense because when a product is being sold in a store, the visibility cannot be 0.
#
# #Outlet_Establishment_Years vary from 1985 to 2009. The values might not be apt in this form. Rather, if we can convert them to how old the particular store is, it should have a better impact on sales.
data['Outlet_Establishment_Year'].value_counts()
# + active=""
# Now let's find out the number of unique values:
# -
data.apply(lambda x: len(x.unique()))
data['Item_Outlet_Sales'].describe()
# ##### Lets start looking Outlet_Size, Outlet_Location_Type, and Outlet_Type distribution in Item_Outlet_Sale
# +
plt.figure(figsize = (10,9))
plt.subplot(311)
sns.boxplot(x='Outlet_Size', y='Item_Outlet_Sales', data=data, palette="Set1")
plt.subplot(312)
sns.boxplot(x='Outlet_Location_Type', y='Item_Outlet_Sales', data=data, palette="Set1")
plt.subplot(313)
sns.boxplot(x='Outlet_Type', y='Item_Outlet_Sales', data=data, palette="Set1")
# -
sns.distplot(data['Item_Outlet_Sales'])
# + active=""
# We can conclude from the given distplot that it:
# -> Deviate from the normal distribution.
# -> Has appreciable positive skewness.
# -> Shows peakedness.
#
# -
# Quantify the distribution shape of the target variable.
print('Skewness: %f' % data['Item_Outlet_Sales'].skew())
# Fixed typo in the printed label ("Kurtsis" -> "Kurtosis").
print('Kurtosis: %f' % data['Item_Outlet_Sales'].kurt())
# ## Data Cleaning
#
# Impute the missing values in Item_Weight by the average weight of the particular item.
# +
item_avg_weight = data.pivot_table(values='Item_Weight', index='Item_Identifier')
missing_values = data['Item_Weight'].isnull()
print('Missing values: %d' %sum(missing_values))
data.loc[missing_values,'Item_Weight'] = data.loc[missing_values,'Item_Identifier'].apply(lambda x: item_avg_weight.at[x,'Item_Weight'])
print('Missing values after immputation %d' %sum(data['Item_Weight'].isnull()))
# -
# Impute the missing values in Outlet_Size by the mode of the same.
outlet_size_mode = data.pivot_table(values='Outlet_Size', columns='Outlet_Type',aggfunc=(lambda x:mode(x).mode[0]) )
print("Mode for each Outlet_Type:")
print(outlet_size_mode)
# #### Now, there are no missing values
# ##### We saw that there are some typos and differences in representation in the categories of the Item_Fat_Content variable: 'Low Fat' also appears as 'LF' and 'low fat', and 'Regular' also appears as 'reg'. These should be corrected as follows:
# +
#Change categories of low fat:
print('Original Categories:')
print(data['Item_Fat_Content'].value_counts())
print('\nModified Categories:')
data['Item_Fat_Content'] = data['Item_Fat_Content'].replace({'LF':'Low Fat',
'reg':'Regular',
'low fat':'Low Fat'})
print(data['Item_Fat_Content'].value_counts())
# -
# ## Feature Engineering
# We saw during the data exploration that the minimum value of Item_Visibility is 0, which is impractical. So we treat those zeros as missing information and impute them with the mean visibility of that product.
# +
#Determine average visibility of a product
visibility_avg = data.pivot_table(values='Item_Visibility', index='Item_Identifier')
#Impute 0 values with mean visibility of that product:
missing_values = (data['Item_Visibility'] == 0)
print ('Number of 0 values initially: %d'%sum(missing_values))
data.loc[missing_values,'Item_Visibility'] = data.loc[missing_values,'Item_Identifier'].apply(lambda x: visibility_avg.at[x, 'Item_Visibility'])
print ('Number of 0 values after modification: %d'%sum(data['Item_Visibility'] == 0))
# -
# #### Now there are no '0' values
# ### Create a broad category of Item_type
# ###### We saw that there are 16 types of Item, but there is a catch every item has a unique ID i.e Item_Identifier and the Item_Identifier starts with either 'FD', 'NC'or 'DR'. And hence, we combine the each type together as follows:
# FC = Food
# NC = Non-Consumables
# DR = Drinks
#Get the first two characters of Item_Identifier:
data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
#Rename them to more intuitive categories:
data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food',
'NC':'Non-Consumable',
'DR':'Drinks'})
data['Item_Type_Combined'].value_counts()
# +
plt.figure(figsize = (8,5))
plt.subplot(211)
sns.boxplot(x='Item_Type_Combined', y='Item_Outlet_Sales', data=data, palette="Set1")
plt.subplot(212)
sns.boxplot(x='Item_Fat_Content', y='Item_Outlet_Sales', data=data, palette="Set1")
plt.subplots_adjust(wspace = 0.2, hspace = 0.4,top = 1.5)
plt.show()
# -
# #### Numerical and One-Hot Coding of Categorical variables
# Since scikit-learn accepts only numerical variables, convert all categories of nominal variables into numeric types.
# One-Hot-Coding refers to creating dummy variables, one for each category of a categorical variable. For example, the Item_Fat_Content has 2 categories – ‘Low Fat’and ‘Regular’. One hot coding will remove this variable and generate 2 new variables. Each will have binary numbers – 0 (if the category is not present) and 1(if category is present). This can be done using ‘get_dummies’ function of Pandas.
#Import library:
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
le = LabelEncoder()
#New variable for outlet
data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type','Outlet']
le = LabelEncoder()
for i in var_mod:
data[i] = le.fit_transform(data[i])
#One Hot Coding:
data = pd.get_dummies(data, columns=['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Outlet_Type',
'Item_Type_Combined','Outlet'])
data.dtypes
data[['Item_Fat_Content_0','Item_Fat_Content_1']].head(10)
# You can notice that each row will have only one of the columns as 1 corresponding to the category in the original variable.
# ### Exporting Data
# ##### Final step is to convert data back into train and test data sets. Its generally a good idea to export both of these as modified data sets so that they can be re-used for multiple sessions. This can be achieved using following code:
# +
# Drop the columns which have been converted to different types and items
# that do not affect the result much:
data.drop(['Item_Type', 'Outlet_Establishment_Year'], axis=1, inplace=True)

# Divide back into test and train.  .copy() materialises each slice so the
# in-place drops below operate on independent frames instead of views of
# `data` (avoids pandas SettingWithCopyWarning and silent no-ops).
train = data.loc[data['source'] == "train"].copy()
test = data.loc[data['source'] == "test"].copy()

# Drop unnecessary columns:
test.drop(['Item_Outlet_Sales', 'source'], axis=1, inplace=True)
train.drop(['source'], axis=1, inplace=True)

# Export files as modified versions:
train.to_csv("train_modified.csv", index=False)
test.to_csv("test_modified.csv", index=False)
# -
# #### Now the data is ready for Model Building
# ## Model Building
# Reading modified data
train2 = pd.read_csv("train_modified.csv")
test2 = pd.read_csv("test_modified.csv")
train2.head()
X_train = train2.drop(['Item_Outlet_Sales', 'Outlet_Identifier','Item_Identifier'], axis=1)
y_train = train2.Item_Outlet_Sales
X_test = test2.drop(['Outlet_Identifier','Item_Identifier'], axis=1)
X_train.head()
y_train.head()
# #### Let's first explore how Linear Regression is working
# ### Linear Regression Model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the test set results
y_pred = regressor.predict(X_test)
y_pred
# +
import warnings
warnings.filterwarnings('ignore')
# Measuring Accuracy
from sklearn.metrics import accuracy_score, r2_score, mean_squared_error
from sklearn.model_selection import cross_val_score
# +
lr_accuracy = round(regressor.score(X_train,y_train) * 100,2)
lr_accuracy
# -
r2_score(y_train, regressor.predict(X_train))
# `metrics` was never imported (NameError); mean_squared_error was imported directly.
print("RMSE : %.4g" % np.sqrt(mean_squared_error(y_train, regressor.predict(X_train))))
# +
submission = pd.DataFrame({
'Item_Identifier':test2['Item_Identifier'],
'Outlet_Identifier':test2['Outlet_Identifier'],
'Item_Outlet_Sales': y_pred
},columns=['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales'])
submission.to_csv('submissionLR.csv',index=False)
# -
# ### Decision Tree Model
#
# #### Let's see if this model improves the accuracy or not.
# Fitting Decision Tree Regression to the dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(max_depth=15,min_samples_leaf=300)
regressor.fit(X_train, y_train)
# Predicting the test set results
y_pred = regressor.predict(X_test)
y_pred
tree_accuracy = round(regressor.score(X_train,y_train),2)
tree_accuracy
# +
r2_score(y_train, regressor.predict(X_train))
# -
# `metrics` was never imported (NameError); mean_squared_error was imported directly.
print("RMSE : %.4g" % np.sqrt(mean_squared_error(y_train, regressor.predict(X_train))))
# #### Accuracy as well as RMSE has improved, for obvious reasons.
# + active=""
# submission = pd.DataFrame({
# 'Item_Identifier':test2['Item_Identifier'],
# 'Outlet_Identifier':test2['Outlet_Identifier'],
# 'Item_Outlet_Sales': y_pred
# },columns=['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales'])
#
# submission.to_csv('submissionDT.csv',index=False)
# -
# ### Random Forest Model
# #### Let's see the improvements which random forest model brings out.
# Fitting Random Forest Regression to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=100,max_depth=6, min_samples_leaf=50,n_jobs=4)
regressor.fit(X_train, y_train)
# Predicting the test set results
y_pred = regressor.predict(X_test)
y_pred
rf_accuracy = round(regressor.score(X_train,y_train),2)
rf_accuracy
r2_score(y_train, regressor.predict(X_train))
# `metrics` was never imported (NameError); mean_squared_error was imported directly.
print("RMSE : %.4g" % np.sqrt(mean_squared_error(y_train, regressor.predict(X_train))))
# +
submission = pd.DataFrame({
'Item_Identifier':test2['Item_Identifier'],
'Outlet_Identifier':test2['Outlet_Identifier'],
'Item_Outlet_Sales': y_pred
},columns=['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales'])
submission.to_csv('submissionRF.csv',index=False)
# -
# #### Undoubtedly, Random Forest Model works better than Decision Tree and Linear Regression, but it may not be the best solution out there.
#
| BigMartProject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="mVNL2ZNYUihx"
# !pip install -q jina git+https://github.com/jina-ai/jina-commons
# !pip install -q torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
# !pip install -q torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
# !pip install -q git+https://github.com/rusty1s/pytorch_geometric.git
# + colab={"base_uri": "https://localhost:8080/"} id="9DMxdGbtUvqm" executionInfo={"status": "ok", "timestamp": 1630014159313, "user_tz": -330, "elapsed": 582, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b6f7d14e-15fb-4d0e-ea0c-df85b2db700d"
# !mkdir /content/x && git clone https://github.com/pmernyei/wiki-cs-dataset /content/x
# + id="YEcHbgorcM_W" executionInfo={"status": "ok", "timestamp": 1630014631920, "user_tz": -330, "elapsed": 1033, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import json
from itertools import chain

import pandas as pd
import torch
import torch.nn.functional as F

from torch_geometric.data import InMemoryDataset, Data, download_url
from torch_geometric.datasets import wikics
from torch_geometric.nn import GCNConv
# + [markdown] id="Ci8nXUy1dm48"
# ### Data loading
# + colab={"base_uri": "https://localhost:8080/"} id="_GjDOKSKcM9I" executionInfo={"status": "ok", "timestamp": 1630014650564, "user_tz": -330, "elapsed": 831, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e3a2d9e1-4002-4a29-8840-16029388ea32"
dataset = wikics.WikiCS('./wiki-cs-dataset_autodownload')
dataset.data
# + [markdown] id="h2-sj5Lvdk-4"
# ### Data exploration
# + colab={"base_uri": "https://localhost:8080/"} id="3aeNLymZcM6T" executionInfo={"status": "ok", "timestamp": 1630014665006, "user_tz": -330, "elapsed": 619, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2dc92f0c-bc18-499e-83f7-6dd71ac8ce8b"
dataset.num_classes
# + colab={"base_uri": "https://localhost:8080/"} id="783VLyklcM3a" executionInfo={"status": "ok", "timestamp": 1630014680340, "user_tz": -330, "elapsed": 846, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="dcd0a9d0-8cf2-4d04-b03c-b5ab3f5efc1a"
# the 300 dimension corresponds to glove embebeddings
# for each word in the document averaged over
dataset.data.x.shape
# + colab={"base_uri": "https://localhost:8080/"} id="jdq8vYDacjrU" executionInfo={"status": "ok", "timestamp": 1630014792546, "user_tz": -330, "elapsed": 6206, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6cc98a3f-ee52-4f2d-8fc1-f6a34ca88208"
metadata = json.load(open('/content/x/dataset/metadata.json'))
metadata.keys()
# + colab={"base_uri": "https://localhost:8080/"} id="lg2d7roYcjou" executionInfo={"status": "ok", "timestamp": 1630014795942, "user_tz": -330, "elapsed": 752, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="60890f84-a056-4c2d-ac65-87d82d38d47f"
metadata['labels']
# + colab={"base_uri": "https://localhost:8080/"} id="QLWTTVLLcjl6" executionInfo={"status": "ok", "timestamp": 1630014802159, "user_tz": -330, "elapsed": 584, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="06756df3-53ca-4c3a-e0f1-e0267870d01e"
len(metadata['nodes'])
# + [markdown] id="32d64346"
# For each node we have the following information
# + id="2e9fffdc" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630014845660, "user_tz": -330, "elapsed": 647, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fabd4c4f-f208-4583-baf9-f59edb9119b1"
metadata['nodes'][40].keys()
# + [markdown] id="a8f1a3ca"
# Note that from a node `title` we can construct a valid URL from wikipedia as follows:
# + id="374b8743" executionInfo={"status": "ok", "timestamp": 1630014848917, "user_tz": -330, "elapsed": 1085, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
def create_url(title):
    """Return the English-Wikipedia article URL for the given page title."""
    return 'https://en.wikipedia.org/wiki/' + title
# + id="04081a77" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1630014849787, "user_tz": -330, "elapsed": 16, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b1c6d61b-87b1-436f-9be7-df3fdb930543"
pos = 1900
create_url(metadata['nodes'][pos]['title'])
# + [markdown] id="Wii36Mx3cjgJ"
# ### Defining a GCN
# + id="spsdxFDuV61_" executionInfo={"status": "ok", "timestamp": 1630014886965, "user_tz": -330, "elapsed": 1025, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
class GCN(torch.nn.Module):
    """Two-layer graph convolutional network for node classification.

    conv1 maps node features to a hidden representation; conv2 maps the
    hidden representation to per-class logits.
    """
    def __init__(self, num_node_features=300, num_classes=10, hidden_channels=128):
        super(GCN, self).__init__()
        # fixed seed so layer initialisation is reproducible across runs
        torch.manual_seed(12345)
        self.conv1 = GCNConv(num_node_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)
    def encode(self, x, edge_index):
        """Return conv1's activations for x (the hidden-layer embedding).

        Registers a temporary forward hook on conv1, runs a normal forward
        pass to trigger it, then removes the hook.
        """
        feature_map = None
        def get_activation(model, model_inputs, output):
            # capture conv1's output; `nonlocal` writes into encode's scope
            nonlocal feature_map
            feature_map = output.detach()
        handle = self.conv1.register_forward_hook(get_activation)
        self.forward(x, edge_index)
        handle.remove()
        return feature_map
    def forward(self, x, edge_index):
        """Full pass: conv1 -> ReLU -> dropout (train only) -> conv2 logits."""
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x
# + id="BSy-Zq0_cjeG" executionInfo={"status": "ok", "timestamp": 1630014931457, "user_tz": -330, "elapsed": 620, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
num_classes = len(dataset.data.y.unique())
num_features = dataset.data.x.shape[1]
model = GCN(num_node_features=num_features,
num_classes=num_classes,
hidden_channels=128)
# + colab={"base_uri": "https://localhost:8080/"} id="9uqlpi51cja8" executionInfo={"status": "ok", "timestamp": 1630014940107, "user_tz": -330, "elapsed": 693, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0839ffdd-0d6b-423d-ee54-cf7e407cd59b"
model
# + [markdown] id="388e0e93"
# ### Training a GCN
# + id="55771b24" executionInfo={"status": "ok", "timestamp": 1630015028003, "user_tz": -330, "elapsed": 1888, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
data = dataset.data
loss_values = []
# + id="4150d8f9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630015789615, "user_tz": -330, "elapsed": 761622, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7a86ec18-3090-4a25-d744-2f008eb4b77a"
# Adam with L2 regularisation (weight_decay); a standard choice for GCNs.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
# Multi-class node-classification loss over raw logits.
criterion = torch.nn.CrossEntropyLoss()
def train(model, data):
    """Run one full-batch optimisation step and return the loss tensor.

    Uses the module-level `optimizer` and `criterion`.  Note the loss is
    computed over ALL nodes (the train_mask variant is commented out in the
    original notebook).
    """
    model.train()
    optimizer.zero_grad()
    logits = model(data.x, data.edge_index)
    loss = criterion(logits, data.y)
    loss.backward()
    optimizer.step()
    return loss
def test(model, data):
    """Return classification accuracy over the nodes in data.test_mask."""
    model.eval()
    predictions = model(data.x, data.edge_index).argmax(dim=1)
    hits = predictions[data.test_mask] == data.y[data.test_mask]
    return int(hits.sum()) / int(data.test_mask.sum())
# Full-batch training loop; losses are collected for later inspection.
# NOTE(review): appends live loss tensors (with autograd graph attached) --
# consider float(loss) to release memory; confirm downstream usage.
n_epochs = 400
for epoch in range(1, n_epochs):
    loss = train(model, data)
    loss_values.append(loss)
    print(f'\rEpoch: {epoch:03d}, Loss: {loss:.4f}', end='')
# + id="0949b204" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630016492388, "user_tz": -330, "elapsed": 1649, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="5146afcb-4788-424e-d334-5370daf1659c"
test_acc = test(model, data)
print(f'Test Accuracy: {test_acc:.4f}')
# + [markdown] id="6f8af27b"
# ### Storing model to disk
# + id="7a3a0caf" executionInfo={"status": "ok", "timestamp": 1630016516911, "user_tz": -330, "elapsed": 729, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
torch.save(model.state_dict(), './saved_model.torch')
# + [markdown] id="82f51264"
# ### Load model from disk
# + id="72972626" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630016530971, "user_tz": -330, "elapsed": 703, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cc11da04-2600-4205-b48c-0daaf5b3080d"
model2 = GCN(num_node_features= num_features,
num_classes= num_classes,
hidden_channels=128)
model2.load_state_dict(torch.load('./saved_model.torch'))
# + id="a7cf10ec" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1630016537738, "user_tz": -330, "elapsed": 1601, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8f8e0a87-70ed-46a8-cd8e-21c8a49a09c9"
test(model2, data)
# + [markdown] id="b3f37d3c"
# ### Visualized learned embeddings
# + id="621ae501" executionInfo={"status": "ok", "timestamp": 1630016551994, "user_tz": -330, "elapsed": 1107, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Helper function for visualization.
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
def visualize(h, color):
    """Project embeddings `h` to 2-D with t-SNE and scatter-plot by class.

    Bug fix: the original ignored the `h` parameter and read the global
    `out` instead; it now uses its argument, so any embedding can be
    visualised.
    """
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
# + id="0e14b722" colab={"base_uri": "https://localhost:8080/", "height": 578} executionInfo={"status": "ok", "timestamp": 1630016694224, "user_tz": -330, "elapsed": 140375, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d120fc78-920c-4f4a-b563-8792765c4813"
out = model(data.x, data.edge_index)
visualize(out, color=data.y)
| _docs/nbs/reco-tut-gml-wiki-link-recommender-gcn-pytorch-geometric.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Make a regular expression to get all IP addresses from the below link and Extract the IP addresses.¶
# https://study-ccna.com/classes-of-ip-addresses/
# +
# importing the module
import re

# opening and reading the file
with open('C:/Users/user/Desktop/New Text Document.txt') as fh:
    fstring = fh.readlines()

# regex pattern for dotted-quad IP-like tokens.
# NOTE(review): \d{1,3} also matches invalid octets such as 999 -- confirm
# the input only contains valid addresses if strict validation matters.
pattern = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')

# extracting the first IP address on each line; lines without a match are
# skipped (the original indexed the match object directly and raised
# TypeError when search() returned None)
lst = []
for line in fstring:
    match = pattern.search(line)
    if match is not None:
        lst.append(match[0])

# displaying the extracted IP addresses
print(lst)
| DAY 16 ASSIGNMENT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to Python Packages!
# <hr style="height:1px;border:none;color:#666;background-color:#666;" />
# [**<NAME>**](https://www.tomasbeuzen.com/) & [**<NAME>**](http://tiffanytimbers.com)
# Python packages are the fundamental unit of shareable code in Python. Packages make it easy to reuse and maintain your code, and are how you share your code with your colleagues and the wider Python community. *Python Packages* is an open source textbook that describes modern and efficient workflows for creating Python packages. Its scope and intent are inspired by the [R packages](https://r-pkgs.org/) book written by <NAME> and <NAME>.
#
# This book is currently under development. Please feel free to provide comments or suggestions in a [GitHub issue](https://github.com/UBC-MDS/py-pkgs/issues).
#
# ```{figure} images/py-pkgs-hex.png
# ---
# height: 300px
# name: py-pkg
# ---
# ```
| py-pkgs/welcome.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from scripts.download_script import *
red_wine_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
white_wine_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv'
data_folder = 'data'
df_red = download_data(red_wine_url, data_folder)
# -
pip install docopt
| misc/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import glob
import os
# %matplotlib inline
# ## Model
df = pd.read_csv('../../train/KPI/train_18fbb1d5a5dc099d.csv', index_col='timestamp')
def classify(df):
    """Label each row as anomalous (1) when 'value' is above 13.0 or below
    1.3, otherwise normal (0); returns a numpy integer array."""
    values = df['value']
    is_outlier = (values > 13.0) | (values < 1.3)
    return np.asarray(is_outlier, dtype=int)
y_pred = classify(df)
f1_score(y_pred,df.label.values)
# ## Image Analysis
rem_out = df[ (df['value'] < (13.0)) ]
rem_out.plot(kind='line', label='label',figsize=(14,8))
plt.title('KPI 18fbb1d5a5dc099d Distribution')
plt.locator_params(axis='x', nbins=8)
plt.xlabel('Time')
plt.ylabel('Value')
plt.savefig('KPI_18fbb1d5a5dc099d_NO.png', bbox_inches='tight')
| THU--Advanced_Network_Management/GP/Models/NaiveClass/Naive_18fbb1d5a5dc099d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # School DataFrame
#
# #### Version: 2
# #### Creation Date: 2/5/21
# #### File Names Generated: school_df_v2.csv
#
#
# #### Updates from previous version:
# * Adds in budget info
#
# #### Notes:
# * new column names listed as **bold** below
# ---
# COLUMN NAMES:
# * 'attnd_absence_1-5_SY1718',
# * 'attnd_absence_1-5_SY1819',
# * 'attnd_absence_11-20_SY1718',
# * 'attnd_absence_11-20_SY1819',
# * 'attnd_absence_20+_SY1718',
# * 'attnd_absence_20+_SY1819',
# * 'attnd_absence_6-10_SY1718',
# * 'attnd_absence_6-10_SY1819',
# * 'attnd_count_truancy_aged_students_SY1718',
# * 'attnd_count_truancy_aged_students_SY1819',
# * **'school_budgeted_amount_FY16',**
# * **'school_budgeted_amount_FY17',**
# * **'school_budgeted_enrollment_FY16',**
# * **'school_budgeted_enrollment_FY17',**
# * 'school_capacity_SY1718',
# * 'school_capacity_SY1819',
# * 'school_cluster',
# * 'school_code',
# * 'school_enrollment_SY1718',
# * 'school_enrollment_SY1819',
# * 'school_grade_band',
# * 'school_grade_range',
# * 'school_latitude',
# * 'school_longitude',
# * 'school_name',
# * 'school_sector',
# * 'school_star_rating_SY1718',
# * 'school_star_rating_SY1819',
# * 'school_star_score_SY1718',
# * 'school_star_score_SY1819',
# * 'school_unfilled_seats_SY1718',
# * 'school_unfilled_seats_SY1819',
# * 'school_ward'
# ---
# <br><Br>
import pandas as pd
import numpy as np
# + active=""
# pip install openpyxl
# -
# <br><br>
school_df = pd.read_csv('../data/cleaned/school_df_v1.csv')
school_df.head(2)
# <br><br>
budget_16 = pd.read_excel('../data/from_drive/FY16_budget_school.xlsx',sheet_name='reformatted')
budget_16.tail(2)
budget_16.columns
budget_16.info()
budget_16 = budget_16[budget_16['school_code'] > 0]
budget_16['school_code']=budget_16['school_code'].astype(int)
grab_section = pd.DataFrame()
grab_section['school_code']= budget_16['school_code']
grab_section['school_budgeted_amount_FY16']= budget_16['school_budgeted_amount_FY16']
grab_section['school_budgeted_enrollment_FY16']= budget_16['school_budgeted_enrollment_FY16']
grab_section.info()
school_df = school_df.join(grab_section.set_index('school_code'),on='school_code',how='left')
school_df.head()
# <br><br>
#budget Desktop/DSI/Submissions/Projects/project-5/data/from_drive/FY16_budget_school.xlsx
budget_17 = pd.read_excel('../data/from_drive/FY17_budget_allocation_school.xlsx',sheet_name='Sheet1')
budget_17.tail(2)
budget_17.info()
school_df = school_df.join(budget_17.set_index('school_code'),on='school_code',how='left')
school_df.head()
# <br><br>
school_df.columns.sort_values()
school_df.to_csv('../data/cleaned/school_df_v2.csv', index=False)
| code/create_school_df_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ypRhwsJ75e2r" colab_type="code" outputId="efaacc80-66f3-489e-ce9e-e2fa585b830c" colab={"base_uri": "https://localhost:8080/", "height": 51}
# %tensorflow_version 2.x
import numpy as np
import tensorflow as tf
import time
import matplotlib.pyplot as plt
import pandas as pd
import torch
from IPython.display import clear_output
from google.colab import files
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.applications import vgg19
from tensorflow.keras.applications.vgg19 import preprocess_input
from PIL import Image
torch.cuda.empty_cache()
gpu_name = tf.test.gpu_device_name()
cpu_name = '/cpu:0'
print(gpu_name)
# + id="kpasRmBl6JKN" colab_type="code" outputId="1ad135c1-c776-4587-c147-7dd5adf0e8cb" colab={"base_uri": "https://localhost:8080/", "height": 34}
download = False
if download:
VGG19 = vgg19.VGG19(include_top=False, weights='imagenet')
VGG19.trainable = False
#tf.keras.models.save_model(VGG19, 'drive/My Drive/VGG19.h5')
else:
VGG19 = tf.keras.models.load_model('drive/My Drive/VGG19.h5')
#VGG19.trainable = False
# + id="DG4-uXwV7SNN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="f592b24c-34cb-4131-af15-d865f73858f5"
from google.colab import drive
drive.mount('/content/drive')
# + id="nsC4rLI9vu6W" colab_type="code" colab={}
def model_maker():
    """Assemble the feature extractor used for style transfer: the outputs
    of the five style layers followed by the single content layer of the
    globally loaded VGG19 network."""
    style_names = ['block%d_conv1' % i for i in range(1, 6)]
    content_names = ['block4_conv2']
    outputs = [VGG19.get_layer(n).output for n in style_names + content_names]
    return models.Model(VGG19.input, outputs)
# + id="Bi8RXXBFNITg" colab_type="code" colab={}
def content_loss(output, content, model):
    # Mean squared error between the content features of `output` and the
    # pre-computed `content` features. Index 5 selects the content layer,
    # which model_maker() appends after the five style layers.
    # NOTE(review): this function appears unused — style_transfer() uses
    # content_loss_func() and binds a local variable of the same name.
    R_C = model.predict(output)[5]
    return tf.reduce_mean(tf.square(R_C-content))
# + id="9045pP5sd1jF" colab_type="code" colab={}
def gram_matrix(input_tensor):
    """Gram matrix of a feature map, normalised by the number of spatial
    positions (rows of the flattened map)."""
    n_channels = int(input_tensor.shape[-1])
    flattened = tf.reshape(input_tensor, [-1, n_channels])
    n_positions = tf.shape(flattened)[0]
    unscaled = tf.matmul(flattened, flattened, transpose_a=True)
    return unscaled / tf.cast(n_positions, tf.float32)
# + id="oeK6TE5kHb8u" colab_type="code" colab={}
def get_style_loss(gram1, gram2):
    """Mean squared difference between two Gram matrices."""
    diff = gram1 - gram2
    return tf.reduce_mean(tf.square(diff))
# + id="kzH9_jRUd7vc" colab_type="code" colab={}
def style_loss_func(gram_style, gram_product, weights):
    """Weighted average of per-layer style losses.

    gram_style / gram_product: per-layer Gram matrices of the style image
    and the generated image. weights: per-layer weighting; the result is
    normalised by the total weight.
    """
    # The original kept several unused candidate weight vectors (W, w, v,
    # c, d, e) as dead code; they are removed here. Iterating with zip also
    # generalises the hard-coded range(5) to any number of layers.
    total = 0
    for gram_s, gram_p, weight in zip(gram_style, gram_product, weights):
        total += get_style_loss(gram_s, gram_p) * weight
    return total / sum(weights)
# + id="tT2iidxM3G2f" colab_type="code" colab={}
def content_loss_func(feature_output, feature_content):
    """Mean squared error between generated and target content features."""
    squared_diff = tf.square(feature_output - feature_content)
    return tf.reduce_mean(squared_diff)
# + id="40CXKDQIMVeD" colab_type="code" colab={}
def logger(product_image, i, closs, sloss, L, ratio, losses):
    # Progress display for the style-transfer loop: clears the cell output,
    # shows the current image next to the loss curve, prints the loss
    # breakdown, and records the iteration number to a file on Drive.
    clear_output(wait=True)
    plt.figure(figsize=(8, 6))
    plt.subplot(121)
    # Undo the VGG preprocessing for display: add back the channel means
    # and flip BGR -> RGB.
    plt.imshow(np.array(product_image[0,:,:,:]+np.array([103.939, 116.779, 123.68])).astype('uint8')[:,:,::-1])
    plt.subplot(122)
    plt.plot(losses)
    print('%3d [L_C %.3e] [L_S %.3e] [Loss %.3e] [C/S %.2e]'%(i,
                                                              closs,
                                                              sloss,
                                                              L,
                                                              ratio))
    plt.show()
    # Opened in 'w' mode: the log file is overwritten on every call, so it
    # only ever holds the most recent iteration number.
    log = open('drive/My Drive/ED/log.txt', 'w')
    log.write('ITERATION %3d'%(i))
    log.close()
# + id="gZT06eONy3kV" colab_type="code" colab={}
def image_loader(filename, max_pix_length):
    """Load an image, scale its longest side to max_pix_length while
    preserving aspect ratio, and return a VGG19-preprocessed tensor of
    shape (1, H, W, 3)."""
    image = Image.open(filename)
    w, h = image.size
    # Scale factor chosen so the longer of (w, h) becomes max_pix_length.
    W, H = int(w*max_pix_length/max(w, h)), int(h*max_pix_length/max(w, h))
    input_size = (W, H)
    image = image.resize(input_size)
    image = np.array(image).astype('float32')
    image = tf.reshape(image, (1, *image.shape))
    # vgg19.preprocess_input subtracts the ImageNet channel means that
    # logger()/image_saver() later add back for display.
    image = preprocess_input(image)
    return image
# + id="G9Wi25ePtNgU" colab_type="code" colab={}
def style_transfer(content_image_tensor, style_image_tensor,
                   product_image_tensor,
                   max_iter,
                   W_C, W_S, style_layers_weights,
                   LR, neural_model,
                   name):
    """Run gradient-based neural style transfer.

    Optimises product_image_tensor for max_iter Adam steps so that its
    content features match content_image_tensor and its Gram-matrix style
    statistics match style_image_tensor, weighted by W_C and W_S.
    Returns (final image tensor, lowest-loss image tensor, filename tag).
    """
    ## Setup and clean-up
    torch.cuda.empty_cache()
    optimizer = tf.keras.optimizers.Adam(learning_rate = LR)
    ## Constants
    loss_history = []
    MIN = np.inf
    # VGG preprocessing channel means; used below to clip pixels back into
    # the valid preprocessed range [-mean, 255-mean].
    norm_means = np.array([103.939, 116.779, 123.68])
    # neural_model returns [5 style layers..., content layer]; these target
    # features are computed once, outside the optimisation loop.
    style_image_features = neural_model(style_image_tensor)[:5]
    content_image_features = neural_model(content_image_tensor)[5]
    gram_list_style = [gram_matrix(feature) for feature in style_image_features]
    ## Image generation core
    with tf.device(gpu_name):
        total_runtime = time.time()
        for iteration in range(max_iter):
            start_time = time.time()
            # Re-wrap as a Variable each step because clip_by_value below
            # returns a plain tensor.
            product_image_tensor = tf.Variable(product_image_tensor)
            with tf.GradientTape() as grad:
                grad.watch(product_image_tensor)
                *style_features, content_feature = neural_model(product_image_tensor)
                gram_list_product = [gram_matrix(style_feature) for style_feature in style_features]
                content_loss = content_loss_func(content_feature,
                                                 content_image_features)*W_C
                style_loss = style_loss_func(gram_list_style,
                                             gram_list_product,
                                             style_layers_weights)*W_S
                ratio = (content_loss/style_loss)
                total_loss = content_loss + style_loss
            loss_history.append(total_loss)
            dy_dx = grad.gradient(total_loss, product_image_tensor)
            optimizer.apply_gradients([(dy_dx, product_image_tensor)])
            # Keep pixels inside the valid preprocessed-image range.
            product_image_tensor = tf.clip_by_value(product_image_tensor,
                                                    -norm_means,
                                                    255-norm_means)
            # Report progress every 10 iterations.
            if not iteration%10:
                logger(product_image_tensor, iteration, content_loss,
                       style_loss, total_loss, ratio, loss_history)
                iter_time = time.time()-start_time
                eta = (max_iter-iteration)*iter_time
                minutes, seconds = eta//60, eta%60
                print("Batch\t\t%8.3f (s)\nE.T.A\t\t%8.3f (s) %2dm %2ds"%(iter_time,
                                                                          eta,
                                                                          minutes,
                                                                          seconds))
            # Periodically free cached GPU memory (skipping iteration 0).
            if (not iteration%200) and iteration:
                torch.cuda.empty_cache()
            # Track the best (lowest-loss) image seen so far.
            if total_loss < MIN:
                best = product_image_tensor
                MIN = total_loss
    print("Total Runtime:\t%8.3f (s)"%(time.time()-total_runtime))
    # Encode the run's hyper-parameters into a filename tag for saving.
    values = (name, W_S, W_C, max_iter, LR, style_layers_weights)
    model_text = '%sS%.1e-C%.1e-%5d-LR%.1e-[W%s]'%values
    return product_image_tensor, best, model_text
# + id="kIpCIX9hqEUw" colab_type="code" colab={}
def image_saver(filename, image_tensor):
    """De-normalise image_tensor, save it as <filename>.png, trigger a
    browser download (Colab), and return the uint8 RGB array."""
    # Add back the VGG channel means and flip BGR -> RGB before saving.
    final = np.array(image_tensor[0,:,:,:]+np.array([103.939, 116.779, 123.68]))
    final = final.astype('uint8')[:,:,::-1]
    plt.imshow(final)
    Image.fromarray(final).save(filename+'.png')
    files.download(filename+'.png')
    return final
# + id="RP1OFC36WNxi" colab_type="code" colab={}
cConvNum = 5
DeepTransfer = model_maker()
## Pre-process
cname = 'challenger'
sname = 'ccc'
Name = 'DeepTransfer-%s+%s-'%(cname, sname)
bigside = 600
# Image 1: Content carrier
content_image_tensor = image_loader('drive/My Drive/ED/%s.jpg'%(cname), bigside)
# Image 2: Style carrier
style_image_tensor = image_loader('drive/My Drive/ED/%s.jpg'%(sname), bigside)
# Image 3: Product carrier
product_image_tensor = content_image_tensor
# Config
input_kwargs = {"content_image_tensor" : content_image_tensor,
"style_image_tensor" : style_image_tensor,
"product_image_tensor" : product_image_tensor,
"max_iter" : 200,
"W_C" : 1e+0,
"W_S" : 1e+4,
"LR" : 1e0,
"style_layers_weights" : np.array([1, 1, 1, 1, 1]),
"neural_model" : DeepTransfer,
"name" : Name
}
# + id="uJcXVidOWqfW" colab_type="code" outputId="71802aec-45d7-4b09-e44a-d1fd71f95753" colab={"base_uri": "https://localhost:8080/", "height": 453}
product_image_tensor, best_prod, filename = style_transfer(**input_kwargs)
#image_saver(filename)
# + id="N5FGCM8LWUQ-" colab_type="code" outputId="365f7661-7e45-4fdf-ec94-9ab86d161cfe" colab={"base_uri": "https://localhost:8080/", "height": 269}
final_image = image_saver(filename, best_prod)
# + id="OAwDYqmlXf3c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 130} outputId="5b29dc74-0599-49df-b51d-af62fca184fa"
Error Maker!
# + id="NcMosyR4rls9" colab_type="code" colab={}
def feature_show(n_filters = 10, save=False):
    """Plot n_filters randomly chosen feature maps from each of the five
    extracted layers, optionally saving/downloading the figure.

    NOTE(review): `sty` is not defined in this cell — presumably the style
    image tensor from the cells above; confirm before running.
    """
    fig = plt.figure(figsize=(20, 20))
    num = 0
    for layer in range(5):
        print(layer)
        # Perf fix: the original ran the full network once for the shape
        # and again for every filter; run it once per layer instead.
        feature_maps = DeepTransfer(sty)[layer][0, :, :, :]
        _, _, N = feature_maps.shape
        R = np.random.randint(N, size=(n_filters))
        for i in range(n_filters):
            num += 1
            plt.subplot(5, n_filters, num)
            plt.imshow(feature_maps[:, :, R[i]])
    if save:
        plt.savefig('X.png')
        files.download('X.png')
# + id="TwAiZ6RbxoTC" colab_type="code" colab={}
feature_show(10)
| StyleTransfer_XI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Explaining MNIST Image Classifiers with CXPlain
#
# First, we load 500 sample images from the MNIST dataset which we will use to attempt to recognise the digit type
# that a given image shows.
# + pycharm={"name": "#%%\n", "is_executing": false}
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from cxplain.util.test_util import TestUtil
num_subsamples = 500
(x_train, y_train), (x_test, y_test) = TestUtil.get_mnist(flattened=False, num_subsamples=num_subsamples)
# + [markdown] pycharm={"name": "#%% md\n"}
# Next, we fit a multilayer perceptron (MLP) that predicts the digit type that a given MNIST image shows.
# + pycharm={"name": "#%%\n", "is_executing": false}
from sklearn.neural_network import MLPClassifier
explained_model = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(64, 32), random_state=1)
explained_model.fit(x_train.reshape((len(x_train), -1)), y_train);
# + [markdown] pycharm={"name": "#%% md\n"}
# After fitting the multilayer perceptron, we wish to explain its decisions, i.e. what input features were most relevant
# for a given model prediction. To do so, we train a causal explanation (CXPlain) model that can learn to explain any
# machine-learning model using the same training data. In practice, we have to define:
# - `model_builder`: The type of model we want to use as our CXPlain model. In this case we are using a neural explanation model using
# a UNet structure.
# - `masking_operation`: The masking operation used to remove a certain input feature from the set of available input features. In this case we are using zero masking, i.e. setting the value of that input feature to zero.
# - `loss`: The loss function that we wish to use to measure the impact of removing a certain input feature from the set of available features. In most common use cases, this will be the mean squared error (MSE) for regression problems and the cross-entropy for classification problems.
#
# + pycharm={"name": "#%%\n", "is_executing": false}
from tensorflow.python.keras.losses import categorical_crossentropy
from cxplain import UNetModelBuilder, ZeroMasking, CXPlain
downsample_factors = (2, 2)
model_builder = UNetModelBuilder(downsample_factors, num_layers=2, num_units=64, activation="relu",
p_dropout=0.2, verbose=0, batch_size=256, learning_rate=0.001)
masking_operation = ZeroMasking()
loss = categorical_crossentropy
# + [markdown] pycharm={"name": "#%% md\n"}
# Using this configuration, we now instantiate a CXPlain model and fit it to the same MNIST data that we used to fit the MLP model that we wish to explain.
# We additionally request that 5 bootstrap resampled versions of this model are trained in order to be able to compute confidence intervals for the estimated importance scores.
# Only a single model is trained - and computation of confidence intervals is not possible - if you do not specify the number of models you wish to train.
# + pycharm={"name": "#%%\n", "is_executing": false}
explainer = CXPlain(explained_model, model_builder, masking_operation, loss,
num_models=5, downsample_factors=downsample_factors, flatten_for_explained_model=True)
explainer.fit(x_train, y_train);
# + [markdown] pycharm={"name": "#%% md\n"}
# We can then use this fitted CXPlain model to explain the predictions of the explained model on the held-out test samples.
# We additionally request confidence intervals at the 80% confidence level, i.e. the 10% and 90% quantiles of the observed output distribution of importance scores.
# Note that the importance scores are normalised to sum to a value of 1 and each score therefore represents the relative importance of each respective input feature.
# + pycharm={"name": "#%%\n", "is_executing": false}
attributions, confidence = explainer.explain(x_test, confidence_level=0.80)
# + [markdown] pycharm={"name": "#%% md\n"}
# We can now visualise the 2x2 pixel block attributions and their confidence for a specific sample from the test set using the `Plot` toolset available as part of CXPlain.
# + pycharm={"name": "#%%\n", "is_executing": false}
import numpy as np
import matplotlib.pyplot as plt
from cxplain.visualisation.plot import Plot
plt.rcdefaults()
selected_index = 2
selected_sample = x_test[selected_index]
importances = attributions[selected_index]
importances_confidence = confidence[selected_index]
Plot.plot_attribution_2d(selected_sample,
importances,
importances_confidence)
| examples/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# <h1 align="center">Explorar el concepto de Pluralismo utilizando la Ciencia de los Datos: un estudio de caso con el ecosistema mediático de Chile</h3>
# -
# <div align="center"><i>Autores: Profesores y Estudiantes del Magíster en Informática (Universidad Austral de Chile)</i></div>
# <div align="center"><i>I semestre 2018</i></div>
# <h2>1. Objetivo de investigación general</h2>
# <ul>
# <li><p>El <b>Pluralismo</b> de los medios es un principio que garantiza que l@s ciudadan@s disponen de una información política e ideológica diversificada, permitiéndoles ejercer su <i>espíritu crítico</i> y su <i>libertad de pensar</i>. Por lo tanto, la Organización de las Naciones Unidas para la Educación, la Ciencia y la Cultura (UNESCO) definió el pluralismo de los medios como una condición necesaria para construir la democracia.</p></li>
# <li><b>Medir para entender</b>: <i>“When you can measure what you are speaking about, and express it in numbers, you know something about it; but when you cannot measure it, when you cannot express it in numbers, your knowledge is of a meagre and unsatisfactory kind; it may be the beginning of knowledge, but you have scarcely, in your thoughts, advanced to the stage of science, whatever the matter may be.”</i> - <NAME> (1883)
# <li><b>Pregunta general:</b> ¿La Informática, más particularmente la Ciencia de los Datos, puede medir el Pluralismo de los medios? ¿Se puede establecer un protocolo computacional para medir y entender varias facetas del Pluralismo de los Medios basandose sobre técnicas de Clustering de datos?</li>
# </ul>
# <h2>2. Experimentación de Ciencia de los Datos</h2>
# <h3><b>1. Definir una pregunta de investigación </b></h3>
#
# <p><u>Ejemplos de preguntas:</u></p>
# <ul>
# <li>En el marco de una temática (<i>Mapuche, Cambio climático, Feminismo, Sexismo, etc.</i>), ¿en cuántos tópicos se dividen los discursos de los medios de prensa? ¿Existen diferencias significativas entre los distintos medios? </li>
# <li>En el marco de una semana de noticias, ¿en cuántos tópicos se dividen las noticias? ¿Existen diferencias significativas entre los distintos medios?</li>
# </ul>
#
# ...
# <h3>2. Recopilar y preparar los datos</h3>
# +
import numpy as np
import pandas as pd

# Load the tweet dataset (pipe-delimited, no header row)
df_feminismo = pd.read_csv('datasets/sophia_cambioclimatico_v2.csv',delimiter="|", header=None)
df_feminismo

# Select the messages (column 3 holds the tweet text).
# Bug fix: DataFrame/Series.as_matrix() was removed in pandas 1.0;
# to_numpy() is the supported replacement (available since pandas 0.24).
docs = df_feminismo[3].to_numpy()
len(docs)
# +
import nltk
nltk.download('stopwords')
nltk.download('punkt')
# load nltk's SnowballStemmer as variabled 'stemmer'
from nltk.stem.snowball import SnowballStemmer
from sklearn.metrics import adjusted_rand_score
stemmer = SnowballStemmer("spanish")
# load nltk's English stopwords as variable called 'stopwords'
stopwords = nltk.corpus.stopwords.words('spanish')
# here I define a tokenizer and stemmer which returns the set of stems in the text that it is passed
def tokenize_and_stem(text):
    """Tokenize *text* (sentences first, then words), keep only tokens that
    contain at least one letter, and return their Snowball stems."""
    all_tokens = [w
                  for sentence in nltk.sent_tokenize(text)
                  for w in nltk.word_tokenize(sentence)]
    # Drop purely numeric tokens and raw punctuation.
    lettered = [t for t in all_tokens if re.search('[a-zA-Z]', t)]
    return [stemmer.stem(t) for t in lettered]
def tokenize_only(text):
    """Tokenize *text* into lower-cased word tokens, keeping only tokens
    that contain at least one letter (drops numbers and punctuation)."""
    words = [w.lower()
             for sentence in nltk.sent_tokenize(text)
             for w in nltk.word_tokenize(sentence)]
    return [w for w in words if re.search('[a-zA-Z]', w)]
# -
# <h3>3. Explorar los datos</h3>
# +
#Aplicando Modelos Probabilistas de Tópicos y LDA
from sklearn.decomposition import LatentDirichletAllocation
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
def print_top_words(model, feature_names, n_top_words):
    """Print, for every topic in *model*, its index followed by the
    *n_top_words* highest-weighted feature names, with a blank line after
    each topic."""
    for idx, weights in enumerate(model.components_):
        # argsort ascending, then take the last n_top_words in reverse
        # order = the indices of the largest weights, descending.
        top_indices = weights.argsort()[:-n_top_words - 1:-1]
        top_terms = " ".join(feature_names[i] for i in top_indices)
        print("Topic #%d: %s" % (idx, top_terms))
        print()
#stopwords = nltk.corpus.stopwords.words('spanish')
tf_vectorizer = TfidfVectorizer(max_df=0.2, min_df=7,
stop_words=stopwords,tokenizer=tokenize_only, ngram_range=(1,1))
tf = tf_vectorizer.fit_transform(docs)
diccionario= tf_vectorizer.get_feature_names()
#Estimación de LDA con Bayes Variacional
lda = LatentDirichletAllocation(n_components=10, max_iter=10,
learning_method='online',
learning_offset=50.,
random_state=0)
lda.fit(tf)
#Cálculo de índice de ajuste de los datos
print(lda.perplexity(tf))
print("\nTopics in LDA model:")
print_top_words(lda, diccionario, 30)
# -
| actividades_LDA_unidad_2/INFO343-Clustering-Actividad3-RB-AK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="nc8Za2CHT56E" outputId="f25001cb-7bf4-4588-bb3c-4ebc2dedb371"
#install
# !pip install ipython-autotime
# !pip install autocorrect
# !pip install giphy-ipython-magic
#libraries
from IPython.display import YouTubeVideo
from autocorrect import Speller
import pandas as pd
#extensions
# %load_ext autotime
# %load_ext rpy2.ipython
# %load_ext giphy_magic
# + [markdown] id="5wOOXZ7JTmkH"
# # **Tips**
# + colab={"base_uri": "https://localhost:8080/"} id="imawsv3Cxjes" outputId="500ae072-d14d-4a01-963a-7fedca78872e"
#the autotime function will tell you how long it takes to run each cell within the notebook
for i in range(10000):
print(i)
# + id="dB88uS5qxfuq"
#read/write files to your Google drive
from google.colab import drive
drive.mount('/content/gdrive')
# + id="Hzcw5usxnnhx" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="81451377-d233-4fcc-bdb5-38e3ae9ca403"
#all magic functions
# %lsmagic
# + id="PAFwjyYZTqgE" colab={"base_uri": "https://localhost:8080/"} outputId="b30518f2-6af6-45e5-cca1-27da8eb5f588"
#@title
#you have the option to hide a cell if you use the above code
def square_root(i):
return i**(1/2)
print(f'The square root of 4 is {square_root(4):.0f}\n')
# + id="TNZ13DCXTqo6" colab={"base_uri": "https://localhost:8080/"} outputId="eeb98985-250f-4f7d-f0a5-e8a4bb437542"
#google colab makes it easy to work with R within python
# %%R
install.packages('Metrics')
# + id="OuocAn_vkS2D" colab={"base_uri": "https://localhost:8080/"} outputId="847705b4-ba42-46b1-a0d4-a272a343b3da" language="R"
# x <- 1
# x
# + id="JYMmaNzQTqsE" colab={"base_uri": "https://localhost:8080/"} outputId="2e8005ff-5fb5-4ea7-b77a-56de6894a522"
#check the history of code you've run
# %history
# + [markdown] id="Vp_-MwFoTsOs"
# # **Tricks**
# + id="6CYJ35yLTujP" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="f3cdf262-1183-4f1c-aa7d-31faa2a7c242"
#embed videos into your notebook
YouTubeVideo('ws6eWf2LeRg')
# + id="I569EJYUcHgh" colab={"base_uri": "https://localhost:8080/"} outputId="fb286151-2317-4b67-c2b0-adc2c913a2b8"
#use "CTRL + M + L" or "Command + M + L" to show line number within cells
print(1+1)
print(1+1)
print(1+1)
# + id="_xRkm6Yq9rPB" colab={"base_uri": "https://localhost:8080/", "height": 519} outputId="44219f9e-6535-4ea6-928a-003276abfc76"
#generate random gifs
# %giphy magic
# + [markdown] id="TPeXNLnVxR55"
# <font size="5"> To change the theme of a Google Colab Notebook, you \\
# can go to: Tools >> Settings >> Theme, and select the theme \\
# from: light, dark and adaptive.</font>
# + [markdown] id="FpC-qiM1Ji1K"
# # **References**
# + [markdown] id="Zk1uvsQ8OWgT"
# ## IPython Documentation - [Built-in magic commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html)
| Google Colab Tutorials/Google_Colab_Magic_Commands_and_Packages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.1
# language: julia
# name: julia-1.0
# ---
# # Knet-Flux CNN benchmark based on [Flux/model-zoo](https://github.com/FluxML/model-zoo/blob/master/vision/mnist/conv.jl) conv.jl example
]activate ..; instantiate; st
# +
# Uncomment this to get Knet profiling info at the end:
# ENV["KNET_TIMER"] = ENV["AUTOGRAD_TIMER"] = "true"
# using Pkg; Pkg.build("AutoGrad"); Pkg.build("Knet")
# -
using Flux, Flux.Data.MNIST, Statistics
using Flux: onehotbatch, onecold, crossentropy, throttle
using Base.Iterators: repeated, partition
using CuArrays
using Knet: Knet, AutoGrad, conv4, pool, KnetArray
Knet.gpu()
# Implement Chain, Conv and Dense in Knet
# Minimal Flux-like `Chain`: holds a tuple of layers applied in order.
struct kChain; layers; kChain(ls::Tuple)=new(ls); end
kChain(ls...)=kChain(ls)  # convenience: kChain(l1, l2, ...) wraps the varargs in a tuple
(c::kChain)(x) = (for l in c.layers; x = l(x); end; x)  # forward pass: fold x through every layer
# Fully connected layer: weight `w`, bias `b`, elementwise activation `f`.
struct kDense; w; b; f; end
kDense(nx::Int,ny::Int,fn=identity)=kDense(Knet.param(ny,nx),Knet.param0(ny),fn)
(d::kDense)(x) = d.f.(d.w * Knet.mat(x) .+ d.b)  # flatten input with Knet.mat, then affine map + activation
# Convolutional layer: 4-D weight tensor `w`, bias `b`, activation `f`.
struct kConv; w; b; f; end
kConv(w1,w2,cx,cy,fn=identity)=kConv(Knet.param(w1,w2,cx,cy),Knet.param0(1,1,cy,1), fn)
(f::kConv)(x) = pool(f.f.(conv4(f.w,x) .+ f.b))  # conv4 + bias + activation, followed by max pooling
# ## GPU tests
# Load data
imgs = MNIST.images()
labels = onehotbatch(MNIST.labels(), 0:9)
train = [(cat(float.(imgs[i])..., dims = 4), labels[:,i])
for i in partition(1:60_000, 1000)]
train = gpu.(train)
klabels = MNIST.labels() .+ 1
ktrain = [(KnetArray{Float32}(cat(float.(imgs[i])..., dims = 4)), klabels[i])
for i in partition(1:60_000, 1000)]
summary.((train[1]..., ktrain[1]...))
# +
# Run this several times to get timing for Flux:
# (loss(X, Y), accuracy(X, Y)) = (2.302674f0 (tracked), 0.109)
# 9.770545 seconds (2.27 M allocations: 138.747 MiB, 26.75% gc time)
# (loss(X, Y), accuracy(X, Y)) = (0.19522423f0 (tracked), 0.942)
m = Chain(
Conv((2,2), 1=>16, relu),
x -> maxpool(x, (2,2)),
Conv((2,2), 16=>8, relu),
x -> maxpool(x, (2,2)),
x -> reshape(x, :, size(x, 4)),
Dense(288, 10), softmax) |> gpu
loss(x, y) = crossentropy(m(x), y)
accuracy(x, y) = mean(onecold(m(x)) .== onecold(y))
opt = ADAM(params(m))
X,Y = train[1]
@show loss(X, Y), accuracy(X, Y)
@time for i in 1:10; Flux.train!(loss, train, opt); end
@show loss(X, Y), accuracy(X, Y);
# +
# Run this several times to get timing for Knet:
# (Knet.nll(km, kX, kY), Knet.accuracy(km, kX, kY)) = (2.2925608f0, 0.145)
# 2.766763 seconds (1.58 M allocations: 58.009 MiB, 12.79% gc time)
# (Knet.nll(km, kX, kY), Knet.accuracy(km, kX, kY)) = (0.15760595f0, 0.951)
km = kChain(
kConv(2,2,1,16,Knet.relu),
kConv(2,2,16,8,Knet.relu),
kDense(288,10))
kX,kY = ktrain[1]
iters(n)=(J->((n-=1)>=0))
@show Knet.nll(km,kX,kY), Knet.accuracy(km,kX,kY)
@time for i in 1:10; Knet.train!(km, ktrain; optimizer=Knet.Adam(), callback=iters(length(ktrain))); end
@show Knet.nll(km,kX,kY), Knet.accuracy(km,kX,kY);
# -
# ## CPU tests
# Load data
imgs = MNIST.images()
labels = onehotbatch(MNIST.labels(), 0:9)
train = [(cat(float.(imgs[i])..., dims = 4), labels[:,i])
for i in partition(1:60_000, 1000)]
# train = gpu.(train)
klabels = MNIST.labels() .+ 1
ktrain = [(cat(float.(imgs[i])..., dims = 4), klabels[i])
for i in partition(1:60_000, 1000)]
summary.((train[1]..., ktrain[1]...))
# +
# Run this several times to get timing for Flux:
m = Chain(
  Conv((2,2), 1=>16, relu),
  x -> maxpool(x, (2,2)),
  Conv((2,2), 16=>8, relu),
  x -> maxpool(x, (2,2)),
  x -> reshape(x, :, size(x, 4)),
  Dense(288, 10), softmax)
# Keep an untouched copy so the Knet model below starts from identical weights.
m0 = deepcopy(m)
loss(x, y) = crossentropy(m(x), y)
accuracy(x, y) = mean(onecold(m(x)) .== onecold(y))
opt = ADAM(params(m))
X,Y = train[1]
@show loss(X, Y), accuracy(X, Y)
@time Flux.train!(loss, train, opt)
@show loss(X, Y), accuracy(X, Y);
# -
# Run this several times to get timing for Knet:
# Convert a Flux parameter array into a Knet Param (copies the weights over).
f2k(a)=Knet.Param(Array(a))
km = kChain(kConv(f2k(m0.layers[1].weight.data),f2k(reshape(m0.layers[1].bias.data,(1,1,16,1))),Knet.relu),
            kConv(f2k(m0.layers[3].weight.data),f2k(reshape(m0.layers[3].bias.data,(1,1,8,1))),Knet.relu),
            kDense(f2k(m0.layers[6].W.data),f2k(m0.layers[6].b.data),identity))
kX,kY = ktrain[1]
# Same iteration-capping callback as in the earlier Knet cell.
iters(n)=(J->((n-=1)>=0))
@show Knet.nll(km,kX,kY), Knet.accuracy(km,kX,kY)
@time Knet.train!(km, ktrain; optimizer=Knet.Adam(), callback=iters(length(ktrain)))
@show Knet.nll(km,kX,kY), Knet.accuracy(km,kX,kY);
| notebooks/conv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 2: Ways to send data over HTTP
#
# Examples using the httpbin.org service
#
# https://httpbin.org/
#
import requests
# In the URL query string, HTTP GET
url = "https://httpbin.org/get?name=mike&age=45"
response = requests.get(url)
# httpbin echoes the request back; .json() parses that echo into a dict.
response.json()
# same example but don't create the querystring by hand. use a dict
querystring = { 'name' : 'mike', 'age' : 45 }
url = "http://httpbin.org/get"
# requests url-encodes the dict and appends it: ?name=mike&age=45
response = requests.get(url, params = querystring)
response.json()
# +
# make a request, adding data to the header
# NOTE: all header values MUST be strings!!! AND they are case-insensitive as per the HTTP protocol spec.
header = { 'api-key' : 'testing', 'id' : '345876' }
url = "http://httpbin.org/get"
response = requests.get(url, headers = header)
response.json()
# -
# here's a combination of querystring plus headers:
querystring = { 'name' : 'mike', 'age' : 45 }
header = { 'api-key' : 'demo'}
url = "http://httpbin.org/get"
response = requests.get(url, params = querystring, headers = header)
response.json()
# here's an example of a post
# A raw string body is sent verbatim; httpbin reports it under "data".
payload = "this is a lot of data.this is a lot of data.this is a lot of data.this is a lot of data.this is a lot of data.this is a lot of data.this is a lot of data."
url = "http://httpbin.org/post"
response = requests.post(url, data = payload)
response.json()
# here's another post, with a python dict, because there are key-values the post uses form.
# A dict body is form-encoded; httpbin reports it under "form".
person = { 'name' : 'Mike', 'age' : 45, 'status' : 'married' }
url = "http://httpbin.org/post"
response = requests.post(url, data = person)
response.json()
# this one uses a POST, payload, querystring and headers to show these can all be combined!
person = { 'name' : 'Mike', 'age' : 45, 'status' : 'married' }
header = { 'api-key' : '32871549', 'user-agent' : 'demo' }
querystring = { 'id' : 1 }
url = "http://httpbin.org/post"
response = requests.post(url, data = person, params = querystring, headers=header )
response.json()
| lessons/10-http/WMC2-Sending-Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import xarray as xr
import cartopy.crs as ccrs
from erddapy import ERDDAP
import netCDF4
# +
# try both arctic and west coast data, same results
#url = 'https://ferret.pmel.noaa.gov/pmel/erddap/tabledap/saildrone_arctic_data'
url = 'https://ferret.pmel.noaa.gov/pmel/erddap/tabledap/saildrone_west_coast_survey_2018'
#url = 'https://ferret.pmel.noaa.gov/pmel/erddap/tabledap/saildrone_west_coast_survey_2018.nc?trajectory%2Clatitude%2Clongitude%2CSOG%2CCOG%2CHDG%2CHDG_WING%2CROLL%2CPITCH%2CWING_ANGLE%2CBARO_PRES_MEAN%2CBARO_PRES_STDDEV%2CTEMP_AIR_MEAN%2CTEMP_AIR_STDDEV%2CRH_MEAN%2CRH_STDDEV%2CPAR_AIR_MEAN%2CPAR_AIR_STDDEV%2CTEMP_IR_UNCOR_MEAN%2CTEMP_IR_UNCOR_STDDEV%2CUWND_MEAN%2CUWND_STDDEV%2CVWND_MEAN%2CVWND_STDDEV%2CWWND_MEAN%2CWWND_STDDEV%2CGUST_WND_MEAN%2CGUST_WND_STDDEV%2CTEMP_CTD_MEAN%2CTEMP_CTD_STDDEV%2CCOND_MEAN%2CCOND_STDDEV%2CSAL_MEAN%2CSAL_STDDEV%2CO2_RBR_CONC_MEAN%2CO2_RBR_CONC_STDDEV%2CO2_RBR_SAT_MEAN%2CO2_RBR_SAT_STDDEV%2CTEMP_O2_RBR_MEAN%2CTEMP_O2_RBR_STDDEV%2CO2_AANDERAA_CONC_UNCOR_MEAN%2CO2_AANDERAA_CONC_UNCOR_STDDEV%2CO2_AANDERAA_SAT_MEAN%2CO2_AANDERAA_SAT_STDDEV%2CTEMP_O2_AANDERAA_MEAN%2CTEMP_O2_AANDERAA_STDDEV%2CCHLOR_MEAN%2CCHLOR_STDDEV%2Ctime%2Cwind_speed%2Cwind_dir%2Cdist_partner%2CBARO_PRES_serial_number%2CAT_RH_serial_number%2CPAR_serial_number%2CTEMP_IR_serial_number%2CWIND_serial_number%2CRBR_serial_number%2CAANDERAA_serial_number&time%3E=2018-11-20T00%3A00%3A00Z&time%3C=2018-11-27T19%3A59%3A00Z'
#url = 'https://ferret.pmel.noaa.gov/pmel/erddap/tabledap/saildrone_west_coast_survey_2018.ncCF?trajectory%2Clatitude%2Clongitude%2CSOG%2CCOG%2CHDG%2CHDG_WING%2CROLL%2CPITCH%2CWING_ANGLE%2CBARO_PRES_MEAN%2CBARO_PRES_STDDEV%2CTEMP_AIR_MEAN%2CTEMP_AIR_STDDEV%2CRH_MEAN%2CRH_STDDEV%2CPAR_AIR_MEAN%2CPAR_AIR_STDDEV%2CTEMP_IR_UNCOR_MEAN%2CTEMP_IR_UNCOR_STDDEV%2CUWND_MEAN%2CUWND_STDDEV%2CVWND_MEAN%2CVWND_STDDEV%2CWWND_MEAN%2CWWND_STDDEV%2CGUST_WND_MEAN%2CGUST_WND_STDDEV%2CTEMP_CTD_MEAN%2CTEMP_CTD_STDDEV%2CCOND_MEAN%2CCOND_STDDEV%2CSAL_MEAN%2CSAL_STDDEV%2CO2_RBR_CONC_MEAN%2CO2_RBR_CONC_STDDEV%2CO2_RBR_SAT_MEAN%2CO2_RBR_SAT_STDDEV%2CTEMP_O2_RBR_MEAN%2CTEMP_O2_RBR_STDDEV%2CO2_AANDERAA_CONC_UNCOR_MEAN%2CO2_AANDERAA_CONC_UNCOR_STDDEV%2CO2_AANDERAA_SAT_MEAN%2CO2_AANDERAA_SAT_STDDEV%2CTEMP_O2_AANDERAA_MEAN%2CTEMP_O2_AANDERAA_STDDEV%2CCHLOR_MEAN%2CCHLOR_STDDEV%2Ctime%2Cwind_speed%2Cwind_dir%2Cdist_partner%2CBARO_PRES_serial_number%2CAT_RH_serial_number%2CPAR_serial_number%2CTEMP_IR_serial_number%2CWIND_serial_number%2CRBR_serial_number%2CAANDERAA_serial_number&time%3E=2018-11-20T00%3A00%3A00Z&time%3C=2018-11-27T19%3A59%3A00Z'
#url = 'F:/data/cruise_data/saildrone/noaa_erddap/saildrone_west_coast_survey_2018_2506_7567_f05c.nc'
#server = "https://ferret.pmel.noaa.gov/pmel/erddap"
#e = ERDDAP(server=server, protocol="tabledap")
#e.dataset_id = "saildrone_arctic_data"
#url = e.get_download_url(response="opendap",)
#print(url)
#url = 'F:/data/cruise_data/saildrone/noaa_erddap/saildrone_arctic_data_6c6a_108d_ff70.nc'
#ds_usv = xr.open_dataset(url)
#ds_usv.close()
#ds_usv = ds_usv.isel(trajectory=0).swap_dims({'obs':'time'}).rename({'longitude':'lon','latitude':'lat','SAL_MEAN':'salinity'})
#ds_usv
from netCDF4 import Dataset
# Open the ERDDAP tabledap endpoint directly with netCDF4 (remote access).
nc = Dataset(url)
# -
# Pull the working arrays out of the remote dataset.  The ERDDAP response
# prefixes every variable with "s." (the sequence name).
lat = nc['s.latitude'][:]
lon = nc['s.longitude'][:]  # fix: was reading 's.latitude' into lon (copy-paste bug)
traj = nc['s.trajectory'][:]
sal = nc['s.SAL_MEAN'][:]
traj
# NOTE(review): ds_usv is only created in the commented-out xarray code path
# above; with the netCDF4 path active this cell raises NameError — confirm
# which loading path is meant to be live.
plt.scatter(ds_usv.longitude,ds_usv.latitude,c=ds_usv.TEMP_AIR_MEAN)
# # The first issue is that all the variables are s.name
# #### I think this is something to do with how the netcdf file is read but I've never seen this before, ever, and I read a lot of different files. So, just so I can try to look at the data, I'll create some new variables without the '.' in them and put some data in there to look at
# Alias the "s."-prefixed variables to plain names for easier access.
ds_usv['lat']=ds_usv['s.latitude']
ds_usv['lon']=ds_usv['s.longitude']
ds_usv['time']=ds_usv['s.time']
ds_usv['trajectory']=ds_usv['s.trajectory']
ds_usv['SAL_MEAN']=ds_usv['s.SAL_MEAN']
ds_usv['TEMP_CTD_MEAN']=ds_usv['s.TEMP_CTD_MEAN']
# Make 'time' the record dimension (replaces the generic 's' obs dimension).
ds_usv = ds_usv.swap_dims({'s':'time'})
# # when I try to do anything, even just look at the data it gives me a error I've never seen before and suggests that I try to load the data
# Force the lazily-loaded remote data into memory.
ds_usv.load()
ds_usv['s.trajectory'].size
ds_usv['s.trajectory'][0:5]
# +
# but only the first 6 have any data, if you try to print out after that
# it gives an error.
#what should be there is either trajectory is another dimension (like other saildrone files)
# or trajectory should have same length as other arrays and just an id in it.
# -
# # used this code to figure out where the data goes bad
#WARNING THIS TAKES A WHILE TO RUN
ilen = ds_usv.SAL_MEAN.size
print(ilen)
# All-NaN scratch array the same length as the salinity record.
tem = np.arange(ilen)*np.nan
# NOTE(review): the tail-reset and print run on every iteration; the loop is
# apparently expected to die with a read error at the first bad record, so the
# last printed i marks the last readable point — confirm this is the intent.
for i in range(ilen): #787066):
    if ds_usv.SAL_MEAN[i]>20:
        tem[i]=ds_usv.SAL_MEAN[i]
    tem[i:]=np.nan
    print('this is last point it was able to read correction:', i)
ibad = i-1 #776100
# # subset the data & plot it
#there is something bad in the data file above 776100 so subset the data to just the good part
ds_usv2 = ds_usv.isel(time=slice(None,ibad))
# Deep-copy the plotting columns so they no longer alias the dataset.
xlon = ds_usv2.lon.copy(deep=True)
xlat = ds_usv2.lat.copy(deep=True)
salinity = ds_usv2.SAL_MEAN.copy(deep=True)
# Scatter the cruise track, colored by salinity, on a PlateCarree map.
ax = plt.axes(projection=ccrs.PlateCarree())
cs1 = ax.scatter(xlon, xlat, s=3.0, c=salinity, edgecolor='none', cmap='jet',vmin=32,vmax=34.35)
ax.coastlines()
# US west-coast bounding box (lon -130..-114, lat 30..52).
x1,x2,y1,y2 = -130,-114,30,52
ax.set_xlim(x1,x2)
ax.set_ylim(y1,y2)
ax.set_xticks(np.arange(x1,x2,4))
ax.set_yticks(np.arange(y1,y2,5))
cax = plt.colorbar(cs1)
cax.set_label('Salinity (psu)')
fig_fname = 'C:/Users/gentemann/Google Drive/f_drive/docs/projects/SSS/figures/wcoast_location2.png'
plt.savefig(fig_fname, transparent=False, format='png')
| Read NOAA Saildrone data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# 10 rows, 2 feature columns of random ints in [0, 100).
data = np.random.randint(0, 100, (10, 2))
data
# NOTE(review): "scalar_model" is presumably a typo for "scaler_model";
# left as-is because later cells reference this name.
scalar_model = MinMaxScaler()
# fit() learns per-column min/max; transform() rescales each column to [0, 1].
scalar_model.fit(data)
scalar_model.transform(data)
data
# fit_transform() does both steps in one call.
result = scalar_model.fit_transform(data)
result
import pandas as pd
# Toy frame: three feature columns plus a label column.
data = pd.DataFrame(data=np.random.randint(0,101, (50,4)), columns=['f1', 'f2', 'f3', 'label'])
data
data.head()
x = data[['f1', 'f2', 'f3']]
y = data['label']
x.head()
from sklearn.model_selection import train_test_split
# Hold out 30% of the rows for testing.
X_train, X_test, y_train, y_test = train_test_split(x,y, test_size = 0.3)
X_train.shape
X_test.shape
| study_python/tensorflow/workspace/SciKit-learn/01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Parallelization
# When programming custom algorithms in python, it can happen that our code becomes slow because we run a couple of nested for-loops. If the inner loops do not depend on each other, code can be parallelized and sped up. Note: we are parallelizing code on a central processing unit (CPU); do not mix this up with GPU-acceleration, which uses graphics processing units (GPUs).
import time
import numpy as np
from functools import partial
import timeit
import matplotlib.pyplot as plt
import platform
# We start with an algorithm that does something with an image at given pixel coordinates
def slow_thing(image, x, y):
    """Busy-work kernel: burn CPU time, then store a value at image[x, y].

    The nested loops compute 100_000*x + 1_000*y the slow way on purpose;
    this function exists purely as a per-pixel timing workload.
    """
    total = 0
    for _ in range(1000):
        for _ in range(100):
            total += x
        total += y
    image[x, y] = total
# 10x10 is deliberately tiny — slow_thing() runs once per pixel below.
image = np.zeros((10, 10))
# We now use [timeit](https://docs.python.org/3/library/timeit.html) to measure how long the operation takes for processing a single pixel.
# %timeit slow_thing(image, 4, 5)
# We now define the operation on the whole image
def process_image(image):
    """Apply slow_thing() to every pixel of *image*, serially.

    Fix: the original iterated ``range(image.shape[1])`` for BOTH axes,
    which is only correct for square images; rows must use shape[0].
    """
    for x in range(image.shape[0]):      # rows
        for y in range(image.shape[1]):  # columns
            slow_thing(image, x, y)
# We measure the time of this function as mentioned above
# %timeit process_image(image)
# This function is quite slow and parallelization may make sense.
# ## Parallelization using joblib.Parallel
# A simple and straightforward approach for parallelization is using [`joblib.Parallel`](https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html) and `joblib.delayed`.
from joblib import Parallel, delayed, cpu_count
# Note the reverse writing of the for loops in the following block. The term `delayed(slow_thing)(image, x, y)` is technically a function call that is not executed. Later, when the return value of this call is actually needed, the actual execution will happen. See [dask delayed](https://docs.dask.org/en/stable/delayed.html) for details.
def process_image_parallel(image):
    """Apply slow_thing() to every pixel, fanning the calls out over all cores.

    Fix: rows now iterate shape[0] and columns shape[1] (the original mixed
    the axes, which is only correct for square images).

    NOTE(review): with joblib's default "loky" process backend the workers
    mutate *copies* of ``image``, so the writes are not visible in the
    parent.  Fine for this timing demo; do not rely on the result values.
    """
    Parallel(n_jobs=-1)(delayed(slow_thing)(image, x, y)
                        for x in range(image.shape[0])
                        for y in range(image.shape[1]))
# %timeit process_image_parallel(image)
# A speedup of 7 is not bad. The `n_jobs=-1` implies that all compute units / threads are used. We can also print out how many compute cores were used:
cpu_count()  # number of CPUs joblib can schedule onto
# For documentation purposes, we can also print out on what kind of CPU that algorithm was executed. This string might be more or less informative depending on what operating system / computer we are executing this notebook.
platform.processor()  # CPU description string (may be empty on some OSes)
# ## Benchmarking execution time
# In image processing, it is very common that execution time of algorithms shows different patterns depending on image size. We will now benchmark the algorithm above and see how it performs on differently sized images.
# To bind a function to benchmark to a given image without executing it, we are using the [partial](https://docs.python.org/3/library/functools.html#functools.partial) pattern.
def benchmark(target_function):
    """Time *target_function* on square images of increasing size.

    Runs the function 10 times per size on zero images of 10x10 .. 40x40
    and returns an (n_sizes, 2) array of [size, seconds] rows.
    """
    measurements = []
    for edge in np.arange(1, 5) * 10:
        print("Size", edge)
        # fresh zero image per size so runs do not influence each other
        frame = np.zeros((edge, edge))
        # freeze the image argument so timeit can call the function bare
        runnable = partial(target_function, frame)
        elapsed = timeit.timeit(runnable, number=10)
        print("time", elapsed, "s")
        measurements.append([edge, elapsed])
    return np.asarray(measurements)
# CAUTION: these re-run the slow per-pixel workload many times; expect minutes.
print("Benchmarking normal")
benchmark_data_normal = benchmark(process_image)
print("Benchmarking parallel")
benchmark_data_parallel = benchmark(process_image_parallel)
# x-axis is the pixel count (edge length squared), so a straight line means
# compute time scales linearly with the number of pixels.
plt.scatter(benchmark_data_normal[:,0] ** 2, benchmark_data_normal[:,1])
plt.scatter(benchmark_data_parallel[:,0] ** 2, benchmark_data_parallel[:,1])
plt.legend(["normal", "parallel"])
plt.xlabel("Image size in pixels")
plt.ylabel("Compute time in s")
plt.show()
# If we see this pattern, we speak of a _linear_ relationship between data size and compute time. Computer scientists use the [O notation](https://en.wikipedia.org/wiki/Big_O_notation) to describe the [complexity](https://en.wikipedia.org/wiki/Computational_complexity) of algorithms. This algorithm is `O(n)`, where `n` represents the number of pixels in this case.
#
# Let's take a look at another algorithm.
def silly_sum(image):
    """Deliberately expensive reduction used to demonstrate super-linear scaling.

    NOTE(review): `sum` shadows the builtin, and `i` iterates shape[1] while
    `j` iterates shape[0] — only safe for the square images used here.
    Left untouched because the point of this block is its timing behavior.
    """
    # Silly algorithm for wasting compute time
    sum = 0
    for i in range(image.shape[1]):
        for j in range(image.shape[0]):
            for k in range(image.shape[0]):
                for l in range(image.shape[0]):
                    sum = sum + image[i,j] - k + l
            sum = sum + i
            image[i, j] = sum / image.shape[1] / image.shape[0]
benchmark_data_silly_sum = benchmark(silly_sum)  # four nested loops -> grows fast
# Same pixel-count x-axis as before; the curve is visibly non-linear here.
plt.scatter(benchmark_data_silly_sum[:,0] ** 2, benchmark_data_silly_sum[:,1])
plt.legend(["normal"])
plt.xlabel("Image size in pixels")
plt.ylabel("Compute time in s")
plt.show()
# This algorithm is more strongly dependent on image size; the plot shows approximately [quadratic](https://en.wikipedia.org/wiki/Time_complexity#Table_of_common_time_complexities) complexity. That means if the data size doubles, the compute time multiplies by four. The algorithm's O-notation is `O(n^2)`. We could presume that a similar algorithm applied in 3D has cubic complexity, `O(n^3)`. If such algorithms are bottlenecks in your science, parallelization and GPU-acceleration make a lot of sense.
# ## Code optimization using numba
# In case the code we perform is simple and just uses standard python, numpy etc. function, we can use a just-in-time (JIT) compiler, e.g. provided by [numba](https://numba.pydata.org/) to speedup the code.
# +
from numba import jit
@jit
def process_image_compiled(image):
    # NOTE(review): the inner loop is range(1000) here vs range(100) in
    # slow_thing, so this version does ~10x the per-pixel work of the
    # earlier ones — confirm whether the timings are meant to be comparable.
    # It also iterates shape[1] for both axes (only valid for square
    # images), matching the original process_image.
    for x in range(image.shape[1]):
        for y in range(image.shape[1]):
            # Silly algorithm for wasting compute time
            sum = 0
            for i in range(1000):
                for j in range(1000):
                    sum = sum + x
                sum = sum + y
            image[x, y] = sum
# -
# %timeit process_image_compiled(image)
# ## Quality assurance
# Note that in this section we only measured the compute time of algorithms. We did not determine if the differently optimized versions of the algorithms produce the same result. Quality assurance is good scientific practice. The same is relevant in the context of GPU-acceleration and for example described in detail [here](https://arxiv.org/pdf/2008.11799).
# ## Exercise: Parallelization
# To practice parallelization, parallelize the function `silly_sum` shown above and plot its performance in comparison with the non-parallel version.
| docs/03_advanced_python/parallelization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.5.0
# language: julia
# name: julia-0.5
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Basic Introduction to Julia
#
# This quick introduction assumes that you have basic knowledge of some scripting language and provides an example of the Julia syntax. So before we explain anything, let's just treat it like a scripting language, take a head-first dive into Julia, and see what happens.
#
# You'll notice that, given the right syntax, almost everything will "just work". There will be some peculiarities, and these are the facts which we will study in much more depth. Usually, these oddities/differences from other scripting languages are "the source of Julia's power".
# + [markdown] slideshow={"slide_type": "slide"}
# ### Problems
#
# Time to start using your noggin. Scattered in this document are problems for you to solve using Julia. Many of the details for solving these problems have been covered, some have not. You may need to use some external resources:
#
# http://docs.julialang.org/en/release-0.5/manual/
#
# https://gitter.im/JuliaLang/julia
#
# Solve as many or as few problems as you can during these times. Please work at your own pace, or with others if that's how you're comfortable!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Documentation and "Hunting"
#
# The main source of information is the [Julia Documentation](http://docs.julialang.org/en/latest/manual/). Julia also provides lots of built-in documentation and ways to find out what's going on. The number of tools for "hunting down what's going on / available" is too numerous to explain in full detail here, so instead this will just touch on what's important. For example, the ? gets you to the documentation for a type, function, etc.
# + slideshow={"slide_type": "slide"}
?copy
# + [markdown] slideshow={"slide_type": "slide"}
# To find out what methods are available, we can use the `methods` function. For example, let's see how `+` is defined:
# + slideshow={"slide_type": "fragment"}
methods(+)
# + [markdown] slideshow={"slide_type": "slide"}
# We can inspect a type by finding its fields with `fieldnames`
# + slideshow={"slide_type": "fragment"}
fieldnames(LinSpace)
# + [markdown] slideshow={"slide_type": "slide"}
# and find out which method was used with the `@which` macro:
# + slideshow={"slide_type": "fragment"}
@which copy([1,2,3])
# + [markdown] slideshow={"slide_type": "fragment"}
# Notice that this gives you a link to the source code where the function is defined.
# + [markdown] slideshow={"slide_type": "slide"}
# Lastly, we can find out what type a variable is with the `typeof` function:
# + slideshow={"slide_type": "fragment"}
a = [1;2;3]
typeof(a)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Array Syntax
#
# The array syntax is similar to MATLAB's conventions.
# + slideshow={"slide_type": "fragment"}
a = Vector{Float64}(5) # Create a length 5 Vector (dimension 1 array) of Float64's
a = [1;2;3;4;5] # Create the column vector [1 2 3 4 5]
a = [1 2 3 4] # Create the row vector [1 2 3 4]
a[3] = 2 # Change the third element of a (using linear indexing) to 2
b = Matrix{Float64}(4,2) # Define a Matrix of Float64's of size (4,2)
c = Array{Float64}(4,5,6,7) # Define a (4,5,6,7) array of Float64's
mat = [1 2 3 4
3 4 5 6
4 4 4 6
3 3 3 3] #Define the matrix inline
mat[1,2] = 4 # Set element (1,2) (row 1, column 2) to 4
mat
# + [markdown] slideshow={"slide_type": "slide"}
# Note that, in the console (called the REPL), you can use `;` to suppress the output. In a script this is done automatically. Note that the "value" of an array is its pointer to the memory location. This means that arrays which are set equal affect the same values:
# + slideshow={"slide_type": "fragment"}
a = [1;3;4]
b = a
b[1] = 10
a
# + [markdown] slideshow={"slide_type": "slide"}
# To set an array equal to the values to another array, use copy
# + slideshow={"slide_type": "fragment"}
a = [1;4;5]
b = copy(a)
b[1] = 10
a
# + [markdown] slideshow={"slide_type": "slide"}
# We can also make an array of a similar size and shape via the function `similar`, or make an array of zeros/ones with `zeros` or `ones` respectively:
# + slideshow={"slide_type": "fragment"}
c = similar(a)
d = zeros(a)
e = ones(a)
println(c); println(d); println(e)
# + [markdown] slideshow={"slide_type": "slide"}
# Note that arrays can be indexed by arrays:
# + slideshow={"slide_type": "fragment"}
a[1:2]
# + [markdown] slideshow={"slide_type": "slide"}
# Arrays can be of any type, specified by the type parameter. One interesting thing is that this means that arrays can be of arrays:
# + slideshow={"slide_type": "fragment"}
a = Vector{Vector{Float64}}(3)
a[1] = [1;2;3]
a[2] = [1;2]
a[3] = [3;4;5]
a
# + [markdown] slideshow={"slide_type": "slide"}
# ---------------------
#
# #### Question 1
#
# Can you explain the following behavior? Julia's community values consistency of the rules, so all of the behavior is deducible from simple rules. (Hint: I have noted all of the rules involved here).
# + slideshow={"slide_type": "fragment"}
b = a
b[1] = [1;4;5]
a
# + [markdown] slideshow={"slide_type": "slide"}
#
# ----------------------------------------
#
# To fix this, there is a recursive copy function: `deepcopy`
# + slideshow={"slide_type": "fragment"}
b = deepcopy(a)
b[1] = [1;2;3]
a
# + [markdown] slideshow={"slide_type": "slide"}
# For high performance, Julia provides mutating functions. These functions change the input values that are passed in, instead of returning a new value. By convention, mutating functions tend to be defined with a `!` at the end and tend to mutate their first argument. An example of a mutating function in `scale!` which scales an array by a scalar (or array)
# + slideshow={"slide_type": "fragment"}
a = [1;6;8]
scale!(a,2) # a changes
# + [markdown] slideshow={"slide_type": "fragment"}
# The purpose of mutating functions is that they allow one to reduce the number of memory allocations, which is crucial for achieving high performance.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Control Flow
#
# Control flow in Julia is pretty standard. You have your basic for and while loops, and your if statements. There's more in the documentation.
# + slideshow={"slide_type": "fragment"}
for i=1:5 #for i goes from 1 to 5
println(i)
end
t = 0
while t<5
println(t)
t+=1 # t = t + 1
end
school = :UCI
if school==:UCI
println("ZotZotZot")
else
println("Not even worth discussing.")
end
# + [markdown] slideshow={"slide_type": "slide"}
# One interesting feature about Julia control flow is that we can write multiple loops in one line:
# + slideshow={"slide_type": "fragment"}
for i=1:2,j=2:4
println(i*j)
end
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problems
#
# Try problems 1-5 in the Basic Problems.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Function Syntax
# + slideshow={"slide_type": "fragment"}
f(x,y) = 2x+y # Create an inline function
# + slideshow={"slide_type": "fragment"}
f(1,2) # Call the function
# + slideshow={"slide_type": "fragment"}
function f(x)
x+2
end # Long form definition
# + [markdown] slideshow={"slide_type": "slide"}
# By default, Julia functions return the last value computed within them.
# + slideshow={"slide_type": "fragment"}
f(2)
# + [markdown] slideshow={"slide_type": "slide"}
# A key feature of Julia is multiple dispatch. Notice here that there is "one function", `f`, with two methods. Methods are the actionable parts of a function. Here, there is one method defined as `(::Any,::Any)` and `(::Any)`, meaning that if you give `f` two values then it will call the first method, and if you give it one value then it will call the second method.
#
# Multiple dispatch works on types. To define a dispatch on a type, use the `::Type` signifier:
# + slideshow={"slide_type": "fragment"}
f(x::Int,y::Int) = 3x+2y
# + [markdown] slideshow={"slide_type": "slide"}
# Julia will dispatch onto the strictest acceptable type signature.
# + slideshow={"slide_type": "fragment"}
f(2,3) # 3x+2y
# + slideshow={"slide_type": "fragment"}
f(2.0,3) # 2x+y since 2.0 is not an Int
# + [markdown] slideshow={"slide_type": "slide"}
# Types in signatures can be parametric. For example, we can define a method for "two values are passed in, both Numbers and having the same type". Note that `<:` means "a subtype of".
# + slideshow={"slide_type": "fragment"}
f{T<:Number}(x::T,y::T) = 4x+10y
# + slideshow={"slide_type": "fragment"}
f(2,3) # 3x+2y since (::Int,::Int) is stricter
# + slideshow={"slide_type": "fragment"}
f(2.0,3.0) # 4x+10y
# + [markdown] slideshow={"slide_type": "slide"}
# Note that type parameterizations can have as many types as possible, and do not need to declare a supertype. For example, we can say that there is an `x` which must be a Number, while `y` and `z` must match types:
# + slideshow={"slide_type": "fragment"}
f{T<:Number,T2}(x::T,y::T2,z::T2) = 5x + 5y + 5z
# + [markdown] slideshow={"slide_type": "fragment"}
# We will go into more depth on multiple dispatch later since this is the core design feature of Julia. The key feature is that Julia functions specialize on the types of their arguments. This means that `f` is a separately compiled function for each method (and for parametric types, each possible method). The first time it is called it will compile.
# + [markdown] slideshow={"slide_type": "slide"}
# -------------------------
#
# #### Question 2
#
# Can you explain these timings?
# + slideshow={"slide_type": "fragment"}
f(x,y,z,w) = x+y+z+w
@time f(1,1,1,1)
@time f(1,1,1,1)
@time f(1,1,1,1)
@time f(1,1,1,1.0)
@time f(1,1,1,1.0)
# + [markdown] slideshow={"slide_type": "fragment"}
#
# -------------------------
# + [markdown] slideshow={"slide_type": "slide"}
# Note that functions can also feature optional arguments:
# + slideshow={"slide_type": "fragment"}
# `z` is a keyword argument (it follows the `;`) and defaults to 0.
function test_function(x,y;z=0) #z is an optional argument
    if z==0
        return x+y,x*y #Return a tuple
    else
        return x*y*z,x+y+z #Return a different tuple
        #whitespace is optional
    end #End if statement
end #End function definition
# + [markdown] slideshow={"slide_type": "fragment"}
# Here, if z is not specified, then it's 0.
# + slideshow={"slide_type": "fragment"}
x,y = test_function(1,2)
# + slideshow={"slide_type": "fragment"}
x,y = test_function(1,2;z=3)
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice that we also featured multiple return values.
# + slideshow={"slide_type": "fragment"}
println(x); println(y)
# + [markdown] slideshow={"slide_type": "slide"}
# The return type for multiple return values is a Tuple. The syntax for a tuple is `(x,y,z,...)` or inside of functions you can use the shorthand `x,y,z,...` as shown.
#
# Note that functions in Julia are "first-class". This means that functions are just a type themselves. Therefore functions can make functions, you can store functions as variables, pass them as variables, etc. For example:
# + slideshow={"slide_type": "fragment"}
# Demonstrates first-class functions: the inner `test` is a closure over `y`.
function function_playtime(x) # returns a value and the inner closure
    y = 2+x
    function test()
        2y # y is defined in the previous scope, so it's available here
    end
    z = test() * test()
    return z,test
end #End function definition
z,test = function_playtime(2)
# + slideshow={"slide_type": "fragment"}
test()
# + [markdown] slideshow={"slide_type": "fragment"}
# Notice that `test()` does not get passed in `y` but knows what `y` is. This is due to the function scoping rules: an inner function can know the variables defined in the same scope as the function. This rule is recursive, leading us to the conclusion that the top level scope is global. Yes, that means
# + slideshow={"slide_type": "fragment"}
a = 2
# + [markdown] slideshow={"slide_type": "fragment"}
# defines a global variable. We will go into more detail on this.
# + [markdown] slideshow={"slide_type": "slide"}
# Lastly we show the anonymous function syntax. This allows you to define a function inline.
# + slideshow={"slide_type": "fragment"}
g = (x,y) -> 2x+y
# + [markdown] slideshow={"slide_type": "fragment"}
# Unlike named functions, `g` is simply a function in a variable and can be overwritten at any time:
# + slideshow={"slide_type": "fragment"}
g = (x) -> 2x
# + [markdown] slideshow={"slide_type": "fragment"}
# An anonymous function cannot have more than 1 dispatch. However, as of v0.5, they are compiled and thus do not have any performance disadvantages from named functions.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Type Declaration Syntax
#
# A type is what in many other languages is an "object". If that is a foreign concept, think of a type as a thing which has named components. A type is the idea for what the thing is, while an instantiation of the type is a specific one. For example, you can think of a car as having a make and a model. So that means a Toyota RAV4 is an instantiation of the car type.
#
# In Julia, we would define the car type as follows:
# + slideshow={"slide_type": "fragment"}
type Car
make
model
end
# + [markdown] slideshow={"slide_type": "fragment"}
# We could then make the instance of a car as follows:
# + slideshow={"slide_type": "fragment"}
mycar = Car("Toyota","Rav4")
# + [markdown] slideshow={"slide_type": "fragment"}
# Here I introduced the string syntax for Julia which uses "..." (like most other languages, I'm glaring at you MATLAB). I can grab the "fields" of my type using the `.` syntax:
# + slideshow={"slide_type": "fragment"}
mycar.make
# + [markdown] slideshow={"slide_type": "slide"}
# To "enhance Julia's performance", one usually likes to make the typing stricter. For example, we can define a WorkshopParticipant (notice the convention for types is capital letters, CamelCase) as having a name and a field. The name will be a string and the field will be a Symbol type, (defined by :Symbol, which we will go into plenty more detail later).
# + slideshow={"slide_type": "fragment"}
type WorkshopParticipant
name::String
field::Symbol
end
tony = WorkshopParticipant("Tony",:physics)
# + [markdown] slideshow={"slide_type": "slide"}
# As with functions, types can be set "parametrically". For example, we can have an StaffMember have a name and a field, but also an age. We can allow this age to be any Number type as follows:
# + slideshow={"slide_type": "fragment"}
# Parametric type: `age` may be any Number subtype T, fixed per instance.
type StaffMember{T<:Number}
    name::String
    field::Symbol
    age::T
end
ter = StaffMember("Terry",:football,17)
# + [markdown] slideshow={"slide_type": "slide"}
# The rules for parametric typing is the same as for functions. Note that most of Julia's types, like Float64 and Int, are natively defined in Julia in this manner. This means that there's no limit for user defined types, only your imagination. Indeed, many of Julia's features first start out as a prototyping package before it's ever moved into Base (the Julia library that ships as the Base module in every installation).
#
# Lastly, there exist abstract types. These types cannot be instantiated but are used to build the type hierarchy. You've already seen one abstract type, Number. We can define one for Person using the Abstract keyword
# + slideshow={"slide_type": "fragment"}
abstract Person
# + [markdown] slideshow={"slide_type": "fragment"}
# Then we can set types as a subtype of person
# + slideshow={"slide_type": "fragment"}
type Student <: Person
name
grade
end
# + [markdown] slideshow={"slide_type": "fragment"}
# You can define type hierarchies on abstract types. See the beautiful explanation at: http://docs.julialang.org/en/release-0.5/manual/types/#abstract-types
# + slideshow={"slide_type": "fragment"}
abstract AbstractStudent <: Person
# + [markdown] slideshow={"slide_type": "slide"}
# Another "version" of type is `immutable`. When one uses `immutable`, the fields of the type cannot be changed. However, Julia will automatically stack allocate immutable types, whereas standard types are heap allocated. If this is unfamiliar terminology, then think of this as meaning that immutable types are able to be stored closer to the CPU and have less cost for memory access (this is a detail not present in many scripting languages). Many things like Julia's built-in Number types are defined as `immutable` in order to give good performance.
# + slideshow={"slide_type": "fragment"}
immutable Field
name
school
end
ds = Field(:DataScience,[:PhysicalScience;:ComputerScience])
# + [markdown] slideshow={"slide_type": "slide"}
# ----
# #### Question 3
#
# Can you explain this interesting quirk? Thus Field is immutable, meaning that `ds.name` and `ds.school` cannot be changed:
# + slideshow={"slide_type": "fragment"}
ds.name = :ComputationalStatistics
# + [markdown] slideshow={"slide_type": "fragment"}
# However, the following is allowed:
# + slideshow={"slide_type": "fragment"}
push!(ds.school,:BiologicalScience)
ds.school
# + [markdown] slideshow={"slide_type": "fragment"}
# (Hint: recall that an array is not the values itself, but a pointer to the memory of the values)
#
# -----------------------
# + [markdown] slideshow={"slide_type": "slide"}
# One important detail in Julia is that everything is a type (and every piece of code is an Expression type, more on this later). Thus functions are also types, which we can access the fields of. Not only is everything compiled down to C, but all of the "C parts" are always accessible. For example, we can, if we so choose, get a function pointer:
# + slideshow={"slide_type": "fragment"}
foo(x) = 2x
first(methods(foo)).lambda_template.fptr
# + [markdown] slideshow={"slide_type": "slide"}
# ------------------------------
#
# ## Problems
#
# Try the new few problems in the Basic Problems
# + [markdown] slideshow={"slide_type": "slide"}
# ## Some Basic Types
#
# Julia provides many basic types. Indeed, you will come to know Julia as a system of multiple dispatch on types, meaning that the interaction of types with functions is core to the design.
#
# ### Lazy Iterator Types
#
# While MATLAB or Python has easy functions for building arrays, Julia tends to side-step the actual "array" part with specially made types. One such example are ranges. To define a range, use the `start:stepsize:end` syntax. For example:
# + slideshow={"slide_type": "fragment"}
a = 1:5
println(a)
b = 1:2:10
println(b)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can use them like any array. For example:
# + slideshow={"slide_type": "fragment"}
println(a[2]); println(b[3])
# + [markdown] slideshow={"slide_type": "slide"}
# But what is `b`?
# + slideshow={"slide_type": "fragment"}
println(typeof(b))
# + [markdown] slideshow={"slide_type": "fragment"}
# `b` isn't an array, it's a StepRange. A StepRange has the ability to act like an array using its fields:
# + slideshow={"slide_type": "fragment"}
fieldnames(StepRange)
# + [markdown] slideshow={"slide_type": "slide"}
# Note that at any time we can get the array from these kinds of type via the `collect` function:
# + slideshow={"slide_type": "fragment"}
c = collect(a)
# + [markdown] slideshow={"slide_type": "slide"}
# The reason why lazy iterator types are preferred is that they do not do the computations until it's absolutely necessary, and they take up much less space. We can check this with `@time`:
# + slideshow={"slide_type": "slide"}
@time a = 1:100000
@time a = 1:100
@time b = collect(1:100000);
# + [markdown] slideshow={"slide_type": "fragment"}
# Notice that the amount of time the range takes is much shorter. This is mostly because there is a lot less memory allocation needed: only a `StepRange` is built, and all that holds is the three numbers. However, `b` has to hold `100000` numbers, leading to the huge difference.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Dictionaries
#
# Another common type is the Dictionary. It allows you to access (key,value) pairs in a named manner. For example:
# + slideshow={"slide_type": "fragment"}
d = Dict(:test=>2,"silly"=>:suit)
println(d[:test])
println(d["silly"])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Tuples
#
# Tuples are immutable arrays. That means they can't be changed. However, they are super fast. They are made with the `(x,y,z,...)` syntax and are the standard return type of functions which return more than one object.
# + slideshow={"slide_type": "fragment"}
tup = (2.,3) # Don't have to match types
x,y = (3.0,"hi") # Can separate a tuple to multiple variables
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problems
#
# Try problems 8-11 in the Basic Problems
# + [markdown] slideshow={"slide_type": "slide"}
# ## Metaprogramming
#
# Metaprogramming is a huge feature of Julia. The key idea is that every statement in Julia is of the type `Expression`. Julia operators by building an Abstract Syntax Tree (AST) from the Expressions. You've already been exposed to this a little bit: a `Symbol` (like `:PhysicalSciences` is not a string because it is part of the AST, and thus is part of the parsing/expression structure. One interesting thing is that symbol comparisons are O(1) while string comparisons, like always, are O(n)) is part of this, and macros (the weird functions with an `@`) are functions on expressions.
#
# Thus you can think of metaprogramming as "code which takes in code and outputs code". One basic example is the `@time` macro:
# + slideshow={"slide_type": "fragment"}
# A minimal re-implementation of Base's @time macro: it receives the user's
# *expression* `ex` (not its value) and returns new code that times it.
macro my_time(ex)
    return quote
        local t0 = time()   # wall-clock time before evaluation
        local val = $ex     # `$ex` interpolates the user's expression here
        local t1 = time()   # wall-clock time after evaluation
        println("elapsed time: ", t1-t0, " seconds")
        val                 # evaluate to the expression's value, like @time
    end
end
# + [markdown] slideshow={"slide_type": "slide"}
# This takes in an expression `ex`, gets the time before and after evaluation, and prints the elapsed time between (the real time macro also calculates the allocations as seen earlier). Note that `$ex` "interpolates" the expression into the macro. Going into detail on metaprogramming is a large step from standard scripting and will be a later session.
#
# Why macros? One reason is because it lets you define any syntax you want. Since it operates on the expressions themselves, as long as you know how to parse the expression into working code, you can "choose any syntax" to be your syntax. A case study will be shown later. Another reason is because these are done at "parse time" and those are only called once (before the function compilation).
# + [markdown] slideshow={"slide_type": "slide"}
# ## Steps for Julia Parsing and Execution
#
# 1. The AST after parsing <- Macros
# 2. The AST after lowering (@code_typed)
# 3. The AST after type inference and optimization <- Generated Functions (@code_lowered)
# 4. The LLVM IR <- Functions (@code_llvm)
# 5. The assembly code (@code_native)
| Notebooks/BasicIntroduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Double 7's (Short Term Trading Strategies that Work)
#
# 1. The SPY is above its 200-day moving average
# 2. The SPY closes at a X-day low, buy some shares
#    If it sets further lows, buy some more
# 3. If the SPY closes at a X-day high, sell some.
# If it sets further highs, sell some more, etc...
#
# (Scaling in and out)
# +
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *
import pinkfish as pf
import strategy
# format price data
pd.options.display.float_format = '{:0.2f}'.format
# %matplotlib inline
# -
# set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
# Some global data
#symbol = '^GSPC'
symbol = 'SPY'
#symbol = 'DIA'
#symbol = 'QQQ'
#symbol = 'IWM'
#symbol = 'TLT'
#symbol = 'GLD'
#symbol = 'AAPL'
#symbol = 'BBRY'
#symbol = 'GDX'
capital = 10000
start = datetime.datetime(1900, 1, 1)
end = datetime.datetime.now()
# Define high low trade periods
period = 7
# Define max number of positions to scale into
max_positions = 2
# Define the margin multiple
margin = 2
# Run Strategy
s = strategy.Strategy(symbol, capital, start, end, stop_loss_pct=85, margin=margin,
period=period, max_positions=max_positions)
s.run()
# Retrieve log DataFrames
tlog, dbal = s.get_logs()
stats = s.get_stats()
tlog.tail()
dbal.tail()
# Generate strategy stats - display all available stats
pf.print_full(stats)
# Equity curve
# Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats
benchmark = pf.Benchmark(symbol, capital, s.start, s.end)
benchmark.run()
# Plot Equity Curves: Strategy vs Benchmark
pf.plot_equity_curve(dbal, benchmark=benchmark.dbal)
# Plot Trades
pf.plot_trades(dbal, benchmark=benchmark.dbal)
# Bar Graph: Strategy vs Benchmark
df = pf.plot_bar_graph(stats, benchmark.stats)
df
| examples/scaling-in-out/strategy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Demonstration of basic PET capabilities with SIRF: IO and projections
# This demonstration shows how to read images and data, display them. It then
# illustrates how to use an AcquisitionModel to forward and backproject.
#
# This demo is a jupyter notebook, i.e. intended to be run step by step.
# You could export it as a Python file and run it one go, but that might
# make little sense as the figures are not labelled.
# Author: <NAME>
# First version: 8th of September 2016
# Second Version: 17th of May 2018
#
# CCP PETMR Synergistic Image Reconstruction Framework (SIRF).
# Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC.
# Copyright 2015 - 2018 University College London.
#
# This is software developed for the Collaborative Computational
# Project in Positron Emission Tomography and Magnetic Resonance imaging
# (http://www.ccppetmr.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # Initial set-up
#%% make sure figures appears inline and animations works
# %matplotlib notebook
#%% Initial imports etc
import numpy
from numpy.linalg import norm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import os
import sys
import shutil
#%% Use the 'pet' prefix for all STIR-based SIRF functions
# This is done here to explicitly differentiate between SIRF pet functions and
# anything else.
import sirf.STIR as pet
from sirf.Utilities import examples_data_path
# +
#%% First define some handy function definitions
# To make subsequent code cleaner, we have a few functions here. You can ignore
# ignore them when you first see this demo.
# They have (minimal) documentation using Python docstrings such that you
# can do for instance "help(imshow)"
#
# First a function to display an image
def imshow(image, limits, title=''):
    """Display an image with a colourbar, returning the plot handle.
    Arguments:
    image -- a 2D array of numbers
    limits -- colourscale limits as [min,max]. An empty [] uses the full range
    title -- a string for the title of the plot (default "")
    """
    plt.title(title)
    bitmap=plt.imshow(image)
    if len(limits)==0:
        # No limits supplied: use the image's own min/max (full range).
        limits=[image.min(),image.max()]
    plt.clim(limits[0], limits[1])
    plt.colorbar(shrink=.6)
    # Hide axis ticks; the trailing ';' suppresses the value echo in notebooks.
    plt.axis('off');
    return bitmap
def make_positive(image_array):
    """Clamp all negative values in *image_array* to zero, in place.

    The array is modified in place; the same array object is returned
    for convenience.
    """
    numpy.clip(image_array, 0, None, out=image_array)
    return image_array
def make_cylindrical_FOV(image):
    """Truncate a PET image to a cylindrical field of view, in place.

    Arguments:
    image -- a SIRF ImageData object; it is modified in place by the
             STIR truncation processor.
    """
    # Use a local name that does not shadow the built-in `filter`.
    truncate_processor = pet.TruncateToCylinderProcessor()
    truncate_processor.apply(image)
# -
#%% Go to directory with input files
# Adapt this path to your situation (or start everything in the relevant directory)
os.chdir(examples_data_path('PET'))
#%% Copy files to a working folder and change directory to where these files are.
# We do this to avoid cluttering your SIRF files. This way, you can delete
# working_folder and start from scratch.
shutil.rmtree('working_folder/brain',True)
shutil.copytree('brain','working_folder/brain')
os.chdir('working_folder/brain')
# OK. finally done with initial set-up...
# # Basic image manipulations
# Images (like most other things in SIRF) are represented as *objects*, in this case of type `ImageData`.
# In practice, this means that you can only manipulate its data via *methods*.
#
# Image objects contain the actual voxel values, but also information on the number of voxels,
# voxel size, etc. There are methods to get this information.
#
# There are additional methods for other manipulations, such as basic image arithmetic (e.g.,
# you can add image objects).
#%% Read in images
# Here we will read some images provided with the demo using the ImageData class.
# These are in Interfile format. (A text header pointing to a .v file with the binary data).
image = pet.ImageData('emission.hv')
mu_map = pet.ImageData('attenuation.hv')
#%% What is an ImageData?
# Images are represented by objects with several methods. The most important method
# is as_array() which we'll use below.
# Let's see what all the methods are.
help(pet.ImageData)
#%% Use as_array to extract an array of voxel values
# The resulting array as a `numpy` array, as standard in Python.
image_array=image.as_array()
# We can use the standard `numpy` methods on this array, such as getting its `shape` (i.e. dimensions).
print(image_array.shape)
# Whenever we want to do something with the image-values, we have to do it via this array.
# Let's print a voxel-value.
print(image_array[10,40,60])
#%% Manipulate the image data for illustration
# Multiply the data with a factor
image_array *= 0.01
# Stick this new data into the original image object.
# (This will not modify the file content, only the variable in memory.)
image.fill(image_array)
print(image_array[10,40,60])
#%% You can do basic math manipulations with ImageData objects
# So the above lines can be done directly on the `image` object
image *= 0.01
# Let's check
image_array=image.as_array()
print(image_array[10,40,60])
# +
#%% Display the middle slice of the image (which is really a 3D volume)
# We will use our own imshow function (which was defined above) for brevity.
# Get the middle slice number
slice_num = image_array.shape[0]//2
# Create a new figure
plt.figure()
# Display the slice
imshow(image_array[slice_num,:,:,], [], 'emission image');
# -
#%% Some other things to do with ImageData objects
print(image.voxel_sizes())
another_image=image.clone()
an_image_with_fixed_values = image.get_uniform_copy(5)
# # Forward and back projection
# Now we will do some PET projections!
# SIRF uses AcquisitionModel as the object to do forward and back-projections.
# We will create an AcquisitionModel object and then use it to forward project
# our image etc.
#%% Create a SIRF acquisition model
# We will use the ray-tracing matrix here as our simple PET model.
# There is more to the acquisition model, but that's for another demo.
am = pet.AcquisitionModelUsingRayTracingMatrix()
# Ask STIR to use 5 LORs per sinogram-element
am.set_num_tangential_LORs(5);
#%% Specify sinogram dimensions
# We need to say what scanner to use, what dimensions etc.
# You do this by using existing PET data as a 'template'.
# Here, we read a file supplied with the demo as an AcquisitionData object
templ = pet.AcquisitionData('template_sinogram.hs')
# Now set-up our acquisition model with all information that it needs about the data and image.
am.set_up(templ,image);
# The `AcquisitionModel` is now ready for use.
#%% Do a forward projection of our image
# 'forward projection' is the terminology used in PET to simulate the acquisition.
# Input is a SIRF ImageData object (not image_array), output is an AcquisitionData object.
acquired_data=am.forward(image)
#%% Check what methods an AcquisitionData object has
help(acquired_data)
#%% Let's get the Python array
acquisition_array = acquired_data.as_array()
print(acquisition_array.shape)
#%% Display bitmap of the middle sinogram
# AcquisitionData are organised by sinograms, so we need to use the first index
# of the acquisition_array.
plt.figure()
sino_num = acquisition_array.shape[1]//2
imshow(acquisition_array[0,sino_num,:,:,], [], 'Forward projection');
#%% Display some different 'views' in a movie
# If the animation doesn't work, you might have to change your "backend",
# e.g. using the %matplotlib command.
bitmaps=[]
fig=plt.figure()
# views are the second index in the data
num_views=acquisition_array.shape[2]
# first construct all the plots
for view in range(0,num_views,4):
bitmap=plt.imshow(acquisition_array[0,:,view,:,])
plt.clim(0,acquisition_array.max())
plt.axis('off')
bitmaps.append([bitmap])
# Display as animation
ani = animation.ArtistAnimation(fig, bitmaps, interval=100, blit=True, repeat_delay=1000);
#%% Let's do a back-projection
# Backprojection uses the transpose of the forward-projection matrix to
# go from AcquisitionData to an ImageData
backprojected = am.backward(acquired_data)
# let's display a slice
plt.figure()
backprojected_array=backprojected.as_array();
imshow(backprojected_array[slice_num,:,:],[], 'backprojection');
| notebooks/PET/display_and_projection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import division
import numpy as np
import sys
import matplotlib.pyplot as plt
import seaborn.apionly as sns
from composition.analysis.load_sim import load_sim
import composition.analysis.plotting_functions as plotting
from composition.analysis.effective_area import get_effective_area
# %matplotlib inline
# -
sns.set_palette('Paired', 10)
sns.set_color_codes()
# Import ShowerLLH sim reconstructions and cuts to be made
df, cut_dict = load_sim(return_cut_dict=True)
standard_cut_keys = ['reco_exists', 'reco_zenith', 'min_hits', 'IceTopMaxSignalInEdge',
'IceTopMaxSignal', 'IceTopNeighbourMaxSignal', 'StationDensity',
'reco_containment']
selection_mask = np.array([True] * len(df))
for key in standard_cut_keys:
selection_mask *= cut_dict[key]
eff_area, eff_area_error, energy_midpoints = get_effective_area(df, selection_mask)
fig, ax = plt.subplots()
ax.errorbar(energy_midpoints, eff_area, yerr=eff_area_error, marker='.')
ax.grid()
ax.set_xscale('log')
ax.set_ylabel('$\mathrm{Effective \ Area} \ [\mathrm{m^2}]$')
ax.set_xlabel('$\mathrm{E_{MC}}/\mathrm{GeV}$')
ax.axvline(10**6.2, marker='None', linestyle='-.')
plt.show()
standard_cut_keys = ['reco_exists', 'reco_zenith', 'min_hits', 'IceTopMaxSignalInEdge', 'IceTopMaxSignal', 'IceTopNeighbourMaxSignal',
'StationDensity', 'reco_containment', 'min_energy']
labels = ['Reco success', 'Reco zenith', 'NChannel/NStation', 'IceTopMaxSignalInEdge', 'IceTopMaxSignal', 'IceTopNeighbourMaxSignal',
'StationDensity', 'Reco containment', '$\log_{10}(E/GeV) > 6.2$']
# Import ShowerLLH sim reconstructions and cuts to be made
df, cut_dict = load_sim(return_cut_dict=True)
selection_mask = np.array([True]*len(df))
fig, ax = plt.subplots()
eff_area, eff_area_error, energy_midpoints = get_effective_area(df, selection_mask)
ax.errorbar(energy_midpoints, eff_area, yerr=eff_area_error, marker='.', label='Coincident')
for cut_key, label in zip(standard_cut_keys, labels):
selection_mask = selection_mask & cut_dict[cut_key]
eff_area, eff_area_error, energy_midpoints = get_effective_area(df, selection_mask)
ax.errorbar(energy_midpoints, eff_area, yerr=eff_area_error, marker='.', label='+ {}'.format(label))
# ax.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2,
borderaxespad=0.)
ax.grid()
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('$\mathrm{Effective \ Area} \ [\mathrm{m^2}]$')
ax.set_xlabel('$\mathrm{E_{MC}}/\mathrm{GeV}$')
ax.axvline(10**6.2, marker='None', linestyle='-.')
ax.set_ylim([0,1e6])
plt.show()
| analysis/qualitycuts/effective-area.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ICTK)
# language: python
# name: ictk
# ---
# ## Week 4 Activity
# <br>BGroup 5
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the Telco customer-churn dataset.
data = pd.read_excel('./datasets/Telco-Customer-Churn.xlsx')
# BUG FIX: `data.head` is the bound method object; it must be *called*
# to display the first rows.
display(data.head())
display(data.dtypes)

# ## 1) Compare churn count with respect to gender.
# Count rows per (gender, Churn) pair and pivot Churn values into columns.
gender_churn = data.groupby(['gender', 'Churn']).size().unstack()
display(gender_churn)
gender_churn.plot(kind='barh', stacked=True, figsize=[16, 6])
plt.grid(True)

# ### 2) Find out how many female senior citizens there are in the dataset
sen_citi = data[data['SeniorCitizen'] == 1].groupby(['gender'], as_index=False).agg({'SeniorCitizen': 'count'})
display(sen_citi)
sns.barplot(x='gender', y='SeniorCitizen', data=sen_citi)
plt.yticks(np.arange(0, 600, 50))
plt.grid(True)

# ### 3) Compare 'tenure' with 'Total Charges'
# Drop rows whose TotalCharges is a blank string, then convert to float
# so it can be plotted numerically.
data.drop(data[data['TotalCharges'] == ' '].index, inplace=True)
data['TotalCharges'] = data['TotalCharges'].astype('float')
plt.scatter(x=data['TotalCharges'], y=data['tenure'])

# ### 4) Find out which contract is preferred by senior citizens.
sen_citi = data[data['SeniorCitizen'] == 1].groupby(['Contract'], as_index=False).agg({'SeniorCitizen': 'count'})
display(sen_citi)
sns.barplot(x='Contract', y='SeniorCitizen', data=sen_citi)
plt.grid(True)

# ### 5) Comment on your findings about Payment Method.
plt.figure(figsize=(20, 10))
plt.hist(x='PaymentMethod', data=data)
| notebooks/act-week4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.0 64-bit
# metadata:
# interpreter:
# hash: cf85b414d3663472de89104473c842eaab37d7b845999caf56a47ccda76ea2f8
# name: python3
# ---
# ### 1.
#
# Crear una función que reciba cuatro parámetros: una lista 'L', un número 'T', un número 'V' y un booleano 'S'. La función debe devolver otras dos listas: "lista_train_val" y "lista_test".
#
# - "lista_train_val" es una lista con dos elementos. Cada elemento es una lista: "list_train", que representa el conjunto de entrenamiento y "list_val", que representa el conjunto de validación.
# - "list_test" es el conjunto de test. Como mínimo ha de tener 1 elemento (si 'L' > 1).
# - "L" es cualquier lista de elementos. Al principio, entra con un tamaño completo (100%). Esta es la lista que se partirá y se generarán los conjuntos entrenamiento, de validación y de test.
# - "T" es un número entre 1 y 100 que va a representar el tamaño, en porcentaje, del conjunto de test "list_test".
# - "V" es un número entre 0 y 100 que va a representar el tamaño, en porcentaje, del conjunto de validación "list_val". Su valor por defecto es 0.
# - "S", que por defecto es False, determinará si los conjuntos "list_train", "list_test" y "list_val" están aleatoriamente ordenados. ¿Encuentras alguna posible incoherencia si se reordenan aleatoriamente?
#
# Prueba la función con una lista de 100 elementos números aleatorios del 0 al 100.
#
# ### 2.
#
# Rehacer el ejercicio anterior en otra función para que no devuelva dos listas, sino los tres conjuntos:
#
# --> return list_train, list_val, list_test
import random
def train_test_lists(L: list, T: int, V: int = 0, S: bool = False):
    """Split *L* into ([train, validation], test) lists.

    Arguments:
    L -- the full list of samples to split.
    T -- test-set size as a percentage (1-100) of ``len(L)``.
    V -- validation-set size as a percentage (0-100) of the train+val part.
    S -- when True, pick the train+val samples at random instead of
         taking a leading slice.  Note: shuffling features independently
         of a target list would misalign samples and labels.

    Returns:
    (lista_train_val, list_test) where ``lista_train_val`` is the pair
    ``[list_train, list_val]``.
    """
    # Number of samples kept for train+validation; always keep at least one.
    long_train = max(1, int(len(L) - ((len(L) * T) / 100)))
    if S:
        # Random subset of indices for train+val; the rest become the test set.
        rand_samp = random.sample(range(0, len(L)), long_train)
        train_sample = [L[x] for x in rand_samp]
        chosen = set(rand_samp)
        # Iterate indices in order so the test set has a deterministic
        # ordering (previously this relied on set iteration order).
        list_test = [L[x] for x in range(len(L)) if x not in chosen]
    else:
        train_sample = L[:long_train]
        list_test = L[long_train:]
    # Carve the validation tail off the train+val sample.  This step was
    # previously duplicated in both branches above.
    long_val = int(len(train_sample) - (len(train_sample) * V / 100))
    list_train = train_sample[:long_val]
    list_val = train_sample[long_val:]
    return [list_train, list_val], list_test
# Si se reordenan las listas independientemente de la variable independiente los elementos de la independiente no coincidirán con los correspondientes del target
x = random.sample(range(0, 100), 20)
x
train_val, test = train_test_lists(L=x, T=20, V=10, S=True)
print(train_val, 'train len -->', len(train_val[0]), '\n,', 'val len -->', len(train_val[1]))
print(test, 'test len -->', len(test))
train_val, test = train_test_lists(L=x, T=20, V=10)
print(train_val, 'train len -->', len(train_val[0]), '\n,', 'val len -->', len(train_val[1]))
print(test, 'test len -->', len(test))
def train_test_val_lists(L: list, T: int, V: int = 0, S: bool = False):
    """Split *L* into train, test and validation lists.

    Same splitting logic as ``train_test_lists`` but returns the three
    sets directly instead of nesting train/val in a list.

    Arguments:
    L -- the full list of samples to split.
    T -- test-set size as a percentage (1-100) of ``len(L)``.
    V -- validation-set size as a percentage (0-100) of the train+val part.
    S -- when True, pick the train+val samples at random.

    Returns:
    (list_train, list_test, list_val)
    """
    # BUG FIX: the original ``def`` line was missing its trailing colon,
    # which made this whole cell a SyntaxError.
    # Number of samples kept for train+validation; always keep at least one.
    long_train = max(1, int(len(L) - ((len(L) * T) / 100)))
    if S:
        rand_samp = random.sample(range(0, len(L)), long_train)
        train_sample = [L[x] for x in rand_samp]
        chosen = set(rand_samp)
        # Deterministic test-set ordering (previously relied on set order).
        list_test = [L[x] for x in range(len(L)) if x not in chosen]
    else:
        train_sample = L[:long_train]
        list_test = L[long_train:]
    # Carve the validation tail off the train+val sample.
    long_val = int(len(train_sample) - (len(train_sample) * V / 100))
    list_train = train_sample[:long_val]
    list_val = train_sample[long_val:]
    return list_train, list_test, list_val
train, test, val = train_test_val_lists(L=x, T=20, V=10, S=True)
print(train, 'train len -->', len(train))
print(val, 'val len -->', len(val))
print(test, 'test len -->', len(test))
train, test, val = train_test_val_lists(L=x, T=20, V=10)
print(train, 'train len -->', len(train))
print(val, 'val len -->', len(val))
print(test, 'test len -->', len(test))
| 2_Ejercicios/Modulo2/1.Linear_Regression/train_validation_test_set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Le jeu de cartes
#
# Écrivez un programme qui propose à un utilisateur de penser à une valeur issue d’un jeu de 32 cartes, puis qui devine en trois coups maximum son tirage.
# Your code here.
# Guess a card from a 32-card deck in at most three yes/no questions:
# each question halves the set of 8 possible values (binary search).
print("Pensez à une carte issue d'un jeu à 32 cartes. Les figures : Valet, Dame, Roi, As. Les cartes à valeurs numériques : 7, 8, 9 et 10.")
test = input("Votre carte représente-t-elle une figure ? (o/n)\n")
# It is a face card (Valet, Dame, Roi or As)
if test == "o":
    test = input("La figure est-elle masculine ? (o/n)\n")
    # Masculine face card (Valet or Roi)
    if test == "o":
        test = input("Est-ce la valet ? (o/n)\n")
        if test == "o":
            print("La carte à laquelle vous avez pensé est le Valet.")
        else:
            print("La carte à laquelle vous avez pensé est le Roi.")
    # Non-masculine face card (Dame or As)
    else:
        test = input("Est-ce la Dame ? (o/n)\n")
        if test == "o":
            print("La carte à laquelle vous avez pensé est la Dame.")
        else:
            print("La carte à laquelle vous avez pensé est l'As.")
# Not a face card (7, 8, 9 or 10)
else:
    test = input("Est-ce une valeur paire ? (o/n)\n")
    if test == "o":
        test = input("Est-ce le 8 ? (o/n)\n")
        if test == "o":
            print("La carte à laquelle vous avez pensé est le 8.")
        else:
            print("La carte à laquelle vous avez pensé est le 10.")
    else:
        test = input("Est-ce le 7 ? (o/n)\n")
        if test == "o":
            print("La carte à laquelle vous avez pensé est le 7.")
        else:
            print("La carte à laquelle vous avez pensé est le 9.")
| 3.control-flow/answers/2.the-card-game.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NOTEBOOK_HEADER-->
# *This notebook contains material from [nbpages](https://jckantor.github.io/nbpages) by <NAME> (jeff at nd.edu). The text is released under the
# [CC-BY-NC-ND-4.0 license](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode).
# The code is released under the [MIT license](https://opensource.org/licenses/MIT).*
# <!--NAVIGATION-->
# < [1.2 Notebook Style](https://jckantor.github.io/nbpages/01.02-Notebook-Style.html) | [Contents](toc.html) | [Tag Index](tag_index.html) | [1.4 Coding Style](https://jckantor.github.io/nbpages/01.04-Coding.html) ><p><a href="https://colab.research.google.com/github/jckantor/nbpages/blob/master/docs/01.03-External-Files.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/nbpages/01.03-External-Files.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# + [markdown] nbpages={"level": 1, "link": "[1.3 External Files](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3-External-Files)", "section": "1.3 External Files"}
# # 1.3 External Files
# + [markdown] nbpages={"level": 1, "link": "[1.3 External Files](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3-External-Files)", "section": "1.3 External Files"}
# Notebooks often incorporate figures, data, or video. Any external files must be appropriately licensed for reuse. If that isn't possible, then that information should be accessed by linking to a resource showing the user how to properly access that information.
#
# To facilitate use of the notebooks, external resources should be referenced by an external link whenever possible. If the resource must be included within repository, the link should be to the publically accessible repository. This practice enables cross-platform use of notebooks and streamlines the import and use of notebooks by other users.
# + [markdown] nbpages={"level": 2, "link": "[1.3.1 Figures](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3.1-Figures)", "section": "1.3.1 Figures"}
# ## 1.3.1 Figures
#
# Figures included within the repository should located within a common figures directory. This practice enables reuse of figures, and streamlines the editing and maintanence of figures. Figures should be `.png` or `.jpg` format as appropriate, and resized for use with the stndard markdown `![]()` markup. Use of HTML image tags is discouraged and reported as 'lint'.
# + [markdown] nbpages={"level": 2, "link": "[1.3.2 Data](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3.2-Data)", "section": "1.3.2 Data"}
# ## 1.3.2 Data
#
# Data files distributed with the repository should be located within a common data directory.
# + [markdown] nbpages={"level": 2, "link": "[1.3.3 Embedding YouTube video](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3.3-Embedding-YouTube-video)", "section": "1.3.3 Embedding YouTube video"}
# ## 1.3.3 Embedding YouTube video
# + nbpages={"level": 2, "link": "[1.3.3 Embedding YouTube video](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3.3-Embedding-YouTube-video)", "section": "1.3.3 Embedding YouTube video"}
from IPython.display import YouTubeVideo
# Embed a YouTube video by its ID; the player renders inline in the notebook output.
YouTubeVideo('2eDGHy5iu_M')
# + nbpages={"level": 2, "link": "[1.3.3 Embedding YouTube video](https://jckantor.github.io/nbpages/01.03-External-Files.html#1.3.3-Embedding-YouTube-video)", "section": "1.3.3 Embedding YouTube video"}
# -
# <!--NAVIGATION-->
# < [1.2 Notebook Style](https://jckantor.github.io/nbpages/01.02-Notebook-Style.html) | [Contents](toc.html) | [Tag Index](tag_index.html) | [1.4 Coding Style](https://jckantor.github.io/nbpages/01.04-Coding.html) ><p><a href="https://colab.research.google.com/github/jckantor/nbpages/blob/master/docs/01.03-External-Files.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/nbpages/01.03-External-Files.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
| docs/01.03-External-Files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # %%<NAME>
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as lin
import matplotlib.patches as mpatches
import scipy as sypy
import time
import os.path
from scipy import signal
from scipy import io
from numpy import ndarray
from ipynb.fs.full.cnn import cnnload
from ipynb.fs.full.avgpool import avgpool
from ipynb.fs.full.avgpool import maxpool
from ipynb.fs.full.cnn_training import cnn_training
from ipynb.fs.full.cnn_inference import cnn_inference
# Load the MNIST-style dataset, either train a fresh CNN or reuse previously
# trained parameters, then run inference on the test set.
[trainlabels, trainimages, testlabels, testimages] = cnnload()

use_previous_training = 0
maxtrain = 2000   # maximum number of training images
num_iter = 10     # maximum training iterations (renamed from `iter`, which shadowed the builtin)
eta = 0.01        # learning rate
#
# maxtrain=10000; #maximum training images
# iter= 10; #maximum iterations
# eta=0.01; # learning rate
# maxtrain=60000; #maximum training images
# iter= 30; #maximum iterations
# eta=0.01; #learning rate
# select the pooling
# pool='maxpool';
pool = 'avgpool'
# File name encodes the hyper-parameters (unchanged format, so cached files still match).
trained_parameter_file = 'trained_parameters'+'_maxtrain'+str(maxtrain)+'_iter'+str(num_iter)+'_eta'+str(eta)+ pool+'.mat'
if use_previous_training == 0:
    tstart = time.time()
    cnn_training(trainlabels, trainimages, maxtrain, num_iter, eta, pool, trained_parameter_file)
    tfinish = time.time() - tstart
    if os.path.isfile(trained_parameter_file):
        print('training parameters are created')
else:
    if os.path.isfile(trained_parameter_file):
        print('using previously trained parameters')
# BUG FIX: the original had two stray MATLAB-style `end` statements here, which
# raise NameError at runtime in Python; they have been removed.
tstart2 = time.time()
[missimages, misslabels] = cnn_inference(testlabels, testimages, pool, trained_parameter_file)
# BUG FIX: inference time was measured from the *training* start (tstart);
# it must be measured from tstart2.
tfinish2 = time.time() - tstart2
# -
| problem_sets/hdr_om/run_cnn_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
# # Test SnpArray Linear Algebra
#
# According to [SnpArray documentation](https://openmendel.github.io/SnpArrays.jl/dev/#Linear-Algebra), there is at least 2 ways one can perform linear algebra on a SnpArray. This notebook tests which method is better and compare them to standard BLAS operations (default 8 BLAS threads).
using Revise
using SnpArrays
using BenchmarkTools
using LinearAlgebra
# +
# load test data (no missing)
const EUR = SnpArray(SnpArrays.datadir("EUR_subset.bed"));
# convert to SnpLinAlg and SnpBitMatrix
const EURsla = SnpLinAlg{Float64}(EUR, model=ADDITIVE_MODEL, center=true, scale=true);
const EURsla_ = SnpLinAlg{Float64}(EUR, model=ADDITIVE_MODEL, center=true, scale=true, impute=false);
const EURbm = SnpBitMatrix{Float64}(EUR, model=ADDITIVE_MODEL, center=true, scale=true);
# -
Threads.nthreads()
# ## Matrix vector multiplication (Xv)
# Work vectors sized to the SnpArray; A is the dense Float64 reference matrix.
v1 = randn(size(EUR, 1))
v2 = randn(size(EUR, 2))
A = convert(Matrix{Float64}, EUR, model=ADDITIVE_MODEL, center=true, scale=true);
# SnpLinAlg
@benchmark mul!($v1, $EURsla, $v2)
# SnpBitMatrix
@benchmark mul!($v1, $EURbm, $v2)
# BLAS
@benchmark mul!($v1, $A, $v2)
# SnpLinAlg is clearly fastest, even faster than BLAS.
# ## Transpose matrix vector multiplication (X'v)
# SnpLinAlg
@benchmark mul!($v2, $EURsla', $v1)
# SnpBitMatrix
@benchmark mul!($v2, $EURbm', $v1)
# BLAS
@benchmark mul!($v2, $A', $v1)
# Contrary to [documentation](https://openmendel.github.io/SnpArrays.jl/dev/#Linear-Algebra), both $Ax$ and $A'x$ are faster on `SnpLinAlg`.
# ## Does SnpLinAlg require more memory?
#
# [SnpBitMatrix](https://github.com/OpenMendel/SnpArrays.jl/blob/master/src/linalg_bitmatrix.jl) implementation definitely requires allocating 2 `BitMatrix`s, so memory usage is 2 bits per genotype. However it seems like a [SnpLinAlg](https://github.com/OpenMendel/SnpArrays.jl/blob/master/src/linalg_direct.jl) is instantiated from the original `SnpArray`. Does SnpLinAlg require more memory than just the SnpArray?
@show Base.summarysize(EUR)
@show Base.summarysize(EURsla)
@show Base.summarysize(EURsla_)
@show Base.summarysize(EURbm);
# Seems like SnpLinAlg requires 25% more memory (2.5 bit per entry).
# ## mul! on a @view SnpLinAlg
#
# SnpLinAlg behaves like a regular array, and hence, we can use view on it. Let's test performance on a viewed SnpLinAlg.
# +
# NOTE: v1/v2 are rebound here to the view's dimensions; the earlier full-size
# v2 (length size(EUR, 2)) is no longer available below this cell.
EURsla_sub = @view(EURsla[1:2:379, 1:2:54051]); # every other row and col
v1 = randn(size(EURsla, 1))
v2 = randn(size(EURsla_sub, 1))
v3 = randn(size(EURsla, 2))
v4 = randn(size(EURsla_sub, 2));
# -
# Full SnpLinAlg
@benchmark mul!($v1, $EURsla, $v3)
# Viewed SnpLinAlg
@benchmark mul!($v2, $EURsla_sub, $v4)
# ## Multithreaded Matrix vector multiplication (Xv)
#
# SnpLinAlg
Threads.nthreads()
# BUG FIX: `v2` was rebound in the @view section above to length
# size(EURsla_sub, 1), so `mul!(v1, EURsla, v2)` would throw a
# DimensionMismatch. `v3` still has the required length size(EURsla, 2).
@benchmark mul!($v1, $EURsla, $v3)
| test/linalg_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datasets import *
from run_SS_helpers import *
import numpy as np
np.random.seed(42)
# uspstb: load data, then run the semi-supervised variant with a fixed
# labelled/unlabelled partition and NN nearest neighbours.
data, label, n_class = uspstb()
partition = [50, 1409, 50, 498]
NN = 15
run_fast(data, label, n_class, partition, NN)
# +
from datasets import *
from run_helpers import *
import numpy as np
np.random.seed(42)
# -
# uspstb (comment previously said "coil20", but uspstb() is what is loaded)
data, label, n_class = uspstb()
# Run every RVFL variant on the same data/labels.
run_RVFL(data, label, n_class)
run_dRVFL(data, label, n_class)
# +
run_edRVFL(data, label, n_class)
# -
run_BRVFL(data, label, n_class)
run_BdRVFL(data, label, n_class)
run_BedRVFL(data, label, n_class)
run_LapRVFL(data, label, n_class)
run_LapdRVFL(data, label, n_class)
run_LapedRVFL(data, label, n_class)
| .ipynb_checkpoints/main_run_uspstb-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import csv
import re
# +
# names of files to read from
r_maxo_classes_with_definitionsTSV = '~/Git/MAxO/src/ontology/sparql-test/maxo_classes_with_definitions.tsv'
r_ncit_definitionsTSV = '~/Git/MAxO/src/ontology/sparql-test/ncit_definitions.tsv'
# Both inputs are tab-separated SPARQL query results.
tsv_read_maxo = pd.read_csv(r_maxo_classes_with_definitionsTSV, sep='\t')
tsv_read_ncit = pd.read_csv(r_ncit_definitionsTSV, sep='\t')
maxo_id=list()
ncit_id=list()
# -
tsv_read_maxo.columns
# + jupyter={"outputs_hidden": true}
from pandas import DataFrame

# Extract the CURIE-style MAxO ID (e.g. MAXO:0000001) from each class IRI and
# join it back onto the original table, then write the result to TSV.
mylist = []
maxo_id = pd.DataFrame(tsv_read_maxo)
# first column holds the class IRI
maxo_id = maxo_id[maxo_id.columns[0]]
for line in maxo_id:
    line = line.strip('/')
    x = re.findall('[A-Z]{4,11}_[A-Z0-9]{1,15}', line)
    x = [item.replace('_', ':') for item in x]
    mylist.append(x)
maxo_df = DataFrame(mylist, columns=['Maxo_ID'])
# join maxo ID to the file
maxo_id_def = maxo_df.join(tsv_read_maxo, lsuffix="_left", rsuffix="_right")
print(maxo_id_def.head(2))
# BUG FIX: the output file was wrapped in `with open(..., 'wb')` with an unused
# handle — truncating the file once, only for to_csv to open it again.
# DataFrame.to_csv opens the path itself.
maxo_id_def.to_csv('maxo_xref_definitions.tsv', encoding='utf-8', sep='\t', index=False)
# + jupyter={"outputs_hidden": true}
# Extract the CURIE-style NCIT ID from each class IRI and join it back onto the
# definitions table, then write the result to TSV.
newlist = []
ncit_id = pd.DataFrame(tsv_read_ncit)
# first column holds the class IRI
ncit_id = ncit_id[ncit_id.columns[0]]
for line in ncit_id:
    line = line.strip('/')
    y = re.findall('[A-Z]{4,11}_[A-Z0-9]{1,15}', line)
    y = [item.replace('_', ':') for item in y]
    newlist.append(y)
ncit_df = DataFrame(newlist, columns=['NCIT_ID'])
ncit_id_def = ncit_df.join(tsv_read_ncit, lsuffix="_left", rsuffix="_right")
print(ncit_id_def.head(2))
# BUG FIX: removed the pointless `with open(..., 'wb')` wrapper (unused handle,
# double open of the same path); DataFrame.to_csv opens the file itself.
ncit_id_def.to_csv('ncit_definitions.tsv', encoding='utf-8', sep='\t', index=False)
# -
# ncit_id_def.info
# + jupyter={"outputs_hidden": true, "source_hidden": true}
# Give the joined MAxO table stable column names: ID is the definition's xref.
maxo_id_def.columns = ["Maxo_ID","?cls","ID","?def"]
print(maxo_id_def.head())
# -
maxo_id_list = []
maxo_def_list = []
maxo_def_xref_list = []
ncit_id_list = []
ncit_def_list = []
# Find MAxO definitions whose xref points at an NCIT term and pair them with
# the matching NCIT definition(s).
# PERF FIX: the original compared every MAxO row against every NCIT row with
# nested iterrows (O(n*m)). Build a lookup from NCIT ID -> definitions once
# (in row order), then probe it per MAxO row; output order is unchanged.
# Positional Series access row[2] is deprecated in pandas — use .iloc.
ncit_defs_by_id = {}
for _, ncit_row in ncit_id_def.iterrows():
    ncit_defs_by_id.setdefault(ncit_row.iloc[0], []).append(ncit_row.iloc[2])
for _, row in maxo_id_def.iterrows():
    xref = row.iloc[2]
    if xref.startswith("NCIT:"):
        for ncit_def in ncit_defs_by_id.get(xref, []):
            maxo_id_list.append(row.iloc[0])
            maxo_def_list.append(row.iloc[3])
            maxo_def_xref_list.append(xref)
            ncit_def_list.append(ncit_def)
            ncit_id_list.append(xref)
# +
maxo_ncit_def_df = pd.DataFrame(list(zip(maxo_id_list, maxo_def_list, maxo_def_xref_list, ncit_def_list, ncit_id_list,)), columns=["maxo_id","maxo_def", "maxo_def_xref", "ncit_def", "ncit_id"])
print(maxo_ncit_def_df.head())
# BUG FIX: removed the useless `with open(..., 'wb')` wrapper around to_csv.
maxo_ncit_def_df.to_csv('maxo_ncit_def.tsv', encoding='utf-8', sep='\t', index=False)
# -
| src/scripts/maxo_ncit_defs-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example 4: masked 4 with bimodal posterior
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 411, "status": "ok", "timestamp": 1568330149594, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18062987068597777273"}, "user_tz": 420} id="D-Fe5G8m1FTC" outputId="481e97a2-895c-40e1-c3f8-aaac7fb27b5d"
import os
# %pylab inline
import pickle
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 5775, "status": "ok", "timestamp": 1568330157145, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18062987068597777273"}, "user_tz": 420} id="GbM9eAbNvLiP" outputId="8d47cc8a-f1ed-49c5-f02d-c69869c04784"
# ! pip install -q https://github.com/dfm/corner.py/archive/master.zip
import corner
# + colab={} colab_type="code" id="sZkaGpCR1kVS"
# Repository root, one level above the notebooks directory.
PROJECT_PATH = os.path.abspath('../')
print(PROJECT_PATH)
# + colab={} colab_type="code" id="-AEYmOsH1FTI"
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_hub as hub
tfd = tfp.distributions
tfb = tfp.bijectors
from tensorflow.contrib.distributions import softplus_inverse
# + colab={} colab_type="code" id="8puPFE90P0aD"
# Paths to the pre-trained TF-Hub modules and the output directories.
generator_path = os.path.join(PROJECT_PATH,'modules/decoder1/decoder')
encoder_path = os.path.join(PROJECT_PATH,'modules/encoder1/encoder')
nvp_func_path = os.path.join(PROJECT_PATH,'modules/nvp1/')
minima_path = os.path.join(PROJECT_PATH,'minima/')
plot_path = os.path.join(PROJECT_PATH,'plots/')
# + colab={} colab_type="code" id="nFzYYSxY1FTL"
import gzip, zipfile, tarfile
import os, shutil, re, string, urllib, fnmatch
import pickle as pkl
def _download_mnist_realval(dataset):
    """Fetch the pickled real-valued MNIST archive and store it at *dataset*."""
    origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
    print('Downloading data from %s' % origin)
    urllib.request.urlretrieve(origin, dataset)
def _get_datafolder_path():
full_path = os.path.abspath('.')
path = full_path +'/data'
return path
def load_mnist_realval(
    dataset=_get_datafolder_path()+'/mnist_real/mnist.pkl.gz'):
    '''
    Loads the real valued MNIST dataset, downloading it on first use.

    NOTE: the default path is evaluated once at definition time (it calls
    _get_datafolder_path() when the module is loaded).

    :param dataset: path to dataset file
    :return: (x_train, targets_train, x_valid, targets_valid, x_test, targets_test)
    '''
    # Download the pickle if it is not present, creating the folder first.
    if not os.path.isfile(dataset):
        datasetfolder = os.path.dirname(dataset)
        if not os.path.exists(datasetfolder):
            os.makedirs(datasetfolder)
        _download_mnist_realval(dataset)
    # The archive is a gzipped Python-2 pickle; latin1 keeps byte values intact.
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = pkl.load(f, encoding='latin1')
    f.close()
    x_train, targets_train = train_set[0], train_set[1]
    x_valid, targets_valid = valid_set[0], valid_set[1]
    x_test, targets_test = test_set[0], test_set[1]
    return x_train, targets_train, x_valid, targets_valid, x_test, targets_test
# + colab={} colab_type="code" id="ft_jIh-W1FTN"
data_dim = 28*28   # flattened MNIST image size
data_size = 1      # number of images processed at once (batch of one)
sigma_n = 0.1      # baseline observation noise std-dev
hidden_size = 10   # latent-space dimensionality
n_channels = 1
seed = 777
# settings for reconstruction with uncorrupted data
# corr_type = 'none'
# num_mnist = 6
# label = 'uncorrupted'
# noise_level = 0.0
# num_comp = 2
# settings for reconstruction with rectangular mask (the active configuration)
corr_type = 'mask'
num_mnist = 6       # index of the test digit to reconstruct
label = 'solidmask' # tag used in all output file names
noise_level = 0.0
num_comp = 3        # number of Gaussian mixture components for the posterior
#settings for reconstruction with sparse mask
# corr_type = 'sparse mask'
# num_mnist = 1
# label = 'sparse95'
# noise_level = 0.
# num_comp = 2
# settings for reconstruction with noise
# corr_type = 'noise'
# num_mnist = 6
# label = 'noise05'
# noise_level = 0.5
# num_comp = 4
# settings for reconstruction with noise and mask
# corr_type = 'noise+mask'
# num_mnist = 6
# label = 'masknoise05'
# noise_level = 0.5
# num_comp = 2
# + colab={} colab_type="code" id="GivmkSHyR-jC"
# Collect plots for this corruption setting in their own subdirectory.
plot_path = os.path.join(plot_path,'%s/'%label)
# FIX: exist_ok avoids the check-then-create race of isdir() followed by makedirs().
os.makedirs(plot_path, exist_ok=True)
# + colab={} colab_type="code" id="nxcZOE0MLGJ1"
def plot_image(image, save=True, directory='./plots/',filename='plotted_image', title='image',vmin=None,vmax=None, mask=None):
    """Show a 28x28 image (optionally multiplied by a 28x28 mask) and save it as PNG.

    `image` and `mask` may be any array reshapeable to (28, 28).
    Returns True so callers can chain on success.
    """
    # BUG FIX: the original used np.any(mask == None), an elementwise
    # comparison against None (deprecated in NumPy) just to detect "no mask
    # given"; an identity test is the correct check.
    if mask is None:
        mask = np.ones_like(image)
    mask = np.reshape(mask, (28, 28))
    plt.figure()
    plt.title(title)
    plt.imshow(image.reshape((28, 28)) * mask, cmap='gray', vmin=vmin, vmax=vmax)
    plt.axis('off')
    plt.colorbar()
    if save:
        plt.savefig(directory + filename + '.png', bbox_inches='tight')
    plt.show()
    return True
def get_custom_noise(shape, signal_dependent=False, signal =None, sigma_low=0.07, sigma_high=0.22, threshold=0.02 ):
    # Build a per-pixel noise std-dev map of the given shape.
    # Baseline: constant sigma_n everywhere (module-level constant).
    sigma = np.ones(shape)*sigma_n
    if signal_dependent:
        # Two-level noise: pixels at or below `threshold` get sigma_low,
        # brighter pixels get sigma_high.
        # NOTE(review): iterates the module-level data_size, not shape[0] —
        # assumed equal; confirm before reusing with other shapes.
        for ii in range(data_size):
            sigma[ii][np.where(signal[ii]<=threshold)]= sigma_low
            sigma[ii][np.where(signal[ii]>threshold)]= sigma_high
    # Add the corruption noise level (module-level noise_level) in quadrature.
    data_noise = np.ones_like(sigma)*noise_level
    sigma = np.sqrt(sigma**2+data_noise**2)
    return sigma
def make_corrupted_data(x_true, corr_type='mask'):
    # Corrupt the flattened image(s) `x_true` according to `corr_type` and
    # return (corr_data with a trailing channel axis, flattened 0/1 mask).
    # Masked pixels are set to 0 in the mask; unmasked pixels are 1.
    mask = np.ones((28,28))
    if corr_type=='mask':
        # solid vertical rectangle of masked columns
        minx = 10
        maxx = 24
        mask[0:28,minx:maxx]=0.
        mask = mask.reshape((28*28))
        corr_data = x_true*[mask]
    elif corr_type=='sparse mask':
        # randomly mask `percent` % of all pixels (seeded for reproducibility)
        mask = np.ones(data_dim, dtype=int)
        percent = 95
        np.random.seed(seed+2)
        indices = np.random.choice(np.arange(data_dim), replace=False,size=int(percent/100.*data_dim))
        print('precentage masked:', len(indices)/data_dim)
        mask[indices] =0
        corr_data = x_true*[mask]
    elif corr_type=='noise':
        # additive Gaussian noise, no masking
        np.random.seed(seed+2)
        noise = np.random.randn(data_dim*data_size)*noise_level
        corr_data = x_true+noise
    elif corr_type=='noise+mask':
        # additive noise first, then a solid rectangular mask
        np.random.seed(seed+2)
        noise = np.random.randn(data_dim*data_size)*noise_level
        minx = 14
        maxx = 28
        mask[0:28,minx:maxx]=0.
        mask = mask.reshape((28*28))
        corr_data = x_true+noise
        corr_data = corr_data*[mask]
    elif corr_type=='none':
        corr_data = x_true
    # add the channel axis expected by the TF graph
    corr_data = np.expand_dims(corr_data,-1)
    mask = mask.flatten()
    return corr_data, mask
# + colab={} colab_type="code" id="9TIQArTJHE87"
def fwd_pass(generator,nvp,z,mask):
    # Push latent z through the realNVP flow, decode with the generator, and
    # keep only the pixels selected by `mask`.
    # NOTE(review): 'z_sample' appears to be a dummy input here; only
    # 'u_sample' carries z — confirm against the hub module's signature.
    fwd_z = nvp({'z_sample':np.zeros((1,hidden_size)),'sample_size':1, 'u_sample':z},as_dict=True)['fwd_pass']
    gen_z = tf.boolean_mask(tf.reshape(generator(fwd_z),[data_size,data_dim,n_channels]),mask, axis=1)
    return gen_z
def get_likelihood(generator,nvp,z,sigma,mask):
    # Gaussian likelihood of the masked data given latent z, with per-pixel
    # std-devs `sigma` restricted to the unmasked pixels.
    gen_z = fwd_pass(generator,nvp,z,mask)
    sigma = tf.boolean_mask(sigma,mask, axis=1)
    likelihood = tfd.Independent(tfd.MultivariateNormalDiag(loc=gen_z,scale_diag=sigma))
    return likelihood
def get_prior():
    # Standard-normal prior over the hidden_size-dimensional latent code,
    # batched over data_size.
    return tfd.MultivariateNormalDiag(tf.zeros([data_size,hidden_size]), scale_identity_multiplier=1.0, name ='prior')
def get_log_posterior(z,x,generator,nvp,sigma,mask, beta):
    # Unnormalised log posterior: log prior + beta * log likelihood.
    # `beta` is the inverse temperature used for annealing during optimisation.
    likelihood = get_likelihood(generator,nvp,z,sigma,mask)
    prior = get_prior()
    masked_x = tf.boolean_mask(x,mask, axis=1)
    log_posterior = prior.log_prob(z)+likelihood.log_prob(masked_x)*beta
    return log_posterior
def get_recon(generator,nvp, z,sigma,mask):
    # Reconstruction for latent z: the mean of the decoding likelihood.
    prob = get_likelihood(generator,nvp, z,sigma,mask)
    recon= prob.mean()
    return recon
def get_hessian(func, z):
    # Exact Hessian of scalar `func` w.r.t. z (tf.hessians returns a list;
    # take the entry for z), reduced over axis 2.
    hess = tf.hessians(func,z)
    hess = tf.gather(hess, 0)
    return(tf.reduce_sum(hess, axis = 2 ))
def get_GN_hessian(generator,nvp,z,mask,sigma):
    # Gauss-Newton approximation of the posterior Hessian: identity (from the
    # unit-variance prior) plus the outer product of the noise-whitened
    # decoder gradient.
    gen_z = fwd_pass(generator,nvp,z,mask)
    sigma = tf.boolean_mask(sigma,mask, axis=1)
    grad_g = tf.gather(tf.gradients(gen_z/(sigma),z),0)
    grad_g2 = tf.einsum('ij,ik->ijk',grad_g,grad_g)
    one = tf.linalg.eye(hidden_size, batch_shape=[data_size],dtype=tf.float32)
    hess_GN = one+grad_g2
    return hess_GN
def compute_covariance(hessian):
    # Covariance = inverse Hessian, symmetrised to counter numerical asymmetry.
    cov = tf.linalg.inv(hessian)
    cov = (cov+tf.linalg.transpose(cov))*0.5
    return cov
# + colab={} colab_type="code" id="QGyi6PVWx1qd"
def minimize_posterior(initial_value, x, custom_mask, noise, my_sess, annealing =True):
    # Minimise the negative log posterior over the latent code starting from
    # `initial_value`, with a three-stage learning-rate schedule and optional
    # temperature annealing in the first stage.
    # Returns (latent minimum, final loss, Hessian-positive-definite flag).
    ini = np.reshape(initial_value,[data_size,hidden_size])
    my_sess.run(MAP_reset,feed_dict={input_data: x, MAP_ini:ini, mask:custom_mask,sigma_corr:noise})
    pos_def = False
    posterior_loss = []
    for lrate, numiter in zip([1e-1,1e-2,1e-3],[10000,5000,3000]):
        print('lrate', lrate)
        for jj in range(numiter):
            # Anneal only during the first (coarsest) stage: the inverse
            # temperature ramps from ~0.2 towards 0.5 across the stage.
            if annealing and lrate==1e-1:
                inv_T= np.round(0.5*np.exp(-(1.-jj/numiter)),decimals=1)
            else:
                inv_T= 1.
            _, ll = my_sess.run([opt_op_MAP,loss_MAP],feed_dict={input_data: x, mask:custom_mask, sigma_corr:noise, lr: lrate, inverse_T:inv_T})
            posterior_loss.append(ll)
            if jj%1000==0:
                print('iter', jj, 'loss', ll,r'inverse T', inv_T)
    z_value = my_sess.run(MAP,feed_dict={input_data: x, mask:custom_mask, sigma_corr:noise})
    # Check local curvature: a positive-definite Hessian marks a proper minimum.
    eig = my_sess.run(tf.linalg.eigvalsh(hessian),feed_dict={input_data: x, mask:custom_mask,sigma_corr:noise})
    if np.all(eig>0.):
        pos_def = True
    loss = ll
    plt.figure()
    plt.plot(posterior_loss)
    plt.ylabel('loss')
    plt.xlabel('iteration')
    plt.show()
    return z_value, loss, pos_def
# + colab={} colab_type="code" id="oiAie-wjUcHN"
def get_laplace_sample(num,map_value,x,mymask,noise,my_sess):
    # Draw `num` reconstructions from the Laplace approximation centred on
    # `map_value`: reset the MAP variable, update the Gaussian's mean and
    # Cholesky factor, then sample and decode.
    my_sess.run(MAP_reset,feed_dict={MAP_ini:map_value})
    my_sess.run(update_mu)
    my_sess.run(update_TriL,feed_dict={input_data: x, mask: mymask, sigma_corr:noise})
    samples=[]
    for ii in range(num):
        # NOTE(review): this first run's sample is discarded; `recon` draws a
        # fresh posterior sample internally on the next line — confirm intended.
        my_sess.run(posterior_sample,feed_dict={input_data: x, sigma_corr:noise})
        samples.append(my_sess.run(recon,feed_dict={input_data: x, sigma_corr:noise}))
    samples=np.asarray(samples)
    return samples
def get_gmm_sample(num,x,mymask,noise,my_sess):
    """Draw `num` decoded samples from the Gaussian-mixture posterior approximation.

    `mymask` is accepted for signature symmetry with get_laplace_sample but is
    not fed to the graph here.
    """
    draws = [my_sess.run(gmm_recon, feed_dict={input_data: x, sigma_corr: noise})
             for _ in range(num)]
    return np.asarray(draws)
# + colab={} colab_type="code" id="SXhLJToHcp7b"
def plot_samples(samples, mask, title='samples', filename='samples'):
    # Plot up to 16 samples in a 4x4 grid and save to plot_path; for masked
    # corruption types, additionally plot the samples with the mask applied.
    plt.figure()
    plt.title(title)
    for i in range(min(len(samples),16)):
        subplot(4,4,i+1)
        imshow(np.reshape(samples[i,:],(28,28)),vmin=-0.2,vmax=1.2, cmap='gray')
        axis('off')
    plt.savefig(plot_path+filename+'.png',bbox_inches='tight')
    plt.show()
    if corr_type in ['mask', 'sparse mask', 'noise+mask']:
        plt.figure()
        plt.title('masked'+title)
        for i in range(min(len(samples),16)):
            subplot(4,4,i+1)
            imshow(np.reshape(samples[i,0,:,0]*mask,(28,28)),vmin=-0.2,vmax=1.2, cmap='gray')
            axis('off')
        plt.savefig(plot_path+filename+'masked.png',bbox_inches='tight')
        plt.show()
# + colab={} colab_type="code" id="LYAt6f7MQSpa"
def get_random_start_values(num, my_sess):
    """Sample `num` latent-space initialisations from the prior distribution."""
    return [my_sess.run(get_prior().sample()) for _ in range(num)]
# + colab={} colab_type="code" id="BaFIFXBnQ3o-"
def get_chi2(sigma,data,mean,masking=True, mask=None,threshold=0.02):
    """Chi-squared of reconstruction `mean` against `data` with noise map `sigma`.

    When `masking` is True, only pixels where mask == 1 are counted.
    Returns (chi2_tot, dof_tot, chi2_low, dof_low, chi2_high, dof_high, masking),
    where the low/high split (by pixel amplitude at `threshold`) is None for the
    pure-noise corruption types (module-level corr_type).
    """
    if masking:
        mask = np.reshape(mask, data.shape)
        data = data[np.where(mask == 1)]
        mean = mean[np.where(mask == 1)]
        sigma = sigma[np.where(mask == 1)]
    # FIX: removed dead locals (min/max of sigma were computed but never used).
    chi2_tot = np.sum((data - mean)**2 / sigma**2)
    dof_tot = len(np.squeeze(data))
    if corr_type not in ['noise', 'noise+mask']:
        # Split chi2 by pixel amplitude; index sets computed once instead of
        # re-evaluating np.where for every term.
        low_idx = np.where(data <= threshold)
        high_idx = np.where(data > threshold)
        chi2_low = np.sum((data[low_idx] - mean[low_idx])**2 / sigma[low_idx]**2)
        dof_low = len(np.squeeze(data[low_idx]))
        chi2_high = np.sum((data[high_idx] - mean[high_idx])**2 / sigma[high_idx]**2)
        dof_high = len(np.squeeze(data[high_idx]))
    else:
        chi2_low = None
        dof_low = None
        chi2_high = None
        dof_high = None
    return chi2_tot, dof_tot, chi2_low, dof_low, chi2_high, dof_high, masking
# + colab={} colab_type="code" id="yGtEbpIZ2vhx"
def plot_minima(minima, losses, var):
    # Two diagnostic plots: the loss of every found minimum, and the latent
    # coordinates of each minimum with error bars from the Laplace variance,
    # coloured by loss.
    plt.figure()
    plt.title('Minimization result')
    plt.plot(np.arange(len(losses)),losses,ls='',marker='o')
    plt.xlabel('# iteration')
    plt.ylabel('loss')
    plt.savefig(plot_path+'minimzation_results_%s.png'%(label),bbox_inches='tight')
    plt.show()
    # map each minimum's loss onto a colormap
    colors = matplotlib.colors.Normalize(vmin=min(losses), vmax=max(losses))
    cmap = matplotlib.cm.get_cmap('Spectral')
    var = np.squeeze(var)
    plt.figure()
    plt.title('value of hidden variables at minima')
    for ii in range(len(minima)):
        # error bars are one standard deviation of the Laplace posterior
        yerr_= np.sqrt(var[ii])
        plt.errorbar(np.arange(hidden_size),np.squeeze(minima)[ii], marker='o',ls='', c=cmap(colors(losses[ii])), mew=0, yerr=yerr_, label ='%d'%losses[ii])
    plt.legend(ncol=4, loc=(1.01,0))
    plt.xlabel('# hidden variable')
    plt.ylabel('value')
    plt.savefig(plot_path+'hidden_values_at_minima_%s.png'%(label),bbox_inches='tight')
    plt.show()
# + colab={} colab_type="code" id="80BR72DX58VQ"
def probe_posterior(minimum, x, noise, mymask, my_sess, filename=label):
    # Probe the negative log posterior along each latent direction around
    # `minimum` and compare it with the quadratic estimate from the exact
    # Hessian diagonal.
    # NOTE: the default filename binds the module-level `label` at def time.
    # NOTE(review): most calls below use the module-level `sess`, not the
    # `my_sess` argument — confirm this is intentional.
    _ = my_sess.run(MAP_reset,feed_dict={input_data: x, MAP_ini:minimum, sigma_corr:noise})
    _ = my_sess.run(update_mu,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
    _ = my_sess.run(update_TriL,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
    exact_hessian = sess.run(hessian,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
    approx_hessian= sess.run(GN_hessian,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
    ll0 = sess.run(loss_MAP,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
    plt.figure(figsize=(20,5))
    for nn in np.arange(hidden_size):
        H = exact_hessian[0,nn,nn]
        HGN = approx_hessian[0,nn,nn]
        losses=[]
        subplot(2,5,nn+1)
        title('latent space direction %d'%nn)
        # scan +/- Delta/2 around the minimum in `steps` points along axis nn
        Delta = 0.1
        steps = 1000
        delta_z = np.zeros((steps,hidden_size))
        delta_z[:,nn] = (np.arange(steps)-steps//2)*Delta/steps
        new_ini = delta_z+minimum
        for ii in range(steps):
            _ = sess.run(MAP_reset,feed_dict={input_data: x, mask:mymask, MAP_ini:np.expand_dims(new_ini[ii],axis=0), sigma_corr:noise})
            ll = sess.run(loss_MAP,feed_dict={input_data: x, mask:mymask, sigma_corr:noise})
            losses.append(ll)
        plt.plot(new_ini[:,nn],ll0+H*delta_z[:,nn]**2,label='estimate from exact Hessian')
        #plt.plot(new_ini[:,nn],ll0+HGN*delta_z[:,nn]**2,label='estimate from Gauss-Newton')
        plt.plot(new_ini[:,nn],losses,label='probed posterior', lw=2)
        plt.xlabel('z')
        plt.ylabel('negative log posterior')
        plt.ylim(min(losses),min(losses)+1.5)
    plt.tight_layout()
    plt.legend(loc=(1.02,1.7))
    plt.savefig(plot_path+'probing_posterior_%s.png'%(filename),bbox_inches='tight')
    plt.show()
# + colab={} colab_type="code" id="qrZDSLzEIKrn"
def get_gmm_parameters(minima, x, noise, mymask, offset):
    # Build the parameters of a Gaussian-mixture posterior: one Laplace
    # component per minimum, weighted following the El20 procedure (weights
    # from the component covariance log-determinant and the log posterior,
    # offset for numerical stability).
    # Returns (mu, sigma, w) shaped for the mixture variables in the graph.
    mu =[]
    w =[]
    sigma=[]
    print(len(minima), num_comp)
    for ii in range(num_comp):
        # do Laplace approximation around this minimum
        mu+=[minima[ii]]
        sess.run(MAP_reset,feed_dict={MAP_ini:minima[ii]})
        sigma+=[sess.run(update_TriL,feed_dict={input_data: x, sigma_corr:noise, mask: mymask})]
        # correct weighting of different minima according to El20 procedure, with samples at the maxima and well seperated maxima
        logdet = sess.run(tf.linalg.logdet(approx_posterior_laplace.covariance()),feed_dict={input_data: x, sigma_corr:noise, mask: mymask})
        logprob = sess.run(nlPost_MAP,feed_dict={input_data: x, sigma_corr:noise, mask: mymask})
        w+=[np.exp(0.5*logdet+logprob+offset)]
    print('weights of Gaussian mixtures:', w/np.sum(w))
    mu = np.reshape(np.asarray(mu),[1,num_comp,hidden_size])
    sigma = np.reshape(np.asarray(sigma),[1,num_comp,hidden_size,hidden_size])
    w = np.squeeze(np.asarray(w))
    return mu, sigma, w
# + colab={} colab_type="code" id="gQXNNSN7TecV"
def plot_prob_2D_GMM(samples, indices):
    # Corner (pairwise contour) plot of two latent dimensions, selected by the
    # pair `indices`, from drawn GMM posterior samples.
    samples = samples[:,0,:]
    samples = np.hstack((np.expand_dims(samples[:,indices[0]],-1),np.expand_dims(samples[:,indices[1]],-1)))
    figure=corner.corner(samples)
    axes = np.array(figure.axes).reshape((2, 2))
    axes[1,0].set_xlabel('latent space variable %d'%indices[0])
    axes[1,0].set_ylabel('latent space variable %d'%indices[1])
    plt.savefig(plot_path+'posterior_contour_GMM_%s_latent_space_dir_%d_%d.png'%(label,indices[0],indices[1]),bbox_inches='tight')
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 353} colab_type="code" executionInfo={"elapsed": 14338, "status": "ok", "timestamp": 1558592190485, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18062987068597777273"}, "user_tz": 420} id="yvTEYw44O_5q" outputId="31ee8455-c44d-493e-e04f-e1d42abffdf7"
tf.reset_default_graph()
# --- placeholders: noise map, pixel mask, input image, annealing temperature, learning rate ---
sigma_corr = tf.placeholder_with_default(np.ones([data_size,data_dim,n_channels], dtype='float32')*sigma_n,shape=[data_size,data_dim,n_channels])
mask = tf.placeholder_with_default(np.ones([data_dim], dtype='float32'),shape=[data_dim])
input_data = tf.placeholder(shape=[data_size,data_dim,n_channels], dtype=tf.float32)
inverse_T = tf.placeholder_with_default(1., shape=[])
lr = tf.placeholder_with_default(0.001,shape=[])
# --- frozen pre-trained hub modules (VAE encoder/decoder and realNVP flow) ---
encoder = hub.Module(encoder_path, trainable=False)
generator = hub.Module(generator_path, trainable=False)
nvp_funcs = hub.Module(nvp_func_path, trainable=False)
# --- MAP estimation: optimise the latent variable MAP against the log posterior ---
MAP_ini = tf.placeholder_with_default(tf.zeros([data_size,hidden_size]),shape=[data_size,hidden_size])
MAP = tf.Variable(MAP_ini)
MAP_reset = tf.stop_gradient(MAP.assign(MAP_ini))
nlPost_MAP = get_log_posterior(MAP, input_data, generator,nvp_funcs, sigma_corr,mask, inverse_T)
loss_MAP = -tf.reduce_mean(nlPost_MAP)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
opt_op_MAP = optimizer.minimize(loss_MAP, var_list=[MAP])
recon_MAP = get_recon(generator,nvp_funcs, MAP,sigma_corr,mask)
hessian = get_hessian(-nlPost_MAP,MAP)
GN_hessian = get_GN_hessian(generator,nvp_funcs,MAP,mask,sigma_corr)
# --- Laplace approximation around the MAP (lower-triangular Cholesky parameterisation) ---
ini_val = np.ones((data_size,(hidden_size *(hidden_size +1)) // 2),dtype=np.float32)
with tf.variable_scope("Laplace_Posterior",reuse=tf.AUTO_REUSE):
    mu_new = tf.Variable(np.ones((data_size,hidden_size),dtype=np.float32), dtype=np.float32)
    sigma_new_t = ini_val
    sigma_new_t2= tf.Variable(tfd.matrix_diag_transform(tfd.fill_triangular(sigma_new_t), transform=tf.nn.softplus),dtype=tf.float32)
    approx_posterior_laplace = tfd.MultivariateNormalTriL(loc=mu_new,scale_tril=sigma_new_t2)
    update_mu = mu_new.assign(MAP)
    covariance = compute_covariance(hessian)
    variance = tf.linalg.diag_part(covariance)[0]
    update_TriL = sigma_new_t2.assign(tf.linalg.cholesky(covariance))
    posterior_sample = approx_posterior_laplace.sample()
    recon = get_recon(generator,nvp_funcs, posterior_sample ,sigma_corr,mask)
# --- Gaussian-mixture posterior with num_comp Laplace components; weights kept
# --- positive via softplus and normalised to sum to one ---
ini_val2 = np.ones((data_size,num_comp,(hidden_size *(hidden_size +1)) // 2),dtype=np.float32)
with tf.variable_scope("corrupted/gmm",reuse=tf.AUTO_REUSE):
    mu_gmm = tf.Variable(np.ones((data_size,num_comp,hidden_size)), dtype=np.float32)
    sigma_gmm = tf.Variable(tfd.fill_triangular(ini_val2))
    w_gmm = tf.Variable(np.ones((num_comp))/num_comp, dtype=np.float32)
    sigma_gmmt = tfd.matrix_diag_transform(sigma_gmm, transform=tf.nn.softplus)
    w_positive = tf.math.softplus(w_gmm)
    w_rescaled = tf.squeeze(w_positive/tf.reduce_sum(w_positive))
    gmm = tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(probs=w_rescaled),components_distribution=tfd.MultivariateNormalTriL(loc=mu_gmm,scale_tril=sigma_gmmt))
    mu_ini = tf.placeholder_with_default(tf.zeros([data_size,num_comp,hidden_size]),shape=[data_size,num_comp,hidden_size])
    sigma_ini = tf.placeholder_with_default(tf.ones([data_size,num_comp,hidden_size, hidden_size]),shape=[data_size,num_comp,hidden_size, hidden_size])
    w_ini = tf.placeholder_with_default(tf.ones([num_comp])/num_comp,shape=[num_comp])
    # assignments go through the softplus inverse so the stored variables map
    # back to the requested values after the softplus transform
    update_w = tf.stop_gradient(w_gmm.assign(softplus_inverse(w_ini)))
    update_mugmm = tf.stop_gradient(mu_gmm.assign(mu_ini))
    update_TriLgmm= tf.stop_gradient(sigma_gmm.assign(tfd.matrix_diag_transform(sigma_ini, transform=softplus_inverse)))
    gmm_sample = gmm.sample()
    gmm_recon = get_recon(generator,nvp_funcs, gmm_sample ,sigma_corr,mask)
# + colab={"base_uri": "https://localhost:8080/", "height": 4721} colab_type="code" executionInfo={"elapsed": 131558, "status": "ok", "timestamp": 1558592307817, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18062987068597777273"}, "user_tz": 420} id="Soh1tnGH1FTW" outputId="48802731-2a32-48fc-fd12-071300e5c583"
if __name__ == "__main__":
    # End-to-end experiment: corrupt one MNIST digit, find posterior minima in
    # latent space, Laplace-approximate each, build a GMM posterior, and plot.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    truth = x_test[num_mnist:num_mnist+data_size]
    plot_image(truth, directory=plot_path, filename='truth_%s'%label, title='truth')
    data, custom_mask = make_corrupted_data(truth, corr_type=corr_type)
    plot_image(data, directory=plot_path, filename='input_data_%s'%label, title='data')
    plot_image(custom_mask, directory=plot_path, filename='mask_data_%s'%label, title='mask')
    noise = get_custom_noise(data.shape, signal_dependent=False, signal=truth)
    plot_image(noise, directory=plot_path, filename='noise_%s'%label, title='noise')
    tf.random.set_random_seed(seed)
    inits = get_random_start_values(10, sess)
    # Reuse cached minima when available; otherwise run the (slow) search.
    # FIX: the pickle handles were opened inline and never closed, and the
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        with open(minima_path+'minima_%s.pkl'%label, 'rb') as fh:
            minima, min_loss, min_var, recons = pickle.load(fh)
    except Exception:
        minima =[]
        min_loss=[]
        min_var =[]
        recons =[]
        for jj,init in enumerate(inits):
            print('progress in %', jj/len(inits)*100)
            min_z, min_l, pos_def = minimize_posterior(init, data,custom_mask,noise,sess)
            rec = sess.run(recon_MAP, feed_dict={sigma_corr:noise})
            var = sess.run(variance, feed_dict={input_data: data,mask:custom_mask,sigma_corr:noise})
            plot_image(rec, directory=plot_path, filename='recon_%s_minimum%d'%(label,jj), title='reconstruction with loss %.1f'%min_l)
            print(min_z)
            # keep only minima with positive-definite Hessian (true minima)
            if pos_def:
                print('hessian postive definite')
                minima.append(min_z)
                min_loss.append(min_l)
                min_var.append(var)
                recons.append(rec)
        # sort by loss so index 0 is the deepest minimum, then cache to disk
        order = np.argsort(min_loss)
        min_loss = np.asarray(min_loss)[order]
        minima = np.asarray(minima)[order]
        min_var = np.asarray(min_var)[order]
        with open(minima_path+'minima_%s.pkl'%label, 'wb') as fh:
            pickle.dump([minima, min_loss, min_var,recons], fh)
    plot_minima(minima, min_loss, min_var)
    chi2s = get_chi2(noise,data,recons[0],masking=True, mask=custom_mask)
    print('total chi2 of lowest minimum', '%.1f'%chi2s[0], 'on', '%.1f'%chi2s[1] ,'pixels')
    try:
        print('chi2 of lowest minimum, low pixel amplitude regions', '%.1f'%chi2s[2], 'on', '%.1f'%chi2s[3], 'pixels')
        print('chi2 of lowest minimum, high pixel amplitude regions', '%.1f'%chi2s[4], 'on', '%.1f'%chi2s[5], 'pixels')
    except TypeError:
        # the low/high chi2 split is None for pure-noise corruption types
        pass
    lowest_minimum = sess.run(MAP_reset, feed_dict={MAP_ini:minima[0]})
    rec = sess.run(recon_MAP, feed_dict={sigma_corr:noise})
    plot_image(rec, directory=plot_path, filename='lowest_minimum_%s'%(label), title='reconstruction', vmin=0, vmax=1)
    if corr_type in ['mask', 'sparse mask', 'noise+mask']:
        plot_image(rec, directory=plot_path, filename='lowest_minimum_%s_masked'%(label), title='masked reconstruction', vmin=0, vmax=1, mask = custom_mask)
    samples = get_laplace_sample(16,minima[0],data,custom_mask,noise,sess)
    plot_samples(samples, custom_mask, title='Samples from Laplace approximation', filename='samples_laplace_deepest_minimum_%s'%label)
    probe_posterior(minima[0], data, noise, custom_mask, sess)
    mu_, sigma_, w_ = get_gmm_parameters([minima[0],minima[4],minima[5]], data, noise, custom_mask, min_loss[0])
    _ = sess.run([update_w, update_mugmm,update_TriLgmm], feed_dict={mu_ini:mu_, w_ini:w_, sigma_ini:sigma_ })
    samples = get_gmm_sample(16,data,custom_mask,noise,sess)
    plot_samples(samples, custom_mask, title='GMM samples', filename='gmm_samples_%s'%label)
    more_samples = []
    for ii in range(10000):
        more_samples+=[sess.run(gmm_sample,feed_dict={input_data: data, sigma_corr:noise})]
    more_samples=np.asarray(more_samples)
    for indices in [[0,1],[1,2],[3,8]]:
        plot_prob_2D_GMM(more_samples, indices)
# + colab={} colab_type="code" id="CUUVYB2Lex8k"
# + colab={} colab_type="code" id="0ye6CFt1Ku0n"
| notebooks/ImageCorruptionMNIST-solidmask.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#https://jakevdp.github.io/blog/2017/12/05/installing-python-packages-from-jupyter/
#import sys
# #!conda install --yes --prefix {sys.prefix} r2pipe
import sys
# !/Applications/Cutter.app/Contents/Frameworks/Python.framework/Versions/3.6/bin/python3
#install --prefix=/Applications/Cutter.app/Contents/Frameworks/Python.framework/Versions/3.6/ r2pipe
#/A/C/C/F/P/V/3/l/python3.7 pip3 install --prefix=(pwd)/Frameworks/Python.framework/Versions/3.6/ r2pipe
# -
# Run radare2/Cutter's full analysis pass before extracting strings.
import cutter
cutter.cmd('aaa')
# +
import r2pipe
import json
import struct
import re
import base64
from pprint import pprint, pformat
# Patterns for common indicators of compromise found in binary strings:
# dotted-quad IPv4 addresses with an optional :port suffix,
IP_MATCHER = re.compile("(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?:[:]\d+)?)")
# http/https/ftp/file URLs (also bare www./ftp. hosts),
URL_MATCHER = re.compile('(?:(?:https?|ftp|file)://|www\.|ftp\.)[-A-Z0-9+&@#/%=~_|$?!:,.]*[A-Z0-9+&@#/%=~_|$]', re.IGNORECASE)
# and e-mail addresses (TLD limited to 2-4 letters).
EMAIL_MATCHER = re.compile('([A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4})', re.IGNORECASE)
def regex_matcher(matcher):
    """Adapt a compiled regex into a matcher callable returning every hit in a string."""
    def apply(st):
        return matcher.findall(st)
    return apply
def path_matcher(st):
    """Report the whole string as a match when it looks like an absolute *nix path."""
    return [st] if st.startswith('/') else []
def contains_matcher(s):
    """Build a matcher that reports the whole string when it contains substring s."""
    def apply(st):
        if s in st:
            return [st]
        return []
    return apply
# All matcher callables applied to every extracted string: IPs, URLs, e-mails,
# absolute paths, ANSI escape prefixes, and anything mentioning HTTP.
matchers = [regex_matcher(IP_MATCHER), regex_matcher(URL_MATCHER), regex_matcher(EMAIL_MATCHER), path_matcher, contains_matcher('\\e['), contains_matcher('HTTP')]
def print_s(s, r):
    """Print one radare2 string record (addresses, type, length, section) plus the matched artifact r."""
    fields = (s.get('paddr'), s.get('vaddr'), s.get('type'),
              s.get('length'), s.get('section'), r)
    print('0x{:08x} 0x{:08x} {:10} {:4} {:10} {}'.format(*fields))
# Pull all strings from the binary (izj = strings as JSON) and scan each one
# with every IoC matcher, printing any hits.
strings = json.loads(cutter.cmd('izj'))
for s in strings:
    try:
        # The string payload is base64-encoded; 'type' carries its text encoding.
        st = base64.b64decode(s.get('string')).decode(s.get('type'))
        for matcher in matchers:
            matches = matcher(st)
            for match in matches:
                print_s (s, match)
    except ValueError as e:
        # Undecodable base64 / bad bytes for the claimed encoding — report and skip.
        print(e)
        continue
    except LookupError as e:
        # Unknown codec name in the record's 'type' field — report and skip.
        print(e)
        continue
cutter.refresh()
| Extract Indicators of Compromise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 循环
# - 循环是一种控制语句块重复执行的结构
# - while 适用于广度遍历
# - for 开发中经常使用
# ## while 循环
# - 当一个条件保持真的时候while循环重复执行语句
# - while 循环一定要有结束条件,否则很容易进入死循环
# - while 循环的语法是:
#
# while loop-contunuation-conndition:
#
# Statement
# Lesson demo: a counter-driven while loop that prints the message ten times.
i = 0
while i<10:
    print('hahaha')
    i += 1
# ## 示例:
# sum = 0
#
# i = 1
#
# while i <10:
#
# sum = sum + i
# i = i + 1
# ## 错误示例:
# sum = 0
#
# i = 1
#
# while i <10:
#
# sum = sum + i
#
# i = i + 1
# - 一旦进入死循环可按 Ctrl + c 停止
# ## EP:
# 
# 
# # 验证码
# - 随机产生四个字母的验证码,如果正确,输出验证码正确。如果错误,产生新的验证码,用户重新输入。
# - 验证码只能输入三次,如果三次都错,返回“别爬了,我们小网站没什么好爬的”
# - 密码登录,如果三次错误,账号被锁定
#
# Build a 4-character verification code from random ASCII codes 65-122
# (A-Z and a-z); codes 91-96 are punctuation between the two letter ranges
# and are re-rolled instead of being appended.
import random
n = random.randint(65,122)
N = ""
i = 0
while 1:
    if 91<=n<=96:
        # Non-letter: draw again without consuming one of the 4 slots.
        n = random.randint(65,122)
    else:
        N += chr(n)
        n = random.randint(65,122)
        i += 1
        if i == 4:
            break
print(N)
# Count how many of 1000 random draws fall strictly between 0 and 0.001.
# Fix: the original cell was syntactically incomplete — `if 0<a<0.001` had
# neither a colon nor a body; completed with the counting statement the
# surrounding code implies.
# NOTE(review): randint(0, 1000)/1000 only yields exact multiples of 0.001,
# so 0 < a < 0.001 can never hold and count stays 0 — confirm the intended
# threshold or comparison.
count = 0
for i in range(1000):
    a = random.randint(0, 1000) / 1000
    if 0 < a < 0.001:
        count += 1
# ## 尝试死循环
# ## 实例研究:猜数字
# - 你将要编写一个能够随机生成一个0到10之间的且包括两者的数字程序,这个程序
# - 提示用户连续地输入数字直到正确,且提示用户输入的数字是过高还是过低
# ## 使用哨兵值来控制循环
# - 哨兵值来表明输入的结束
# - 
# ## 警告
# 
# ## for 循环
# - Python的for 循环通过一个序列中的每个值来进行迭代
# - range(a,b,k), a,b,k 必须为整数
# - a: start
# - b: end
# - k: step
# - 注意for 是循环一切可迭代对象,而不是只能使用range
# Lesson demo: for loops and the iterator protocol.
for i in range(100):
    print('Joker is a better man!')
# Plain int has no __iter__; the objects below all do.
a = 100
bb = 'JOker'
bb.__iter__()
c = [1,2,3]
# Accessing (not calling) __iter__ just shows the bound method — lists,
# dicts, tuples and sets are all iterable.
c.__iter__
{'key':'value'}.__iter__
(1,3,43).__iter__
{1,2,43}.__iter__
for i in range(5):
    print(i)
# # 在Python里面一切皆对象
# ## EP:
# - 
# Demo 1: keep adding 1, 2, 3, ... until the running total reaches 10000.
i = 1
sum_ = 0
while sum_ < 10000:
    sum_ += i
    i += 1
print(sum_)
# Demo 2: same computation with a for loop and an explicit break.
sum_ = 0
for i in range(1,10001):
    sum_ += i
    if sum_ > 10000:
        break
print(sum_)
# Demo 3: sum of 0..1000.
# NOTE(review): this shadows the built-in `sum` for the rest of the notebook.
sum = 0
i = 0
while i < 1001:
    sum = sum + i
    i += 1
print(sum)
# ## 嵌套循环
# - 一个循环可以嵌套另一个循环
# - 每次循环外层时,内层循环都会被刷新重新完成循环
# - 也就是说,大循环执行一次,小循环会全部执行一次
# - 注意:
# > - 多层循环非常耗时
# - 最多使用3层循环
# ## EP:
# - 使用多层循环完成9X9乘法表
# - 显示50以内所有的素数
# ## 关键字 break 和 continue
# - break 跳出循环,终止循环
# - continue 跳出此次循环,继续执行
# 9x9 multiplication table: each row prints the products up to the diagonal.
for row in range(1, 10):
    for col in range(1, row + 1):
        print(col, 'X', row, '=', row * col, end=' ')
    print()
# ## 注意
# 
# 
# # Homework
# - 1
# 
# Homework 1: read integers until 0, then report the sum of positives (zs),
# the sum of negatives (fs), the count and the average.
zs=0
fs=0
count = 0
num1=int(input('Enter an integer,the input ends if it is 0:'))
while num1!=0:
    # The loop keeps reading num2; the only exit is the break on num2 == 0.
    num2=int(input('Enter an integer,the input ends if it is 0:'))
    if num2 > 0:
        zs = zs + num2
        count += 1
    elif num2 < 0:
        fs = fs + num2
        count += 1
    else:
        break
# Fold the very first input into the totals as well.
if num1>0:
    zs=zs+num1
    count +=1
if num1<0:
    fs=fs+num1
    count +=1
# NOTE(review): if the first input is 0, count is 0 and this raises
# ZeroDivisionError; `//` also floors the average to an integer — confirm intent.
aver=(zs + fs)//count
print('正整数和为:%d'%zs)
print('负整数和为:%d'%fs)
print('输入个数为:%d'%count)
print('平均值为:%d'%aver)
# - 2
# 
# Tuition starts at 10000 and grows 5% per year; report the value after
# ten years and again after four further years of the same growth.
dorlla = 10000
for _ in range(10):
    dorlla = dorlla + dorlla * 0.05
print('十年后的学费为:%d'%dorlla)
for _ in range(4):
    dorlla = dorlla + dorlla * 0.05
print('大学四年的总学费为:%d'%dorlla)
# - 3
# 
# Homework 3: identical to the Homework 1 cell above (duplicated in the
# original notebook) — read integers until 0, then report positive sum,
# negative sum, count and average.
zs=0
fs=0
count = 0
num1=int(input('Enter an integer,the input ends if it is 0:'))
while num1!=0:
    num2=int(input('Enter an integer,the input ends if it is 0:'))
    if num2 > 0:
        zs = zs + num2
        count += 1
    elif num2 < 0:
        fs = fs + num2
        count += 1
    else:
        break
if num1>0:
    zs=zs+num1
    count +=1
if num1<0:
    fs=fs+num1
    count +=1
# NOTE(review): same ZeroDivisionError risk and floor-division caveat as above.
aver=(zs + fs)//count
print('正整数和为:%d'%zs)
print('负整数和为:%d'%fs)
print('输入个数为:%d'%count)
print('平均值为:%d'%aver)
# - 4
# 
def sum_(a, b):
    """Print numbers in [a, b) divisible by both 5 and 6, ten per line.

    Returns the count of such numbers — a backward-compatible addition
    (the original returned None) so the result can be verified.
    """
    count = 0
    for i in range(a, b):
        if i % 5 == 0 and i % 6 == 0:
            print(i, end=" ")
            count += 1
            # Break the line after every tenth number printed.
            if count % 10 == 0:
                print()
    return count
sum_(100,1001)
# - 5
# 
def sum_(n):
    """Print and return the smallest integer >= n whose square reaches 12000.

    Returning the value is a backward-compatible addition (was None).
    """
    while n**2 < 12000:
        n += 1
    print(n)
    return n
sum_(0)
def sum_1(m):
    """Search up from m; print and return the largest integer whose cube is below 12000.

    Returning the value is a backward-compatible addition (was None).
    """
    while m**3 < 12000:
        m += 1
    print(m-1)
    return m - 1
sum_1(0)
def sum_(a):
    """Print the smallest n with n**2 >= a, then the largest m with m**3 < a.

    Returns the pair (n, m) — a backward-compatible addition (was None)
    so the results can be verified programmatically.
    """
    # Smallest n whose square reaches a.
    n = 0
    while n ** 2 < a:
        n += 1
    print(n)
    # Largest m whose cube stays below a.
    m = 0
    while m ** 3 < a:
        m += 1
    print(m - 1)
    return n, m - 1
sum_(12000)
def sum_(a):
    """Print a monthly-interest table for `a` successive rates rising by 1/8 point."""
    # Principal, starting annual rate (percent), and years.
    Money = 10000
    lilv = 5
    nian = 5
    for i in range(a):
        # Monthly interest at the current rate, and its total over `nian` years.
        M = Money * lilv / 100
        T = 12 * M * nian
        print(lilv,'%','月利率',M,'总和',T)
        # Float equality is safe here only because 1/8 increments are exact
        # in binary — presumably a marker row at 5.25%; confirm intent.
        if lilv == 5.25:
            print('.....')
        lilv += 1/8
sum_(24)
# - 7
# 
def sum_(a, b, c):
    """Print and return the sum of reciprocals 1/i over range(a, b, c).

    Returning the value is a backward-compatible addition (was None).
    Summing from the largest term down (as the call below does) reduces
    floating-point error versus the ascending order.
    """
    res = 0
    for i in range(a, b, c):
        res += 1/i
    print(res)
    return res
sum_(50000,0,-1)
# - 8
# 
def sum_(a, b, c):
    """Print and return the sum of i/(i+2) over range(a, b, c).

    Returning the value is a backward-compatible addition (was None).
    """
    res = 0
    for i in range(a, b, c):
        res += i/ (i+2)
    print(res)
    return res
sum_(1,98,2)
# - 9
# 
def sum_(a, b):
    """Partial Leibniz series for pi: 4 * sum over i in [a, b) of (-1)**(i+1)/(2i-1).

    Prints the partial sum and returns it (backward-compatible addition;
    the original returned None).
    """
    res = 0
    for i in range(a, b):
        res += 4*((-1)**(i+1)/(2*i-1))
    print(res)
    return res
sum_(1,100000)
# - 10
# 
def sum_(a, b):
    """Print every perfect number below 10000 (numbers equal to the sum of their proper divisors)."""
    # NOTE(review): parameters a and b are never used — the range is
    # hard-coded to 1..9999, so sum_(1, 1000) still scans up to 9999.
    for i in range(1,10000):
        res = 0
        # Sum proper divisors of i (O(n) per number — quadratic overall).
        for j in range(1,i):
            if i % j == 0:
                res += j
        if i == res:
            print(i)
sum_(1,1000)
# - 11
# 
def sum_(a, b, c):
    """Print pairs (i, j) with i from range(0, b, c) and j in 2..7, skipping i == j."""
    # NOTE(review): parameter a is immediately overwritten with 0, so the
    # first argument of the call below has no effect — confirm intent.
    a = 0
    for i in range(a,b,c):
        for j in range(2,8):
            if i != j:
                print(i,j)
                # `a` is reused here as a pair counter; its value is never read.
                a += 1
sum_(1,8,2)
# - 12
# 
def math():
    """Read ten numbers and print their mean and sample standard deviation.

    NOTE(review): the function name shadows the `math` module at module
    level; it only works because the module is imported locally below.
    """
    import math
    print('enter ten numbers:')
    ls=[]
    sum1=0.0
    devi=0.0
    for i in range(10):
        # NOTE(review): eval() on raw input executes arbitrary code —
        # float(input('')) would be the safe equivalent here.
        num=eval(input(''))
        ls.append(num)
    part1=0.0
    part2=0.0
    for i in range(10):
        sum1=sum1+ls[i]
    mean=sum1/10
    for i in range(10):
        part1=part1+(ls[i]-mean)**2
    # Sample standard deviation: divide by n-1 = 9.
    devi=math.sqrt(part1/9)
    print('The mean is %.2f'%(mean))
    print('The standerd deviation %f'%(devi))
math()
| 7.19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Cognitive Hackathon: Week 4 - Teacher's Guide
# ## Overview
#
# The four hours of this course this week will break down into these sections, which can be done one per day:
#
# **[Day 1](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day1.ipynb)** Project Discussion and Conclusions
#
# **[Day 2](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day2.ipynb)** Data Presentation
#
# **[Day 3](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day3.ipynb)** Group Review on Data
#
# **[Day 4](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day4.ipynb)** Cognitive Hackathon Wrapup
#
#
# Here is detail of these Week 4 course days:
#
# ## [Day 1](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day1.ipynb)
# Teams present their project conclusions.
#
# ## [Day 2](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day2.ipynb)
# Complete the project presentations. Talk over the next two days about data, what it is, where it comes from, how we try to interpret it, and its challenges.
#
# ## [Day 3](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day3.ipynb)
# Conduct a Group Review and discussion around the project data, assumptions, and interpretation.
#
# ## [Day 4](https://sp19azureteachersguide-sguthals.notebooks.azure.com/j/notebooks/Week%204/Day4.ipynb)
# Cognitive Hackathon Wrapup. Students can activate more API keys for services they didn't try before to try them out just for fun.
#
#
#
#
#
| Teals/2019 Projects/Spring 2019 TEALS Project Teacher Guide/Week 4/README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Configuracion para recargar módulos y librerías
# %reload_ext autoreload
# %autoreload 2
# + slideshow={"slide_type": "skip"}
# Configuración para plots inline
# %matplotlib inline
# -
# # MAT281 - Laboratorio 9
#
# ## Aplicaciones de la Matemática en la Ingeniería
#
# Puedes ejecutar este jupyter notebook de manera interactiva:
#
# [](https://mybinder.org/v2/gh/sebastiandres/mat281_m04_data_science/master?filepath=/07_lab_clustering//07_lab_clustering.ipynb)
#
# [](https://colab.research.google.com/github/sebastiandres/mat281_m04_data_science/blob/master///07_lab_clustering//07_lab_clustering.ipynb)
# ## __Intrucciones__
#
# * Completa tus datos personales (nombre y rol USM).
# * Debes enviar este .ipynb con el siguiente formato de nombre: 08_lab_clasificacion_NOMBRE_APELLIDO.ipynb con tus respuestas a <EMAIL> y <EMAIL> .
# * Se evaluará:
# - Soluciones
# - Código
# - Al presionar `Kernel -> Restart Kernel and Run All Cells` deben ejecutarse todas las celdas sin error.
# - La escala es de 0 a 4 considerando solo valores enteros.
# * __La entrega es al final de esta clase.__
#
# __Nombre__:
#
# __Rol__:
# ## Observación
#
# ***Este laboratorio utiliza la librería sklearn (oficialmente llamada [scikit learn](http://scikit-learn.org/stable/)), puesto que buscamos aplicar la técnica del clustering a datos tal como se haría en una aplicación real. El código a proveer en este laboratorio es reducido, y la nota se basará mayoritariamente en la calidad de las respuestas entregadas en los comentarios.***
# ## Problema: Wine Dataset
#
# Los datos del [Wine Dataset](https://archive.ics.uci.edu/ml/datasets/Wine) son un conjunto de datos clásicos para verificar los algoritmos de clustering.
#
# <img src="images/wine.jpg" alt="" width="600px" align="middle"/>
#
# Los datos corresponden a 3 cultivos diferentes de vinos de la misma región de Italia, y que han sido identificados con las etiquetas 1, 2 y 3. Para cada tipo de vino han sido realizados 13 análisis químicos:
#
# 1. Alcohol
# 2. Malic acid
# 3. Ash
# 4. Alcalinity of ash
# 5. Magnesium
# 6. Total phenols
# 7. Flavanoids
# 8. Nonflavanoid phenols
# 9. Proanthocyanins
# 10. Color intensity
# 11. Hue
# 12. OD280/OD315 of diluted wines
# 13. Proline
#
#
# La base de datos contiene 178 muestras distintas en total.
# ## 0. Entendimiento del dataset
# Antes de leer los datos y aplicar algoritmos, resulta importante comprender la naturaleza de los datos. Los datos del wine dataset ya se encuentan en la carpeta `data/`.
#
# Existen 2 archivos de interés:
# * `wine_data.txt` : Datos de interés.
# * `wine_names.txt` : Explicación de los datos.
#
# Lea atentamente el archivo `wine_names.txt` y responda las preguntas.
# + language="bash"
# cat data/wine_names.txt
# -
# ## Pregunta 1.1
# ¿Que contiene el archivo? Describa su contenido de manera que una tercera persona, que no ha visto la descripción, pueda entenderlo.
#
# *R:*
# ## Pregunta 1.2
# ¿**Porqué** y **cómo** podemos usar este dataset para probar algoritmos de clustering cuando los datos han sido usados para algoritmos de clasificación?
#
# *R:*
#
# ## 1. Lectura de datos
# Antes de proceder, miremos algunas lineas del archivo a utilizar.
# + language="bash"
# head data/wine_data.txt
# -
# El siguiente código permite leer los datos desde el archivo `data/wine_data.txt` y cargarlos en un dataframe.
# +
# Load the Wine dataset: 13 chemical measurements plus a leading class label.
import pandas as pd
import os
# Column names for the 13 analyses, in file order.
names = ["alcohol", "malic_acid", "ash", "alcalinity_of_ash", "magnesium", "total_phenols",
         "flavanoids", "nonflavanoid_phenols", "proanthocyanins", "color_intensity",
         "hue", "OD280/OD315", "proline"]
# The first column in the file is the wine class (1, 2 or 3).
columns = ["wine_class"] + names
filename = os.path.join("data","wine_data.txt")
df = pd.read_csv(filename, names=columns, sep=",")
df.head()
# -
# ### Pregunta 2
# Complete la preparación de los datos, separando los datos en un dataframe `X` (datos a utilizar para clustering) y una serie `true_labels` (etiquetas verdaderas para cada dato de `X`).
#
#
# **OBSERVACION**: La serie `true_labels` debe modificarse para que sean 0, 1 y 2 (en vez de 1, 2 y 3 como vienen en el archivo), porque el algoritmo de clustering asume que las categorías se numeran desde 0.
# +
import numpy as np
# Select X (student exercise — placeholders are intentional)
X = df # FIX ME: which columns to take?
# Select true_labels; remember to shift the classes from 1..3 to 0..2.
true_labels = df # FIX ME: which column to take?
# -
# Check X
X.head()
# Check true_labels
true_labels.head()
# ### Pregunta 3
# Utilizando la serie `true_labels` definido anteriormente, complete el código para conocer cuántas muestras son de tipo 0, de tipo 1 y de tipo 2. Compare con lo indicado en el archivo.
# +
# It is very useful to know how to count values in a pandas Series,
# because it lets you answer this question in a single line.
vc = 0 # FIX ME
print(vc)
# -
# ## 2. Exploración de valores
#
# Antes de realizar el clustering, deseamos revisar los datos. El siguiente código permite conocer la distribución de las mediciones para las muestras.
# Plot one histogram per measurement on a 5x3 grid (15 axes, 13 used).
from matplotlib import pyplot as plt
rows, cols = 5, 3
fig1, axes1 = plt.subplots(rows, cols, figsize=(16,16))
for i in range(rows):
    for j in range(cols):
        # Flatten the (row, col) position into a column index.
        n = i*cols + j
        if n<13:
            ax = axes1[i][j]
            col_name = names[n]
            ax.hist(X[col_name], alpha=0.50)
            ax.set_title(col_name)
fig1.tight_layout()
plt.show()
# O, aprovechando las ventajas de usar la librería pandas,
# podemos utilizar los métodos nativos para obtener el mismo resultado con mucho menor esfuerzo.
# Same histogram grid, but letting pandas' Series.hist do the plotting.
from matplotlib import pyplot as plt
rows, cols = 5, 3
fig2, axes2 = plt.subplots(rows, cols, figsize=(16,16))
for n, col_name in enumerate(names):
    # Map the flat column index back onto the 5x3 axes grid.
    ax = axes2[n//cols][n%cols]
    df[col_name].hist(bins=12, alpha=0.50, ax=ax)
    ax.set_title(col_name)
plt.show()
# O, incluso como no son demasiadas variables, podemos graficar todas las relaciones con scatter_matrix.
# Paciencia - tomará 30 segundos.
# Pairwise scatter plots of every column against every other (slow: 14x14 panels).
from pandas.plotting import scatter_matrix
scatter_matrix(df, alpha=0.25, figsize=(20, 20), diagonal='hist');
# ## Pregunta 4
#
# En base a la exploración de valores, usted:
# 1. Aplicaría el algoritmo de clustering directamente.
# 2. Realizaría algún tipo de normalización a los datos, y luego aplicar el algoritmo de clustering.
#
# ¿Que resulta más razonable, opción 1 u opción 2? ¿Porqué?
#
# **Justifique su respuesta**: piense en cómo funciona K-Means.
#
# #### Respuesta
#
# *R:*
#
# ## 3. Clustering Directo
#
# A continuación se provee el código para realizar el clustering de los datos de manera directa (sin normalizar). Recuerde que el algoritmo hará predicción de clusters y no de etiquetas, por lo que la matriz de confusión no necesariamente será diagonal. Para la interpretación de la matriz de confusión, considere la [documentación](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html).
# +
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
# Parameters
n_clusters = 3
# Running the algorithm
kmeans = KMeans(n_clusters)
kmeans.fit(X)
pred_labels = kmeans.labels_
cm = confusion_matrix(true_labels, pred_labels)
print(cm)
# -
# ## Pregunta 5
#
# Ejecute el código anterior y comente los resultados. ¿Permite el clustering recobrar el agrupamiento natural de los datos? ¿Si no, porqué?
#
# #### Respuesta
#
# *R:*
#
#
# ## 3. Normalización de los datos
#
# Sabemos que los algoritmos suelen funcionar mejor con los datos normalizados, como se explicó en la clase de Regresión Lineal. Note que en el caso de los algoritmos de clustering, sólo es necesario normalizar la matrix `X`, ¡las etiquetas no necesitan normalizarse!
# ## Pregunta 6.1
# Normalice los datos utilizando para obtener una nueva matriz `X_normalized_1`, cuyas columnas tengan sus datos en el rango [0,1].
#
# **Observación**: Utilice sus conocimientos matemáticos y opere normalmente con `X.max()` y `X.min()`. La respuesta toma 1 línea.
# Min-max scale every column into [0, 1] (student exercise — one line
# using X.max() and X.min()).
X_normalized_1 = X # FIX ME
# Quick check with the describe method
X_normalized_1.describe().T
# ### Pregunta 6.2
# Reutilice el código anteriormente provisto para realizar el clustering en los datos normalizados y comente los resultados obtenidos. ¿Cuantos errores existen en total?
# +
# AGREGAR CODIGO PARA REALIZAR CLUSTERING EN X_normalized_1
# -
# Comentario a los resultados obtenidos.
#
# *R:*
#
# La clasificación es perfecta para la etiquetas originales 1 y 3 (0 y 2 después de re-etiquetar). Se obtiene una clasificación con 8 errores del total de 71 para la etiqueta 2 (1 depués de re-etiquetar). La normalización mejora muchísimo el desempeño del algoritmo de clustering.
# ## Nueva normalización de datos
#
# Como usted ya posee cierta experiencia en ajustar modelos, se pregunta si resultará mejor normalizar considerando ahora que cada columna posea media $0$ y desviación estándar $1$ para cada una de sus columnas.
#
# ### Pregunta 7.1
# Estandarice los datos para obtener una nueva matriz `X_normalized_2`, de manera que `X_normalized_2` posea media 0 y desviación estándar 1 para cada una de sus columnas.
# Standardize each column to zero mean / unit standard deviation
# (student exercise — the placeholder assignment is intentional).
# Fix: the template assigned `X_mod_2`, but the check below (and the
# later clustering cell) reads `X_normalized_2`, so the notebook raised
# NameError before students even edited it. Use one consistent name.
X_normalized_2 = X # FIX ME
# Quick check with the describe method
X_normalized_2.describe().T
# ### Pregunta 7.2
# Reutilice el código anteriormente provisto para realizar el clustering en los datos estandarizados y comente los resultados obtenidos. ¿Cuantos errores existen en total?
# +
# AGREGAR CODIGO PARA REALIZAR CLUSTERING EN X_normalized_2
# -
# Comentario a los resultados obtenidos.
#
# *R:*
# ### Pregunta 8
#
# ¿Cuál de las 3 versiones aplicadas de clustering funcionó mejor? ¿Porqué cree que sea así?
#
# *R:*
#
# ## Bonus Track: Regla del codo
# En todos los casos hemos utilizado que el número de clusters es igual a 3. El ajuste del modelo siempre será mejor al aumentar el número de clusters, pero ello no significa que el número de clusters sea el apropiado. De hecho, si tenemos que ajustar $n$ puntos, claramente tomar $n$ clusters generaría un ajuste perfecto, pero no permitiría representar si existen realmente agrupaciones de datos.
#
# Cuando no se conoce el número de clusters a priori, se utiliza la [regla del codo](https://jarroba.com/seleccion-del-numero-optimo-clusters/), que indica que el número más apropiado es aquel donde "cambia la pendiente" de decrecimiento de la la suma de las distancias a los clusters para cada punto, en función del número de clusters.
#
# A continuación se provee el código para el caso de clustering sobre los datos estandarizados, leídos directamente de un archivo preparado especialmente.
#
# +
from sklearn.cluster import KMeans
X_mod = np.loadtxt("data/X_estandarized.txt")
clusters = range(1,20)
total_distance = []
for n_clusters in clusters:
kmeans = KMeans(n_clusters)
kmeans.fit(X_mod)
pred_labels = kmeans.labels_
centroids = kmeans.cluster_centers_
# Get the distances
distance_for_n = 0
for k in range(n_clusters):
points = X_mod[pred_labels==k]
aux = (points - centroids[k,:])**2
distance_for_n += (aux.sum(axis=1)**0.5).sum()
total_distance.append(distance_for_n)
# -
# Plot total distance versus number of clusters; the "elbow" suggests k.
fig = plt.figure(figsize=(16,8))
plt.plot(clusters, total_distance, 'rs')
plt.xlim(min(clusters)-1, max(clusters)+1)
plt.show()
| 07_lab_clustering/07_lab_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://www.techtrekking.com/getting-started-with-time-series-forecasting-with-prophet/
#
# Using fb prophet for time-series forecasting
# Importing datasets
import pandas as pd
import numpy as np
from fbprophet import Prophet
# Read train and test
from IPython.core.display import display, HTML
# Widen the notebook container for easier viewing.
display(HTML("<style>.container { width:100% !important; }</style>"))
visitors = pd.read_csv("ppl_visit.csv" )
visitors.info()
# NOTE(review): this conversion result is discarded — assigning it back
# to visitors['ds'] was probably intended.
pd.to_datetime(visitors['ds'], format='%Y-%m-%d')
visitors
# Fit Prophet on the visitors frame (expects 'ds' and 'y' columns).
m = Prophet()
m.fit(visitors)
print("type of m" , type(m))
### Extending data to few future dates
future = m.make_future_dataframe(periods=60)
print("type of future" , type(future))
### The predict method will assign each row in future a predicted value which it names yhat
forecast = m.predict(future)
# Fix: the label says "forecast" but the original printed type(future);
# print the type of the object the message actually refers to.
print("type of forecast" , type(forecast))
# print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
### Plotting forecast
fig1 = m.plot(forecast)
# fig1.show()
fig1.savefig('01_fbprophet_getting_started-01.png')
### Plotting forecast components
fig2 = m.plot_components(forecast)
fig2.savefig('01_fbprophet_getting_started-02.png')
### Saving output excel
# NOTE(review): despite the comment, this writes CSV, not Excel.
forecast.to_csv('example_2.csv', sep=',')
print('*** Program Completed ***')
by_date.head()
# I think I want to group by date, time and STORE/OTHER time for dwell00
by_date_dwell00 = cust_journey.groupby(['dateok'], as_index = False)[['dwell00']].sum()
# even adding the list of lists did not yield df
#by_hour = cust_journey.groupby('hour').mean()
type(by_date_dwell00)
by_date_dwell00['date'] = by_date_dwell00.dateok.dt.date
# NOTE(review): 'y' is set to the date here, not to dwell00 — Prophet
# expects 'y' to be the numeric target; confirm intent.
by_date_dwell00['y'] = by_date_dwell00.dateok.dt.date
by_date_dwell00.drop('dateok', axis=1, inplace=True)
reorder_by_date_dwell00 = by_date_dwell00[['date', 'dwell00']]
reorder_by_date_dwell00
# NOTE(review): rename() returns a new frame; this result is discarded,
# so the ds/y columns never materialize on reorder_by_date_dwell00.
reorder_by_date_dwell00.rename(columns={'date': 'ds', 'dwell00': 'y'})
df = reorder_by_date_dwell00.reset_index().rename(columns={'ds':'ds', 'y':'y'})
df.index.dtype
# let's start with prophet now
m = Prophet()
m.fit(df)
by_date.to_csv("by_date.csv", index = True)
# extract just two cols for now and reset the date as it was the index
by_date_dwell00 = by_date[['dwell00']]
by_date_dwell00.reset_index(inplace=True)
# NOTE(review): another un-assigned rename — its result is thrown away.
by_date_dwell00.rename(columns={'Date': 'DS', 'dwell00': 'Y'})
df = pd.DataFrame([by_date_dwell00])
# Fix: DataFrame.dtypes is a property, not a method — `df.dtypes()` raised
# TypeError ("'Series' object is not callable").
df.dtypes
# transpose experiment - doesn't help for prophet
by_dwell = by_date.transpose()
by_dwell
# Parse the raw Date strings into real timestamps for grouping.
cust_journey['dateok'] = pd.to_datetime(cust_journey.Date,format='%Y-%m-%d %H:%M:%S')
cust_journey['hour'] = cust_journey.hod
# Calculate average hourly fraction
# Per-hour means normalized by their grand total, so the fractions sum to 1.
hourly_frac = cust_journey.groupby(['hour']).mean()/np.sum(cust_journey.groupby(['hour']).mean())
cust_journey.head()
hourly_frac[10:]
| bayes/fb_prophet/FB_prophet_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1.4. Creating an IPython extension with custom magic commands
from IPython.core.magic import (register_line_magic,
register_cell_magic)
@register_line_magic
def hello(line):
    """Line magic `%hello`: greet in French when the argument is 'french', else in English."""
    greeting = "Salut tout le monde!" if line == 'french' else "Hello world!"
    print(greeting)
# %hello
# %hello french
# +
import pandas as pd
from io import StringIO
@register_cell_magic
def csv(line, cell):
    """Cell magic `%%csv`: parse the cell body as CSV text and return it as a DataFrame."""
    # Wrap the cell contents in a file-like buffer so pandas can read it.
    buffer = StringIO(cell)
    return pd.read_csv(buffer)
# + podoc={"output_text": "Output"}
# %%csv
col1,col2,col3
0,1,2
3,4,5
7,8,9
# + podoc={"output_text": "Output"}
df = _
df.describe()
# +
# %%writefile csvmagic.py
import pandas as pd
from io import StringIO
def csv(line, cell):
sio = StringIO(cell)
return pd.read_csv(sio)
def load_ipython_extension(ipython):
    """Entry point invoked by `%load_ext csvmagic`.

    Receives the active IPython InteractiveShell instance and registers
    the module-level `csv` function as a cell magic via the shell's
    `register_magic_function` method.
    """
    ipython.register_magic_function(csv, 'cell')
# -
# %load_ext csvmagic
# + podoc={"output_text": "Output"}
# %%csv
col1,col2,col3
0,1,2
3,4,5
7,8,9
# -
# ## Cleanup
# !rm -f csvmagic.py
| 001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter01_basic/04_magic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow2_p36
# language: python
# name: conda_tensorflow2_p36
# ---
# # Sound anomaly detection
# *Context*
#
# ## Introduction
# ---
# Industrial companies have been collecting a massive amount of time series data about their operating processes, manufacturing production lines and industrial equipment. They sometime store years of data in historian systems or in their factory information system at large. Whereas they are looking to prevent equipment breakdown that would stop a production line, avoid catastrophic failures in a power generation facility or improving their end product quality by adjusting their process parameters, having the ability to process time series data is a challenge that modern cloud technologies are up to. However, everything is not about cloud itself: your factory edge capability must allow you to stream the appropriate data to the cloud (bandwidth, connectivity, protocol compatibility, putting data in context...).
#
# What if we had a frugal way to qualify your equipment health with little data? This would definitely help in leveraging robust and easier-to-maintain edge-to-cloud blueprints. In this post, we are going to focus on a tactical approach industrial companies can use to help them reduce the impact of machine breakdowns by reducing how unpredictable they are.
#
# Most times, machine failures are tackled by either reactive action (stop the line and repair...) or costly preventive maintenance where you have to build the proper replacement parts inventory and schedule regular maintenance activities. Skilled machine operators are the most valuable assets in such settings: years of experience allow them to develop a fine knowledge of how the machinery should operate, they become expert listeners and can detect unusual behavior and sounds in rotating and moving machines. However, production lines are becoming more and more automated, and augmenting these machine operators with AI-generated insights is a way to maintain and develop the fine expertise needed to prevent reactive-only postures when dealing with machine breakdowns.
#
# In this post we are going to compare and contrast two different approaches to identify a malfunctioning machine, providing we have sound recordings from its operation: we will start by building a neural network based on an autoencoder architecture and we will then use an image-based approach where we will feed images of sound (namely spectrograms) to an image based automated ML classification feature.
# ## Solution overview
# ---
# In this example, we are going to use sounds recorded in an industrial environment to perform anomaly detection on industrial equipment.
#
# To achieve this, we are going to explore and leverage the MIMII dataset for anomaly detection purpose: this is a sound dataset for **M**alfunctioning **I**ndustrial **M**achine **I**nvestigation and **I**nspection (MIMII). You can download it from **https://zenodo.org/record/3384388**: it contains sounds from several types of industrial machines (valves, pumps, fans and slide rails). In this example, we are going to focus on the **fans**. **[This paper](https://arxiv.org/abs/1909.09347)** describes the sound capture procedure.
#
# We walk you through the following steps using Jupyter notebooks provided with this blog post:
#
# 1. The first one will focus on *data exploration* to get familiar with sound data: these data are particular time series data and exploring them requires specific approaches.
# 2. We will then use Amazon SageMaker to *build an* *autoencoder* that will be used as a classifier able to discriminate between normal and abnormal sounds.
# 3. Last, we are going to take on a more novel approach in the last part of this work: we are going to *transform the sound files into spectrogram images* and feed them directly to an *image classifier*. We will use Amazon Rekognition Custom Labels to perform this classification task and leverage Amazon SageMaker for the data preprocessing and to drive the Custom Labels training and evaluation process.
#
# Both approaches require an equal amount of effort to complete: although the models obtained in the end are not comparable, this will give you an idea of how much of a kick start you may get when using an applied AI service.
# # Introducing the machine sound dataset
# ---
# You can follow this data exploration work with the first companion notebook from **[this repository](https://github.com/michaelhoarau/sound-anomaly-detection)**. Each recording contains 8 channels, one for each microphone that was used to record a given machine sound. In this experiment, we will only focus on the recordings of the first microphone. The first thing we can do is to plot the waveforms of a normal and abnormal signals next to each other:
#
# 
#
# Each signal is 10 seconds long and apart from the larger amplitude of the abnormal signal and some pattern that are more irregular, it’s difficult to distinguish between these two signals. In the companion notebook, you will also be able to listen to some of the sounds: most of the time, the differences are small, especially if you put them in a context of a very noisy environment.
#
# A first approach could be to leverage the **[Fourier transform](https://en.wikipedia.org/wiki/Fourier_transform)**, which is a mathematical operator that decompose a function of time (or a signal) into its underlying frequencies. The Fourier transform is a function of frequency and its amplitude represents how much of a given frequency is present in the original signal. However, a sound signal is highly non-stationary (i.e. their statistics change over time). For a given time period, the frequency decomposition will be different from another time period. As a consequence, it will be rather meaningless to compute a single Fourier transform over the entire signal (however short they are in our case). We will need to call the short-time Fourier transform (STFT) for help: the STFT is obtained by computing the Fourier transform for successive frames in a signal.
#
# If we plot the amplitude of each frequency present in the first 64 ms of the first signal of both the normal and abnormal dataset, we obtain the following plot:
#
# 
#
# We now have a tool to discretize our time signals into the frequency domain which brings us one step closer to be able to visualize them in this domain. For each signal we will now:
#
# 1. Slice the signal in successive time frames
# 2. Compute an STFT for each time frame
# 3. Extract the amplitude of each frequency as a function of time
# 4. Most sounds we can hear as humans, are concentrated in a very small range (**both** in frequency and amplitude range). The next step is then to take a log scale for both the frequency and the amplitude: for the amplitude, we obtain this by converting the color axis to Decibels (which is the equivalent of applying a log scale to the sound amplitudes)
# 5. Plot the result on a spectrogram: a spectrogram has three dimensions: we keep time on the horizontal axis, put frequency on the vertical axis and use the amplitude to a color axis (in dB).
#
# The picture below shows the frequency representation of the signals plotted earlier:
#
# 
#
# We can now see that these images have interesting features that we can easily uncover with our naked eyes: this is exactly the kind of features that a neural network can try to uncover and structure. We will now build two types of feature extractor based on this analysis and feed them to different type of architectures.
# ## Building a custom autoencoder architecture
# ---
# The **[autoencoder architecture](https://en.wikipedia.org/wiki/Autoencoder)** is a neural network with the same number of neurons in the input and the output layers. This kind of architecture learns to generate the “identity” transformation between inputs and outputs. The second notebook of our series will go through these different steps:
#
# 1. Build the dataset: to feed the spectrogram to an autoencoder, we will build a tabular dataset and upload it to Amazon S3.
# 2. Create a TensorFlow autoencoder model, train it in script mode by using the TensorFlow / Keras existing container
# 3. Evaluate the model to obtain a confusion matrix highlighting the classification performance between normal and abnormal sounds.
#
# ### Build a dataset
# We are using the **[librosa library](https://librosa.org/doc/latest/index.html)** which is a python package for audio analysis. A features extraction function based on steps to generate the spectrogram described earlier is central to the dataset generation process.
#
# ```python
# def extract_signal_features(signal, sr, n_mels=64, frames=5, n_fft=1024, hop_length=512):
# # Compute a spectrogram (using Mel scale):
# mel_spectrogram = librosa.feature.melspectrogram(
# y=signal,
# sr=sr,
# n_fft=n_fft,
# hop_length=hop_length,
# n_mels=n_mels
# )
#
# # Convert to decibel (log scale for amplitude):
# log_mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
#
# # Generate an array of vectors as features for the current signal:
# features_vector_size = log_mel_spectrogram.shape[1] - frames + 1
#
# # Build N sliding windows (=frames) and concatenate
# # them to build a feature vector:
# features = np.zeros((features_vector_size, dims), np.float32)
# for t in range(frames):
# features[:, n_mels*t:n_mels*(t+1)] = log_mel_spectrogram[:, t:t+features_vector_size].T
#
# return features
# ```
#
# Note that we will train our autoencoder only on the normal signals: our model will learn how to reconstruct these signals (“learning the identity transformation”). The main idea is to leverage this for classification later; when we feed this trained model with abnormal sounds, the reconstruction error will be a lot higher than when trying to reconstruct normal sounds. Using an error threshold, we will then be able to discriminate abnormal and normal sounds.
#
# ### Create the autoencoder
# To build our autoencoder, we use Keras and assemble a simple autoencoder architecture with 3 hidden layers:
#
# ```python
# from tensorflow.keras import Input
# from tensorflow.keras.models import Model
# from tensorflow.keras.layers import Dense
#
# def autoencoder_model(input_dims):
# inputLayer = Input(shape=(input_dims,))
# h = Dense(64, activation="relu")(inputLayer)
# h = Dense(64, activation="relu")(h)
# h = Dense(8, activation="relu")(h)
# h = Dense(64, activation="relu")(h)
# h = Dense(64, activation="relu")(h)
# h = Dense(input_dims, activation=None)(h)
#
# return Model(inputs=inputLayer, outputs=h)
# ```
#
# We put this in a training script (model.py) and use the SageMaker TensorFlow estimator to configure our training job and launch the training:
#
# ```python
# tf_estimator = TensorFlow(
# base_job_name='sound-anomaly',
# entry_point='model.py',
# source_dir='./autoencoder/',
# role=role,
# instance_count=1,
# instance_type='ml.p3.2xlarge',
# framework_version='2.2',
# py_version='py37',
# hyperparameters={
# 'epochs': 30,
# 'batch-size': 512,
# 'learning-rate': 1e-3,
# 'n_mels': n_mels,
# 'frame': frames
# },
# debugger_hook_config=False
# )
#
# tf_estimator.fit({'training': training_input_path})
# ```
#
# Training over 30 epochs will take few minutes on a p3.2xlarge instance: at this stage, this will cost you a few cents. If you plan to use a similar approach on the whole MIMII dataset or use hyperparameter tuning, you can even further reduce this training cost by using Spot Training (check out **[this sample](https://github.com/aws-samples/amazon-sagemaker-managed-spot-training)** on how you can leverage Managed Training Spot and get a 70% discount in the process).
#
# ### Evaluate the model
# Let’s now deploy the autoencoder behind a SageMaker endpoint: this operation will create a SageMaker endpoint and will continue to cost you as long as you leave it running. Do not forget to shut it down at the end of this experiment!
#
# ```python
# tf_endpoint_name = 'sound-anomaly-'+time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
# tf_predictor = tf_estimator.deploy(
# initial_instance_count=1,
# instance_type='ml.c5.large',
# endpoint_name=tf_endpoint_name
# )
# print(f'Endpoint name: {tf_predictor.endpoint_name}')
# ```
#
# Our test dataset has an equal share of normal and abnormal sounds. We will loop through this dataset and send each test file to this endpoint. As our model is an autoencoder, we will evaluate how good the model is at reconstructing the input. The higher the reconstruction error, the greater the chance that we have identified an anomaly:
#
# ```python
# y_true = test_labels
# reconstruction_errors = []
#
# for index, eval_filename in tqdm(enumerate(test_files), total=len(test_files)):
# # Load signal
# signal, sr = sound_tools.load_sound_file(eval_filename)
#
# # Extract features from this signal:
# eval_features = sound_tools.extract_signal_features(
# signal,
# sr,
# n_mels=n_mels,
# frames=frames,
# n_fft=n_fft,
# hop_length=hop_length
# )
#
# # Get predictions from our autoencoder:
# prediction = tf_predictor.predict(eval_features)['predictions']
#
# # Estimate the reconstruction error:
# mse = np.mean(np.mean(np.square(eval_features - prediction), axis=1))
# reconstruction_errors.append(mse)
# ```
#
# In the plot below, we can see that the distribution of reconstruction error for normal and abnormal signals differs significantly. The overlap between these histograms means we have to compromise:
#
# 
#
# Let's explore the recall-precision tradeoff for a reconstruction error threshold varying between 5.0 and 10.0 (this encompasses most of the overlap we can see above). First, let's visualize how this threshold range separates our signals on a scatter plot of all the testing samples:
#
# 
#
# If we plot the number of samples flagged as false positives and false negatives we can see that the best compromise is to use a threshold set around 6.3 for the reconstruction error (assuming we are not looking at minimizing either the false positive or false negatives occurrences):
#
# 
#
# For this threshold (6.3), we obtain the confusion matrix below:
#
# 
#
# The metrics associated to this matrix are the following:
#
# * Precision: 92.1%
# * Recall: 92.1%
# * Accuracy: 88.5%
# * F1 Score: 92.1%
#
# ### Cleanup
# Let’s not forget to delete our Endpoint to prevent any cost to continue incurring by using the **delete_endpoint()** API.
#
# ### Autoencoder improvement and further exploration
#
# The spectrogram approach requires defining the spectrogram square dimensions (e.g. the number of Mel cell defined in the data exploration notebook) which is a heuristic. In contrast, deep learning networks with a CNN encoder can learn the best representation to perform the task at hands (anomaly detection). Further steps to investigate to improve on this first result could be:
#
# * Experimenting with several more or less complex autoencoder architectures, training for a longer time, performing hyperparameter tuning with different optimizer, tuning the data preparation sequence (e.g. sound discretization parameters), etc.
# * Leveraging high resolution spectrograms and feeding them to a CNN encoder to uncover the most appropriate representation of the sound.
# * Using end-to-end model architecture with encoder-decoder that have been known to give good results on waveform datasets.
# * Using deep learning models with multi-context temporal and channel (8 microphones) attention weights .
# * Experimenting with time distributed 2D convolution layers can be used to encode features across the 8 channels: these encoded features could then be fed as sequences across time steps to an LSTM or GRU layer. From there, multiplicative sequence attention weights can then be learnt on the output sequence from the RNN layer.
# * Exploring the appropriate image representation for multi-variate time series signal that are not waveform: replacing spectrograms with Markov transition fields, recurrence plots or network graphs could then be used to achieve the same goals for non-sound time-based signals.
# ## Using Amazon Rekognition Custom Labels
# ---
# ### Build a dataset
# Previously, we had to train our autoencoder on only normal signals. In this case, we will build a more traditional split of training, and testing dataset. Based on the fans sound database this will yield:
#
# * **4440 signals** for the training dataset, including:
# * 3260 normal signals
# * 1180 abnormal signals
#
# * **1110 signals** for the testing dataset including:
# * 815 normal signals
# * 295 abnormal signals
#
# We will generate and store the spectrogram of each signal and upload them in either a train or test bucket.
#
# ### Create a Rekognition Custom Labels
#
# The first step is to create a Custom Labels project:
#
# ```python
# # Initialization, get a Rekognition client:
# PROJECT_NAME = 'sound-anomaly-detection'
# reko = boto3.client("rekognition")
#
# # Let's try to create a Rekognition project:
# try:
# project_arn = reko.create_project(ProjectName=PROJECT_NAME)['ProjectArn']
#
# # If the project already exists, we get its ARN:
# except reko.exceptions.ResourceInUseException:
# # List all the existing project:
# print('Project already exists, collecting the ARN.')
# reko_project_list = reko.describe_projects()
#
# # Loop through all the Rekognition projects:
# for project in reko_project_list['ProjectDescriptions']:
# # Get the project name (the string after the first delimiter in the ARN)
# project_name = project['ProjectArn'].split('/')[1]
#
# # Once we find it, we store the ARN and break out of the loop:
# if (project_name == PROJECT_NAME):
# project_arn = project['ProjectArn']
# break
#
# print(project_arn)
# ```
#
# We need to tell Amazon Rekognition where to find the training data, testing data and where to output its results:
#
# ```python
# TrainingData = {
# 'Assets': [{
# 'GroundTruthManifest': {
# 'S3Object': {
# 'Bucket': <YOUR-BUCKET-NAME>,
# 'Name': f'{<YOUR-PREFIX-NAME>}/manifests/train.manifest'
# }
# }
# }]
# }
#
# TestingData = {
# 'AutoCreate': True
# }
#
# OutputConfig = {
# 'S3Bucket': <YOUR-BUCKET-NAME>,
# 'S3KeyPrefix': f'{<YOUR-PREFIX-NAME>}/output'
# }
# ```
#
# Now we can create a project version: creating a project version will build and train a model within this Rekognition project for the data previously configured. Project creation can fail, if the bucket you selected cannot be accessed by Rekognition. Make sure the right Bucket Policy is applied to your bucket (check the notebooks to see the recommended policy).
#
# Let’s now create a project version: this will launch a new model training and you will then have to wait for the model to be trained. This should take around 1 hour (less than $1 from a cost perspective):
#
# ```python
# version = 'experiment-1'
# VERSION_NAME = f'{PROJECT_NAME}.{version}'
#
# # Let's try to create a new project version in the current project:
# try:
# project_version_arn = reko.create_project_version(
# ProjectArn=project_arn, # Project ARN
# VersionName=VERSION_NAME, # Name of this version
# OutputConfig=OutputConfig, # S3 location for the output artefact
# TrainingData=TrainingData, # S3 location of the manifest describing the training data
# TestingData=TestingData # S3 location of the manifest describing the validation data
# )['ProjectVersionArn']
#
# # If a project version with this name already exists, we get its ARN:
# except reko.exceptions.ResourceInUseException:
# # List all the project versions (=models) for this project:
# print('Project version already exists, collecting the ARN:', end=' ')
# reko_project_versions_list = reko.describe_project_versions(ProjectArn=project_arn)
#
# # Loops through them:
# for project_version in reko_project_versions_list['ProjectVersionDescriptions']:
# # Get the project version name (the string after the third delimiter in the ARN)
# project_version_name = project_version['ProjectVersionArn'].split('/')[3]
#
# # Once we find it, we store the ARN and break out of the loop:
# if (project_version_name == VERSION_NAME):
# project_version_arn = project_version['ProjectVersionArn']
# break
#
# print(project_version_arn)
# status = reko.describe_project_versions(
# ProjectArn=project_arn,
# VersionNames=[project_version_arn.split('/')[3]]
# )['ProjectVersionDescriptions'][0]['Status']
# ```
#
# ### Evaluate the model
#
# First, we will deploy our model by using the ARN collected before: again, this will deploy an endpoint that will cost you around $4 per hour. Don’t forget to decommission it once you’re done!
#
# ```python
# # Start the model
# print('Starting model: ' + model_arn)
# response = client.start_project_version(ProjectVersionArn=model_arn, MinInferenceUnits=min_inference_units)
#
# # Wait for the model to be in the running state:
# project_version_running_waiter = client.get_waiter('project_version_running')
# project_version_running_waiter.wait(ProjectArn=project_arn, VersionNames=[version_name])
#
# # Get the running status
# describe_response=client.describe_project_versions(ProjectArn=project_arn, VersionNames=[version_name])
# for model in describe_response['ProjectVersionDescriptions']:
# print("Status: " + model['Status'])
# print("Message: " + model['StatusMessage'])
# ```
#
# Once the model is running you can start querying it for predictions: in the notebook, you will find a function *get_results()* that will query a given model with a list of pictures sitting in a given path. This will take a few minutes to run all the test samples and will cost less than $1 (for the ~3,000 test samples):
#
# ```python
# predictions_ok = rt.get_results(project_version_arn, BUCKET, s3_path=f'{BUCKET}/{PREFIX}/test/normal', label='normal', verbose=True)
# predictions_ko = rt.get_results(project_version_arn, BUCKET, s3_path=f'{BUCKET}/{PREFIX}/test/abnormal', label='abnormal', verbose=True)
#
# def get_results(project_version_arn, bucket, s3_path, label=None, verbose=True):
# """
# Sends a list of pictures located in an S3 path to
# the endpoint to get the associated predictions.
# """
#
# fs = s3fs.S3FileSystem()
# data = {}
# predictions = pd.DataFrame(columns=['image', 'normal', 'abnormal'])
#
# for file in fs.ls(path=s3_path, detail=True, refresh=True):
# if file['Size'] > 0:
# image = '/'.join(file['Key'].split('/')[1:])
# if verbose == True:
# print('.', end='')
#
# labels = show_custom_labels(project_version_arn, bucket, image, 0.0)
# for L in labels:
# data[L['Name']] = L['Confidence']
#
# predictions = predictions.append(pd.Series({
# 'image': file['Key'].split('/')[-1],
# 'abnormal': data['abnormal'],
# 'normal': data['normal'],
# 'ground truth': label
# }), ignore_index=True)
#
# return predictions
#
# def show_custom_labels(model, bucket, image, min_confidence):
# # Call DetectCustomLabels from the Rekognition API: this will give us the list
# # of labels detected for this picture and their associated confidence level:
# reko = boto3.client('rekognition')
# try:
# response = reko.detect_custom_labels(
# Image={'S3Object': {'Bucket': bucket, 'Name': image}},
# MinConfidence=min_confidence,
# ProjectVersionArn=model
# )
#
# except Exception as e:
# print(f'Exception encountered when processing {image}')
# print(e)
#
# # Returns the list of custom labels for the image passed as an argument:
# return response['CustomLabels']
# ```
#
# Let’s plot the confusion matrix associated to this test set:
#
# 
#
# The metrics associated to this matrix are the following:
#
# * Precision: 100.0%
# * Recall: 99.8%
# * Accuracy: 99.8%
# * F1 Score: 99.9%
#
# Without any effort (and no ML knowledge!), we get impressive results. With so low false positives and false negatives, we can leverage such a model in even the most challenging industrial context.
#
# ### Cleanup
#
# We need to stop the running model as we will continue to incur costs while the endpoint is live:
#
# ```python
# print('Stopping model:' + model_arn)
#
# # Stop the model:
# try:
# reko = boto3.client('rekognition')
# response = reko.stop_project_version(ProjectVersionArn=model_arn)
# status = response['Status']
# print('Status: ' + status)
#
# except Exception as e:
# print(e)
#
# print('Done.')
# ```
| 0_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from astropy.table import Table, vstack
import glob
# Global plot styling: large figures and a readable font size for the
# MC/data comparison plots below.
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 20
plt.rcParams['figure.figsize']
# # Data analysis
# # MC - Data comparison
def format_axes(ax, option):
    """Apply labels, grid and legend to one of the muon comparison plots.

    Parameters
    ----------
    ax : matplotlib Axes to format.
    option : str
        'width' for the ring-width vs radius plot,
        'size' for the ring-size vs radius plot.
    """
    if option == 'width':
        ax.set_xlabel('Muon radius [deg]')
        ax.set_ylabel('Muon ring width [deg]')
        ax.grid(linestyle="dashed")
        #ax.set_ylim(0.042,0.07)
        ax.legend()
    if option == 'size':
        # BUG FIX: this branch previously formatted the global `ax2`
        # instead of the `ax` argument, so it only worked by accident
        # when called with ax2 itself.
        ax.grid(linestyle="dashed")
        ax.legend(ncol=2)
        ax.set_xlabel('Muon radius [deg]')
        ax.set_ylabel('Muon Size [pe]')
        ax.set_ylim(1000, 3500)
# Optical efficiencies of the simulated MC sets to compare against real data.
OPT_EFF=[0.6, 0.7, 0.8, 0.89]
# One plot colour per simulated efficiency (one spare entry is unused).
colors = ['C0', 'C1', 'C2', 'C3', 'C4']
# size (intensity) outside the ring, to get rid of hadronic showers
size_outside = 500 # phe
# ## Analyze real muon data from 20200115
# +
from astropy.table import Table, vstack
import glob
# Collect all muon-ring FITS tables for LST-1 run 01796 (real data).
listdir = glob.glob('../../data/muons/real/muons_LST-1.Run01796*')
# Muons analyzed using GlobalPeakWindowSum
dat = Table.read('./{}'.format(listdir[0]), format='fits')
# Stack the remaining per-file tables onto the first one.
for i in range(1,len(listdir)):
    dat2 = Table.read('./{}'.format(listdir[i]), format='fits')
    dat = vstack([dat, dat2])
df = dat.to_pandas()
# Keep only events the ring fit flagged as good rings.
df_good_data = df[df['good_ring']]
# -
# Now we load the MC and plot everything together
# +
# Two figures: ring width vs radius (ax) and ring size vs radius (ax2),
# with one MC curve per simulated optical efficiency plus the real data.
fig,ax = plt.subplots()
fig2,ax2 = plt.subplots()
for (opt_eff, col) in zip(OPT_EFF, colors):
    # Load and stack all MC muon files simulated at this optical efficiency.
    listdir=glob.glob(f'../../data/muons/mc/Scale{opt_eff}/muon_run*')
    dat = Table.read('{}'.format(listdir[0]), format='fits')
    for i in range(1,len(listdir)):
        dat2 = Table.read('{}'.format(listdir[i]), format='fits')
        dat = vstack([dat, dat2])
    df = dat.to_pandas()
    df_good = df[df['good_ring']]
    # Quality cuts: nearly complete rings, little light outside the ring,
    # and a physically sensible ring width.
    sns.regplot(x = df_good['ring_radius'][(df_good['ring_completeness'] > 0.9)
                                           & (df_good['size_outside'] < size_outside)
                                           & (df_good['ring_width'] < 0.3)],
                y = df_good['ring_width'][(df_good['ring_completeness'] > 0.9)
                                          & (df_good['size_outside'] < size_outside)
                                          & (df_good['ring_width'] < 0.3)], x_bins=10,
                ax=ax, color=col, label='Opt. eff. %.0f %%' %(opt_eff*100))
    sns.regplot(x = df_good['ring_radius'][(df_good['ring_completeness'] > 0.9)
                                           & (df_good['size_outside'] < size_outside)
                                           & (df_good['ring_width'] < 0.3)],
                y = df_good['ring_size'][(df_good['ring_completeness'] > 0.9)
                                         & (df_good['size_outside'] < size_outside)
                                         & (df_good['ring_width'] < 0.3)], x_bins=10,
                ax=ax2, color=col, label='Opt. eff. %.0f %%' %(opt_eff*100))
# Overlay the real-data points (same completeness/size cuts, no width cut).
sns.regplot(x = df_good_data['ring_radius'][(df_good_data['ring_completeness'] > 0.9) & (df_good_data['size_outside'] < size_outside)],
            y = df_good_data['ring_width'][(df_good_data['ring_completeness'] > 0.9) & (df_good_data['size_outside'] < size_outside)],
            x_bins=10, ax=ax, color = 'k', label = 'Real Data')
sns.regplot(x = df_good_data['ring_radius'][(df_good_data['ring_completeness'] > 0.9) & (df_good_data['size_outside'] < size_outside)],
            y = df_good_data['ring_size'][(df_good_data['ring_completeness'] > 0.9) & (df_good_data['size_outside'] < size_outside)],
            x_bins=10, ax=ax2, color = 'k', label = 'Real Data')
format_axes(ax,'width')
format_axes(ax2,'size')
| notebooks/lst/calibration/muon_data_mc_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Finding Lane Lines on the Road**
#
# ## 2018-02-22
#
# ### Markus
#
# ---
#
# **Finding Lane Lines on the Road**
#
# The goals / steps of this project are the following:
# * Make a pipeline that finds lane lines on the road
# * Reflect on your work in a written report
#
# ---
#
# ### Reflection
#
# ### 1. Describe your pipeline. As part of the description, explain how you modified the draw_lines() function.
#
# Original image:
# 
#
# Reducing the image data by
#
# 1) applying the color threshold
# 
#
# 2) making it gray scale
# 
#
# 3) perspective mask.
# 
#
# 4) using Canny edge detection to find object/line outlines
# 
#
# 5) using the Hough transform in radial coordinates to identify line segments
# 
#
#
#
# ### 2. Identify potential shortcomings with your current pipeline
#
#
# * Polygon masking is not dynamic. If a camera shifts, side wind, the road curves, there is a vertical changing slope, the masking may fail.
# * Contrast between road and markings might not be there in gray scale.
# * line markings could be obscured by leaves, objects, etc.
#
#
# ### 3. Suggest possible improvements to your pipeline
#
# A possible improvement would be to dynamically adjust
# - image color line detection, color filters etc.
# - multiple runs with different color detection could be done with filters that enhance the markings. (after all there is meaning to the color)
# - finally, one should be aware of line thickness, dashed or solid lines, double lines etc. since there is meaning to that.
# - redefining region of interest based on last frame line findings
# - relative image coordinates to adjust to different size image frames (not an issue in an actual car unless hardware changes)
# - likelihood filter to make sense of the lines. One could only allow so and so much of a change from one frame to the other.
# - instead of fitting the data found in the pipeline to a line, one could model what a line should look like including curves etc. and fit the data to that.
#
# Finally, the video pipeline was developed a bit further to include the solid line fits:
# 
#
#
#
#
| .ipynb_checkpoints/MarkdownTest-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
def init_computer(code, inputs):
    """Create a fresh Intcode machine state for `code` with queued `inputs`.

    The state is a plain dict holding program memory (plus a sparse
    extension dict for addresses beyond the program), the instruction
    pointer, the relative base, pending inputs, collected outputs and
    the halt / error / waiting-for-input status flags.
    """
    state = {}
    # Copy both lists so the caller's program and input queue are not mutated.
    state['mem'] = code.copy()
    state['mem_size'] = len(code)
    state['extend_mem'] = {}
    state['inst'] = 0
    state['rel'] = 0
    state['inputs'] = inputs.copy()
    state['outputs'] = []
    state['halt'] = False
    state['exec_error'] = False
    state['listening'] = False
    return state
def read_mem(computer, pos):
    """Read one memory cell; addresses past the program default to 0."""
    if pos < computer['mem_size']:
        return computer['mem'][pos]
    # Sparse extended memory: cells never written read as zero.
    return computer['extend_mem'].get(pos, 0)
def write_mem(computer, pos, val):
    """Write `val` at `pos`, spilling into extended memory past the program."""
    if pos < 0:
        # Negative addresses are invalid: report and ignore the write.
        print("invalid mem pos %i" % pos)
        return
    if pos < computer['mem_size']:
        computer['mem'][pos] = val
    else:
        computer['extend_mem'][pos] = val
def step(computer):
    """Execute one Intcode instruction, mutating `computer` in place.

    Decodes the opcode and parameter modes at the current instruction
    pointer, resolves each operand to a memory address, performs the
    operation, and advances (or jumps) the pointer. Sets the 'halt',
    'exec_error' or 'listening' flags as side effects.

    Returns the mutated computer dict — except on an already-halted
    machine or an unknown opcode, where it returns None (callers that
    chain on the return value should be aware of this asymmetry).
    """
    if(computer['halt']):
        return
    i = computer['inst']
    # opcode -> number of parameters that instruction consumes
    op_info = {1:3, 2:3, 3:1, 4:1, 5:2, 6:2, 7:3, 8:3, 9:1, 99:0}
    op = read_mem(computer, i)
    # The two low decimal digits are the opcode; higher digits are the
    # per-parameter addressing modes.
    opcode = op % 100
    if(not(opcode in op_info)):
        print("error unknown opcode %i" % (opcode))
        computer['exec_error'] = True
        return
    # a0..a2 are resolved memory addresses for up to three operands.
    a0 = -1
    a1 = -1
    a2 = -1
    jump = False
    # Resolve operand 0 (mode digit = hundreds place of op).
    if(op_info[opcode] > 0):
        p_mode = (math.floor(op / 100) % 10)
        if( p_mode == 0 ):
            #position mode (pointer)
            a0 = read_mem(computer, i + 1)
        elif( p_mode == 1 ):
            #immediate mode (value: the address of the literal itself)
            a0 = i + 1
        elif( p_mode == 2 ):
            #relative mode (offset from the relative base)
            a0 = read_mem(computer, i + 1) + computer['rel']
    # Resolve operand 1 (mode digit = thousands place of op).
    if(op_info[opcode] > 1):
        p_mode = (math.floor(op / 1000) % 10)
        if( p_mode == 0 ):
            #position mode (pointer)
            a1 = read_mem(computer, i + 2)
        elif( p_mode == 1 ):
            #immediate mode (value)
            a1 = i + 2
        elif( p_mode == 2 ):
            #relative mode
            a1 = read_mem(computer, i + 2) + computer['rel']
    # Resolve operand 2 (mode digit = ten-thousands place of op).
    if(op_info[opcode] > 2):
        p_mode = (math.floor(op / 10000) % 10)
        if( p_mode == 0 ):
            #position mode (pointer)
            a2 = read_mem(computer, i + 3)
        elif( p_mode == 1 ):
            #immediate mode (value)
            a2 = i + 3
        elif( p_mode == 2 ):
            #relative mode
            a2 = read_mem(computer, i + 3) + computer['rel']
    if(opcode == 1):
        #add op
        write_mem(computer, a2, read_mem(computer, a0) + read_mem(computer, a1))
    elif(opcode == 2):
        #mult op
        write_mem(computer, a2, read_mem(computer, a0) * read_mem(computer, a1))
    elif(opcode == 3):
        #read op: with no input queued, write -1 and flag 'listening'
        # (a non-blocking convention; presumably the driver re-queues
        # input and resumes — confirm against the Network caller).
        if(len(computer['inputs']) == 0):
            write_mem(computer, a0, -1)
            computer['listening'] = True
        else:
            write_mem(computer, a0, computer['inputs'][0])
            computer['inputs'] = computer['inputs'][1:]
    elif(opcode == 4):
        # output op: append the operand's value to the output queue
        computer['outputs'].append(read_mem(computer, a0))
    elif(opcode == 5):
        #jump if true op
        if(read_mem(computer, a0) != 0):
            jump = True
            i = read_mem(computer, a1)
    elif(opcode == 6):
        #jump if false op
        if(read_mem(computer, a0) == 0):
            jump = True
            i = read_mem(computer, a1)
    elif(opcode == 7):
        #check less than op
        write_mem(computer, a2, 1 if(read_mem(computer, a0) < read_mem(computer, a1)) else 0)
    elif(opcode == 8):
        #check equals op
        write_mem(computer, a2, 1 if(read_mem(computer, a0) == read_mem(computer, a1)) else 0)
    elif(opcode == 9):
        #change relative base op
        computer['rel'] = computer['rel'] + read_mem(computer, a0)
    elif(opcode == 99):
        #halt op
        computer['halt'] = True
    # Advance past the instruction and its parameters unless we jumped.
    if(not(jump)):
        i = i + op_info[opcode] + 1
    # Falling off the end of both program and extended memory is an error.
    if(i >= computer['mem_size'] and (not(i in computer['extend_mem']))):
        print('exiting b/c end of code reached')
        computer['exec_error'] = True
    computer['inst'] = i
    return computer
class Network(object):
    """Simulation of 50 networked Intcode computers plus a NAT (AoC 2019 day 23).

    Each computer is seeded with its own address (0-49) as its first input.
    Packets are triples (address, x, y); address 255 routes to the NAT, which
    re-injects its most recent packet into computer 0 once the whole network
    has been idle for a sustained stretch.
    """
    def __init__(self, code):
        self.timestep = 0
        self.computers = []
        self.nat = []                  # last (x, y) packet sent to address 255
        self.newnat = False
        for i in range (50):
            # Each machine receives its network address as the initial input.
            self.computers.append(init_computer(code, [i]))
        self.idletime = 0              # consecutive steps with every machine waiting
        self.lastnatsendy = None       # y of the most recent NAT -> 0 delivery
        self.natydup = False           # True once the NAT repeats a y value
    def step(self):
        # Advance every computer one instruction, then route completed packets.
        for i in range (50):
            step(self.computers[i])
        for i in range (50):
            # A full packet is exactly three queued output values.
            if(len(self.computers[i]['outputs']) == 3):
                self.idletime = 0
                outputs = self.computers[i]['outputs']
                if(outputs[0] == 255):
                    # NAT packet: only the most recent one is remembered.
                    self.nat = outputs[1:]
                    self.newnat = True
                    self.computers[i]['listening'] = False
                elif(outputs[0] > -1 and outputs[0] < 50):
                    # Deliver x then y to the addressed computer's input queue.
                    self.computers[outputs[0]]['inputs'].append(outputs[1])
                    self.computers[outputs[0]]['inputs'].append(outputs[2])
                    self.computers[outputs[0]]['listening'] = False
                else:
                    print('invalid packet address: %i from computer %i' % (outputs[0], i))
                self.computers[i]['outputs'] = []
        # The network counts as idle when all 50 machines are blocked on input.
        listencount = 0
        for i in range(50):
            if(self.computers[i]['listening']):
                listencount = listencount + 1
        if(listencount == 50):
            self.idletime = self.idletime + 1
            if(len(self.nat) > 0):
                # After >100 consecutive idle steps the NAT wakes computer 0.
                if(self.idletime > 100):
                    if(self.lastnatsendy == self.nat[1]):
                        # Same y delivered twice in a row: part 2 answer found.
                        self.natydup = True
                        return
                    self.lastnatsendy = self.nat[1]
                    self.computers[0]['inputs'].append(self.nat[0])
                    self.computers[0]['inputs'].append(self.nat[1])
                    self.computers[0]['listening'] = False
                    self.nat = []
                    self.idletime = 0
        else:
            self.idletime = 0
        self.timestep = self.timestep + 1
    def run_part1(self):
        # Run until the first packet addressed to 255 arrives (or a step cap).
        while(len(self.nat) == 0 and self.timestep < 10000000):
            self.step()
        if(len(self.nat) > 0):
            print ('part 1: output packet sent to 255 at t=%i: x=%i, y=%i' % (self.timestep, self.nat[0], self.nat[1]))
    def run_part2(self):
        # Run until the NAT delivers the same y twice in a row (or a step cap).
        # NOTE(review): self.lastnat is assigned but never read -- confirm.
        self.lastnat = 0
        while(self.timestep < 10000000):
            self.step()
            if(self.natydup):
                print ('part 2: first repeated y detected at t=%i: y=%i' % (self.timestep, self.lastnatsendy))
                break
# -
day23code = [3,62,1001,62,11,10,109,2229,105,1,0,1392,1198,1169,736,2060,1136,767,1041,928,1072,1996,1662,1567,994,800,1363,571,2091,1928,1493,2192,705,1695,1800,1596,1464,868,965,1328,1631,1965,835,1262,2029,1101,1433,1829,639,899,1726,2161,1227,672,2122,1899,1526,1763,1295,604,1868,0,0,0,0,0,0,0,0,0,0,0,0,3,64,1008,64,-1,62,1006,62,88,1006,61,170,1106,0,73,3,65,20101,0,64,1,20102,1,66,2,21102,1,105,0,1106,0,436,1201,1,-1,64,1007,64,0,62,1005,62,73,7,64,67,62,1006,62,73,1002,64,2,133,1,133,68,133,102,1,0,62,1001,133,1,140,8,0,65,63,2,63,62,62,1005,62,73,1002,64,2,161,1,161,68,161,1101,1,0,0,1001,161,1,169,102,1,65,0,1102,1,1,61,1102,1,0,63,7,63,67,62,1006,62,203,1002,63,2,194,1,68,194,194,1006,0,73,1001,63,1,63,1105,1,178,21101,0,210,0,105,1,69,2101,0,1,70,1101,0,0,63,7,63,71,62,1006,62,250,1002,63,2,234,1,72,234,234,4,0,101,1,234,240,4,0,4,70,1001,63,1,63,1105,1,218,1105,1,73,109,4,21102,0,1,-3,21101,0,0,-2,20207,-2,67,-1,1206,-1,293,1202,-2,2,283,101,1,283,283,1,68,283,283,22001,0,-3,-3,21201,-2,1,-2,1106,0,263,21202,-3,1,-3,109,-4,2105,1,0,109,4,21102,1,1,-3,21102,0,1,-2,20207,-2,67,-1,1206,-1,342,1202,-2,2,332,101,1,332,332,1,68,332,332,22002,0,-3,-3,21201,-2,1,-2,1106,0,312,22102,1,-3,-3,109,-4,2106,0,0,109,1,101,1,68,358,21002,0,1,1,101,3,68,367,20101,0,0,2,21101,376,0,0,1105,1,436,21201,1,0,0,109,-1,2106,0,0,1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768,65536,131072,262144,524288,1048576,2097152,4194304,8388608,16777216,33554432,67108864,134217728,268435456,536870912,1073741824,2147483648,4294967296,8589934592,17179869184,34359738368,68719476736,137438953472,274877906944,549755813888,1099511627776,2199023255552,4398046511104,8796093022208,17592186044416,35184372088832,70368744177664,140737488355328,281474976710656,562949953421312,1125899906842624,109,8,21202,-6,10,-5,22207,-7,-5,-5,1205,-5,521,21102,1,0,-4,21102,0,1,-3,21101,51,0,-2,21201,-2,-1,-2,1201,-2,385,470,21001,0,0,-1,21202,-3,2,-3,22207,-7,-1,-5,1205,-5,496,21201,-3,1,-3,22102,-1,-1,-
5,22201,-7,-5,-7,22207,-3,-6,-5,1205,-5,515,22102,-1,-6,-5,22201,-3,-5,-3,22201,-1,-4,-4,1205,-2,461,1106,0,547,21102,-1,1,-4,21202,-6,-1,-6,21207,-7,0,-5,1205,-5,547,22201,-7,-6,-7,21201,-4,1,-4,1106,0,529,22102,1,-4,-7,109,-8,2106,0,0,109,1,101,1,68,563,21001,0,0,0,109,-1,2105,1,0,1101,100267,0,66,1101,0,2,67,1102,598,1,68,1101,0,302,69,1101,0,1,71,1102,602,1,72,1105,1,73,0,0,0,0,32,91138,1101,51977,0,66,1102,3,1,67,1101,0,631,68,1102,302,1,69,1101,0,1,71,1102,637,1,72,1105,1,73,0,0,0,0,0,0,39,141837,1102,1,20297,66,1101,2,0,67,1101,666,0,68,1102,302,1,69,1102,1,1,71,1102,1,670,72,1105,1,73,0,0,0,0,31,202718,1101,0,30181,66,1102,1,1,67,1102,1,699,68,1102,556,1,69,1101,2,0,71,1101,701,0,72,1105,1,73,1,10,46,194762,45,241828,1102,84659,1,66,1101,1,0,67,1102,1,732,68,1102,556,1,69,1101,0,1,71,1101,0,734,72,1106,0,73,1,-24,41,54983,1101,14563,0,66,1101,0,1,67,1102,763,1,68,1101,556,0,69,1101,0,1,71,1101,0,765,72,1106,0,73,1,43,18,305589,1102,1,27259,66,1101,1,0,67,1101,794,0,68,1102,1,556,69,1101,0,2,71,1101,0,796,72,1105,1,73,1,2053,24,189586,41,164949,1102,23279,1,66,1101,3,0,67,1101,827,0,68,1102,1,302,69,1101,1,0,71,1101,0,833,72,1106,0,73,0,0,0,0,0,0,8,35158,1101,0,101359,66,1102,2,1,67,1101,862,0,68,1101,302,0,69,1101,0,1,71,1102,866,1,72,1105,1,73,0,0,0,0,39,189116,1101,81869,0,66,1102,1,1,67,1102,1,895,68,1102,556,1,69,1101,0,1,71,1102,897,1,72,1106,0,73,1,4127,34,291513,1102,73681,1,66,1102,1,1,67,1101,926,0,68,1101,0,556,69,1102,0,1,71,1101,928,0,72,1105,1,73,1,1053,1101,17579,0,66,1101,4,0,67,1101,0,955,68,1102,253,1,69,1102,1,1,71,1101,0,963,72,1106,0,73,0,0,0,0,0,0,0,0,16,100267,1102,102677,1,66,1101,0,1,67,1102,1,992,68,1101,0,556,69,1101,0,0,71,1101,0,994,72,1105,1,73,1,1635,1102,21481,1,66,1102,1,1,67,1101,0,1021,68,1102,556,1,69,1101,9,0,71,1102,1,1023,72,1105,1,73,1,2,32,45569,24,94793,10,83786,19,92671,11,14891,5,8087,37,20297,45,60457,45,302285,1101,88853,0,66,1102,1,1,67,1102,1068,1,68,1102,556,1,69,1102,1,1,71,1102,1,1070,72,1106,0,73,1,59,20,143
578,1101,57793,0,66,1102,1,1,67,1102,1,1099,68,1102,556,1,69,1101,0,0,71,1102,1101,1,72,1106,0,73,1,1975,1102,97171,1,66,1101,0,3,67,1101,0,1128,68,1101,0,302,69,1101,0,1,71,1101,0,1134,72,1106,0,73,0,0,0,0,0,0,8,70316,1102,8087,1,66,1101,0,2,67,1102,1163,1,68,1101,0,302,69,1101,1,0,71,1102,1,1167,72,1105,1,73,0,0,0,0,37,40594,1102,58271,1,66,1101,1,0,67,1102,1196,1,68,1101,0,556,69,1102,1,0,71,1102,1,1198,72,1105,1,73,1,1468,1102,1,87991,66,1102,1,1,67,1102,1225,1,68,1102,556,1,69,1102,0,1,71,1101,1227,0,72,1106,0,73,1,1066,1101,0,54983,66,1101,3,0,67,1102,1,1254,68,1102,302,1,69,1102,1,1,71,1102,1260,1,72,1106,0,73,0,0,0,0,0,0,48,51977,1101,0,45569,66,1102,1,2,67,1102,1289,1,68,1101,302,0,69,1102,1,1,71,1102,1,1293,72,1105,1,73,0,0,0,0,24,284379,1101,87359,0,66,1102,1,2,67,1102,1,1322,68,1101,351,0,69,1102,1,1,71,1102,1326,1,72,1106,0,73,0,0,0,0,255,17599,1102,1,20399,66,1102,1,3,67,1101,0,1355,68,1101,0,302,69,1102,1,1,71,1102,1361,1,72,1105,1,73,0,0,0,0,0,0,39,47279,1102,1,13177,66,1101,1,0,67,1101,1390,0,68,1101,556,0,69,1102,0,1,71,1101,1392,0,72,1105,1,73,1,1410,1102,17599,1,66,1101,0,1,67,1101,0,1419,68,1102,556,1,69,1102,1,6,71,1101,1421,0,72,1105,1,73,1,23694,31,101359,48,103954,48,155931,28,20399,28,40798,28,61197,1101,0,56369,66,1101,0,1,67,1102,1460,1,68,1102,1,556,69,1101,0,1,71,1102,1462,1,72,1106,0,73,1,47163854,10,41893,1101,0,36263,66,1101,0,1,67,1102,1,1491,68,1102,556,1,69,1102,0,1,71,1101,1493,0,72,1105,1,73,1,1357,1102,1,92671,66,1102,2,1,67,1101,1520,0,68,1102,1,302,69,1102,1,1,71,1102,1524,1,72,1106,0,73,0,0,0,0,11,29782,1102,60457,1,66,1101,0,6,67,1102,1,1553,68,1102,302,1,69,1102,1,1,71,1101,1565,0,72,1105,1,73,0,0,0,0,0,0,0,0,0,0,0,0,47,174718,1102,4957,1,66,1102,1,1,67,1101,0,1594,68,1102,556,1,69,1102,0,1,71,1101,0,1596,72,1105,1,73,1,1623,1101,94793,0,66,1101,0,3,67,1102,1,1623,68,1101,0,302,69,1102,1,1,71,1101,1629,0,72,1106,0,73,0,0,0,0,0,0,39,94558,1101,31387,0,66,1102,1,1,67,1102,1,1658,68,1101,0,556,69,1101,0,1,71,1101,0,1660,72,11
05,1,73,1,192,14,46558,1101,0,14891,66,1101,0,2,67,1101,0,1689,68,1102,1,302,69,1102,1,1,71,1102,1,1693,72,1106,0,73,0,0,0,0,5,16174,1102,1,46457,66,1102,1,1,67,1101,1722,0,68,1102,556,1,69,1102,1,1,71,1102,1,1724,72,1105,1,73,1,2341,14,69837,1102,47279,1,66,1101,0,4,67,1101,1753,0,68,1101,253,0,69,1102,1,1,71,1102,1761,1,72,1105,1,73,0,0,0,0,0,0,0,0,47,87359,1101,0,97381,66,1101,4,0,67,1101,0,1790,68,1101,0,302,69,1102,1,1,71,1101,0,1798,72,1106,0,73,0,0,0,0,0,0,0,0,45,120914,1102,68597,1,66,1102,1,1,67,1102,1827,1,68,1101,556,0,69,1101,0,0,71,1102,1,1829,72,1105,1,73,1,1465,1101,0,48857,66,1102,1,1,67,1102,1,1856,68,1102,556,1,69,1101,0,5,71,1101,1858,0,72,1106,0,73,1,1,20,215367,18,203726,34,97171,14,23279,41,109966,1101,3463,0,66,1102,1,1,67,1102,1,1895,68,1101,0,556,69,1101,1,0,71,1102,1,1897,72,1106,0,73,1,160,45,362742,1102,1,52181,66,1102,1,1,67,1101,0,1926,68,1101,556,0,69,1102,1,0,71,1101,1928,0,72,1105,1,73,1,1521,1101,0,101863,66,1102,4,1,67,1102,1,1955,68,1102,302,1,69,1102,1,1,71,1101,0,1963,72,1106,0,73,0,0,0,0,0,0,0,0,8,17579,1101,14173,0,66,1101,0,1,67,1102,1992,1,68,1102,1,556,69,1102,1,1,71,1101,0,1994,72,1105,1,73,1,-3,34,194342,1101,0,41893,66,1102,1,2,67,1102,2023,1,68,1102,302,1,69,1101,0,1,71,1102,2027,1,72,1106,0,73,0,0,0,0,19,185342,1101,0,57787,66,1102,1,1,67,1101,2056,0,68,1101,0,556,69,1102,1,1,71,1102,1,2058,72,1106,0,73,1,-209,18,101863,1102,1,66359,66,1102,1,1,67,1101,2087,0,68,1101,556,0,69,1101,1,0,71,1101,2089,0,72,1106,0,73,1,273,20,71789,1101,0,44293,66,1102,1,1,67,1102,2118,1,68,1102,1,556,69,1101,0,1,71,1102,1,2120,72,1106,0,73,1,125,46,292143,1101,0,95561,66,1101,1,0,67,1102,1,2149,68,1102,1,556,69,1102,1,5,71,1101,0,2151,72,1105,1,73,1,5,20,287156,18,407452,46,97381,46,389524,45,181371,1102,32869,1,66,1101,0,1,67,1102,2188,1,68,1102,1,556,69,1102,1,1,71,1101,0,2190,72,1105,1,73,1,128,16,200534,1101,0,71789,66,1101,4,0,67,1101,0,2219,68,1101,302,0,69,1101,0,1,71,1101,2227,0,72,1105,1,73,0,0,0,0,0,0,0,0,8,52737]
# Part 1: first packet sent to the NAT address (255).
network = Network(day23code)
network.run_part1()
# Part 2: first y the NAT delivers to computer 0 twice in a row.
network = Network(day23code)
network.run_part2()
| aoc23.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import nibabel as nb
from scipy.stats import spearmanr, pearsonr
import seaborn as sns
from sklearn.linear_model import LinearRegression
from matplotlib import cm
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import os
import pandas as pd
# Project data roots (absolute paths on the analysis server).
rootdir='/data1/users/kwagstyl/bigbrain/Hierarchy/'
geodesicdir='/data1/users/kwagstyl/bigbrain/Hierarchy/geodesics/'
figdir='/data1/users/kwagstyl/bigbrain/NeuralNetworks/figs/'
surfdir='/data1/users/kwagstyl/bigbrain/NeuralNetworks/surfdir/'
# +
def beta_confidence_interval(y, x):
    """Fit an OLS regression of y on x and return the slope together with
    its 95% confidence interval as (beta, ci_low, ci_high)."""
    design = sm.add_constant(x.copy())
    fit = sm.OLS(y, design).fit()
    interval = fit.conf_int(alpha=0.05, cols=None)
    # Row 1 is the slope term; row 0 would be the intercept.
    return fit.params[1], interval[1, 0], interval[1, 1]
def plot_kde_hemis(r_thick, r_dist,l_thick, l_dist, system):
    """Plot kernel-density contours with lowess trend lines for the right
    (red) and left (blue) hemispheres, annotating the legend with Pearson
    correlations.

    NOTE(review): callers appear to pass geodesic distance as the first
    element of each pair (the x axis is labelled distance), so the parameter
    names look swapped -- confirm before calling by keyword.
    """
    # Density clouds, one colour map per hemisphere.
    ax = sns.kdeplot(r_thick, r_dist,
                     cmap="Reds", shade=True, shade_lowest=False,alpha=0.5)
    ax = sns.kdeplot(l_thick, l_dist,
                     cmap="Blues", shade=True, shade_lowest=False,alpha=0.5)
    # Lowess-smoothed trend lines only (no scatter layer).
    ax = sns.regplot(r_thick, r_dist,lowess=True,scatter=False,color="Red")
    ax = sns.regplot(l_thick, l_dist,lowess=True,scatter=False,color="Blue")
    r_corr,r_p = pearsonr(r_thick, r_dist)
    l_corr,l_p = pearsonr(l_thick, l_dist)
    ax.set(xlabel="Geodesic distance \nfrom primary "+system+" area (mm)",
           ylabel="Cortical thickness (mm)")
    ax.set(yticks=[1.0,2.0,3.0],yticklabels=['1.0','2.0','3.0'])
    # Legend reports R and p per hemisphere, rounded to 2 d.p.
    ax.legend(['right, R='+str(np.round(r_corr,decimals=2))+', p='+str(np.round(r_p,decimals=2)),
               'left, R='+str(np.round(l_corr,decimals=2))+', p='+str(np.round(l_p,decimals=2))])
    return ax
def plot_kde(y, x, regions, cmap="magma"):
    """Joint KDE of thickness vs. geodesic distance for the selected
    vertices, overlaid with a faint scatter and a lowess trend line."""
    joint = sns.jointplot(x[regions], y[regions], kind='kde',
                          color=cm.magma(80), cmap=cmap, shade=True,
                          shade_lowest=False, alpha=0.9)
    # Faint raw data points underneath the density contours.
    joint.plot_joint(plt.scatter, c=cm.magma(50), s=10, linewidth=0,
                     marker=".", alpha=0.1)
    joint.set_axis_labels("Geodesic distance mm", "Cortical thickness (mm)")
    # Lowess-smoothed trend without its own scatter layer.
    joint.plot_joint(sns.regplot, lowess=True, scatter=False,
                     ci=0.95, color=cm.magma(80))
    return joint
def plot_gradients(Thickness, distances, regions):
    """Plot the per-layer thickness gradient (OLS slope vs. geodesic
    distance) with its confidence interval, one marker per cortical layer."""
    stats = np.zeros((6, 3))
    x = distances[regions]
    for layer in range(6):
        y = Thickness["layer{0}".format(layer + 1)][regions]
        stats[layer, :3] = beta_confidence_interval(y, x)
    layer_ticks = np.arange(6) + 1
    ax = plt.errorbar(stats[:, 0], layer_ticks, fmt='.',
                      xerr=stats[:, 1] - stats[:, 0])
    plt.gca().invert_yaxis()
    plt.yticks(layer_ticks, ['I', 'II', 'III', 'IV', 'V', 'VI'])
    plt.ylabel('Layers')
    plt.xlabel('Thickness gradient')
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    # Vertical reference line at a gradient of zero.
    plt.plot([0, 0], [1, 6], color='k')
#stacked bar chart
from scipy import ndimage
def stacked_plot(Thickness, surf_metric, region_mask, n_bins=10, smoothing=0):
    """Plots layer thicknesses against variations in a surface feature.

    Thickness - dict of per-vertex layer thickness arrays ('layer1'..'layer6')
    surf_metric - eg geodesic distance, curvature
    region_mask - either cortex mask or local mask eg motor
    n_bins - number of equal-width metric bins
    smoothing - sigma of an optional 1-D Gaussian applied along the bins

    Draws a stacked bar of mean layer thicknesses per bin; binning stops
    early once a bin contains fewer than 50 vertices.
    """
    distance_bins=np.linspace(np.min(surf_metric[region_mask]),np.max(surf_metric[region_mask]),n_bins+1)
    means=np.zeros((6,n_bins))
    std=np.zeros((6,n_bins))
    indices=[]
    for i in range(n_bins):
        # Vertices falling in the current metric bin, restricted to the mask.
        Region=np.logical_and(np.logical_and(surf_metric>distance_bins[i],surf_metric<distance_bins[i+1]),region_mask)
        if np.sum(Region)<50:
            # Too few vertices for a stable mean: stop binning here.
            break
        for n in range(6):
            #find labels for each curvature bin
            Mean_thickness=np.mean(Thickness["layer{0}".format(n+1)][Region])
            std_thickness=np.std(Thickness["layer{0}".format(n+1)][Region])
            means[n,i]=Mean_thickness
            std[n,i]=std_thickness
        indices.append(i)
    # NOTE(review): when the loop completes without breaking, i == n_bins-1,
    # so this slice drops the final computed bin as well -- confirm intended.
    means=means[:,:i]
    if smoothing >0:
        means = ndimage.filters.gaussian_filter1d(means,smoothing,axis=1,mode='reflect')
    Layers=['Layer I', 'Layer II','Layer III', 'Layer IV', 'Layer V', 'Layer VI']
    ind=np.arange(i)
    indices=np.array(indices).astype(int)
    width=1
    colours=[50,200,150,250,200,100]
    cmap=plt.get_cmap('Greys')
    Plots={}
    bottom=np.zeros(i)
    # Draw layers from VI up to I so layer I ends up on top of the stack.
    for n in 5-np.arange(6):
        Plots["p{0}".format(n+1)] = plt.bar(ind, means[n,:], width, color=cmap(colours[n]),bottom=bottom)
        bottom+=means[n,:]
    # NOTE(review): 'thicknes' typo in the axis label (runtime string, left as-is).
    plt.ylabel('Cortical thicknes (mm)')
    plt.xlabel('Geodesic distance (mm)')
    #ticks
    ticks=np.round([0,i/4,i/2,3*i/4,i]).astype(int)
    labels=np.round(distance_bins[ticks])
    plt.xticks(ticks,labels)
    #plt.legend((Plots['p1'],Plots['p2'],Plots['p3'],Plots['p4'],Plots['p5'],Plots['p6']),Layers,title='Layer',bbox_to_anchor=(1.28,0.75),loc=5)
def plot_kde_hemis_plus_ve(r_dist, r_thick,l_dist, l_thick, ve_dist,ve_thick, system):
    """Plot kernel-density contours and lowess trends for both hemispheres,
    plus a grey dashed linear fit through the von Economo region means.
    The legend reports Pearson R and p for each of the three datasets."""
    ax = sns.kdeplot(r_dist, r_thick,
                     cmap="Reds", shade=True, shade_lowest=False,alpha=0.5)
    ax = sns.kdeplot(l_dist, l_thick,
                     cmap="Blues", shade=True, shade_lowest=False,alpha=0.5)
    ax = sns.regplot(r_dist, r_thick,lowess=True,scatter=False,color="Red")
    ax = sns.regplot(l_dist, l_thick,lowess=True,scatter=False,color="Blue")
    # von Economo points drawn with a dashed linear fit.
    ax = sns.regplot(ve_dist, ve_thick,scatter=True,color="Gray",line_kws={'alpha':0.6,'linestyle':'dashed'},truncate=True)
    #ci=None)
    r_corr,r_p = pearsonr(r_dist, r_thick)
    l_corr,l_p = pearsonr(l_dist, l_thick)
    ve_corr,ve_p = pearsonr(ve_dist, ve_thick)
    ax.set(xlabel="Geodesic distance \nfrom primary "+system+" area (mm)",
           ylabel="Cortical thickness (mm)")
    ax.set(yticks=[1.0,2.0,3.0],yticklabels=['1.0','2.0','3.0'])
    # Legend reports R and p per dataset, rounded to 2 d.p.
    ax.legend(['right, R='+str(np.round(r_corr,decimals=2))+', p='+str(np.round(r_p,decimals=2)),
               'left, R='+str(np.round(l_corr,decimals=2))+', p='+str(np.round(l_p,decimals=2)),
               'von Economo, R='+str(np.round(ve_corr,decimals=2))+', p='+str(np.round(ve_p,decimals=2))])
    return ax
# +
#plotting with von Economo
systems = ['visual','somatosensory','auditory','motor']
hemis=['left','right']
font = {'family' : 'sans-serif',
        'style':'normal',
        'size' : 30}
# von Economo regional thickness table and the matching surface parcellation.
ve_thickness=pd.read_excel('/data1/users/kwagstyl/bigbrain/NeuralNetworks/von_economo/Thickness_table.xlsx')
tissue='crown_min'
ve_parcellation=np.loadtxt('/data1/users/kwagstyl/bigbrain/NeuralNetworks/von_economo/lh.economo.label.rsled.txt').astype(int)
for system in systems:
    print(system)
    TotalThickness={}
    regions={}
    distances={}
    tear_mask={}
    archi_mask={}
    for k,hemi in enumerate(hemis):
        # Per-hemisphere thickness, system ROI, geodesic distance and masks
        # (only the first 163842 entries are used).
        TotalThickness[hemi]=np.loadtxt(os.path.join(rootdir,'thickness_'+hemi+'_total.txt'))[:163842]
        regions[hemi]=np.loadtxt(os.path.join(geodesicdir,system+'_regions_'+hemi+'.txt')).astype(bool)[:163842]
        distances[hemi]=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_'+hemi+'.txt'))[:163842]
        tear_mask[hemi]=np.loadtxt(os.path.join(geodesicdir,'tear_mask_'+hemi+'.txt')).astype(bool)[:163842]
        archi_mask[hemi]=np.loadtxt(os.path.join(surfdir,'archi_'+hemi+'.txt')).astype(bool)[:163842]
        #mask archicortex
        regions[hemi]=np.logical_and(regions[hemi],1-archi_mask[hemi])
        regions[hemi]=np.logical_and(regions[hemi],1-tear_mask[hemi])
    #only including one hemisphere for von economo
    ve_regions = np.loadtxt(os.path.join(geodesicdir,system+'_regions_left.txt')).astype(bool)[:163842]
    ve_distances=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_left.txt'))[:163842]
    rois=np.unique(ve_parcellation[ve_regions])
    #filter frontal from rois for somatosensory von economo due to minor misalignments
    ve_dist_sys=[]
    ve_thick_sys=[]
    for roi in rois:
        # NOTE(review): 'and' binds tighter than 'or', so the FB exclusion
        # below applies to every system, not only somatosensory -- confirm.
        if system=='somatosensory' and ve_thickness.loc[roi,'area_name'] == 'FA' or ve_thickness.loc[roi,'area_name'] == 'FB':
            pass
        elif system =='motor' and ve_thickness.loc[roi,'area_name'] == 'PA':
            pass
        elif system =='auditory' and ve_thickness.loc[roi,'area_name'] == 'IB':
            pass
        elif ve_thickness.loc[roi,tissue]>0 and 'L' not in ve_thickness.loc[roi,'area_name']:
            #print(ve_thickness.loc[roi,'area_name'])
            # Mean geodesic distance of the ROI and its tabulated thickness.
            ve_dist_sys.append(np.mean(ve_distances[np.logical_and(ve_parcellation==roi,ve_regions)]))
            ve_thick_sys.append(ve_thickness.loc[roi,tissue])
    plt.figure(figsize=(5,5))
    plot_kde_hemis_plus_ve(distances['right'][regions['right']],TotalThickness['right'][regions['right']],
                           distances['left'][regions['left']],TotalThickness['left'][regions['left']],
                           ve_dist_sys, ve_thick_sys,
                           system)
    plt.savefig(os.path.join(figdir,system+'total_thickness_both_hemis_plus_ve.pdf'),bbox_inches='tight')
# +
# systems = ['visual','somatosensory','auditory','motor']
# hemis=['left','right']
# for system in systems:
# TotalThickness={}
# regions={}
# distances={}
# tear_mask={}
# archi_mask={}
# for k,hemi in enumerate(hemis):
# TotalThickness[hemi]=np.loadtxt(os.path.join(rootdir,'thickness_'+hemi+'_total.txt'))[:163842]
# regions[hemi]=np.loadtxt(os.path.join(geodesicdir,system+'_regions_'+hemi+'.txt')).astype(bool)[:163842]
# distances[hemi]=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_'+hemi+'.txt'))[:163842]
# tear_mask[hemi]=np.loadtxt(os.path.join(geodesicdir,'tear_mask_'+hemi+'.txt')).astype(bool)[:163842]
# archi_mask[hemi]=np.loadtxt(os.path.join(surfdir,'archi_'+hemi+'.txt')).astype(bool)[:163842]
# #mask archicortex
# regions[hemi]=np.logical_and(regions[hemi],1-archi_mask[hemi])
# regions[hemi]=np.logical_and(regions[hemi],1-tear_mask[hemi])
# plt.figure(figsize=(7,7))
# plot_kde_hemis(distances['right'][regions['right']],TotalThickness['right'][regions['right']],
# distances['left'][regions['left']],TotalThickness['left'][regions['left']],
# system)
# plt.savefig(os.path.join(figdir,system+'total_thickness_both_hemis.pdf'),bbox_inches='tight')
# +
#von Economo gradients
import matplotlib
font = {'family' : 'sans-serif',
        'style':'normal',
        'size' : 22}
matplotlib.rc('font', **font)
def plot_gradients_stick_ve(Thickness,distances,shift,color='blue',system='visual',normalise=False, hatch=False):
    """Draw one horizontal bar per layer: the OLS slope of von Economo layer
    thickness vs. geodesic distance (with 95% CI error bar), offset
    vertically by *shift* so several systems can share the axis."""
    beta=np.zeros((6,3))
    for i in range(6):
        if normalise==True:
            # Optionally express each layer as a fraction of total thickness.
            y=Thickness["layer{0}".format(i+1)]/Thickness['total']
        else:
            y=Thickness["layer{0}".format(i+1)]
        x=distances
        #g=plot_kde(y,x)
        #R, P = pearsonr(geodist[regions],Thickness["layer{0}".format(i+1)][regions])
        beta[i,:3] = beta_confidence_interval(y,x)
    plt.barh(np.arange(6)+1+shift,beta[:,0],height=0.2,color=color,edgecolor = 'black',
             xerr=beta[:,1]-beta[:,0],capsize=3, label=system,hatch=hatch)
systems = ['visual','somatosensory','auditory','motor']
colourmaps = ['Greens','Oranges','Reds','Blues', ]
barWidth = 0.3
colors = [
    '#ee7733',
    '#cc3311',
    '#228833','#4477aa']
include_total=True
plt.figure(figsize=(5,11))
hemis=['right', 'left']
patterns = ['//', '\\\\']
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
# Manual legend: one colour per system plus a hatched swatch for von Economo.
legend_elements = [Line2D([0], [0], color=colors[0], lw=8, label=systems[0]),
                   Line2D([0], [0], color=colors[1], lw=8, label=systems[1]),
                   Line2D([0], [0], color=colors[2], lw=8, label=systems[2]),
                   Line2D([0], [0], color=colors[3], lw=8, label=systems[3]),
                   Patch(facecolor='white',hatch=patterns[0], edgecolor='black',
                         label='von Economo'),
                   ]
gray_colours=[50,200,150,250,200,100]
cmap=plt.get_cmap('Greys')
# Alternating grey bands to visually separate the layers.
for n in range(6):
    plt.axhspan(n+0.5,n+1.5, color=cmap(gray_colours[n]), alpha=0.2, linewidth=0)
ve_thickness=pd.read_excel('/data1/users/kwagstyl/bigbrain/NeuralNetworks/von_economo/Thickness_table.xlsx')
ve_parcellation=np.loadtxt('/data1/users/kwagstyl/bigbrain/NeuralNetworks/von_economo/lh.economo.label.rsled.txt').astype(int)
for k,system in enumerate(systems):
    ve_regions = np.loadtxt(os.path.join(geodesicdir,system+'_regions_left.txt')).astype(bool)[:163842]
    ve_distances=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_left.txt'))[:163842]
    rois=np.unique(ve_parcellation[ve_regions])
    h=0
    ve_dist_sys=[]
    ve_thick_sys={}
    ve_thick_sys["total"]=[]
    for roi in rois:
        #filter bad ROIs
        # NOTE(review): 'and' binds tighter than 'or', so the FB exclusion
        # below applies to every system, not only somatosensory -- confirm.
        if system=='somatosensory' and ve_thickness.loc[roi,'area_name'] == 'FA' or ve_thickness.loc[roi,'area_name'] == 'FB':
            pass
        elif system =='motor' and ve_thickness.loc[roi,'area_name'] == 'PA':
            pass
        elif system =='auditory' and ve_thickness.loc[roi,'area_name'] == 'IB':
            pass
        elif ve_thickness.loc[roi,'wall']>0 and 'L' not in ve_thickness.loc[roi,'area_name']:
            # Mean geodesic distance of the ROI, then total and per-layer thickness.
            ve_dist_sys.append(np.mean(ve_distances[np.logical_and(ve_parcellation==roi,ve_regions)]))
            ve_thick_sys['total'].append(ve_thickness.loc[roi,'crown_min'])
            for n in range(6):
                if not "layer{0}".format(n+1) in ve_thick_sys.keys():
                    ve_thick_sys["layer{0}".format(n+1)]=[]
                tissue='ve_'+str(n+1)
                ve_thick_sys["layer{0}".format(n+1)].append(ve_thickness.loc[roi,tissue])
    plot_gradients_stick_ve(ve_thick_sys,ve_dist_sys,shift=k/5.+h/10.-0.35,color=colors[k] ,system=system, hatch=patterns[h])
    plt.yticks(np.arange(7)+1,['I','II','III','IV','V','VI'])
    plt.ylim([0.7,6.8])
    if include_total:
        # Whole-cortex gradient drawn alongside the per-layer bars.
        y=ve_thick_sys["total"] #[:1000]
        x=ve_dist_sys #[:1000]
        beta = beta_confidence_interval(y,x)
        plt.barh(k/5.+h/10.-0.35,beta[0],height=0.2,color=colors[k],edgecolor = 'black',
                 xerr=beta[1]-beta[0],capsize=3,hatch=patterns[h])
plt.yticks(np.arange(7),['Whole\ncortex','I','II','III','IV','V','VI'])
plt.ylim([-0.5,6.5])
plt.ylabel('Cortical layer')
plt.xlabel('Thickness gradient from primary area')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.plot([0,0],[-0.8,6.5],color='k')
plt.legend(handles=legend_elements,loc=(1.201,0.8))
plt.xlim([-0.015,0.05])
#insert gray bars
plt.gca().invert_yaxis()
plt.savefig(os.path.join(figdir,'all_systems_gradients_ve.eps'),bbox_inches='tight')
# +
import matplotlib
font = {'family' : 'sans-serif',
        'style':'normal',
        'size' : 22}
matplotlib.rc('font', **font)
def plot_gradients_stick(Thickness,distances,regions,shift,color='blue',system=system,normalise=False, hatch=False):
    """Draw one horizontal bar per layer: the OLS slope of layer thickness
    vs. geodesic distance within *regions* (with 95% CI error bar), offset
    vertically by *shift*.

    NOTE(review): the default system=system is bound to whatever the global
    'system' holds when this cell is executed -- confirm that is intended.
    """
    beta=np.zeros((6,3))
    for i in range(6):
        if normalise==True:
            # Optionally normalise each layer by its own regional mean.
            y=Thickness["layer{0}".format(i+1)][regions]/np.mean(Thickness["layer{0}".format(i+1)][regions])
        else:
            y=Thickness["layer{0}".format(i+1)][regions]
        x=distances[regions]
        #g=plot_kde(y,x)
        #R, P = pearsonr(geodist[regions],Thickness["layer{0}".format(i+1)][regions])
        beta[i,:3] = beta_confidence_interval(y,x)
    plt.barh(np.arange(6)+1+shift,beta[:,0],height=0.1,color=color,edgecolor = 'black',
             xerr=beta[:,1]-beta[:,0],capsize=3, label=system,hatch=hatch)
systems = ['visual','somatosensory','auditory','motor']
colourmaps = ['Greens','Oranges','Reds','Blues', ]
barWidth = 0.3
colors = [
    '#ee7733',
    '#cc3311',
    '#228833','#4477aa']
include_total=True
plt.figure(figsize=(10,11))
hemis=['right', 'left']
patterns = ['', '\\\\']
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
# Manual legend: one colour per system, one hatch per hemisphere.
legend_elements = [Line2D([0], [0], color=colors[0], lw=8, label=systems[0]),
                   Line2D([0], [0], color=colors[1], lw=8, label=systems[1]),
                   Line2D([0], [0], color=colors[2], lw=8, label=systems[2]),
                   Line2D([0], [0], color=colors[3], lw=8, label=systems[3]),
                   Patch(facecolor='white',hatch=patterns[0], edgecolor='black',
                         label=hemis[0]),
                   Patch(facecolor='white',hatch=patterns[1], edgecolor='black',
                         label=hemis[1])]
gray_colours=[50,200,150,250,200,100]
cmap=plt.get_cmap('Greys')
# Alternating grey bands to visually separate the layers.
for n in range(6):
    plt.axhspan(n+0.5,n+1.5, color=cmap(gray_colours[n]), alpha=0.2, linewidth=0)
for h,hemic in enumerate(hemis):
    TotalThickness=np.loadtxt(os.path.join(rootdir,'thickness_'+hemic+'_total.txt'))[:163842]
    Thickness={}
    for n in range(6):
        Thickness["layer{0}".format(n+1)]=np.loadtxt(os.path.join(rootdir,'thickness_'+hemic+'_layer'+str(n+1)+'.txt'))[:163842]
    for k,system in enumerate(systems):
        # System ROI minus archicortex and scanning-tear vertices.
        regions=np.loadtxt(os.path.join(geodesicdir,system+'_regions_'+hemic+'.txt')).astype(bool)[:163842]
        archi_mask=np.loadtxt(os.path.join(surfdir,'archi_'+hemic+'.txt')).astype(bool)[:163842]
        regions=np.logical_and(regions,1-archi_mask)
        mask=np.loadtxt(os.path.join(geodesicdir,'tear_mask_'+hemic+'.txt')).astype(bool)[:163842]
        distances=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_'+hemic+'.txt'))[:163842]
        regions=np.logical_and(regions,1-mask)
        #if system == 'auditory' :
        #    regions=np.logical_and(regions,1-mask)
        #    regions = np.logical_and(distances <30,regions)
        #elif system == 'somatosensory':
        #    regions = np.logical_and(distances <51,regions)
        #plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', yerr=yer1, capsize=7, label=system)
        plot_gradients_stick(Thickness,distances,regions,shift=k/5.+h/10.-0.35,color=colors[k] ,system=system, hatch=patterns[h])
        plt.yticks(np.arange(7)+1,['I','II','III','IV','V','VI'])
        plt.ylim([0.7,6.8])
        if include_total:
            # Whole-cortex gradient drawn alongside the per-layer bars.
            y=TotalThickness #[:1000]
            x=distances #[:1000]
            beta = beta_confidence_interval(y[regions],x[regions])
            plt.barh(k/5.+h/10.-0.35,beta[0],height=0.1,color=colors[k],edgecolor = 'black',
                     xerr=beta[1]-beta[0],capsize=3,hatch=patterns[h])
plt.yticks(np.arange(7),['Whole\ncortex','I','II','III','IV','V','VI'])
plt.ylim([-0.5,6.5])
plt.ylabel('Cortical layer')
plt.xlabel('Thickness gradient from primary area')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.plot([0,0],[-0.8,6.5],color='k')
plt.legend(handles=legend_elements,loc=(1.201,0.8))
#insert gray bars
plt.gca().invert_yaxis()
plt.savefig(os.path.join(figdir,'all_systems_gradients.eps'),bbox_inches='tight')
# -
# Sliding-window gradients: slope of thickness vs. distance in 10 mm windows.
betas={}
for h,hemi in enumerate(hemis):
    betas[hemi]={}
    # NOTE(review): despite the name, this loads layer 3 thickness, and the
    # slice bound 163482 differs from the 163842 used everywhere else --
    # looks like transposed digits; confirm and fix.
    TotalThickness=np.loadtxt(os.path.join(rootdir,'thickness_'+hemi+'_layer3.txt'))[:163482]
    for k,system in enumerate(systems):
        regions=np.loadtxt(os.path.join(geodesicdir,system+'_regions_'+hemi+'.txt')).astype(bool)[:163482]
        #regions=np.where(regions==1)[0]
        distances=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_'+hemi+'.txt'))[:163482]
        #Thickness=np.loadtxt(os.path.join(rootdir,'thickness_'+hemic+'_layer'+str(k+1)+'.txt'))[:163482]
        betalist=[]
        # One slope per 10 mm window, sliding in 1 mm steps.
        for w in np.arange(np.max(distances[regions])-10):
            window=np.logical_and(regions, np.logical_and(distances>=w,distances <w+10))
            beta = beta_confidence_interval(TotalThickness[window],distances[window])
            betalist.append(beta[0])
        betas[hemi][system]=betalist
systems = ['motor','visual','somatosensory','auditory']
for system in systems:
    # NOTE(review): 'hemic' (and TotalThickness/Thickness below) are leftovers
    # from earlier cells, so only the last-processed hemisphere's data is
    # plotted here -- confirm intended.
    regions=np.loadtxt(os.path.join(geodesicdir,system+'_regions_'+hemic+'.txt')).astype(bool)[:163842]
    #regions=np.where(regions==1)[0]
    distances=np.loadtxt(os.path.join(geodesicdir,system+'_geodesic_distances_'+hemic+'.txt'))[:163842]
    #if system == 'auditory':
    #    regions = np.logical_and(distances <26, regions)
    plt.figure()
    plot_kde(TotalThickness,distances,regions)
    #plt.savefig(system+'total_thickness.pdf',bbox_inches='tight')
    plt.figure()
    stacked_plot(Thickness,distances, regions,n_bins=30, smoothing = 5)
    plt.title(system)
    #plt.savefig(os.path.join(figdir,system+'stacked_layers.pdf'),bbox_inches='tight')
# plt.figure()
# plot_gradients(Thickness,distances,regions)
# plt.title(system)
# plt.savefig(system+'layer_gradients.pdf',bbox_inches='tight')
from scipy.ndimage import filters
# Smooth the first difference of column 1 and locate its sign changes.
# NOTE(review): 'w' is not defined in this cell -- it relies on state from an
# earlier interactive session; confirm which array it refers to.
grad=np.diff(w[:,1])
sm_grad=filters.gaussian_filter1d(grad,sigma=10)
plt.plot(sm_grad)
# Column-0 values at the indices where the smoothed gradient changes sign.
w[np.where(np.diff(np.sign(sm_grad)))[0],0]
plt.vlines(np.where(np.diff(np.sign(sm_grad))<0)[0],ymin=-0.00001,ymax=0.00001)
| scripts/notebooks/Allsystems_gradients_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing
import numpy as np
import matplotlib.pyplot as plt
% matplotlib inline
plt.rcParams["figure.dpi"] = 200
# NOTE(review): load_boston is deprecated and removed in scikit-learn >= 1.2;
# this cell needs an older sklearn or a replacement dataset.
from sklearn.datasets import load_boston
boston = load_boston()
from sklearn.model_selection import train_test_split
X, y = boston.data, boston.target
# Fixed random_state for a reproducible train/test split.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=0)
print(boston.DESCR)
# One scatter panel per feature against the target (13 features in a 3x5 grid).
fig, axes = plt.subplots(3, 5, figsize=(20, 10))
for i, ax in enumerate(axes.ravel()):
    if i > 12:
        # Hide the two unused axes of the 15-panel grid.
        ax.set_visible(False)
        continue
    ax.plot(X[:, i], y, 'o', alpha=.5)
    ax.set_title("{}: {}".format(i, boston.feature_names[i]))
    ax.set_ylabel("MEDV")
# Boxplot of the raw feature distributions -- the scales differ widely.
plt.boxplot(X)
plt.xticks(np.arange(1, X.shape[1] + 1),
           boston.feature_names, rotation=30, ha="right")
plt.ylabel("MEDV")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
scores = cross_val_score(KNeighborsRegressor(),
X_train, y_train, cv=10)
np.mean(scores), np.std(scores)
from sklearn.neighbors import KNeighborsRegressor
scores = cross_val_score(KNeighborsRegressor(),
X_train_scaled, y_train, cv=10)
np.mean(scores), np.std(scores)
from sklearn.ensemble import RandomForestRegressor
scores = cross_val_score(RandomForestRegressor(n_estimators=100, random_state=0),
X_train_scaled, y_train, cv=10)
np.mean(scores), np.std(scores)
from sklearn.ensemble import RandomForestRegressor
scores = cross_val_score(RandomForestRegressor(n_estimators=100, random_state=0),
X_train, y_train, cv=10)
np.mean(scores), np.std(scores)
# # Categorical Variables
import pandas as pd

# Toy payroll table: one string-valued column ('boro') next to a numeric one.
salaries = [103, 89, 142, 54, 63, 219]
df = pd.DataFrame({
    'salary': salaries,
    'boro': ['Manhatten', 'Queens', 'Manhatten', 'Brooklyn', 'Brooklyn', 'Bronx'],
})
df
# String columns are one-hot encoded automatically by get_dummies.
pd.get_dummies(df)

# Same data with the boroughs integer-coded instead of spelled out.
df = pd.DataFrame({
    'salary': salaries,
    'boro': [0, 1, 0, 2, 2, 3],
})
df
# Integer columns are treated as numeric and left alone ...
pd.get_dummies(df)
# ... unless explicitly listed in `columns`.
pd.get_dummies(df, columns=['boro'])
# ### Ensuring consistent encoding

# +
df = pd.DataFrame({'salary': [103, 89, 142, 54, 63, 219],
                   'boro': ['Manhatten', 'Queens', 'Manhatten', 'Brooklyn', 'Brooklyn', 'Bronx']})
# Declaring the full category list up front makes get_dummies emit a column
# for every borough — even ones absent from this sample ('Staten Island') —
# so train/test encodings stay aligned.
# Series.astype("category", categories=...) was deprecated in pandas 0.21 and
# later removed; CategoricalDtype is the supported replacement.
df['boro'] = df['boro'].astype(
    pd.CategoricalDtype(categories=['Manhatten', 'Queens', 'Brooklyn', 'Bronx', 'Staten Island']))
df
# -

pd.get_dummies(df)
# # Exercise
# Apply dummy encoding and scaling to the "adult" dataset consisting of income data from the census.
#
# Bonus: visualize the data.

# Load the census income data; the CSV's first column is the row index.
data = pd.read_csv("data/adult.csv", index_col=0)

# +
# # %load solutions/load_adult.py
# -
| notebooks/04 - Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''zs_py39'': conda)'
# metadata:
# interpreter:
# hash: acea1e9fb1ca687a228f6dc71ee62aa15fcb20ac41dec3d8a9e155f35234403c
# name: python3
# ---
# # Application for similarity

# Make the repo-local `assets` package importable from this notebook.
import sys
sys.path.append("..")
sys.path.append("../../")

import paddle
import paddlenlp
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer

# Initialize the model and tokenizer

# +
# ERNIE 1.0 fine-tuned as a binary (similar / not similar) classifier.
MODEL_NAME = "ernie-1.0"
model = ErnieForSequenceClassification.from_pretrained(MODEL_NAME, num_classes=2)
tokenizer = ErnieTokenizer.from_pretrained(MODEL_NAME)
# -

# load model parameters

# +
from paddlenlp.datasets import load_dataset

# LCQMC: Chinese question-matching corpus with train/dev/test splits.
DATASET_NAME = 'lcqmc'
train_ds, dev_ds, test_ds = load_dataset(DATASET_NAME, splits=["train", "dev", "test"])

# Load the trained model.
# !wget --no-check-certificate -c https://trustai.bj.bcebos.com/lcqmc-ernie-1.0.tar
# !tar -xvf ./lcqmc-ernie-1.0.tar -C ../assets/
# !rm ./lcqmc-ernie-1.0.tar
# Restore the fine-tuned weights downloaded above into the model.
state_dict = paddle.load(f'../assets/{DATASET_NAME}-{MODEL_NAME}/model_state.pdparams')
model.set_dict(state_dict)
# + tags=[]
from assets.utils import predict

# Map integer class ids to the label strings `predict` returns.
label_map = {0 : 'negative', 1 : 'positive'}

# (removed: `true_labels = [1, 1, 0]` — it was never used anywhere)
batch_size = 32
predict_results = predict(model, dev_ds, tokenizer, label_map, batch_size=batch_size)

# Dev-set accuracy: fraction of examples whose predicted label matches the
# gold label (predict_results is index-aligned with dev_ds).
count = 0
right = 0
for idx, example in enumerate(dev_ds):
    count += 1
    if label_map[example['label']] == predict_results[idx]:
        right += 1
print('data size:', count)
print('acc:', right / count)
# -
# ## Prepare for Interpretations
# +
from trustai.interpretation.token_level import IntGradInterpreter
import numpy as np
from assets.utils import convert_example, load_data
from paddlenlp.data import Stack, Tuple, Pad
def preprocess_fn(data):
    """Tokenize one example (or a list of examples) and batch the results.

    Returns a pair of paddle tensors (input_ids, segment_ids), both padded to
    the longest sequence in the batch and created with stop_gradient=False so
    gradient-based attribution can flow back to the inputs.
    """
    if not isinstance(data, list):
        data = [data]
    encoded = []
    for text in data:
        ids, seg_ids = convert_example(text, tokenizer, max_seq_length=128, is_test=True)
        encoded.append((ids, seg_ids))
    # Pad each field of the (input_ids, segment_ids) tuples independently.
    batchify = Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input id
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment id
    )
    ids_batch, seg_batch = batchify(encoded)
    return (paddle.to_tensor(ids_batch, stop_gradient=False),
            paddle.to_tensor(seg_batch, stop_gradient=False))
# -
# ## IG Interpreter
# This process will take some time.

# +
from trustai.interpretation.token_level import IntGradInterpreter

# Integrated-gradients attributions for every dev example; `steps=50` is the
# number of interpolation steps used by the interpreter.
interp_results = []
ig = IntGradInterpreter(model, device="gpu")
for idx, example in enumerate(dev_ds):
    # progress marker every 1000 examples
    if idx % 1000 == 0:
        print(idx)
    interp_results += ig(preprocess_fn(example), steps=50)
# -
# ## Calculate sentence pair map scores.

# + tags=[]
from trustai.interpretation.token_level.common import get_rationales_and_non_ratioanles
from trustai.evaluation import Evaluator

evaluator = Evaluator()

# For each query/title pair, rank each sentence's subwords by attribution and
# compute a symmetric MAP score between the two rankings.
map_scores = []
for idx, example in enumerate(dev_ds):
    text_a, text_b = example['query'], example['title']
    # get subword
    subwords_a = tokenizer.tokenize(text_a)
    subwords_b = tokenizer.tokenize(text_b)
    # calculate attributions individually
    # Offsets 1 and len(subwords_a)+2 skip the special tokens in the packed
    # sequence — presumably [CLS] a [SEP] b [SEP]; confirm with the tokenizer.
    attributions = interp_results[idx].attributions
    attributions_a = attributions[1:len(subwords_a) + 1]
    attributions_b = attributions[len(subwords_a) + 2:len(subwords_a) + len(subwords_b) + 2]
    # sorted subword by attributions (ascending)
    sorted_tokens_a = [subwords_a[i] for i in sorted(range(len(subwords_a)), key=lambda j : attributions_a[j], reverse=False)]
    sorted_tokens_b = [subwords_b[i] for i in sorted(range(len(subwords_b)), key=lambda j : attributions_b[j], reverse=False)]
    # map score, averaged over both directions (a vs b and b vs a)
    map_score_a = evaluator._calc_map_by_bin(sorted_tokens_a, sorted_tokens_b)
    map_score_b = evaluator._calc_map_by_bin(sorted_tokens_b, sorted_tokens_a)
    map_scores.append((map_score_a + map_score_b) / 2)

# Summary statistics of the per-pair MAP scores.
print("map_scores mean:", np.mean(map_scores))
print("map_scores median:", np.median(map_scores))
print("map_scores min:", np.min(map_scores))
print("map_scores max:", np.max(map_scores))
# -
# Filter the data under different thresholds and calculate the accuracy
# For each threshold in [0, 1], keep only 'positive' predictions whose MAP
# score is at or below the threshold, and report their count and accuracy.
# (The redundant count/right initialization before the outer loop was
# removed — both are reset at the top of every iteration.)
for i in np.linspace(0, 1, 11):
    count = 0
    right = 0
    for idx, example in enumerate(dev_ds):
        if predict_results[idx] == 'positive' and map_scores[idx] <= i:
            count += 1
            if label_map[example['label']] == predict_results[idx]:
                right += 1
    # acc reported as 1 when the bucket is empty (vacuous accuracy)
    print("thresholds:", i, "data size:", count, "acc:", right / count if count != 0 else 1)
| tutorials/application/zh-similarity-application.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="H44FbWRlFIm5"
# # Using Neural Networks to solve Ordinary Diferential Equations
#
#
# + [markdown] id="jqGB_5d6u4Ar"
# The notebook is partially borrowed from https://towardsdatascience.com/using-neural-networks-to-solve-ordinary-differential-equations-a7806de99cdd
# + [markdown] id="nbE72ZspcTeO"
# ## Intro
#
# The idea of solving an ODE using a Neural Network was firstly described by [Lagaris et al.](https://arxiv.org/abs/physics/9705023)
# The insight behind it is basically training a neural network so that its solution satisfies the conditions required by a differential equation. In other words, we need to find a function whose derivative satisfies the ODE conditions. In this article, we will be going through the underlying mathematical foundations of this concept and then we will implement it using TensorFlow 2.0.
# + [markdown] id="dcq6Lqwfcf6R"
# ## Mathematical Foundations
#
# Let's say we have an ODE system, given by:
#
# $$u' = f(u,t) , t \in [0,1]$$
# $$u(0)=u_0$$
#
# Hence, we can understand the differential operation as a function on the domain $t$ with a known initial condition $u(0)=u_0$.
#
# As we know, Neural Networks are known as universal approximators. We will take advantage of this property of Neural Networks to use them to approximate the solution of the given ODE:
#
# $$NN(t) \approx u(t)$$
#
# Also, we may agree that the derivative of $NN(t)$ will give us a similar equation:
#
# $$NN'(t) = f(NN(t),t)$$
#
# So, if $NN(t)$ is really close to the true solution, then we could say that its derivate is also close to the derivative of the true solution, i.e.:
#
# $$NN'(t) \approx f(u,t) , t \in t$$.
#
# Thus, we can turn this condition into our loss function. We have the given derivative function $f(u,t)$ and we can calculate the Neural Network derivative $NN'(t)$ at each step. This motivates the following loss function (which is the mean squared error of the two values):
#
# $$L = \sqrt{\sum_i \left(\frac{dNN(t_i)}{dt} - f(u,t_i) \right)^2}$$
#
# You may remember the initial condition, we still need to handle that. The most straight-forward way would do this by adding an initial condition term to the cost function. It would look like this:
#
# $$L = \sqrt{(NN(0) - u_0)^2} + \sqrt{\sum_i \left(\frac{dNN(t_i)}{dt} - f(u,t_i) \right)^2}$$
#
# While that would work, it may not be the best approach. We all know the crucial importance of the loss function on the training of the Neural Network, and we also know that the number of terms on this function will impact directly the stability of our training. More terms on the loss function would (usually) imply unstable training.
# To avoid it, we can encode the initial condition into the loss in a more efficient way. Let's define a new function and use it instead of directly using the neural network:
#
# $$g(t) = u_0 + tNN(t)$$
#
# It's easy to see that $g(t)$ will always satisfy the initial condition, since $g(0)$ will lead to $tNN(t) = 0$, leaving just the initial condition on the expression($u_t$). This way, we can train $g(t)$ to satisfy the ODE system instead of the Neural Network. Then, it will automatically be a solution to
# the derivative function. We can incorporate this new idea into our loss function:
#
# $$L = \sqrt{\sum_i \left(\frac{dg(t_i)}{dt} - f(u(t_i),t_i) \right)^2}$$
# + [markdown] id="40INEDo14LXS"
# ## Implementation
#
# We are about to implement the described method in python using the TensorFlow library. In order to have a better understanding of the method, we will use a low-level design, avoiding a number of possible optimizations provided by the library. Our focus, at this moment, is to clearly understand and implement the ODE-solver Neural Network. For this reason, we will also choose a simply ODE:
#
# $$u'= 2x$$
# $$u(0)=1$$
#
# We can easily solve this problem by integrating both sides of the solution, leading to $u + C = x^2 +C$, and after fitting $C$ to obey the initial condition
# we have $u=x^2 + 1$. Nevertheless, instead of solving it analytically, let's try to solve using Neural Nets.
#
# $$u'= 2x$$
# $$ \int u' \,dx = \int 2x \,dx $$
# $$ u + C_1 = x^2 + C_2 $$
# $$ u = x^2 + C$$
# + [markdown] id="ZXjijxa49zXI"
# ### Define Variables
# + id="N5nYh-mG5o_b"
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# + id="5M126csrMRHM"
# initial condition u(0) = f0
f0 = 1
# forward-difference step: sqrt of float32 machine epsilon
inf_s = np.sqrt(np.finfo(np.float32).eps)
# Parameters
learning_rate = 0.01
training_steps = 1000
# NOTE(review): batch_size appears unused in this notebook.
batch_size = 100
# print the loss 10 times over the run (float, used via `i % display_step`)
display_step = training_steps/10
# Network Parameters: a 1 -> 32 -> 32 -> 1 fully connected net
n_input = 1 # input layer number of neurons
n_hidden_1 = 32 # 1st layer number of neurons
n_hidden_2 = 32 # 2nd layer number of neurons
n_output = 1 # output layer number of neurons

# Trainable weights/biases, initialized from a standard normal.
weights = {
    'h1': tf.Variable(tf.random.normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random.normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random.normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random.normal([n_hidden_1])),
    'b2': tf.Variable(tf.random.normal([n_hidden_2])),
    'out': tf.Variable(tf.random.normal([n_output]))
}
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# + id="TQC1eiisMR9H"
# Create model
def multilayer_perceptron(x):
    """Evaluate the 1-32-32-1 MLP at a scalar x; returns a (1,1,1) tensor."""
    # wrap the scalar input into a rank-3 float32 array for tf.matmul
    x = np.array([[[x]]], dtype='float32')
    # Hidden fully connected layer with 32 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.sigmoid(layer_1)
    # Hidden fully connected layer with 32 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.sigmoid(layer_2)
    # Output fully connected layer
    output = tf.matmul(layer_2, weights['out']) + biases['out']
    # Output sigmoid constrains NN(x) to (0,1) — presumably acceptable here
    # because the target NN(x) ~ x lives in [0,1]; confirm for other ODEs.
    return tf.nn.sigmoid(output)
# Universal Approximator
def g(x):
    """Trial solution g(x) = f0 + x*NN(x); satisfies g(0) = f0 by construction."""
    nn_out = multilayer_perceptron(x)
    return f0 + x * nn_out

# Given EDO (right-hand side of u' = 2x)
def f(x):
    """ODE right-hand side: f(x) = 2x."""
    return x * 2
# Custom loss function to approximate the derivatives
def custom_loss():
    """Sum of squared ODE residuals |g'(x) - f(x)|^2 at 10 collocation points,
    with g' estimated by a forward finite difference of step inf_s."""
    residuals = [
        ((g(x + inf_s) - g(x)) / inf_s - f(x)) ** 2
        for x in np.linspace(0, 1, 10)
    ]
    return tf.reduce_sum(tf.abs(residuals))
#        alternative: tf.sqrt(tf.reduce_mean(tf.abs(residuals)))
# + [markdown] id="Ame5hJrbiusy"
# $$\frac{dg(x)}{dx} = \lim_{n \to 0} \frac{g(x+n)-g(x)}{n}$$
# + id="PCOL8GKbM3K5"
def train_step():
    """One SGD step on the finite-difference ODE loss."""
    # record the loss computation so its gradient w.r.t. the weights exists
    with tf.GradientTape() as tape:
        loss = custom_loss()
    trainable_variables = list(weights.values()) + list(biases.values())
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))

# + colab={"base_uri": "https://localhost:8080/"} id="RCwEn5gSOWEF" outputId="90e60a10-dc46-40d6-91b7-deabd5770030"
# Training loop: 1000 steps, printing the loss every display_step steps.
for i in range(training_steps):
    train_step()
    if i % display_step == 0:
        print("loss: %f " % (custom_loss()))
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="yHwDFJUJVPwq" outputId="9ff6c6a6-5c6e-4cce-a3eb-ba77e7e9842d"
from matplotlib.pyplot import figure
figure(figsize=(10,10))

# True Solution (found analitically)
def true_solution(x):
    """Analytic solution of u' = 2x, u(0) = 1."""
    return x**2 + 1

# Sample the trained trial solution g on 100 points of [0, 1];
# g(i) returns a (1,1,1) tensor, hence the [0][0][0] unwrap.
X = np.linspace(0, 1, 100)
result = []
for i in X:
    # result.append(f(i))
    result.append(g(i).numpy()[0][0][0])
S = true_solution(X)

plt.plot(X, S, label="Original Function")
plt.plot(X, result, label="Neural Net Approximation")
plt.legend(loc=2, prop={'size': 20})
plt.show()
# + id="TgYqfcg2Jghx"
def ode_solve(z0, t0, t1, f, h):
    """Integrate dz/dt = f(z, t) from t0 to t1 with the explicit Euler method.

    Parameters
    ----------
    z0 : initial state at t0.
    t0, t1 : endpoints of the integration interval.
    f : right-hand side, called as f(z, t).
    h : fixed step size.

    Returns
    -------
    list of states [z(t0), z(t0+h), ..., z(t1)] of length n_steps + 1.
    """
    # round(), not int(): (t1 - t0) / h is often 19.999... in floating point
    # when the exact value is 20, and int() truncation silently dropped the
    # final step.
    n_steps = round((t1 - t0) / h)
    t = t0
    z = z0
    z_hist = [z0]
    for i_step in range(n_steps):
        z = z + h * f(z, t)  # explicit Euler update
        t = t + h
        z_hist += [z]
    return z_hist
# + id="zkMN6CteJlbt"
# NOTE(review): this f(z, t) shadows the earlier single-argument f(x); any
# later cell that still calls f(x) with one argument will fail once this
# cell has run.
def f(z, t):
    """Right-hand side u' = 2t in the (state, time) signature ode_solve expects."""
    return 2*t

h = 0.05
y = ode_solve(1, 0, 1, f, h)
# Abscissae of the Euler samples: t_i = i*h. Deriving them from the step size
# (instead of dividing by len(y)) keeps the plot aligned with the solver's
# time grid even if the number of steps changes.
x = [i * h for i in range(len(y))]
# + id="nWmFSRelamti"
def G(t, u):
    # Adapter: rk4step expects G(t, u); f takes its arguments as (z, t).
    return f(u, t)
def rk4step(tau, tn, un, G):
    """Advance one classical Runge-Kutta (RK4) step of size tau from (tn, un)."""
    half = 0.5 * tau
    s1 = G(tn, un)
    s2 = G(tn + half, un + half * s1)
    s3 = G(tn + half, un + half * s2)
    s4 = G(tn + tau, un + tau * s3)
    # weighted average of the four stage slopes
    return un + tau * (s1 + 2 * s2 + 2 * s3 + s4) / 6.0
def rk4(N, T=1.0, u0=1.0):
    """Integrate u' = G(t, u) on [0, T] with N RK4 steps from u(0) = u0.

    T and u0 default to the previously hard-coded values, so existing
    rk4(N) calls behave identically.
    """
    tau = T / N
    u = np.zeros(N+1)
    u[0] = u0
    for n in range(N):
        u[n+1] = rk4step(tau, tau*n, u[n], G)
    return u

N = 100
u = rk4(N)
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="bu8A0BrCbKYD" outputId="442116f7-1348-404c-e445-a2c829a6d52f"
# Overlay all four curves: analytic solution, NN approximation, Euler, RK4.
plt.figure(figsize=(10, 10))
plt.plot(X, S, label="Original Function")
plt.plot(X, result, label="Neural Net Approximation")
plt.plot(x, y, label="Euler")
plt.plot(np.linspace(0, 1, N+1), u, label='RK4')
plt.legend(loc=2, prop={'size': 10})
plt.show()
# + [markdown] id="D8genk-D8RZK"
# ## Tensorflow.GradientTape instead of Manually computing the finite difference
#
# The same code, but now we are using `tf.GradientTape` to automatically take the derivative for us:
# + colab={"base_uri": "https://localhost:8080/", "height": 799} id="wkctSWX18Et-" outputId="11e20344-520d-49d9-fcbd-e5388ab97a3a"
# Re-initialize the network so this variant trains from scratch.
weights = {
    'h1': tf.Variable(tf.random.normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random.normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random.normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random.normal([n_hidden_1])),
    'b2': tf.Variable(tf.random.normal([n_hidden_2])),
    'out': tf.Variable(tf.random.normal([n_output]))
}

def train_step_t():
    """One SGD step using autodiff (tape.gradient) for g' instead of a
    finite difference."""
    X = np.linspace(0,1,10)
    # persistent=True because tape.gradient is called once per collocation
    # point (for dg/dx) and once more for the weight gradients.
    with tf.GradientTape(persistent=True) as tape:
        summation = []
        for x in X:
            x = tf.constant([[[x]]], dtype='float32')
            # watch the (non-Variable) input so dg/dx can be taken
            tape.watch(x)
            g_x = x * multilayer_perceptron(x) + f0
            dNN = tape.gradient(g_x, x)
            # NOTE(review): residual is not squared here, unlike custom_loss
            summation.append(dNN - f(x))
        loss = tf.reduce_sum(tf.abs(summation))
    trainable_variables = list(weights.values()) + list(biases.values())
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
# Train with the autodiff loss; the printed metric is the finite-difference
# custom_loss, kept for comparability with the earlier run.
for i in range(training_steps):
    train_step_t()
    if i % display_step == 0:
        print("loss: %f " % (custom_loss()))

figure(figsize=(10,10))

# True Solution (found analitically)
def true_solution(x):
    """Analytic solution of u' = 2x, u(0) = 1."""
    return x**2 + 1

# Sample the trained trial solution on [0, 1]; g(i) returns a (1,1,1)
# tensor, hence the [0][0][0] unwrap. (The unused `result2` list was removed.)
X = np.linspace(0, 1, 100)
result = []
for i in X:
    result.append(g(i).numpy()[0][0][0])
S = true_solution(X)

plt.plot(X, S, label="Original Function")
plt.plot(X, result, label="Neural Net Approximation")
plt.legend(loc=2, prop={'size': 20})
plt.show()
| sections/Kurdyukova2021Lab3/code/ODE_NN (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Autotranslation: Python to JavaScript and D3
#
# Generate a random graph with Python, then visualize it with a [D3](http://d3js.org/) interactive, force-directed graph.
#
# The first cell imports the BeakerX package and initializes the runtime.
#
# Then we generates the graph (one made of nodes and edges, like a social network graph)
# and store it in the BeakerX object.
#
# Then we load D3 and set its styles.
#
# Finally, a JavaScript cell gets the data from the BeakerX object and renders it with D3.
#
# This final cell was
# copied almost verbatim from the [D3 documentation](http://bl.ocks.org/mbostock/4062045). Other D3 examples
# should be similarly easy to get working in BeakerX.
from beakerx.object import beakerx
# +
from random import randrange
import math

# Build a random graph for the D3 force-directed demo: `nnodes` nodes split
# into 7 color groups, plus ~1.15*nnodes edges.
nnodes = 100

nodes = [{"name": str(i), "group": int(i * 7 / nnodes)} for i in range(nnodes)]

links = []
for edge_idx in range(int(nnodes * 1.15)):
    src = edge_idx % nnodes
    # log-compressed random target: always lands on a low node index (0..17)
    dst = int(math.log(1 + randrange(nnodes), 1.3))
    # edge weight grows as the endpoint indices get closer together
    weight = 10.0 / (1 + abs(src - dst))
    links.append({"source": src, "target": dst, "value": weight * weight})

# Hand the graph to the JavaScript cells via the shared BeakerX object.
beakerx.graph = {"nodes": nodes, "links": links}
# + language="javascript"
# require.config({
# paths: {
# d3: '//cdnjs.cloudflare.com/ajax/libs/d3/4.9.1/d3.min'
# }});
# + language="html"
# <style>
# .node {
# stroke: #fff;
# stroke-width: 1.5px;
# }
#
# .link {
# stroke: #999;
# stroke-opacity: .6;
# }
# </style>
# + language="javascript"
#
# beakerx.displayHTML(element, '<div id="fdg"></div>');
#
# var graph = beakerx.graph
#
# var d3 = require(['d3'], function (d3) {
#
# var width = 600,
# height = 500;
#
# var color = d3.scaleOrdinal(d3.schemeCategory20);
#
# var simulation = d3.forceSimulation()
# .force("link", d3.forceLink().distance(30))
# .force("charge", d3.forceManyBody().strength(-200))
# .force("center", d3.forceCenter(width / 2, height / 2))
# .force("y", d3.forceY(width / 2).strength(0.3))
# .force("x", d3.forceX(height / 2).strength(0.3));
#
# var svg = d3.select("#fdg")
# .append("svg")
# .attr("width", width)
# .attr("height", height)
# .attr("transform", "translate("+[100, 0]+")");
#
# simulation
# .nodes(graph.nodes)
# .force("link")
# .links(graph.links);
#
# var link = svg.selectAll(".link")
# .data(graph.links)
# .enter().append("line")
# .attr("class", "link")
# .style("stroke-width", function(d) { return Math.sqrt(d.value); });
#
# var node = svg.selectAll(".node")
# .data(graph.nodes)
# .enter().append("circle")
# .attr("class", "node")
# .attr("r", 10)
# .style("fill", function(d) { return color(d.group); });
#
# node.append("title")
# .text(function(d) { return d.name; });
#
# simulation.on("tick", function() {
# link.attr("x1", function(d) { return d.source.x; })
# .attr("y1", function(d) { return d.source.y; })
# .attr("x2", function(d) { return d.target.x; })
# .attr("y2", function(d) { return d.target.y; });
#
# node.attr("cx", function(d) { return d.x; })
# .attr("cy", function(d) { return d.y; });
# });
#
# node.call(d3.drag()
# .on("start", dragstarted)
# .on("drag", dragged)
# .on("end", dragended)
# );
#
# function dragstarted(d) {
# if (!d3.event.active) simulation.alphaTarget(0.3).restart();
# d.fx = d.x;
# d.fy = d.y;
# }
#
# function dragged(d) {
# d.fx = d3.event.x;
# d.fy = d3.event.y;
# }
#
# function dragended(d) {
# if (!d3.event.active) simulation.alphaTarget(0);
# d.fx = null;
# d.fy = null;
# }
# });
# -
| doc/AutoTranslation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json

# %matplotlib inline

# reading json with important fit hyperparameters
with open(os.path.join('..', 'fit_config.json'), 'r') as json_file:
    config = json.load(json_file)

# Load train/test splits shipped with the project.
train_set = pd.read_csv(os.path.join('..', 'data', 'train.csv'))
test_set = pd.read_csv(os.path.join('..', 'data', 'test.csv'))
print('Train set shape:', train_set.shape)
print('Test set shape:', test_set.shape)

# Bare comparison expression: True when train and test share the same feature
# columns (train additionally has the 'year_group' target). Its value is only
# visible as notebook cell output.
set(train_set.columns[train_set.columns != 'year_group']) == set(test_set.columns)
# ##### can make a concluision, that test set has the same feature columns as train set. "year_group" is response variable, "unique_num" is row identifier, while all the rest columns are feature vectors
train_set.head()

# test_set.head()

# ##### column names are encoded, so it will not be possible to provide some feature engineering based on their names - only by their values

# ### Check missing values
# Print every column that contains at least one missing value.
for col_name in train_set.columns:
    # Series.isnull().any() runs vectorized instead of a Python-level any()
    if train_set[col_name].isnull().any():
        print(col_name)
# #### No missing values, so no need to fix them during modeling
# ### Work with categorical variables
# Heuristic: a feature is categorical when it has few distinct values or
# holds strings.
cat_features = []
for col_name in train_set.columns[train_set.columns != 'year_group']:
    # BUG fix: the original compared `type(train_set[col_name]) == str`,
    # which tests the Series object itself and is always False; check the
    # column dtype instead to catch string-valued columns.
    if train_set[col_name].nunique() < config['cat_feature_unique'] or train_set[col_name].dtype == object:
        cat_features.append(col_name)
print("Number of categorical features detected:", len(cat_features))
# ##### Almost half of the features in dataset are categorical due to our hyperparameters, so handling categorical variables will be essential for modeling
# ### Now we can check correlation between continuous variables
# Continuous features only: drop categoricals, the row id, and the target.
df = train_set.drop(cat_features + ['unique_num', 'year_group'], axis=1)
df.head()

# Absolute pairwise Pearson correlations; keep only the upper triangle so
# each feature pair is considered once.
corr_matrix = df.corr().abs()
# np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))

# Find index of feature columns with correlation greater than config threshold
to_drop = [column for column in upper.columns if (upper[column] > config['correlation_drop_coef']).any()]
print('Some features we need to drop due to high Pearson correlation coefficient:', to_drop)
# ### Let's now visualize some distributions and build some plots for retrieving useful insights
# Class balance of the target.
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11; histplot /
# displot are the replacements.
sns.distplot(train_set.year_group)

train_set.year_group.value_counts()
# ##### As one can see, class '1' has much more less observations than the rest, so we should mention this while modeling: subsample/upsample classes' observations while building model, take it into account while constructing train/val sets etc
| notebooks/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Challenge
#
# Another approach to identifying fraudulent transactions is to look for outliers in the data. Standard deviation or quartiles are often used to detect outliers. Using this starter notebook, code two Python functions:
#
# * One that uses standard deviation to identify anomalies for any cardholder.
#
# * Another that uses interquartile range to identify anomalies for any cardholder.
#
# ## Identifying Outliers using Standard Deviation
# +
# Initial imports
import pandas as pd
import numpy as np
import random
from sqlalchemy import create_engine

# -

# Create a connection to the database
# NOTE(review): credentials are hard-coded in source; prefer an environment
# variable or a config file outside version control.
# engine = create_engine("postgresql://postgres:postgres@localhost:5432/fraud_detection")
engine = create_engine("postgresql://postgres:postgres@localhost:5432/HW7")
def get_customer_transaction (in_custid='all'):
    """Fetch credit-card transactions joined to their cardholder id.

    Parameters
    ----------
    in_custid : 'all' (default) returns every cardholder's transactions;
        an integer id returns only that cardholder's rows; ids outside the
        observed range yield an empty DataFrame.

    Returns
    -------
    DataFrame with columns: cardholder, hour (transaction timestamp), amount.
    """
    # TODO: wrap the query in a try/except for connection errors.
    query = """
    SELECT cc.cardholder_id as cardholder, tr.date as hour, tr.amount
    FROM credit_card cc
    Left join transaction tr
    ON (cc.card = tr.card);
    """
    all_card_df = pd.read_sql(query, engine)
    if in_custid == 'all':
        return all_card_df
    # BUG fix: the original used `in_custid in range(min, max)`, which
    # excluded the highest cardholder id; use an inclusive comparison.
    if all_card_df['cardholder'].min() <= in_custid <= all_card_df['cardholder'].max():
        return all_card_df[all_card_df['cardholder'] == in_custid]
    # unknown id: empty dataframe
    return pd.DataFrame()
# do delete
# query = """
# SELECT cc.cardholder_id as cardholder, tr.date as hour, tr.amount
# FROM credit_card cc
# Left join transaction tr
# ON (cc.card = tr.card)
# where cc.cardholder_id = 2 ;
# """
# card_hold_df = pd.read_sql(query, engine)
# +
# to delete
# NOTE(review): scratch cell — computes 2-sigma outliers over ALL cardholders
# to eyeball the logic before it was wrapped in find_anamoly below; safe to
# remove once that function is trusted.
cust_df = get_customer_transaction ()
mean_cust = cust_df['amount'].mean()
std_cust = cust_df['amount'].std()
outlier_df = cust_df.loc[(cust_df['amount'] > (mean_cust + 2 * std_cust)) | (cust_df['amount'] < (mean_cust - 2 * std_cust))]
outlier_df
cust_df['cardholder'].min()
# +
# Write function that locates outliers using standard deviation
def find_anamoly (in_cust_id):
    """Return a cardholder's transactions lying more than two standard
    deviations from that cardholder's mean transaction amount."""
    txns = get_customer_transaction(in_cust_id)
    mu = txns['amount'].mean()
    sigma = txns['amount'].std()
    lower, upper = mu - 2 * sigma, mu + 2 * sigma
    # anomalous = outside the [mu - 2*sigma, mu + 2*sigma] band
    return txns.loc[(txns['amount'] > upper) | (txns['amount'] < lower)]
# -
# ### to find three random card holder
# #### find maximum and minimum card holder number
# #### generate 3 random numumber in range
# #### find get anamoly in that range
#
# +
# Find anomalous transactions for 3 random card holders
all_card_holder_df = get_customer_transaction()

# get random sample in range of minimum cardholder id, and max cardholder id
# (sample() draws without replacement, so the three ids are distinct)
card_holder_sample = random.sample(range(all_card_holder_df['cardholder'].min(), all_card_holder_df['cardholder'].max() +1), k=3)

#loop through range and print results
for car_holder in card_holder_sample:
    print (f"anamolies for cardhoder {car_holder}\n")
    print(find_anamoly(car_holder))
# +
# delete debug code.
# outlier_df = find_anamoly(18)
# outlier_df
# -
# delete
# Find anomalous transactions for 3 random card holders
# ## Identifying Outliers Using Interquartile Range
# +
# Write a function that locates outliers using interquartile range
def find_int_quart_anamoly (in_cust_id):
    """Return a cardholder's transactions outside the 5th-95th percentile band.

    NOTE(review): despite the name, this uses the 5th/95th percentiles rather
    than the true interquartile range (25th/75th percentiles +/- 1.5*IQR);
    confirm which definition the assignment expects.
    """
    cust_df = get_customer_transaction (in_cust_id)
    # percentile thresholds for this cardholder's transaction amounts
    q5, q95 = np.percentile(cust_df['amount'], 5), np.percentile(cust_df['amount'], 95)
    # anomalous = outside the [q5, q95] band
    outlier_df = cust_df.loc[(cust_df['amount'] > (q95)) | (cust_df['amount'] < (q5))]
    return outlier_df
# -
# Write a function that locates outliers using interquartile range
# +
# Find anomalous transactions for 3 random card holders
# NOTE(review): duplicates the sampling logic of the earlier cell; could be
# factored into a shared helper taking the outlier function as a parameter.
all_card_holder_df = get_customer_transaction()

# get random sample in range of minimum cardholder id, and max cardholder id
card_holder_sample = random.sample(range(all_card_holder_df['cardholder'].min(), all_card_holder_df['cardholder'].max() +1), k=3)

#loop through range and print results
for car_holder in card_holder_sample:
    print (f"interquartile anamolies for cardhoder {car_holder}\n")
    print(find_int_quart_anamoly(car_holder))
# -
| Starter_Files/Scratch/HW7 Challenbe v 1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variables
# (and Comments)
# ## var·i·a·ble
# /ˈverēəb(ə)l/
# <ol>
# <li> not consistent or having a fixed pattern; liable to change. <br />
# <em>"the quality of hospital food is highly variable"</em></li>
# <br />
# <li> able to be changed or adapted. <br />
# <em> "the drill has variable speed" </em></li>
# </ol>
# Variables in Computer Science are generally any letter(s) that you can assign values. Typically these must start with a letter, and it is good practice to do so.
#
# Usually the first letter is lowercase for... reasons that are not important at the moment.
#
# You are able to put numbers in a variable name as long as the first character is a letter.
# ## Comments
# We can also make *comments* when programming, so we know what the code does. These are little notes for ourselves or anyone else that may want to read our code.
#
# Comments are very important because we cannot always remember what our train of thought was while coding if we are to look back after a few months or even a few weeks after writing something.
# Most commonly in my experience, single-line comments are denoted with:
# - __#__ (a hash-tag, also-known-as a pound-sign) - Notably used in Python and Ruby
# - __//__ (two forward-slashes) - Notably used in Java, JavaScript, and many C Languages (C, C++, C#)
#
# +
# This is a comment in Python
# -
# It is worth noting that there is an abundance of Programming Languages, and many have their own format for commenting. There are also multi-line comments, but all of this is beyond the scope at the moment. Commenting is an important part of programming, and is usually easy to research when starting with a new language.
#
# *Further Reading: [Commenting Wikipedia Article](https://en.wikipedia.org/wiki/Comment_(computer_programming))*
# In this document we will be using Python's comment character, "__#__"
# ## Assigning values to variables
# Variables are... well variable. They are place-holders that hold values that can change.
#
# We can define "x" as a variable and give it a value.
# ### Integers
# For example, we can assign a number to a variable:
# ```
# Lets assign the value of 5 to x.
# ```
# or notated like so:
# ```
# x = 5
# ```
# We can also assign a negative number (less than zero) to a variable:
# ```
# x = -5
# ```
#
# The above examples are called *integers*. If you remember from any algebra course you may or may not have taken, it is any whole (non-decimal) number.
#
# It is worth noting that the last assignment will be the value that x will have. In this case it would be -5.
#
# *(More about integers can be read [here on Wikipedia](https://en.wikipedia.org/wiki/Integer))*
# ### Decimals
#
# Speaking of decimals, we can also assign numbers *with* decimals, also known as a floating point number or more simply, a "float".
#
# *(The "floating point" in the name is in reference to the actual decimal point, and its placement when using Scientific Notation. More on floats can be read [here on Wikipedia](https://en.wikipedia.org/wiki/Floating-point_arithmetic).)*
#
# ### Letters and words
# We can also assign a letter or letters and/or word(s) to a variable:
# ```
# y = 'five'
# ```
#
# These are called "strings". Strings should generally be enclosed in quotes so that the text is not confused with variable names or other code.
#
#
# We can also assign a number or numbers to a string, but these cannot be used for computation (more on this in the Operations section):
# ```
# y = '5'
# ```
#
# While there are some reserved words for functions, you can make just about anything else a variable name.
# ### True or False
#
# Variables can also be given a value of True or False, typically like so:
# ```
# z = True
# ```
# or
# ```
# z = False
# ```
#
# These are called Boolean values (and sometimes called Binary since they only have two possible values).
# ## Recap on variables
#
# So in the most basic view, variables can generally be one of the following:
# - Integer (int)
# - Floating Point Number (float)
# - Letters (string)
# - True/False (bool)
#
# There are also more complex variables that can store multiple values of the types listed above.
#
# It is worth noting that there are varieties of the Integers and Floating Point Numbers that can be used for really really large numbers.
| 01_variables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Updating Microstate Files
#
# This jupyter notebook incorporates corrections to enumerated microstates files. I will also add canonical SMILES column to enumerated microstates files, in addition to canonical isomeric SMILES.
#
# Updated microstate list files to be created for 24 molecules (v1_6_1):
# * `SMX_microstates.csv`
# * `SMX_microstates_deprecated.csv`
# * `SMX_microstate_IDs_with_2D_depiction.xlsx`
import pandas as pd
from openeye.oechem import *
import oenotebook as oenb
import os
import glob
# ### Creating updated microstate list files with canonical SMILES for SAMPL6 repo
# +
path_to_correction_files = "corrections_for_v1_6_1_cumulative/"
path_to_corrected_files = "updated_microstate_lists/"

# Iterate over the 24 SAMPL6 molecules (SM01 ... SM24), applying the
# corrections recorded in each SMXX_correction.csv and writing updated
# microstate lists (CSV + 2D-depiction XLSX) plus a file of deprecated
# microstates when any exist.
for j in range(24):
    mol_name = "SM" + str(j + 1).zfill(2)
    print(mol_name, "...")

    # Read correction file for this molecule.
    correction_file = path_to_correction_files + mol_name + "_correction.csv"
    df_microstates = pd.read_csv(correction_file)

    # Normalize every SMILES to OpenEye canonical isomeric SMILES.
    for i in range(df_microstates.shape[0]):
        smiles = df_microstates.loc[i, "canonical isomeric SMILES"]
        mol = OEGraphMol()
        OESmilesToMol(mol, smiles)
        df_microstates.loc[i, "canonical isomeric SMILES"] = OEMolToSmiles(mol)

    # Detect deprecated / added microstates from the "correction" column.
    correction = df_microstates["correction"]
    deprecated_label = bool(correction.isin(["deprecated"]).any())
    if deprecated_label:
        print("Deprecated microstate found.")
    added_label = bool(correction.isin(["added"]).any())
    if added_label:
        print("Added microstate found.")

    # Write deprecated microstates to a separate file.
    if deprecated_label:
        df_deprecated = df_microstates.loc[df_microstates["correction"] == "deprecated"]
        print("Number of deprecated microstates of {}: {}".format(mol_name, df_deprecated.shape[0]))
        df_deprecated = df_deprecated.rename(columns={"correction": "remarks"})
        deprecated_microstates_file_name = path_to_corrected_files + mol_name + "_microstates_deprecated.csv"
        df_deprecated.to_csv(deprecated_microstates_file_name, index=False)
        print("Created:", deprecated_microstates_file_name)
        print("\n")

    # Build the updated list: drop deprecated rows, keep added ones.
    if deprecated_label and added_label:
        df_remaining = df_microstates.loc[df_microstates["correction"] != "deprecated"]
        df_remaining = df_remaining.loc[df_remaining["correction"] != "added"]
        print("Number of remaining microstates of {}: {}".format(mol_name, df_remaining.shape[0]))
        df_added = df_microstates.loc[df_microstates["correction"] == "added"]
        print("Number of new microstates of {}: {}".format(mol_name, df_added.shape[0]))
        df_updated = df_microstates.loc[df_microstates["correction"] != "deprecated"]
        print("Total number of microstates in updated list of {}: {}".format(mol_name, df_updated.shape[0]))
    elif added_label:  # additions only, nothing deprecated
        # BUG FIX: the original filtered `df_remaining` before it was defined
        # in this branch, raising NameError on the first added-only molecule
        # (or silently reusing a previous iteration's frame). Filter the
        # freshly-read `df_microstates` instead.
        df_remaining = df_microstates.loc[df_microstates["correction"] != "added"]
        print("Number of remaining microstates of {}: {}".format(mol_name, df_remaining.shape[0]))
        df_added = df_microstates.loc[df_microstates["correction"] == "added"]
        print("Number of new microstates of {}: {}".format(mol_name, df_added.shape[0]))
        df_updated = df_microstates
        print("Total number of microstates in updated list of {}: {}".format(mol_name, df_updated.shape[0]))
    elif deprecated_label:  # deprecations only, nothing added
        df_updated = df_microstates.loc[df_microstates["correction"] != "deprecated"]
        print("Total number of microstates in updated list of {}: {}".format(mol_name, df_updated.shape[0]))
    else:
        df_updated = df_microstates
        print("No correction to microstate list.")
        print("Total number of microstates in updated list of {}: {}".format(mol_name, df_updated.shape[0]))

    df_updated = df_updated.loc[:, ("microstate ID", "canonical isomeric SMILES")]
    df_updated = df_updated.reset_index(drop=True)

    # Add canonical (non-isomeric) SMILES column and write SMXX_microstates.csv.
    df_updated["canonical SMILES"] = None
    for i in range(df_updated.shape[0]):
        can_iso_smiles = df_updated.loc[i, "canonical isomeric SMILES"]
        mol = OEGraphMol()
        OESmilesToMol(mol, can_iso_smiles)
        df_updated.loc[i, "canonical SMILES"] = OECreateCanSmiString(mol)

    updated_microstates_file_name = path_to_corrected_files + mol_name + "_microstates.csv"
    df_updated.to_csv(updated_microstates_file_name, index=False)
    print("Created:", updated_microstates_file_name)
    print("\n")

    # Create an Excel file with 2D depictions for the updated microstate list.
    # The duplicated ID/SMILES columns match the csv2xlsx.py input convention
    # ("Molecule" column is rendered as a structure image).
    df_2D_input = pd.DataFrame()
    df_2D_input["Molecule"] = df_updated["canonical isomeric SMILES"]
    df_2D_input["Microstate ID"] = df_updated["microstate ID"]
    df_2D_input["microstate ID"] = df_updated["microstate ID"]
    df_2D_input["canonical isomeric SMILES"] = df_updated["canonical isomeric SMILES"]
    df_2D_input["canonical SMILES"] = df_updated["canonical SMILES"]
    csv_file_name = path_to_corrected_files + "{}_microstate_IDs_with_2D_depiction.csv".format(mol_name)
    xlsx_file_name = path_to_corrected_files + "{}_microstate_IDs_with_2D_depiction.xlsx".format(mol_name)
    df_2D_input.to_csv(csv_file_name, index=False)
    # !python csv2xlsx.py $csv_file_name $xlsx_file_name
    # !trash $csv_file_name
    print("Created: ", xlsx_file_name)
    print(mol_name, ": Done!")
    print("\n")
# -
| pKa_microstate_enumeration/corrected_microstates_v1_6_1/update_microstate_lists_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ゼロから始める線形回帰
#
# 高性能な機械学習のライブラリは単純な問題をいとも簡単に解いてしまうでしょうが、中身の見えないライブラリに頼りすぎると、ニューラルネットワークがどのように動いているのか詳細を知ることは決してできないでしょう。まず最初に、自ら手を動かして、ゼロから全てを構築してみましょう。ここで利用してよいのはautogradとNDArrayのみとします。まず、[autograd chapter](../chapter01_crashcourse/autograd.ipynb)の章にあったように、必要なライブラリをインポートします。強力な`gluon`もインポートするのですが、今回はデータの読み込みにだけ利用します。
#
# <!--
# Powerful ML libraries can eliminate repetitive work, but if you rely too much on abstractions, you might never learn how neural networks really work under the hood. So for this first example, let's get our hands dirty and build everything from scratch, relying only on autograd and NDArray. First, we'll import the same dependencies as in the [autograd chapter](../chapter01_crashcourse/autograd.ipynb). We'll also import the powerful `gluon` package but in this chapter, we'll only be using it for data loading. -->
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd, gluon
mx.random.seed(1)
# ## contextのセット
#
# 計算が実際に行われるcontextを指定しようと思います。このチュートリアルはシンプルなので、calculator watchでも実行できると思います。しかし、より良いやり方を学ぶためにも、ここでは2つのcontextを指定します。1つはデータ用、もう1つはモデル用です。
#
# <!--
# We'll also want to specify the contexts where computation should happen. This tutorial is so simple that you could probably run it on a calculator watch. But, to develop good habits we're going to specify two contexts: one for data and one for our models. -->
# Contexts for data and model; both CPU here, but keeping them separate
# makes moving the model to a GPU a one-line change later.
data_ctx = mx.cpu()
model_ctx = mx.cpu()
# ## 線形回帰
#
# まず回帰問題を見ることからはじめます。これは、データ点$x$が与えられたときに、*実数の回帰対象*$y$を予測するタスクです。最も単純でありながら、現在も有用な方法である線形回帰では、予測値は入力されたデータの*線形*結合で表現されると仮定します(なので、*線形*回帰という名前がついています)。
#
# <!--
# To get our feet wet, we'll start off by looking at the problem of regression.
# This is the task of predicting a *real valued target* $y$ given a data point $x$.
# In linear regression, the simplest and still perhaps the most useful approach,
# we assume that prediction can be expressed as a *linear* combination of the input features
# (thus giving the name *linear* regression):
# -->
#
# $$\hat{y} = w_1 \cdot x_1 + ... + w_d \cdot x_d + b$$
#
# データ点の集合$X$と対応する回帰対象$\boldsymbol{y}$が与えられたときに、データ点$\boldsymbol{x}_i$と対応するラベル$y_i$との関係を近似的に表現する*重み*のベクトル
# $\boldsymbol{w}$とバイアス$b$(オフセットや切片とも呼ばれます)を探します。もう少し数学的な記法を使うなら、予測値$\boldsymbol{\hat{y}}$と対応するデータ点の集合$X$を行列とベクトルの乗算で以下の用に表現できます。
#
# $$\boldsymbol{\hat{y}} = X \boldsymbol{w} + b$$
#
# <!--
#
# Given a collection of data points $X$, and corresponding target values $\boldsymbol{y}$,
# we'll try to find the *weight* vector $\boldsymbol{w}$ and bias term $b$
# (also called an *offset* or *intercept*)
# that approximately associate data points $\boldsymbol{x}_i$ with their corresponding labels ``y_i``.
# Using slightly more advanced math notation, we can express the predictions $\boldsymbol{\hat{y}}$
# corresponding to a collection of datapoints $X$ via the matrix-vector product:
#
# $$\boldsymbol{\hat{y}} = X \boldsymbol{w} + b$$ -->
#
# 線形回帰へと進む前に、以下の2つが必要になります。
#
# * 現在のモデルの良さを測る方法
# * モデルがより良くなるように、モデルを操作する方法
#
# <!-- Before we can get going, we will need two more things
#
# * Some way to measure the quality of the current model
# * Some way to manipulate the model to improve its quality
# -->
#
# ### 二乗誤差
#
# モデルが良いかどうかを述べるためには、そのモデルの良さを図る方法が必要になります。一般的には、予測が正解からどれだけ離れているかを表す*ロス関数*を定義します。古くから行われている線形回帰では、通常、二乗誤差を利用します。具体的には、ロスはすべてのデータ点に対して、二乗誤差$(y_i-\hat{y})^2$の和をとったもので、以下のように示されます。
# <!--
# In order to say whether we've done a good job,
# we need some way to measure the quality of a model.
# Generally, we will define a *loss function*
# that says *how far* are our predictions from the correct answers.
# For the classical case of linear regression,
# we usually focus on the squared error.
# Specifically, our loss will be the sum, over all examples, of the squared error $(y_i-\hat{y})^2$ on each: -->
#
# $$\ell(y, \hat{y}) = \sum_{i=1}^n (\hat{y}_i-y_i)^2.$$
#
# 1次元のデータであれば、単一の特徴と回帰対象の関係を簡単に可視化できます。つまり、線形回帰式と、データ点ごとの誤差を可視化することができます。二乗誤差は*外れ値の影響を非常に受ける*ということに注意してください。以下の可視化した線形回帰式をみると、たった一つの外れ値が大きなロスにつながっています。
#
# <!--
# For one-dimensional data, we can easily visualize the relationship between our single feature and the target variable. It's also easy to visualize a linear predictor and it's error on each example.
# Note that squared loss *heavily penalizes outliers*. For the visualized predictor below, the lone outlier would contribute most of the loss.
# -->
#
# 
#
#
# ### モデルに対する操作
#
# 誤差を最小化するためには、モデルを改良していく仕組みが必要です。これは*パラメータ*の
# $\boldsymbol{w}$と$b$の値を上手く選ぶことによって実現され、学習アルゴリズムが行います。学習データを($X$, $y$)、モデルの関数を$\hat{y} = X\boldsymbol{w} + b$とおくと、学習とは利用可能なデータにもとづいて$\boldsymbol{w}$と$b$を決定することです。
#
# <!-- For us to minimize the error,
# we need some mechanism to alter the model.
# We do this by choosing values of the *parameters*
# $\boldsymbol{w}$ and $b$.
# This is the only job of the learning algorithm.
# Take training data ($X$, $y$) and the functional form of the model $\hat{y} = X\boldsymbol{w} + b$.
# Learning then consists of choosing the best possible $\boldsymbol{w}$ and $b$ based on the available evidence. -->
#
#
# ### 歴史的経緯
#
# ひょっとすると、線形回帰が古くからの統計的モデルであることをご存知のかたもいるかもしれません。[Wikipediaによれば](https://en.wikipedia.org/wiki/Regression_analysis#History)、ルジャンドルが1805年に最小二乗法を開発し、のちの1809年にガウスが再発見しました。推測が入りますが、何度か最小二乗法の論文について話していたルジャンドルは、ガウスが彼の論文を引用しないことにいらだっていたそうです。
#
# <!--
# You might reasonably point out that linear regression is a classical statistical model.
# [Wikipediaによれば](https://en.wikipedia.org/wiki/Regression_analysis#History),
# Legendre first developed the method of least squares regression in 1805,
# which was shortly thereafter rediscovered by Gauss in 1809.
# Presumably, Legendre, who had Tweeted about the paper several times,
# was peeved that Gauss failed to cite his arXiv preprint.
# -->
#
# 
#
# 起源についての話はさておいて、ルジャンドルとガウスが線形回帰についての仕事をしていたら、それが最初のDeep Learningの研究者になったかもしれない、と思うかもしれません。もし、線形回帰がDeep Learningに属さないのであれば、なぜニューラルネットワークのチュートリアルの最初に線形モデルを持ってきたのでしょうか。実は、線形回帰は、最も単純で有効なニューラルネットワークとして表現されることがわかっています。ニューラルネットワークは、向きのあるエッジでつながった、ノード(別名ニューロン)の集合です。たいていのネットワークは、ノードの集合をさらにレイヤとし、各レイヤからの出力はさらに上のレイヤへと入力されていきます。ノードの値を計算するためには、ノードへの入力の重み付け和を計算し(重みは$w$とする)、*活性化関数*を適用します。線形回帰の場合は2つのレイヤを利用します。1つは入力(オレンジで図示)と、もう1つは出力に対応する1ノードのレイヤ(緑で図示)です。出力ノードのための活性化関数は、値をそのまま返す関数を利用します。
#
# <!-- Matters of provenance aside, you might wonder - if Legendre and Gauss
# worked on linear regression, does that mean there were the original deep learning researchers?
# And if linear regression doesn't wholly belong to deep learning,
# then why are we presenting a linear model
# as the first example in a tutorial series on neural networks?
# Well it turns out that we can express linear regression
# as the simplest possible (useful) neural network.
# A neural network is just a collection of nodes (aka neurons) connected by directed edges.
# In most networks, we arrange the nodes into layers with each feeding its output into the layer above.
# To calculate the value of any node, we first perform a weighted sum of the inputs (according to weights ``w``)
# and then apply an *activation function*.
# For linear regression, we only have two layers, one corresponding to the input (depicted in orange)
# and a one-node layer (depicted in green) corresponding to the ouput.
# For the output node the activation function is just the identity function. -->
#
# 
#
# Deep Learningの観点から線形回帰を見る必要はありませんが、そういった見方をすることはできます(そういうふうに見ていきたいと思います)。議論したようなコンセプトをコードでしっかりと理解するために、実際にゼロから線形回帰のニューラルネットワークのコードを書いてみましょう。
#
# <!--
# While you certainly don't have to view linear regression through the lens of deep learning,
# you can (and we will!).
# To ground the concepts that we just discussed in code,
# let's actually code up a neural network for linear regression from scratch. -->
#
# 進めるにあたって単純な合成データセットをつくります。データセットは、データ点``X[i]``と対応するラベル``y[i]``について、次のようなランダムサンプリングで作ります。入力は、平均が$0$で分散が$1$の正規分布からサンプリングします。特徴量は互いに独立で、別の言い方をすればdiagonal covariance (対角成分のみ共分散が存在する)となります。ラベルは`y[i] = 2 * X[i][0]- 3.4 * X[i][1] + 4.2 + noise` の計算式で決定します。`noise`は平均が$0$、分散が$.01$であるような正規分布からサンプリングします。ラベルを決定する式について、もう少し数学的に書くと以下のようになります。
#
# <!--
# To get going, we will generate a simple synthetic dataset by sampling random data points ``X[i]`` and corresponding labels ``y[i]`` in the following manner. Our inputs will each be sampled from a random normal distribution with mean $0$ and variance $1$. Our features will be independent. Another way of saying this is that they will have diagonal covariance. The labels will be generated accoding to the *true* labeling function `y[i] = 2 * X[i][0]- 3.4 * X[i][1] + 4.2 + noise` where the noise is drawn from a random gaussian with mean ``0`` and variance ``.01``. We could express the labeling function in mathematical notation as: -->
#
# $$y = X \cdot w + b + \eta, \quad \text{for } \eta \sim \mathcal{N}(0,\sigma^2)$$
#
# +
# Synthetic-data dimensions: 2 input features, scalar target, 10k samples.
num_inputs = 2
num_outputs = 1
num_examples = 10000
def real_fn(X):
    """Ground-truth (noise-free) labeling function: y = 2*x0 - 3.4*x1 + 4.2."""
    slope_0, slope_1, intercept = 2, -3.4, 4.2
    return slope_0 * X[:, 0] + slope_1 * X[:, 1] + intercept
# Features ~ N(0, 1); targets are the true linear function plus
# Gaussian noise with std 0.1 (variance .01).
X = nd.random_normal(shape=(num_examples, num_inputs), ctx=data_ctx)
noise = .1 * nd.random_normal(shape=(num_examples,), ctx=data_ctx)
y = real_fn(X) + noise
# -
# ``X``の各行は2次元のデータ点で、``Y``の各行は1次元の回帰対象の値になります。
#
# <!-- Notice that each row in ``X`` consists of a 2-dimensional data point and that each row in ``Y`` consists of a 1-dimensional target value. -->
print(X[0])
print(y[0])
# 合成データ`X`は`data_ctx`の上で動いていること、noiseもまた`data_ctx`の上で動いていることから、`X`と`noise`の組み合わせとして`real_fn`で計算されるラベル`y`もまた`data_ctx`上で動きます。ランダムに選択した点に対して、既知の最適なパラメータ(=ラベルを決定する式の係数)であれば、回帰対象のラベルに非常に近い予測をすることができます。
#
# <!-- Note that because our synthetic features `X` live on `data_ctx` and because our noise also lives on `data_ctx`, the labels `y`, produced by combining `X` and `noise` in `real_fn` also live on `data_ctx`.
# We can confirm that for any randomly chosen point,
# a linear combination with the (known) optimal parameters
# produces a prediction that is indeed close to the target value -->
print(2 * X[0, 0] - 3.4 * X[0, 1] + 4.2)
# ``matplotlib``というPythonのグラフ作成ライブラリを利用して散布図を作ることで、2番目の特徴(``X[:, 1]``)と回帰対象``Y``の対応を可視化することができます。``matplotlib``がインストールされていることを確認してください。もしインストールされていなければ、コマンドラインで``pip2 install matplotlib`` (Python 2の場合)や``pip3 install matplotlib`` (Python 3の場合)とするとインストールできると思います。``matplotlib``で散布図を作成するには、``.asnumpy()``とという関数を利用して、``X``と``y``をNumpy arrayの形式に変換する必要があります。
#
# <!-- We can visualize the correspondence between our second feature (``X[:, 1]``) and the target values ``Y`` by generating a scatter plot with the Python plotting package ``matplotlib``. Make sure that ``matplotlib`` is installed. Otherwise, you may install it by running ``pip2 install matplotlib`` (for Python 2) or ``pip3 install matplotlib`` (for Python 3) on your command line.
#
# In order to plot with ``matplotlib`` we'll just need to convert ``X`` and ``y`` into NumPy arrays by using the `.asnumpy()` function. -->
import matplotlib.pyplot as plt
plt.scatter(X[:, 1].asnumpy(),y.asnumpy())
plt.show()
# ## データイテレータ
#
# ニューラルネットワークを利用し始めると、データポイント全体を速く回して処理したくなると思います。例えば、ある時点において``k``個のデータのかたまりを取り出したり、データをシャッフルしたりする場合などがあります。MXNetのデータイテレータは、データの取り出しと加工について素晴らしい機能をいくつも提供しています。特に、シンプルな``DataLoader``クラスを使うことがあると思います。これは、学習するときに直感的に利用できる``ArrayDataset``を提供しています。
#
# <!-- Once we start working with neural networks, we're going to need to iterate through our data points quickly. We'll also want to be able to grab batches of ``k`` data points at a time, to shuffle our data. In MXNet, data iterators give us a nice set of utilities for fetching and manipulating data. In particular, we'll work with the simple ``DataLoader`` class, that provides an intuitive way to use an ``ArrayDataset`` for training models.
# -->
#
# `gluon.data.ArrayDataset(X, y)`を呼び出せば、`X`と`y`を`ArrayDataset`に読み出すことができます。`X`が例えば画像のような多次元で、`y`がラベルの1次元配列であっても問題ありません。一つ守るべきこととしては、それらは第1の軸に沿って同じ長さである必要があります。つまり`len(X) == len(y)`です。
# <!--
#
# We can load `X` and `y` into an ArrayDataset, by calling `gluon.data.ArrayDataset(X, y)`. It's ok for `X` to be a multi-dimensional input array (say, of images) and `y` to be just a one-dimensional array of labels. The one requirement is that they have equal lengths along the first axis, i.e., `len(X) == len(y)`. -->
#
# `ArrayDataset`が与えられれば、`ArrayDataset`からランダムにバッチ(データの集まり)をとってくる`DataLoader`を作ることができます。その際、2つの引数が必要になります。まず1つ目は`batch_size`で一度にいくつのデータ点をとってくるかです。2つ目は、データセットからデータを反復的に取るなかで、そのデータをシャッフルするかどうかです。
#
# <!--
# Given an `ArrayDataset`, we can create a DataLoader which will grab random batches of data from an `ArrayDataset`. We'll want to specify two arguments. First, we'll need to say the `batch_size`, i.e., how many examples we want to grab at a time. Second, we'll want to specify whether or not to shuffle the data between iterations through the dataset. -->
# Mini-batch loader: shuffles the 10k examples and serves them 4 at a time.
batch_size = 4
train_data = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y),
                                   batch_size=batch_size, shuffle=True)
# DataLoader (``train_data``)をいったん初期化してしまえば、Pythonのリストであるかのように、`train_data`から繰り返しバッチをとることが簡単に行なえます。好きな反復方法を利用することもできて、foreachであれば`for data, label in train_data`、enumerationsであれば`for i, (data, label) in enumerate(train_data)`となります。以下では1つのバッチを取り出して、すぐにループから抜けてみます。
#
# <!-- Once we've initialized our DataLoader (``train_data``), we can easily fetch batches by iterating over `train_data` just as if it were a Python list. You can use your favorite iterating techniques like foreach loops: `for data, label in train_data` or enumerations: `for i, (data, label) in enumerate(train_data)`.
# First, let's just grab one batch and break out of the loop. -->
for i, (data, label) in enumerate(train_data):
print(data, label)
break
# 同じコードを再び動かしたとしても、異なるバッチが得られることに気づくと思います。これは`DataLoader`において、`shuffle=True`としているためです。
#
# <!--
# If we run that same code again you'll notice that we get a different batch. That's because we instructed the `DataLoader` that `shuffle=True`. -->
for i, (data, label) in enumerate(train_data):
print(data, label)
break
# 最後に、もしデータセット全体を渡していたとしたら、バッチの数を数えてみてください。2,500バッチあることに気づくと思います。10,000のデータに対して、`DataLoader`のバッチサイズを4にしているので、このバッチの数を計算することができます。
#
# <!-- Finally, if we actually pass over the entire dataset, and count the number of batches, we'll find that there are 2500 batches. We expect this because our dataset has 10,000 examples and we configured the `DataLoader` with a batch size of 4. -->
for i, (data, label) in enumerate(train_data):
pass
print(i+1)
# ## モデルのパラメータ
#
# パラメータのためのメモリを確保して初期値をセットしましょう。パラメータの初期化も`model_ctx`上で行います。
#
# <!-- Now let's allocate some memory for our parameters and set their initial values. We'll want to initialize these parameters on the `model_ctx`. -->
# Randomly initialize the learnable parameters on the model context.
w = nd.random_normal(shape=(num_inputs, num_outputs), ctx=model_ctx)
b = nd.random_normal(shape=num_outputs, ctx=model_ctx)
params = [w, b]
# 続くセルでは、データにより適合するように、これらのパラメータを更新していきます。これには、パラメータに関して*ロス関数*の勾配を計算する(つまり多次元の微分)が必要になります。ロスを減少させる方向へとパラメータを更新します。しかし、まずは勾配のためのメモリを確保します。
#
# <!-- In the succeeding cells, we're going to update these parameters to better fit our data. This will involve taking the gradient (a multi-dimensional derivative) of some *loss function* with respect to the parameters. We'll update each parameter in the direction that reduces the loss. But first, let's just allocate some memory for each gradient. -->
# Allocate gradient buffers so autograd can record and backprop through params.
for param in params:
    param.attach_grad()
# ## ニューラルネットワーク
#
# 次にモデルを定義します。今回の場合、最も単純で*有用な*線形回帰のモデルを対象としています。線形モデルの出力を計算するために、与えられた入力とモデルの重み(``w``)を掛け算して、オフセット``b``を加えます。
#
# <!-- Next we'll want to define our model. In this case, we'll be working with linear models, the simplest possible *useful* neural network. To calculate the output of the linear model, we simply multiply a given input with the model's weights (``w``), and add the offset ``b``. -->
def net(X):
    """Linear model: returns X . w + b using the module-level parameters."""
    projected = mx.nd.dot(X, w)
    return projected + b
# これは簡単ですね。
# ## ロス関数
#
# モデルの学習では、学習の期間が過ぎていくとともに、モデルを良くしていくことを目的としています。この目的をもっと具体化するために、まずは*良い*ということを定義する必要があります。この場合、予測値と実際の値との二乗誤差を利用します。
#
# <!-- Train a model means making it better and better over the course of a period of training. But in order for this goal to make any sense at all, we first need to define what *better* means in the first place. In this case, we'll use the squared distance between our prediction and the true value. -->
def square_loss(yhat, y):
    """Mean squared error between predictions *yhat* and targets *y*."""
    residual = yhat - y
    return nd.mean(residual ** 2)
# ## 最適化手法
#
# 線形回帰は実はclosed-formな解をもっており、行列の演算によって解析的に求めることが可能です。しかし、多くの人が興味をもつようなモデルというのは解析的に解くことはできません。そこで、Stochastic Gradient Descent (確率的勾配降下法)を利用します。各ステップにおいて、データセットからランダムサンプリングしたバッチを利用して、重みに対するロスの勾配を計算します。そして、ロスを減少させる方向へと少しパラメータを変化させます。その変化の大きさは学習率``lr``によって決定します。
#
# <!-- It turns out that linear regression actually has a closed-form solution. However, most interesting models that we'll care about cannot be solved analytically. So we'll solve this problem by stochastic gradient descent. At each step, we'll estimate the gradient of the loss with respect to our weights, using one batch randomly drawn from our dataset. Then, we'll update our parameters a small amount in the direction that reduces the loss. The size of the step is determined by the *learning rate* ``lr``. -->
def SGD(params, lr):
    """One in-place step of vanilla stochastic gradient descent.

    Each parameter is overwritten with ``param - lr * param.grad``;
    gradients must already have been populated via ``loss.backward()``.
    """
    for tensor in params:
        update = tensor - lr * tensor.grad
        tensor[:] = update
# ## 学習ループの実行
#
# さて全ての必要なピースがそろったところで、学習ループを書いて、それらをつなげる必要があります。まずは、データセットをモデルに入力する回数であるエポック``epoch``を決めます。データセットを入力するたびに、``train_data``からデータと対応するラベルのバッチを反復的に取り出します。各バッチでは、次のような処理を行います。
#
# * データをネットワークに入力して前向きに実行し、予測値``yhat``とロス``loss``を計算
# * ネットワークの最後から勾配を順番に逆方向に計算 (``loss.backward()``)
# * SGDの最適化手法を利用してモデルのパラメータを更新
#
# <!-- Now that we have all the pieces, we just need to wire them together by writing a training loop.
# First we'll define ``epochs``, the number of passes to make over the dataset. Then for each pass, we'll iterate through ``train_data``, grabbing batches of examples and their corresponding labels.
#
# For each batch, we'll go through the following ritual:
#
# * Generate predictions (``yhat``) and the loss (``loss``) by executing a forward pass through the network.
# * Calculate gradients by making a backwards pass through the network (``loss.backward()``).
# * Update the model parameters by invoking our SGD optimizer. -->
#
# +
epochs = 10
learning_rate = .0001
num_batches = num_examples/batch_size  # 10000 / 4 = 2500 batches per epoch

# Basic training loop: forward pass under autograd.record(), backward pass,
# SGD update, then report the mean per-batch loss for the epoch.
for e in range(epochs):
    cumulative_loss = 0
    # inner loop
    for i, (data, label) in enumerate(train_data):
        data = data.as_in_context(model_ctx)
        label = label.as_in_context(model_ctx).reshape((-1, 1))
        with autograd.record():
            output = net(data)
            loss = square_loss(output, label)
        loss.backward()
        SGD(params, learning_rate)
        cumulative_loss += loss.asscalar()
    print(cumulative_loss / num_batches)
# -
# ## 学習プロセスの可視化
#
# 以降の章では、もっと現実的なデータ、高等なモデル、複雑なロス関数、などを扱います。しかし、根本的な考え方は同じで、学習ループはどこでもみかけます。これらのチュートリアルは単体で完結するので、ここで述べた処理について理解が深まっていくと思います。モデルを更新するだけではなく、その内容を記録しておきたいと考えるようになると思います。また、学習の進捗をトラッキングしたり、可視化したいと思うかもしれません。そこで、ちょっと洗練された学習ループについて以下で紹介したいと思います。
# <!--
# In the succeeding chapters, we'll introduce more realistic data, fancier models, more complicated loss functions, and more. But the core ideas are the same and the training loop will look remarkably familiar. Because these tutorials are self-contained, you'll get to know this ritual quite well. In addition to updating our model, we'll often want to do some bookkeeping. Among other things, we might want to keep track of training progress and visualize it graphically. We demonstrate one slighly more sophisticated training loop below. -->
# +
############################################
# Re-initialize parameters because they
# were already trained in the first loop
############################################
# Slice-assignment keeps the existing NDArrays (and their attached
# gradient buffers) alive instead of rebinding the names.
w[:] = nd.random_normal(shape=(num_inputs, num_outputs), ctx=model_ctx)
b[:] = nd.random_normal(shape=num_outputs, ctx=model_ctx)
############################################
# Script to plot the losses over time
############################################
def plot(losses, X, sample_size=100):
    """Show the loss curve next to estimated-vs-true function on a data sample."""
    steps = list(range(len(losses)))
    fig, (loss_ax, fit_ax) = plt.subplots(1, 2)
    loss_ax.set_title('Loss during training')
    loss_ax.plot(steps, losses, '-r')
    fit_ax.set_title('Estimated vs real function')
    sample_x = X[:sample_size, 1].asnumpy()
    fit_ax.plot(sample_x,
                net(X[:sample_size, :]).asnumpy(), 'or', label='Estimated')
    fit_ax.plot(sample_x,
                real_fn(X[:sample_size, :]).asnumpy(), '*g', label='Real')
    fit_ax.legend()
    plt.show()
learning_rate = .0001
losses = []
plot(losses, X)  # show the (random) untrained fit first

# Same loop as above, but records the mean loss per epoch for plotting.
for e in range(epochs):
    cumulative_loss = 0
    for i, (data, label) in enumerate(train_data):
        data = data.as_in_context(model_ctx)
        label = label.as_in_context(model_ctx).reshape((-1, 1))
        with autograd.record():
            output = net(data)
            loss = square_loss(output, label)
        loss.backward()
        SGD(params, learning_rate)
        cumulative_loss += loss.asscalar()
    print("Epoch %s, batch %s. Mean loss: %s" % (e, i, cumulative_loss/num_batches))
    losses.append(cumulative_loss/num_batches)

plot(losses, X)
# -
# ## まとめ
#
# mxnet.ndarrayやmxnet.autogradを利用して、ゼロから統計モデルを構築する方法を紹介しました。次のチュートリアルでは、この基礎にもとづいて、最新のニューラルネットワークの基本的なアイデアや、そのようなモデルをMXNetの`gluon`を用いて、わずかなコードで非常に抽象的な記述ができることぉ紹介します。
#
# <!-- You've seen that using just mxnet.ndarray and mxnet.autograd, we can build statistical models from scratch. In the following tutorials, we'll build on this foundation, introducing the basic ideas behind modern neural networks and demonstrating the powerful abstractions in MXNet's `gluon` package for building complex models with little code. -->
# ## 次は
# [gluonを利用した線形回帰](../chapter02_supervised-learning/linear-regression-gluon.ipynb)
# For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
| chapter02_supervised-learning/linear-regression-scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchsummary import summary
import numpy as np
import pickle
import tqdm
import itertools
from itertools import islice
# -
import urllib.request
import shutil
import zipfile
import os
import pickle
from collections import Counter
import numpy as np
# # %load_ext autoreload
# # %autoreload 2
from column_models import *
# +
url = 'http://mattmahoney.net/dc/text8.zip'
filename = 'text8.zip'
train_size = 99000000  # first 99M characters reserved for training

# Download text8 once (skipped if already on disk), then read the single
# member file of the zip into one big in-memory string.
if not os.path.isfile(filename):
    print('Downloading text8 dataset...')
    with urllib.request.urlopen(url) as response, \
            open(filename, 'wb') as outfile:
        shutil.copyfileobj(response, outfile)
rawdata = zipfile.ZipFile(filename).read('text8').decode('utf-8')
# -
len(rawdata)
rawdata[:200]
# +
# train_split = rawdata[:train_size]
# valid_split = rawdata[train_size:]
# vocab = Counter()
# +
# for c in train_split:
# vocab[c] += 1
# vocab_cut = {k: v for k, v in vocab.items() if v > 10}
# vocab_sorted = sorted(vocab_cut.items(), key=lambda x: x[1], reverse=True)
# wordmap = {k: id + 1 for id, (k, _) in enumerate(vocab_sorted)}
# +
# len(wordmap)
# +
# wordmap
# +
# word_to_ix = {"hello": 0, "world": 1}
# embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings
# lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
# hello_embed = embeds(lookup_tensor)
# print(hello_embed)
# +
# print(embeds(torch.tensor([0,0,0,1], dtype=torch.long)))
# print(embeds(torch.tensor(1, dtype=torch.long)))
# +
## This is not THE most efficient thing, is a step to learn ... alter will deal with memory issues
# to change things for the better the input text should be read by chunks
# splicing through the text such as to create the input vectors
def get_next_batch_txt(in_txt, index=0, char_len=1024, step=128, batch_size=128):
    """Slice the next batch of overlapping character windows out of *in_txt*.

    :param in_txt: source text to slice
    :param index: position at which the first window starts
    :param char_len: length (in characters) of each window
    :param step: offset between consecutive window starts
    :param batch_size: maximum number of windows per batch
    :returns: (windows, next_index); next_index is -1 once the end of the
        text has been reached, signalling the caller to stop.

    NOTE(review): windows are clipped at len(in_txt) - 1, so the very last
    character is never emitted and trailing windows can be shorter than
    char_len — confirm this is intended.
    """
    windows = []
    for _ in range(batch_size):
        stop = min(index + char_len, len(in_txt) - 1)
        windows.append(in_txt[index:stop])
        index += step
        if index >= len(in_txt) - step:
            index = -1
            break
    return windows, index
# simple tokenizer, should do something to make it fast instead of char by char ... ?
def tokenize_txt(txt, tokendict, dim):
    """Map each character of *txt* to its integer token id.

    Unknown characters map to 0.  The result has length *dim*; positions
    past len(txt) are left uninitialized (np.empty), which assumes
    len(txt) == dim for full windows — TODO confirm with callers.
    """
    ids = np.empty(dim)
    for pos, ch in enumerate(txt):
        ids[pos] = tokendict.get(ch, 0)
    return ids
def tokenize_batch(batch, tokendict, dim):
    """Tokenize every text in *batch* and stack into a (batch, dim) array."""
    return np.stack([tokenize_txt(sample, tokendict, dim) for sample in batch])
def tensorize_batch(batch):
    """Wrap a (batch_size, seq_len) token-id array as a LongTensor.

    No axis reshuffling happens here: this runs before the embedding
    layer, so entries stay integer token ids in (batch_size, seq_len).
    """
    return torch.tensor(batch).long()
# -
def get_all_batches(txt, token_dict, char_len=1024, step=128, batch_size=128):
    """Generator of (raw_windows, token_array, tensor) triples over *txt*.

    Walks forward until get_next_batch_txt signals exhaustion by
    returning an index of -1.
    """
    cursor = 0
    while cursor >= 0:
        raw, cursor = get_next_batch_txt(txt, cursor, char_len, step, batch_size)
        tokens = tokenize_batch(raw, token_dict, char_len)
        yield raw, tokens, tensorize_batch(tokens)
# +
# list(get_all_batches(rawdata[:100], txt2num_2seg, 20, 5, 12))
# +
# batch_gen = get_all_batches(rawdata[:100], txt2num_2seg, 1024, 128, 128)
# -
in_txt = rawdata
index=0
char_len=1024
step=128
batch_size=128
# +
# txtbatch = get_next_batch_txt(in_txt, index=0, char_len=1024, step=128, batch_size=128)
# tok_batch = tokenize_batch(txtbatch, txt2num_2seg, char_len)
# ten_batch = tensorize_batch(tok_batch)
# +
# utf8codes.shape
# +
# embeds = nn.Embedding(*(utf8codes.shape))
# +
# embeds.weight.data.copy_(torch.from_numpy(utf8codes))
# -
# print(embeds(torch.tensor([0,0,0,1], dtype=torch.long)))
# +
def save_obj(obj, name):
    """Pickle *obj* to the file '<name>.pkl' using the highest protocol."""
    with open(name + '.pkl', 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
# -
# ls ../utf8-codes/
# load the UTF-8 codes
# Pre-built UTF-8 code matrix plus the four lookup tables produced by the
# utf8-codes preprocessing step (text<->code and text<->integer, 2-segment).
utf8codes = np.load("../utf8-codes/utf8_code_matrix_2seg.pkl.npy")
txt2code_2seg = load_obj("../utf8-codes/txt2code_2seg.pkl")
code2txt_2seg = load_obj("../utf8-codes/code2txt_2seg.pkl")
txt2num_2seg = load_obj("../utf8-codes/txt2num_2seg.pkl")
num2txt_2seg = load_obj("../utf8-codes/num2txt_2seg.pkl")
# Quick sanity checks (notebook output cells).
txt2num_2seg['a']
utf8codes.shape
# +
# model = FFColNet(utf8codes)
# -
# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
def train(model, criterion, optimizer, batch_gen):
    """Run a single training step on the first batch from ``batch_gen``.

    NOTE(review): this is a smoke-test loop — the trailing ``break`` means
    exactly one batch is processed per call.
    """
    for b,tok,x in batch_gen:
        # x = x.unsqueeze(2).to(device)
        x = x.long().to(device)
        # Self-supervised setup: the target is the input itself.
        y = x
        # print(x.shape, y.shape)
        model.zero_grad()
        output = model(x)
        # Target is the embedding of the last 128 positions — presumably the
        # window the model is asked to reconstruct; TODO confirm.
        ycode = model.embeds(y.long()[:,-128:])
        # print(output.shape, ycode.shape, y.shape)
        loss = criterion(output, ycode)
        loss.backward()
        # clip_global_norm(model, 0.25)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
        optimizer.step()
        print('Loss: {:.5f}'.format(loss.item()))
        break
# def prime_factors(n):
# i = 2
# factors = []
# while i * i <= n:
# if n % i:
# i += 1
# else:
# n //= i
# factors.append(i)
# if n > 1:
# factors.append(n)
# return factors
# +
# prime_factors(324)
# -
# Size of the raw training corpus in characters (inspection cell).
len(rawdata)
def count_parameters(model):
    """Return the total number of trainable parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
# import importlib
# importlib.reload('column_models.MixedConvAttentionColumn')
# model = FFColNet(utf8codes,1024).to(device)
# model = NLPGatedConv1DColumnV1(utf8codes, 324).to(device)
# model = MixedConvAttentionColumn(utf8codes, c_in=[324, 64, 128, 128]) # .to(device)
# model = MixedConvLinearColumns(utf8codes)
# model = MixedConvAttentiveColumns(utf8codes,
#                                   channels=[64, 64, 128, 128, 64],  # channels for blocks
#                                   b_layers=[5, 5, 5],  # number of layers for each bloc
#                                   c_attentive=[64, 64, 64, 64, 64],  # in and out channels of the linear layers inputs
#                                  )
# Current experiment: the lightweight test column (alternative models kept
# commented above for quick switching).
model = MixedColsTest()
# model = MixedColsTest(utf8codes)
# model = ConvColumn(utf8codes, c_in=[324, 64, 128, 128]).to(device)
model = model.to(device)
count_parameters(model)
criterion = nn.BCEWithLogitsLoss().to(device) #The problem was the loss function... CrossEntropyLoss()
# criterion = nn.NLLLoss().to(device)
# optimizer = optim.Adagrad(model.parameters(), lr=0.001, lr_decay=1e-5, weight_decay=1e-5)
optimizer = optim.Adam(model.parameters(),lr=1e-3)
# Fresh generator: 1024-char samples, stride 128, 40 samples per batch.
batch_gen = get_all_batches(rawdata, txt2num_2seg, 1024, 128, 40)
# %%time
# %time train(model, criterion, optimizer, batch_gen)
# +
# b0 = list(batch_gen)[0]
# +
# summary(model, (1, 256,1024))
# -
# Scratch arithmetic: 131072 characters / 1024-char samples = 128 samples.
131072/1024.
| predictors/sequence/text/langmodels/test-setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import numpy.polynomial as P
import scipy as sp
from matplotlib import pyplot as plt
from tqdm import tqdm
#from sklearn.preprocessing import PolynomialFeatures
from multiprocessing import Pool
import multiprocessing
import ZVnbrosse
from potentials import PotentialOne, PotentialTwo
from zv_cv import Eval_ZVCV
from samplers import MCMC_sampler,Generate_train,ULA_light
from baselines import set_function,construct_ESVM_kernel,GenerateSigma
from martingale import approx_q,test_traj
from optimize import Run_eval_test,optimize_parallel_new
from utils import *
import copy
N_burn = 1*10**4 # Burn in period
N_train = 1*10**5 # Number of samples on which we optimize
N_test = 2*10**3 # Number of samples
step = 0.02 # Step size
#put 0.5 for MALA
#step = 0.2
n_traj = 24 # Number of independent MCMC trajectories for test
f_type = "sum"  # functional whose expectation is estimated (see set_function)
# Choose density parameters
d = 2  # dimension of the state space
#B = np.pi/2
mu = 0.5
sigma = 0.5
M = 1.0
# NOTE(review): despite the file being named "Potential_three", this uses
# PotentialTwo — confirm which potential is intended.
Cur_pot = PotentialTwo(M,mu,sigma,d)
# ### Generate data
# +
sampler = {"sampler":"ULA","burn_type":"full","main_type":"full"} # Sampling method
if sampler["sampler"] == "ULA":
    # ULA result converts to a dense array indexed as
    # (trajectory, {0: samples, 1: gradients}, sample, dimension).
    res = Generate_train(n_traj, sampler, Cur_pot, step, N_burn, N_train, d)
    res = np.asarray(res)
    traj,traj_grad = res[:,0,:,:],res[:,1,:,:]
else:
    # Metropolis-adjusted samplers also report an acceptance count (res[i][2]).
    res = Generate_train(n_traj, sampler, Cur_pot, step, N_burn, N_train, d)
    traj = []
    traj_grad = []
    for i in range(len(res)):
        traj.append(res[i][0])
        traj_grad.append(res[i][1])
        print("accepted = ",res[i][2])
    traj = np.asarray(traj)
    traj_grad = np.asarray(traj_grad)
# -
print(traj.shape)
print(traj_grad.shape)
# Flip the gradient sign — presumably to switch between potential-gradient and
# log-density-gradient conventions; confirm against Generate_train.
traj_grad = (-1)*traj_grad
test_seed = 1453  # seed reserved for test trajectories (not used in this cell)
# Evaluate the ZV/CV estimators on each trajectory in parallel, one task per
# trajectory, using all available cores.
nbcores = multiprocessing.cpu_count()
# Context manager guarantees worker cleanup even if starmap raises; the
# original close()-without-join could leave workers behind on error.
with Pool(nbcores) as trav:
    res = trav.starmap(Eval_ZVCV, [(traj[i,:,:],traj_grad[i,:,:],f_type) for i in range (n_traj)])
res_arr = np.asarray(res)
print(res_arr.shape)
# ### Comparison plots
# Box plots of estimator spread across the independent trajectories.
# From the label/data pairing: res_arr columns are
# [0: vanilla, 1: ZV-1, 2: ZV-2, 3: CV-1, 4: CV-2].
title = ""
labels = ['Vanilla\n ULA', 'ULA \nwith ZV-1', 'ULA \nwith CV-1']
data = [res_arr[:,0],res_arr[:,1],res_arr[:,3]]
boxplot_ind(data, title, labels)
# Same first-order comparison without the (much wider) vanilla baseline.
title = ""
labels = ['ULA \nwith ZV-1', 'ULA \nwith CV-1']
data = [res_arr[:,1],res_arr[:,3]]
boxplot_ind(data, title, labels)
# Second-order (ZV-2 / CV-2) variants.
title = ""
labels = ['Vanilla\n ULA', 'ULA \nwith ZV-2', 'ULA \nwith CV-2']
data = [res_arr[:,0],res_arr[:,2],res_arr[:,4]]
boxplot_ind(data, title, labels)
title = ""
labels = ['ULA \nwith ZV-2', 'ULA \nwith CV-2']
data = [res_arr[:,2],res_arr[:,4]]
boxplot_ind(data, title, labels)
| Code/Potential_three.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %load_ext nb_black
import json
from itertools import combinations
import pandas as pd
from food_ke.stubs import (
EVALUATION_FILTER_ATTRIBUTES,
PERFORMANCE_METRICS_MACRO_PATH,
PERFORMANCE_METRICS_MICRO_PATH,
)
from food_ke.composition_measurement import (
CompositionMeasurementSet,
CompositionMeasurement,
)
from food_ke.scripts.results_comparison import calc_pr
# + tags=["parameters"]
path = "../data/results_comparison.json"  # papermill-style parameter cell
# -
# NOTE(review): the open() handle is never closed; acceptable in a notebook.
results = json.load(open(path))
# +
def filter_keys_single(d: dict, keys: list) -> dict:
    """Return a copy of ``d`` restricted to ``keys`` (KeyError if one is missing)."""
    return {key: d[key] for key in keys}
def filter_keys(l, k):
    """Filter every dict in ``l`` down to the keys ``k``."""
    return list(map(lambda entry: filter_keys_single(entry, k), l))
# +
# All evaluation attributes except the DOI itself; every combination below
# always includes "doi" plus 1..n of the remaining attributes.
eval_not_doi = [x for x in EVALUATION_FILTER_ATTRIBUTES if x != "doi"]
key_combinations = []
for n in range(1, len(eval_not_doi) + 1):
    key_combinations += [["doi"] + list(x) for x in combinations(eval_not_doi, n)]
def create_pr_df(es_set, gs_set) -> pd.DataFrame:
    """Precision/recall of extracted vs. gold-standard measurements, one row
    per key combination from the module-level ``key_combinations``.

    Args:
        es_set: extracted CompositionMeasurementSet.
        gs_set: gold-standard CompositionMeasurementSet.

    Returns:
        DataFrame indexed by comma-joined key combination with float
        'precision' and 'recall' columns.
    """
    results = pd.DataFrame(
        index=[", ".join(x) for x in key_combinations],
        columns=["precision", "recall"],  # eval_not_doi +
    )
    for key in key_combinations:
        # Compare the two sets on only the fields in `key`.
        precision, recall, matches = calc_pr(
            set(es_set.filter_fields(key).measurements),
            set(gs_set.filter_fields(key).measurements),
        )
        results.loc[", ".join(key), :] = [precision, recall]
    # Force float dtype so groupby().mean() works downstream.
    results["precision"] = results["precision"].astype("float")
    results["recall"] = results["recall"].astype("float")
    return results
# +
import pandas as pd  # NOTE(review): redundant — pandas is already imported above
es = []
gs = []
results_dfs = []
extracted_macro = []
goldstandard_macro = []
# Per-DOI evaluation: build extracted / gold-standard sets, accumulate the
# per-document PR tables (averaged below) and the pooled measurement lists
# (for the single combined PR table).
for doi, values in results.items():
    # if doi == "https://doi.org/10.1021/jf000892m":
    if "extracted" in values.keys():
        extracted = filter_keys(values["extracted"], EVALUATION_FILTER_ATTRIBUTES)
    else:
        extracted = []
    if "goldstandard" in values.keys():
        goldstandard = filter_keys(values["goldstandard"], EVALUATION_FILTER_ATTRIBUTES)
    else:
        goldstandard = []
    extracted = pd.DataFrame(extracted)
    es.append(extracted)
    goldstandard = pd.DataFrame(goldstandard)
    gs.append(goldstandard)
    es_set = CompositionMeasurementSet.from_df(extracted)
    gs_set = CompositionMeasurementSet.from_df(goldstandard)
    extracted_macro += es_set.measurements
    goldstandard_macro += gs_set.measurements
    results_dfs.append(create_pr_df(es_set, gs_set))
extracted_macro = CompositionMeasurementSet(extracted_macro)
goldstandard_macro = CompositionMeasurementSet(goldstandard_macro)
# "Micro" here = mean of the per-document PR tables, original row order kept.
micro_avg_results = (
    pd.concat(results_dfs).groupby(level=0).mean().loc[results_dfs[0].index, :]
)
# "Macro" here = one PR table over all measurements pooled across documents.
macro_avg_results = create_pr_df(extracted_macro, goldstandard_macro)
# -
# Spot-check one document's extracted vs. gold-standard rows.
es[2]
gs[2]
# - micro-average would take all results dfs and average them
# - macro-average would concatenate all the individual extracted and goldstandard dfs and then compute one table on the combined results
macro_avg_results
micro_avg_results
micro_avg_results.to_csv(PERFORMANCE_METRICS_MICRO_PATH)
macro_avg_results.to_csv(PERFORMANCE_METRICS_MACRO_PATH)
| ipynb/auto_results_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Law School Admissions Data - Cleaning
# Code to clean self-reported law school admissions data from Law School Numbers.
# +
import pandas as pd
# Show full frames when inspecting cells in the notebook.
pd.set_option('display.max_colwidth', -1)  # NOTE(review): -1 is deprecated in newer pandas; use None
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import re
import numpy as np
from collections import ChainMap
from IPython.core.display import display, HTML
# Widen the notebook container to the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
# -
# ### Import raw data
# Raw self-reported admissions scrape from Law School Numbers.
df_law = pd.read_csv(r'C:\Users\Jenny\lawschool\mylsn_raw.csv')
df_law.head()
# Number of distinct applicants in the raw data.
len(set(df_law['app_id']))
# ### Clean categorical data
# What races are listed?
df_law.groupby('race')['race'].count().sort_values(ascending=False)
# +
# Create a list with all the race options
list_race = list(df_law['race'].unique())
# Create a list for each race. Note that each list is mutually exclusive to prevent overlap between lists.
# The cascade works by removing each matched group from the candidate pool
# before matching the next regex, so priority is black > hispanic > asian > white.
tag_black = list(set([m.group() for l in list_race for m in [re.search(r'.*lack.*|.frican(-)?(\s)?.merican|AA|aa|.*fro.*|BLK|BLACK|.*frican.*|.*FRICA.*', str(l))] if m]))
list_black = list(set([x for x in list_race if x in tag_black]))
list_not_black = [x for x in list_race if x not in list_black]
tag_hispanic = list(set([m.group() for l in list_not_black for m in [re.search(r'.*ispanic.*|.*atin.*|mex.*|Mex.*|.*uerto.*|.*hican.*', str(l))] if m]))
list_hispanic = list(set([x for x in list_not_black if x in tag_hispanic]))
list_not_black_hispanic = [x for x in list_race if x not in list_black + list_hispanic]
tag_asian = list(set([m.group() for l in list_not_black_hispanic for m in [re.search(r'Asian|asian|asian.*|.*outh .*sia.*|india.*|India.*|East|east|.*ilipin.*|.*hinese|.*orea.*', str(l))] if m]))
list_asian = list(set([x for x in list_not_black_hispanic if x in tag_asian]))
list_not_black_hispanic_asian = [x for x in list_race if x not in list_black + list_hispanic + list_asian]
tag_white = list(set([m.group() for l in list_not_black_hispanic_asian for m in [re.search(r'\Dhite\s?.*|w|W|.auc(\D)*', str(l))] if m]))
list_white = list(set([x for x in list_not_black_hispanic_asian if x in tag_white]))
list_other = [x for x in list_race if x not in list_black + list_hispanic + list_asian + list_white]
# +
# Create dict_race and map it over df_law['race']. This will consolidate our race data into 5 categories.
dict_black = {x:'black' for x in list_black}
dict_hispanic = {x:'hispanic' for x in list_hispanic}
dict_asian = {x:'asian' for x in list_asian}
dict_white = {x:'white' for x in list_white}
dict_other = {x: 'other' for x in list_other}
dict_race = {**dict_black, **dict_hispanic, **dict_asian, **dict_white, **dict_other}
df_law['race'] = df_law['race'].map(dict_race)
# +
# There's a separate 'urm' (underrepresented minority) column.
# For any applicant where 'urm' is positive and 'race' is listed as 'white' or 'other', adjust 'race' to 'other_urm'.
df_law['race'] = df_law.apply(lambda x: x['race'].replace('other', 'other_urm') if x['urm'] == 1 else x['race'], axis=1)
df_law['race'] = df_law.apply(lambda x: x['race'].replace('white', 'other_urm') if x['urm'] == 1 else x['race'], axis=1)
# -
# Which races are listed now?
df_law.groupby('race')['race'].count().sort_values(ascending=False)
# +
# Consolidate 'yearsout' (years between undergrad and law school application).
dict_yearsout = {
    '1-2 Years': '1-2 years',
    'In Undergrad': 'undergrad',
    'in undergrad': 'undergrad',
    '3-4 Years': '3-4 years',
    '5-9 Years': '5-9 years',
    '10+ Years': '10+ years',
    'nan': 'unspecified'
}
# Fix: missing values in the column are float NaN, not the string 'nan', so a
# plain .map() left them as NaN instead of 'unspecified'. fillna() after the
# map catches them (and any other unmapped value) as 'unspecified'.
df_law['yearsout'] = df_law['yearsout'].map(dict_yearsout).fillna('unspecified')
df_law.groupby('yearsout')['yearsout'].count()
# -
# Most schools look ok, but these should be consolidated
# (campus-level slugs collapsed to a single school).
df_law['school'] = df_law['school'].apply(lambda x: x.replace('arkansas-fayetteville', 'arkansas'))
df_law['school'] = df_law['school'].apply(lambda x: x.replace('rutgers-camden', 'rutgers'))
df_law['school'] = df_law['school'].apply(lambda x: x.replace('rutgers-newark', 'rutgers'))
# Consolidate sexes
# str(x) also converts float NaN to the string 'nan', handled by the last line.
df_law['sex'] = df_law['sex'].apply(lambda x: str(x).replace('Female', 'female'))
df_law['sex'] = df_law['sex'].apply(lambda x: str(x).replace('Male', 'male'))
df_law['sex'] = df_law['sex'].apply(lambda x: str(x).replace('nan', 'unspecified'))
# +
# Create a list with all the school type options
list_schooltype = list(df_law['schooltype'].unique())
# Create a list for each schooltype. As with race, each matched group is
# removed from the pool before the next regex, so the lists are disjoint.
tag_ivy = list(set([m.group() for l in list_schooltype for m in [re.search(r'.vy.*|.VY|HYP.*|hyp.*|Columbia.*|Yale.*|Harvard.*|Penn|Cornell.*|Brown.*|Dartmouth.*|Princeton.*', str(l))] if m]))
list_ivy = list(set([x for x in list_schooltype if x in tag_ivy]))
list_not_ivy = [x for x in list_schooltype if x not in list_ivy]
tag_liberal = list(set([m.group() for l in list_not_ivy for m in [re.search(r'.*iberal.*', str(l))] if m]))
list_liberal = list(set([x for x in list_not_ivy if x in tag_liberal]))
list_not_ivy_liberal = [x for x in list_schooltype if x not in list_ivy + list_liberal]
tag_public = list(set([m.group() for l in list_not_ivy_liberal for m in [re.search(r'.*ublic.*|.*tate.*|UC.*|uc.*|University of Florida|University of.*|SUNY|CUNY|UVA|UGA|UNC|UT.*|UCF|.*erkeley.*|.utgers.*|.exas.*|.emple.*|.*SU|.*ichigan.*|UW.*|.urdue.*|Indiana.*|Georgia.*|.ervice.*|.lemson.*', str(l))] if m]))
list_public = list(set([x for x in list_not_ivy_liberal if x in tag_public]))
list_not_ivy_liberal_public = [x for x in list_schooltype if x not in list_ivy + list_liberal + list_public]
tag_private = list(set([m.group() for l in list_not_ivy_liberal_public for m in [re.search(r'.*rivate.*|NYU|BYU|Boston.*|GWU|New York University|.yracuse.*|.orthwestern.*|.uke.*|Stanford.*|Vanderbilt.*|Georgetown.*|Emory.*|.op .en|.op 10|.op .ive|.op 5|.op 20|.*otre.*|T20|American.*|.ordham.*|.ulane.*|.righam.*|NESCAC|Jesuit.*|Baylor.*|.*hoenix.*|Washington University.*', str(l))] if m]))
list_private = list(set([x for x in list_not_ivy_liberal_public if x in tag_private]))
list_other_schooltype = [x for x in list_schooltype if x not in list_ivy + list_liberal + list_public + list_private]
# +
# Create dict_schooltype and map it over df_law['schooltype']. This will consolidate our schooltype data into 5 categories.
dict_ivy = {x:'ivy' for x in list_ivy}
# Fix: list_liberal was built (and excluded from 'other') but never mapped, so
# liberal-arts rows previously became NaN after the .map(). Map them explicitly.
dict_liberal = {x:'liberal arts' for x in list_liberal}
dict_public = {x:'public' for x in list_public}
dict_private = {x:'private' for x in list_private}
dict_other_schooltype = {x: 'other' for x in list_other_schooltype}
dict_schooltype = {**dict_ivy, **dict_liberal, **dict_public, **dict_private, **dict_other_schooltype}
df_law['schooltype'] = df_law['schooltype'].map(dict_schooltype)
# Rename 'state' to 'applicant_state' to specify it's where applicants are from.
# Which states are applicants from?
df_law = df_law.rename(columns={'state': 'applicant_state'})
df_law['applicant_state'].unique()
# +
# Map applicant home-state names to two-letter postal codes. The raw column
# contains both Title Case and lowercase variants, so the lowercase half of
# the mapping is generated from the canonical names instead of being a second
# hand-maintained copy.
# Fix from review: 'Arkansas'/'arkansas' previously mapped to 'ak', which is
# Alaska's postal code; Arkansas is 'ar'.
_STATE_TO_CODE = {
    'New York': 'ny', 'California': 'ca', 'Georgia': 'ga', 'Illinois': 'il',
    'Colorado': 'co', 'Kentucky': 'ky', 'Virginia': 'va', 'Tennessee': 'tn',
    'Missouri': 'mo', 'Iowa': 'ia', 'Other': 'other', 'Maryland': 'md',
    'Ohio': 'oh', 'Texas': 'tx', 'Michigan': 'mi', 'Florida': 'fl',
    'Montana': 'mt', 'Connecticut': 'ct', 'Indiana': 'in', 'Alaska': 'ak',
    'Maine': 'me', 'Arkansas': 'ar', 'Utah': 'ut', 'North Carolina': 'nc',
    'Louisiana': 'la', 'Washington - D.C.': 'dc', 'Massachusetts': 'ma',
    'Washington': 'wa', 'Wisconsin': 'wi', 'Alabama': 'al',
    'Pennsylvania': 'pa', 'New Jersey': 'nj', 'South Carolina': 'sc',
    'Kansas': 'ks', 'Arizona': 'az', 'North Dakota': 'nd',
    'Rhode Island': 'ri', 'Oregon': 'or', 'South Dakota': 'sd',
    'Oklahoma': 'ok', 'Minnesota': 'mn', 'Mississippi': 'ms',
    'Delaware': 'de', 'West Virginia': 'wv', 'Vermont': 'vt',
    'Nevada': 'nv', 'New Hampshire': 'nh', 'New Mexico': 'nm',
    'Hawaii': 'hi', 'Nebraska': 'ne', 'Idaho': 'id', 'Wyoming': 'wy',
}
dict_applicant_state = {
    **_STATE_TO_CODE,
    **{name.lower(): code for name, code in _STATE_TO_CODE.items()},
}
df_law['applicant_state'] = df_law['applicant_state'].map(dict_applicant_state)
# +
# Map each law-school slug to the state/province where it is located.
# Value fixes from review (keys are untouched — they must match the raw slugs
# in the data, including misspellings like 'sanfranciso' and
# 'brittishcolumbia'):
#   'columbia': 'nu' -> 'ny' (Columbia is in New York; 'nu' is not a code)
#   'kentucky': 'oh' -> 'ky' (University of Kentucky is in Kentucky)
#   'arkansas': 'ak' -> 'ar' ('ak' is Alaska; Arkansas is 'ar')
dict_school_state = {
    'gulc': 'dc', 'uva': 'va', 'penn': 'pa', 'nyu': 'ny', 'chicago': 'il',
    'harvard': 'ma', 'columbia': 'ny', 'ucla': 'ca', 'cornell': 'ny',
    'northwestern': 'il', 'gw': 'dc', 'michigan': 'mi', 'berkeley': 'ca',
    'usc': 'ca', 'lsu': 'la', 'mercer': 'ga', 'loyola-neworleans': 'la',
    'memphis': 'tn', 'siu': 'il', 'gsu': 'ga', 'mississippi': 'ms',
    'stanford': 'ca', 'yale': 'ct', 'davis': 'ca', 'hastings': 'ca',
    'vanderbilt': 'tn', 'duke': 'nc', 'wm': 'va', 'texas': 'tx', 'wl': 'va',
    'newyork': 'ny', 'buffalo': 'ny', 'westernnewengland': 'ma',
    'touro': 'ny', 'pace': 'ny', 'cooley': 'mi', 'suffolk': 'ma',
    'albany': 'ny', 'stjohns': 'ny', 'hofstra': 'ny', 'widener': 'de',
    'newengland': 'ma', 'emory': 'ga', 'bc': 'ma', 'colorado': 'co',
    'niu': 'il', 'stu': 'fl', 'floridacoastal': 'fl', 'stetson': 'fl',
    'barry': 'fl', 'phoenix': 'az', 'johnmarshal-chicago': 'il',
    'nku': 'ky', 'louisville': 'ky', 'whittier': 'ca', 'valparaiso': 'in',
    'pepperdine': 'ca', 'southwestern': 'ca', 'brooklyn': 'ny',
    'cardozo': 'ny', 'fordham': 'ny', 'gmu': 'va', 'setonhall': 'nj',
    'rutgers': 'nj', 'tennessee': 'tn', 'uconn': 'ct', 'maryland': 'md',
    'calwestern': 'ca', 'nccu': 'nc', 'udc': 'dc', 'sanfranciso': 'ca',
    'missouri-kc': 'mo', 'howard': 'dc', 'nova': 'fl', 'kansas': 'ks',
    'thomasjefferson': 'ca', 'westernstate': 'ca', 'sandiego': 'ca',
    'unc': 'nc', 'baylor': 'tx', 'catholic': 'dc', 'american': 'dc',
    'washu': 'mo', 'osu': 'oh', 'bu': 'ma', 'smu': 'tx', 'depaul': 'il',
    'msu': 'mi', 'detroit-mercy': 'mi', 'wayne': 'mi', 'fiu': 'fl',
    'indiana': 'in', 'quinnipiac': 'ct', 'marquette': 'wi',
    'duquesne': 'pa', 'indiana-indy': 'in', 'lewisandclark': 'or',
    'villanova': 'pa', 'wake': 'nc', 'northeastern': 'ma',
    'oklahoma': 'ok', 'capital': 'oh', 'cincinnati': 'oh',
    'casewestern': 'oh', 'kentucky': 'ky', 'utah': 'ut', 'nevada': 'nv',
    'minnesota': 'mn', 'temple': 'pa', 'syracuse': 'ny', 'arizona': 'az',
    'slu': 'mo', 'missouri': 'mo', 'illinois': 'il', 'notredame': 'in',
    'georgia': 'ga', 'tulane': 'la', 'miami': 'fl', 'santaclara': 'ca',
    'loyola': 'ca', 'florida': 'fl', 'fsu': 'fl', 'seattle': 'wa',
    'houston': 'tx', 'pacific': 'ca', 'washington': 'wa', 'oregon': 'or',
    'asu': 'az', 'wisconsin': 'wi', 'gonzaga': 'wa', 'newhampshire': 'nh',
    'maine': 'me', 'rogerwilliams': 'ri', 'chicago-kent': 'il',
    'arkansas': 'ar', 'byu': 'ut', 'wvu': 'wv', 'iowa': 'ia',
    'loyala-chicago': 'il', 'pitt': 'pa', 'alabama': 'al', 'samford': 'al',
    'richmond': 'va', 'chapman': 'ca', 'texastech': 'tx', 'vermont': 'vt',
    'pennstate': 'pa', 'denver': 'co', 'southcarolina': 'sc',
    'hawaii': 'hi', 'washburn': 'ks', 'drake': 'ia', 'creighton': 'ne',
    'dayton': 'oh', 'baltimore': 'md', 'regent': 'va', 'goldengate': 'ca',
    'nebraska': 'ne', 'avemaria': 'fl', 'willamette': 'or', 'tulsa': 'ok',
    'newmexico': 'nm', 'mississippicollege': 'ms', 'texasam': 'tx',
    'campbell': 'nc', 'hamline': 'mn', 'cuny-queens': 'ny',
    'texassouthern': 'tx', 'southern': 'la', 'idaho': 'id',
    'clevelandstate': 'oh', 'toledo': 'oh', 'akron': 'oh', 'ocu': 'ok',
    'southtexas': 'tx', 'stmarys': 'tx', 'onu': 'oh', 'appalachian': 'va',
    'wyoming': 'wy', 'stthomas': 'mn', 'montana': 'mt',
    'williammitchell': 'mn', 'northdakota': 'nd', 'charleston': 'sc',
    'johnmarshal-atl': 'ga', 'southdakota': 'sd', 'charlotte': 'nc',
    'drexel': 'pa', 'famu': 'fl', 'jones': 'al', 'elon': 'nc',
    'nashville': 'tn', 'laverne': 'ca', 'interamerican': 'pr',
    'pontifical': 'pr', 'puertorico': 'pr', 'toronto': 'on',
    'irvine': 'ca', 'dalhousie': 'ns', 'brittishcolumbia': 'bc',
    'calgary': 'ab', 'ottawa': 'on', 'queens': 'on', 'york': 'on',
    'saskatchewan': 'sk', 'liberty': 'va', 'alberta': 'ab',
    'westernontario': 'on', 'windsor': 'on', 'mcgill': 'qc',
    'manitoba': 'mb', 'victoria': 'bc', 'newbrunswick': 'nb',
    'massachusetts': 'ma', 'belmont': 'tn',
}
df_law['school_state'] = df_law['school'].map(dict_school_state)
# -
# What majors are listed?
df_law.groupby('major')['major'].count().sort_values(ascending=False)
# +
# Create a list with all the major options
list_major = list(df_law['major'].unique())
# Create a list for each major
tag_polisci = list(set([m.group() for l in list_major for m in [re.search(r'.*olitical.*|.*Pol.*|.*pol.*|.*olitics.*', str(l))] if m]))
list_polisci = list(set([x for x in list_major if x in tag_polisci]))
tag_history = list(set([m.group() for l in list_major for m in [re.search(r'.*ist.*', str(l))] if m]))
list_history = list(set([x for x in list_major if x in tag_history]))
tag_english = list(set([m.group() for l in list_major for m in [re.search(r'.*nglish.*|.it.*|.* .it|.riting|.*ompar.*', str(l))] if m]))
list_english = list(set([x for x in list_major if x in tag_english]))
tag_econ = list(set([m.group() for l in list_major for m in [re.search(r'.*econ.*|.*Econ.*', str(l))] if m]))
list_econ = list(set([x for x in list_major if x in tag_econ]))
tag_phil = list(set([m.group() for l in list_major for m in [re.search(r'.*phil.*|.*Phil.*', str(l))] if m]))
list_phil = list(set([x for x in list_major if x in tag_phil]))
tag_psych = list(set([m.group() for l in list_major for m in [re.search(r'.*syc.*', str(l))] if m]))
list_psych = list(set([x for x in list_major if x in tag_psych]))
tag_biz = list(set([m.group() for l in list_major for m in [re.search(r'.*inanc.*|.ccounting|acct|.ax.*|.*usiness.*|.*dmin.*|.*dvert.*|.*arketing.*|mktg|.*anagement.*|.*rgan.*|.*mgmt.*|.*iz.*', str(l))] if m]))
list_biz = list(set([x for x in list_major if x in tag_biz]))
tag_cj = list(set([m.group() for l in list_major for m in [re.search(r'.*cj.*|.*CJ.*|.*rim.*|.RIM.*|.*ustic.*', str(l))] if m]))
list_cj = list(set([x for x in list_major if x in tag_cj]))
tag_govt = list(set([m.group() for l in list_major for m in [re.search(r'.ov.*|.*olicy.*|.*ublic.*|.*gov.*|.*Gov.*|.*iploma.*|.*lobal.*|.ntl.*|.nternational.*|.oreign.*|IR|ir', str(l))] if m]))
list_govt = list(set([x for x in list_major if x in tag_govt]))
tag_humanities = list(set([m.group() for l in list_major for m in [re.search(r'.*ociol.*|.*uman.*|.*iberal.*|.ocial.*|.*usic.*|.*nthro.*|.* .elation.*|.*tudies|.*lassi.*|.*panish.*|.*rchitec.*|.*elig.*|.*iddle.*|.rabic.*|.*heat.*|.*art.*|.*Art.*|.*rama.*|.*rench.*|.*erman.*|.*ussian.*|.port.*|.*edu.*|.*Edu.*|.*heology.*|.*ilm.*|.*urop.*|.*hinese.*|.*sian.*|.*atin.*|.*east.*|.*East.*|.*apan.*|.*omen.*|.*ender.*|.*frica.*|.*inguist.*|.omm.*|.*omm|.*ommun.*|.*adio.*|.*tv.*|.*elevision.*|.*hetoric.*|.roadc.*|.*elecom.*|.*ournal.*|PR|pr|.*edia.*', str(l))] if m]))
list_humanities = list(set([x for x in list_major if x in tag_humanities]))
tag_law = list(set([m.group() for l in list_major for m in [re.search(r'.*aw.*|.*egal.*|.re .aw|.re-?.aw', str(l))] if m]))
list_law = list(set([x for x in list_major if x in tag_law]))
tag_stem = list(set([m.group() for l in list_major for m in [re.search(r'.*ng.*|.omputer.*|EE.*|ee.*|CS.*|cs.*|.lectical.*|.*ompu.*|.omp .ci.*|.io.*|.*bio.*|.*Bio.*|STEM|.tem.*|.*hem.*|.*hysic.*|.*cience.*|.*euro.*|.*ath.*|.*harm.*|.*enetic.*|.*cology*|.*oology.*|.*tatistic.*|.tats|.*nfo.*|.*uant.*|.*inesiology.*|.*pplied.*|.*otany.*|.*eology.*|.*nviron.*|.*med.*|.*Med.*|.*olecu.*|.*gri.*|.*orest.*|.*graphy.*|.*tech.*|.*Tech.*|.*aero.*|.*Aero.*|.*ealth.*|.*ogni.*|.*nerg.*|.*atur.*|.*arine.*', str(l))] if m]))
list_stem = list(set([x for x in list_major if x in tag_stem]))
list_top_majors = [list_polisci, list_history, list_english, list_econ, list_phil, list_psych, list_biz, list_cj, list_govt, list_humanities, list_law, list_stem]
list_other = [x for x in list_major if x not in [i for sublist in list_top_majors for i in sublist]]
# +
# Create dict_major and map it over df_law['major']. This will consolidate our major data.
dict_polisci = {x:'political science' for x in list_polisci}
dict_history = {x:'history' for x in list_history}
dict_english = {x:'english' for x in list_english}
dict_econ = {x:'economics' for x in list_econ}
dict_phil = {x:'philosophy' for x in list_phil}
dict_psych = {x:'psychology' for x in list_psych}
dict_biz = {x:'business' for x in list_biz}
dict_cj = {x:'criminal justice' for x in list_cj}
dict_govt = {x:'government' for x in list_govt}
dict_humanities = {x:'humanities' for x in list_humanities}
dict_law = {x:'law' for x in list_law}
dict_stem = {x:'stem' for x in list_stem}
dict_other = {x:'other' for x in list_other}
list_dict_major = [dict_polisci, dict_history, dict_english, dict_econ, dict_phil, dict_psych, dict_biz, dict_cj, dict_govt, dict_humanities, dict_law, dict_stem, dict_other]
dict_major = dict(ChainMap(*list_dict_major))
df_law['major'] = df_law['major'].map(dict_major)
# -
df_law.groupby('major')['major'].count().sort_values(ascending=False)
# What is listed under 'status'?
df_law['status'].unique()
# +
# For the sake of this analysis, status will be 1 if 'accepted', 0 if 'rejected', 'waitlisted', or 'pending'.
dict_status = {
    'Ac': 1,
    'Re': 0,
    'Pe': 0, # this means pending (the school didn't respond or the applicant didn't update with their admissions results)
    'Wa': 0,
    'AcWa': 1, # this means waitlisted and then accepted from the waitlist
    'ReWa': 0 # this means waitlisted and then rejected from the waitlist
}
df_law['status'] = df_law['status'].map(dict_status)
# -
# Parse the 'sent' and 'decision' dates, but almost half the values are missing so they may be ignored in the analysis
# '--' is the site's placeholder for a missing date.
df_law['sent'] = df_law['sent'].apply(lambda x: None if x == '--' else pd.to_datetime(x, format = '%m/%d/%y'))
df_law['decision'] = df_law['decision'].apply(lambda x: None if x == '--' else pd.to_datetime(x, format = '%m/%d/%y'))
# Drop unnecessary columns & rename some columns
df_law = df_law.drop(['Unnamed: 0', 'complete_ts', 'decision_ts', 'aa', 'urm', 'lsat1', 'lsat2', 'lsat3', 'cycle'], axis=1)
df_law = df_law.rename(columns={'status': 'accepted', 'sentmonth': 'sent_month', 'decisionmonth': 'decision_month', 'app_id': 'applicant_id', 'money': 'scholarship', 'ed':'early_decision', 'nontrad': 'nontraditional', 'feewaiver': 'fee_waiver', 'yearsout': 'years_out', 'schooltype': 'school_type'})
df_law.head()
# ### Save df_law to csv
# Cleaned output consumed by the analysis notebook.
df_law.to_csv(r'lawschool_clean.csv')
| law_school_admissions-master/.ipynb_checkpoints/lawschool_cleaning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import tensorflow as tf
import numpy as np
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import json
# -
# # Initial global var
# +
## Declare globals up front: file names, locations, and directories.
DATA_IN_PATH = './data_in/'
DATA_OUT_PATH = './data_out/'
TRAIN_Q1_DATA_FILE = 'train_q1.npy'
TRAIN_Q2_DATA_FILE = 'train_q2.npy'
TRAIN_LABEL_DATA_FILE = 'train_label.npy'
DATA_CONFIGS = 'data_configs.json'
## Hyper-parameters needed for training.
BATCH_SIZE = 16
EPOCH = 2
HIDDEN = 64  # NOTE(review): declared but the model hard-codes 64 units below
BUFFER_SIZE = 10000
NUM_LAYERS = 3
DROPOUT_RATIO = 0.3
TEST_SPLIT = 0.1  # fraction held out for evaluation
RNG_SEED = 13371447
EMBEDDING_DIM = 128
MAX_SEQ_LEN = 31
# -
# # Load Dataset
# +
## Data loading: read the arrays that were pre-saved in numpy format for
## efficient loading.
q1_data = np.load(open(DATA_IN_PATH + TRAIN_Q1_DATA_FILE, 'rb'))
q2_data = np.load(open(DATA_IN_PATH + TRAIN_Q2_DATA_FILE, 'rb'))
labels = np.load(open(DATA_IN_PATH + TRAIN_LABEL_DATA_FILE, 'rb'))
prepro_configs = None
with open(DATA_IN_PATH + DATA_CONFIGS, 'r') as f:
    prepro_configs = json.load(f)
# -
VOCAB_SIZE = prepro_configs['vocab_size']
# # Split train and test dataset
# Per-question lengths capped at MAX_SEQ_LEN.
# NOTE(review): q1_data_len / q2_data_len are computed but never used below.
q1_data_len = np.array([min(len(x), MAX_SEQ_LEN) for x in q1_data])
q2_data_len = np.array([min(len(x), MAX_SEQ_LEN) for x in q2_data])
# +
## Split the data. sklearn's train_test_split is convenient, but the Quora
## data has two inputs rather than one, so stack the two question arrays with
## np.stack, split once, then separate them again.
X = np.stack((q1_data, q2_data), axis=1)
y = labels
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=TEST_SPLIT, random_state=RNG_SEED)
train_Q1 = train_X[:,0]
train_Q2 = train_X[:,1]
test_Q1 = test_X[:,0]
test_Q2 = test_X[:,1]
# +
def rearrange(base, hypothesis, labels):
    """Pack the two question tensors into the feature dict the estimator expects."""
    packed = {"base": base, "hypothesis": hypothesis}
    return packed, labels
def train_input_fn():
    """tf.data input_fn for training: shuffle, batch, rearrange, repeat."""
    dataset = (
        tf.data.Dataset.from_tensor_slices((train_Q1, train_Q2, train_y))
        .shuffle(buffer_size=len(train_Q1))
        .batch(BATCH_SIZE)
        .map(rearrange)
        .repeat(EPOCH)
    )
    return dataset.make_one_shot_iterator().get_next()
def eval_input_fn():
    """tf.data input_fn for evaluation: single un-shuffled pass."""
    dataset = (
        tf.data.Dataset.from_tensor_slices((test_Q1, test_Q2, test_y))
        .batch(BATCH_SIZE)
        .map(rearrange)
    )
    return dataset.make_one_shot_iterator().get_next()
# -
# # Model setup
# +
def Malstm(features, labels, mode):
    """MaLSTM-style model_fn for tf.estimator.

    Encodes each question with its own bidirectional LSTM and scores the pair
    with exp(-L1 distance) between the concatenated final hidden states,
    giving a similarity in (0, 1].

    Args:
        features: dict with 'base' and 'hypothesis' token-id tensors.
        labels: 0/1 duplicate labels (None at prediction time).
        mode: a tf.estimator.ModeKeys value.
    """
    TRAIN = mode == tf.estimator.ModeKeys.TRAIN
    EVAL = mode == tf.estimator.ModeKeys.EVAL
    PREDICT = mode == tf.estimator.ModeKeys.PREDICT
    # Shared embedding layer applied to both questions.
    embedding = tf.keras.layers.Embedding(VOCAB_SIZE, EMBEDDING_DIM)
    base_embedded_matrix = embedding(features['base'])
    hypothesis_embedded_matrix = embedding(features['hypothesis'])
    # Bidirectional LSTM for question 1
    q_lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units = 64, activation = tf.nn.tanh)
    q_lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units = 64, activation = tf.nn.tanh)
    _, q_output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw = q_lstm_fw_cell,
                                                         cell_bw = q_lstm_bw_cell,
                                                         inputs = base_embedded_matrix,
                                                         dtype = tf.float32,
                                                         scope='query')
    # Concatenate the final hidden states of the forward and backward cells.
    q_final_state = tf.concat([q_output_states[0].h, q_output_states[1].h], axis=1)
    # Bidirectional LSTM for question 2 (the candidate similar query)
    s_lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units = 64, activation = tf.nn.tanh)
    s_lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units = 64, activation = tf.nn.tanh)
    _, s_output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw = s_lstm_fw_cell,
                                                         cell_bw = s_lstm_bw_cell,
                                                         inputs = hypothesis_embedded_matrix,
                                                         dtype = tf.float32,
                                                         scope='sim_query')
    # Extract the final LSTM state for question 2 as well.
    sim_final_state = tf.concat([s_output_states[0].h, s_output_states[1].h], axis=1)
    # merged_matrix = tf.concat([base_sementic_matrix, hypothesis_sementic_matrix], -1)
    # logit_layer = tf.keras.layers.dot([base_sementic_matrix, hypothesis_sementic_matrix], axes=1, normalize=True)
    with tf.variable_scope('output_layer'):
        # logit_layer = K.exp(-K.sum(K.abs(base_sementic_matrix - hypothesis_sementic_matrix), axis=1, keepdims=True))
        # MaLSTM similarity: exp(-L1 distance) between the two final states.
        logit_layer = tf.exp(-tf.reduce_sum(tf.abs(q_final_state - sim_final_state), axis=1, keepdims=True))
        logit_layer = tf.squeeze(logit_layer, axis=-1)
    if PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={
                'is_duplicate':logit_layer
            })
    # labels is None while predicting
    if labels is not None:
        labels = tf.to_float(labels)
    # loss = tf.reduce_mean(tf.keras.metrics.binary_crossentropy(y_true=labels, y_pred=logit_layer))
    loss = tf.losses.mean_squared_error(labels=labels, predictions=logit_layer)
    # loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(labels, logit_layer))
    if EVAL:
        # Round the similarity to get a hard 0/1 prediction for accuracy.
        accuracy = tf.metrics.accuracy(labels, tf.round(logit_layer))
        eval_metric_ops = {'acc': accuracy}
        return tf.estimator.EstimatorSpec(
            mode=mode,
            eval_metric_ops= eval_metric_ops,
            loss=loss)
    elif TRAIN:
        global_step = tf.train.get_global_step()
        train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            train_op=train_op,
            loss=loss)
# -
# # Training & Eval
# +
# os.environ["CUDA_VISIBLE_DEVICES"]="0" #For TEST
# Checkpoint directory for the estimator (created if missing).
model_dir = os.path.join(os.getcwd(), DATA_OUT_PATH + "/checkpoint/rnn/")
os.makedirs(model_dir, exist_ok=True)
# Checkpoint every 500 steps, keep the 2 most recent, log every 200 steps.
config_tf = tf.estimator.RunConfig(save_checkpoints_steps=500,
                                   save_checkpoints_secs=None,
                                   keep_checkpoint_max=2,
                                   log_step_count_steps=200)
lstm_est = tf.estimator.Estimator(Malstm, model_dir=model_dir, config=config_tf)
# -
# Train (train_input_fn repeats for EPOCH passes), then run one evaluation pass.
lstm_est.train(train_input_fn)
lstm_est.evaluate(eval_input_fn)
# # Load test dataset & create submit dataset to kaggle
# +
TEST_Q1_DATA_FILE = 'test_q1.npy'
TEST_Q2_DATA_FILE = 'test_q2.npy'
TEST_ID_DATA_FILE = 'test_id.npy'

# FIX: np.load accepts a path directly; wrapping it in open(...) leaked the
# file handles (they were never closed).
test_q1_data = np.load(DATA_IN_PATH + TEST_Q1_DATA_FILE)
test_q2_data = np.load(DATA_IN_PATH + TEST_Q2_DATA_FILE)
test_id_data = np.load(DATA_IN_PATH + TEST_ID_DATA_FILE)
# -
# Feed the Kaggle test pairs through the trained estimator without shuffling
# so predictions stay aligned with test_id_data.
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"base": test_q1_data, "hypothesis": test_q2_data},
    shuffle=False)
predictions = np.array([p['is_duplicate'] for p in lstm_est.predict(input_fn=predict_input_fn)])
# +
print(len(predictions)) #2345796
# quoting=3 is csv.QUOTE_NONE -- no fields are quoted in the submission file.
output = pd.DataFrame( data={"test_id":test_id_data, "is_duplicate": list(predictions)} )
output.to_csv( "rnn_predict.csv", index=False, quoting=3 )
| 5.TEXT_SIM/OLD/5.3.3_Quora_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 4 Supplement
# ### 这个文档应在Day 3之前看,相比Day 3更详细易懂
# ### 机器学习的本质就是借助数学模型理解数据。当我们给模型装上可以适应观测数据的可调 参数时,“学习”就开始了;此时的程序被认为具有从数据中“学习”的能力。一旦模型 可以拟合旧的观测数据,那么它们就可以预测并解释新的观测数据。
# - 有监督学习
# 是指对数据的若干特征与若干标签(类型)之间的关联性进行建模的过程;只要模型被确定,就可应用到新的未知数据上。
# 这类学习过程可以进一步分为分类 (classification)任务与回归(regression)任务。在分类任务中,标签都是离散值;而在回归任务中,标签都是连续值。
# - 无监督学习
# 是指对不带任何标签的数据特征进行建模,通常被看成是一种“让数据自己介 绍自己”的过程。这类模型包括聚类(clustering)任务和降维(dimensionality reduction) 任务。聚类算法可以将数据分成不同的组别,而降维算法追求用更简洁的方式表现数据。
# - 半监督学习(semi-supervised learning):通常可以在数据标签不完整时使用
#
# 小结
#
# 有监督学习:可以训练带标签的数据以预测新数据的标签的模型。
#
# - 分类:可以预测两个或多个离散分类标签的模型。
#
# - 回归:可以预测连续标签的模型。
#
# 无监督学习:识别无标签数据结构的模型。
#
# - 聚类: 检测、识别数据显著组别的模型。
#
# - 降维:从高维数据中检测、识别低维数据结构的模型
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
# -
# ## 有监督学习示例:简单线性回归
# ### (1) 选择模型类
# +
from sklearn.linear_model import LinearRegression

# Synthetic data: y = 2x - 1 plus unit-variance Gaussian noise, x in [0, 10).
rng = np.random.RandomState(42)
x = 10 * rng.rand(50)
y = 2 * x - 1 + rng.randn(50)
plt.scatter(x, y);
# -
# ### (2) 选择模型超参数
# 有一些重要的参数必须在选择模型类时确定好。这些参数通常被称为**超参数**
# <br>在 Scikit-Learn 中,我们通常在模型初始化阶段选择超参数
# <br>需要注意的是,对模型进行实例化其实仅仅是存储了超参数的值。我们还没有将模型应 用到数据上:
# <br>Scikit-Learn 的 API 对**选择模型**和**将模型应用到数据**区别得很清晰。
model = LinearRegression(fit_intercept=True)  # fit_intercept: a hyperparameter
# ### (3) Arrange the data into a feature matrix and a target array
# The target array y (length n_samples) is ready, but x must be reshaped
# into the `[n_samples, n_features]` form scikit-learn expects.
X = x[:, np.newaxis]
# ### (4) Fit the model to the data
# Apply the model to the data via its fit() method:
model.fit(X, y)
# fit() performs the heavy computation internally and stores the results in
# model attributes. By scikit-learn convention, every parameter learned by
# fit() carries a trailing underscore. For linear regression:
model.coef_  # slope -- the data above were generated with slope 2, intercept -1
model.intercept_  # intercept
# ### (5) Predict labels for new data
xfit = np.linspace(-1, 11)
Xfit = xfit[:, np.newaxis]
yfit = model.predict(Xfit)
# Finally, visualise the original data together with the fitted line.
plt.scatter(x, y)
plt.plot(xfit, yfit);
# ## 有监督学习示例:鸢尾花数据分类
import seaborn as sns

iris = sns.load_dataset('iris')
iris.tail()
X_iris = iris.drop('species', axis=1)  # feature matrix
y_iris = iris['species']  # target vector
# **Datasets are represented with standard data types (NumPy arrays, pandas
# DataFrames, SciPy sparse matrices); parameter names are plain Python strings.**
# A sparse matrix is one whose non-zero entries are few and irregularly placed.
#
# Why split into a training set and a testing set? The model must be evaluated
# on data it has never seen, so the data are split first.
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X_iris, y_iris,random_state=1)
print(Xtrain.shape,Xtest.shape,ytrain.shape,ytest.shape)
# Use accuracy_score to check the model's prediction accuracy.
# NOTE: the bare string below is an expression statement (shown as cell
# output in the original notebook); it is deliberately left unchanged.
'整理好数据之后,用下面的模型来预测标签:'
from sklearn.naive_bayes import GaussianNB  # 1. choose the model class
model = GaussianNB()  # 2. instantiate the model
model.fit(Xtrain, ytrain)  # 3. fit the model to the data
y_model = model.predict(Xtest)  # 4. predict on new data
from sklearn.metrics import accuracy_score
accuracy_score(ytest, y_model)
# ## 无监督学习示例:鸢尾花数据降维
# 鸢尾花数据集由四个维度构成,降维的任务是要找到一个可以保留数据本质特征的低维矩阵来表示高维数据。
# <br>降维通常用 于辅助数据可视化的工作,毕竟用二维数据画图比用四维甚至更高维的数据画图更方便
# <br>
# <br>下面将使用**主成分分析(principal component analysis,PCA)方法**,这 是一种快速线性降维技术。我们将用模型返回两个主成分,也就是用二维数据表示鸢尾花 的四维数据。
# <br>
# 简述一下 PCA 的算法步骤:
#
# 设有 n 条 d 维数据。
#
# * 将原始数据按列组成 n 行 d 列矩阵 X
# * 将 X 的每一列(代表一个属性)进行零均值化,即减去这一列的均值
# * 求出协方差矩阵 C=\frac{1}{m}XX^\mathsf{T}
# * 求出协方差矩阵的特征值及对应的特征向量
# * 将特征向量按对应特征值大小从上到下按行排列成矩阵,取前 k 行组成矩阵 P
# * Y=PX 即为降维到 k 维后的数据
# <br>**PCA 本质上是将方差最大的方向作为主要特征,并且在各个正交方向上将数据“离相关”,也就是让它们在不同正交方向上没有相关性。**
from sklearn.decomposition import PCA  # 1. choose the model class
model = PCA(n_components=2)  # 2. set hyperparameters (n_components: number of components to keep), instantiate
model.fit(X_iris)  # 3. fit to the data -- note y is not used (unsupervised)
X_2D = model.transform(X_iris)  # 4. transform the data down to two dimensions
X_2D.shape
# **Plot the result.**
# Although PCA never saw the species labels, the classes separate cleanly in
# the projected plane, so even a simple classifier should learn this data well.
iris['PCA1'] = X_2D[:, 0]
iris['PCA2'] = X_2D[:, 1]
# FIX: pass data/x/y as keywords -- seaborn >= 0.12 no longer accepts the
# old positional ("PCA1", "PCA2", ...) lmplot signature.
sns.lmplot(data=iris, x="PCA1", y="PCA2", hue='species', fit_reg=False);
# ## 无监督学习示例:鸢尾花数据聚类
# 聚类算法是要对没有任何标签的数据集进行分组。 我们将用一个强大的聚类方法——**高斯混合模型(Gaussian mixture model,GaussianMixture)**
from sklearn.mixture import GaussianMixture  # 1. choose the model class
# The class GMM was deprecated in sklearn 0.18 and removed in 0.20; use
# GaussianMixture instead.
model = GaussianMixture(n_components=3, covariance_type='full')  # 2. set hyperparameters, instantiate
model.fit(X_iris)  # 3. fit to the data -- the y labels are not needed
y_gm = model.predict(X_iris)  # 4. determine cluster labels
# Add the cluster labels to the iris DataFrame and plot with Seaborn.
# Setosa is isolated perfectly in one cluster; versicolor and virginica
# still overlap slightly.
iris['cluster'] = y_gm
# FIX: keyword data/x/y -- seaborn >= 0.12 rejects positional column names.
sns.lmplot(data=iris, x="PCA1", y="PCA2", hue='species', col='cluster', fit_reg=False);
| Code/Day 4 Supplement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/amirhosseingholami90/Battery/blob/master/Copy_of_Untitled2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="6YTfxa-DfDwL" outputId="2952bf59-29fd-4194-9cec-3447b5d12fb0"
# Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset: first six columns are features, seventh is the target.
dataset = pd.read_csv('data_reg.csv')
X = dataset.iloc[:, 0:6].values
y = dataset.iloc[:, 6].values
print(dataset.shape)
print(X.shape)
print(y.shape)
# Splitting the dataset into the Training set and Test set (disabled)
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling (disabled)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Fitting Polynomial Regression to the dataset: expand features to degree-4
# polynomial terms, then fit a linear model on the expanded matrix.
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
# NOTE(review): this fit is redundant -- fit_transform above already fitted
# poly_reg, and PolynomialFeatures ignores y.
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
# + colab={"base_uri": "https://localhost:8080/", "height": 229} id="27pjNx4MfOlt" outputId="9efe03e2-b315-4b3d-ddcf-57b88d211677"
print(X.shape)
print(y.shape)
# Visualising the Linear Regression results.
# NOTE(review): X has 6 columns, so plt.plot(X, ...) draws one line per
# feature column against the predictions -- confirm this is intended.
plt.scatter(X[:,1], y, color = 'red')
plt.plot(X, lin_reg.predict(X), color = 'blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results (same multi-column caveat).
plt.scatter(X[:,1], y, color = 'red')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results (for higher resolution and smoother curve)
# X_grid = np.arange(min(X[:,0:2]), max(X[:,0:2]), 0.1)
# X_grid = X_grid.reshape((len(X_grid), 1))
# plt.scatter(X[:,1], y, color = 'red')
# plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = 'blue')
# plt.title('Truth or Bluff (Polynomial Regression)')
# plt.xlabel('Position level')
# plt.ylabel('Salary')
# plt.show()
# Predicting a new result with Linear Regression on one unseen 6-feature row.
unseen_data = [[300,17,70,600,15,50]]
print(X[0])
res1 = lin_reg.predict(unseen_data)
print(res1)
# Predicting the same row with Polynomial Regression.
res2 = lin_reg_2.predict(poly_reg.fit_transform(unseen_data))
print(res2)
# + id="vutBsQpak4xp"
| Copy_of_Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 第3章 pandasでデータを処理しよう
#
# ### 3-7: 統計量の算出
# +
# Listing 3.7.1: compute per-column means
import os
import pandas as pd

base_url = (
    "https://raw.githubusercontent.com/practical-jupyter/sample-data/master/anime/"
)
# NOTE(review): os.path.join happens to work for URLs on POSIX systems;
# urllib.parse.urljoin would be the portable choice -- confirm before changing.
anime_master_csv = os.path.join(base_url, "anime_master.csv")
df = pd.read_csv(anime_master_csv)
# FIX: the frame has non-numeric columns (e.g. genre, type); pandas >= 2.0
# raises TypeError for DataFrame.mean() over them, so restrict explicitly.
df.mean(numeric_only=True)
# -
# Listing 3.7.3: sum of a Series
df["members"].sum()
# Listing 3.7.5: summary statistics (rounded to one decimal)
df.describe().round(1)
# Listing 3.7.6: change the percentiles reported by describe()
df.describe(percentiles=[0.1, 0.9]).round(1)
# Listing 3.7.7: summary statistics for string-typed columns
df[["genre", "type"]].describe()
| sample-code/notebooks/3-07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# We left off with the disturbing realization that even though we are satisfied the requirements of the sampling theorem, we still have errors in our approximating formula. We can resolve this by examining the Whittaker interpolating functions which are used to reconstruct the signal from its samples.
# +
import numpy as np
import matplotlib.pyplot as plt

# Plot the Whittaker interpolating function sinc(fs*t): it decays slowly and
# extends to infinity in both directions, which motivates the discussion below.
t = np.linspace(-5,5,300) # redefine this here for convenience
fig,ax = plt.subplots()
fs = 5.0
ax.plot(t, np.sinc(fs * t))
ax.grid()
ax.annotate('This keeps going...',
            xy=(-4,0),
            xytext=(-5+.1,0.5),
            arrowprops={'facecolor':'green','shrink':0.05},fontsize=14)
ax.annotate('... and going...',
            xy=(4,0),
            xytext=(3+.1,0.5),
            arrowprops={'facecolor':'green','shrink':0.05},fontsize=14)
# fig.savefig('figure_00@.png', bbox_inches='tight', dpi=300)
# -
# Notice in the above plot that the function extends to infinity in either direction. This basically means that the signals we can represent must also extend to infinity in either direction which then means that we have to sample forever to exactly reconstruct the signal! So, on the one hand the sampling theorem says we only need a sparse density of samples, this result says we need to sample forever. No free lunch here!
#
# This is a deep consequence of dealing with band-limited functions which, as we have just demonstrated, are **not** time-limited. Now, the new question is how to get these signals into a computer with finite memory. How can we use what we have learned about the sampling theorem with these finite-duration signals?
# ## Approximately Time-Limited Functions
# Let's back off a bit and settle for functions that are *approximately* time-limited in the sense that almost all of their energy is concentrated in a finite time-window:
#
# $$ \int_{-\tau}^\tau |f(t)|^2 dt = E-\epsilon$$
#
# where $E$ is the total energy of the signal:
#
# $$ \int_{-\infty}^\infty |f(t)|^2 dt = E$$
#
# Now, with this new definition, we can seek out functions that are band-limited but come very, very (i.e. within $\epsilon$) close to being time-limited as well. In other words, we want functions $\phi(t)$ so that they are band-limited:
#
# $$ \phi(t) = \int_{-W}^W \Phi(\nu) e^{2 \pi j \nu t} dt $$
#
# and coincidentally maximize the following:
#
# $$ \int_{-\tau}^\tau |\phi(t) |^2 dt$$
#
# After a complicated derivation, this boils down to solving the following eigenvalue equation:
#
# $$ \int_{-\tau}^\tau \phi(x)\frac{\sin(2\pi W(t-x))}{\pi(t-x)} dx = \lambda \phi(t)$$
#
# The set of $\phi_k(t)$ eigenfunctions form the basis for arbitrary
# approximately time-limited functions. In other words, we can express
#
# $$ f(t) = \sum_k a_k \phi_k(t) $$
#
# Note that
# the $\phi_k(t)$ functions are not time-limited, but only time-concentrated in the $[-\tau,\tau]$ interval. With a change of variables, we can write this in normalized form as
#
# $$ \int_{-1}^1 \psi(x)\frac{\sin(2\pi\sigma(t-x)/4)}{\pi(t-x)} dx = \lambda \psi(t)$$
#
# where we define $\sigma = (2\tau)(2W)$ as the time-bandwidth product. The advantage of this change of variables is that $\tau$ and $W$ are expressed as a single term. Furthermore, this is the form of a classic problem where the $\psi$ functions turn out to be the angular prolate spheroidal wave functions. Let's see what these $\psi$ functions look like by solving this form of
# the eigenvalue problem
def kernel(x, sigma=1):
    """Kernel sin(sigma/2 * pi*x) / (pi*x) of the normalized prolate-spheroidal
    eigenvalue problem; zeros in x are nudged to 1e-20 to avoid 0/0."""
    arr = np.asanyarray(x)
    denom = np.pi * np.where(arr == 0, 1.0e-20, arr)
    return np.sin(sigma / 2 * denom) / denom
# Now, we are ready to setup the eigenvalues and see how they change with the time-bandwidth product.
# +
# Quadrature grid on [-1, 1] used to discretize the integral eigenvalue problem.
nstep = 100 # quick and dirty integral quantization
t = np.linspace(-1,1,nstep) # quantization of time
dt = np.diff(t)[0] # differential step size (uniform grid, so one value suffices)
def eigv(sigma):
    """Largest eigenvalue of the kernel matrix on the time grid.

    eigvalsh returns eigenvalues of a symmetric matrix in ascending order,
    so the last entry is the maximum.
    """
    return np.linalg.eigvalsh(kernel(t - t[:, None], sigma))[-1]
sigma = np.linspace(0.01,4,15) # range of time-bandwidth products to consider
# Plot the largest eigenvalue (scaled by dt, the quadrature weight) as a
# function of the time-bandwidth product.
fig,ax = plt.subplots()
ax.plot(sigma, dt*np.array([eigv(i) for i in sigma]),'-o')
ax.set_xlabel('time-bandwidth product $\sigma$', fontsize=14)
ax.set_ylabel('max eigenvalue', fontsize=14)
ax.axis(ymax=1.01)
ax.grid()
# fig.savefig('figure_00@.png', bbox_inches='tight', dpi=300)
# -
# The largest eigenvalue is the fraction of the energy of the contained in the interval $[-1,1]$. Thus, this means
# that for $\sigma \gt 3$, $\psi_0(t)$ is the eigenfunction that is most concentrated in that interval. Now, let's look at the this eigenfunction under those conditions.
# +
sigma = 3
w, v = np.linalg.eigh(kernel(t-t[:, None], sigma))
maxv = v[:, w.argmax()]
fig, ax = plt.subplots()
ax.plot(t, maxv)
ax.set_xlabel('time', fontsize=18)
ax.set_ylabel('$\psi_0(t)$', fontsize=22)
ax.set_title('Eigenvector corresponding to e-value=%3.4f;$\sigma$=%3.2f'%(w.max()*dt, sigma))
# fig.savefig('figure_00@.png', bbox_inches='tight', dpi=300)
# -
# Note that we'll see this shape again when we take up window functions.
#
# What does this all mean? By framing our problem this way, we made a connection between the quality of our reconstruction via the Whittaker interpolant and the time-bandwidth product. Up until now, we did not have a concrete way of relating limitations in time to limitations in frequency. Now that we know how to use the time-bandwidth product, let's go back to the original formulation with the separate $\tau$ and $W$ terms as in the following:
#
# $$ \int_{-\tau}^\tau \phi(x)\frac{\sin(2\pi W (t-x))}{\pi(t-x)} dx = \lambda \phi(t)$$
#
# and then re-solve the eigenvalue problem.
# +
def kernel_tau(x, W=1):
    """Kernel sin(2*pi*W*x) / (pi*x) for the un-normalized (tau, W) form of
    the eigenvalue problem; zeros in x are nudged to 1e-20 to avoid 0/0."""
    vals = np.asanyarray(x)
    pix = np.pi * np.where(vals == 0, 1.0e-20, vals)
    return np.sin(2 * W * pix) / pix
nstep = 300 # quick and dirty integral quantization
t = np.linspace(-1,1, nstep) # quantization of time
tt = np.linspace(-2,2, nstep) # extend interval
sigma = 5
# Choose W so the time-bandwidth product (2*tau)(2*W) equals sigma.
W = sigma/2./2./t.max()
# The (t, tt) kernel matrix is not symmetric, so use the general eig solver.
w,v = np.linalg.eig(kernel_tau(t-tt[:,None],W))
ii = np.argsort(w.real)  # NOTE(review): computed but unused below
maxv = v[:, w.real.argmax()].real
fig,ax = plt.subplots()
ax.plot(tt, maxv/np.sign(maxv[nstep//2])) # normalize to keep orientation upwards
ax.set_xlabel('time',fontsize=14)
ax.set_ylabel(r'$\phi_{max}(t)$',fontsize=18)
ax.set_title('$\sigma=%d$'%(2*W*2*t.max()),fontsize=16)
# fig.savefig('figure_00@.png', bbox_inches='tight', dpi=300)
# -
# $\DeclareMathOperator{\sinc}{sinc}$
#
# This looks suspiciously like the $\sinc$ function. In fact, in the limit as $\sigma \rightarrow \infty$, the eigenfunctions devolve into time-shifted versions of the $\sinc$ function. These are the same functions used in the Whittaker interpolant. Now we have a way to justify the interpolant by appealing to large $\sigma$ values.
# ## Summary
# In this section, at first blush, it may look like we accomplished nothing. We started out investigating why is it that we have some residual error in the reconstruction formula using the Whittaker approximation functions. Then, we recognized that we cannot have signals that are simultaneously time-limited and band-limited. This realization drove us to investigate "approximately" time-limited functions. Through carefully examining the resulting eigenvalue problem, we determined the time-bandwidth conditions under which the
# Whittaker interpolant is asymptotically valid. As you can imagine, there is much more to this story and many powerful theorems place bounds on the quality and dimensionality of this reconstruction, but for us, the qualifying concept of time-bandwidth product is enough for now.
#
# ## References
#
# * This is in the [IPython Notebook format](http://ipython.org/) and was converted to HTML using [nbconvert](https://github.com/ipython/nbconvert).
#
# * See [Signal analysis](http://books.google.com/books?id=Re5SAAAAMAAJ) for more detailed mathematical development.
#
# * The IPython notebook corresponding to this post can be found [here](https://github.com/unpingco/Python-for-Signal-Processing/blob/master/Sampling_Theorem_Part_2.ipynb).
# %qtconsole
| notebook/.ipynb_checkpoints/Sampling_Theorem_Part_2_v2-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# # 📝 Exercise M7.03
#
# As with the classification metrics exercise, we will evaluate the regression
# metrics within a cross-validation framework to get familiar with the syntax.
#
# We will use the Ames house prices dataset.
# +
import pandas as pd
import numpy as np
ames_housing = pd.read_csv("../datasets/house_prices.csv")
data = ames_housing.drop(columns="SalePrice")
target = ames_housing["SalePrice"]
data = data.select_dtypes(np.number)
target /= 1000
# -
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# The first step will be to create a linear regression model.
# +
# Write your code here.
# -
# Then, use the `cross_val_score` to estimate the generalization performance of
# the model. Use a `KFold` cross-validation with 10 folds. Make the use of the
# $R^2$ score explicit by assigning the parameter `scoring` (even though it is
# the default score).
# +
# Write your code here.
# -
# Then, instead of using the $R^2$ score, use the mean absolute error. You need
# to refer to the documentation for the `scoring` parameter.
# +
# Write your code here.
# -
# Finally, use the `cross_validate` function and compute multiple scores/errors
# at once by passing a list of scorers to the `scoring` parameter. You can
# compute the $R^2$ score and the mean absolute error for instance.
# +
# Write your code here.
| notebooks/metrics_ex_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mm
# language: python
# name: mmdet
# ---
import sys
import cv2
from tqdm import tqdm_notebook as tqdm
sys.path.append('D:\API\cocoapi\PythonAPI') # 你下载的 cocoapi 所在路径
import skimage.io as io
from pycocotools.coco import COCO # 载入 cocoz
# %matplotlib inline
CLASS=['tennis-court', 'container-crane', 'storage-tank', 'baseball-diamond', 'plane', 'ground-track-field', 'helicopter', 'airport', 'harbor', 'ship', 'large-vehicle', 'swimming-pool', 'soccer-ball-field', 'roundabout', 'basketball-court', 'bridge', 'small-vehicle', 'helipad']
coco=COCO("/home/xfr/rssid/data/annotation/annos_rscup_train.json")
# For every annotation, record its bbox area relative to the image area,
# grouped by class name; also print the image record for each 'airport' hit.
ret = {x:[] for x in CLASS}
imgIds = coco.getImgIds()
for imgid in tqdm(imgIds):
    img = coco.loadImgs(imgid)[0]
    area = img['height']*img['width']
    annIds = coco.getAnnIds(imgIds=[imgid], iscrowd=None)
    anns = coco.loadAnns(annIds)
    for ann in anns:
        # assumes category_id indexes directly into CLASS -- TODO confirm
        cls = CLASS[ann['category_id']]
        if(cls == "airport"):
            print(img)
        sub_area = ann['bbox'][2]*ann['bbox'][3]  # bbox width * height
        ret[cls].append(sub_area/area)
#     I = cv2.imread('/home/xfr/rssid/data/val/images/'+img['file_name'])
#     annIds = coco.getAnnIds(imgIds=[imgid], iscrowd=None)
#     anns = coco.loadAnns(annIds)
#     for ann in anns:
#         poly = np.array(ann["segmentation"][0])
#         xx = poly[0::2]
#         yy = poly[1::2]
#         poly = np.array([[xx[0],yy[0]], [xx[1],yy[1]], [xx[2],yy[2]], [xx[3],yy[3]]])
#         temp = np.array([poly], np.int32)
#         cv2.polylines(I, temp, 1, (0, 255, 0), 1)
#     cv2.imwrite("./original_val/"+img['file_name'], I)
# +
import numpy as np

# Number of relative-area samples collected for the 'airport' class above.
l = len(ret['airport'])
# FIX: the original read ret[key], but `key` is never defined in this cell
# (NameError at runtime); 'airport' is the class being inspected here.
ratios = sorted(np.round(np.array(ret['airport']), 3))
print(ratios)
# -
CLASS=['tennis-court', 'container-crane', 'storage-tank', 'baseball-diamond', 'plane', 'ground-track-field', 'helicopter', 'airport', 'harbor', 'ship', 'large-vehicle', 'swimming-pool', 'soccer-ball-field', 'roundabout', 'basketball-court', 'bridge', 'small-vehicle', 'helipad']
coco=COCO("/home/xfr/rssid/data/annotation/annos_rscup_train.json")
import mmcv
# Crop every 'airport' annotation (with up to 15% margin on each side) out of
# its training image, draw its bbox/polygon for inspection, save the crop,
# and record the adjusted bbox/polygon per crop file in `info`.
ret = {x:[] for x in CLASS}
imgIds = coco.getImgIds()
info = {}
for imgid in tqdm(imgIds):
    img = coco.loadImgs(imgid)[0]
    origin_img = cv2.imread("/home/xfr/rssid/data/train/images/"+img['file_name'])
    origin_name = img['file_name'].split(".")[0]
    annIds = coco.getAnnIds(imgIds=[imgid], iscrowd=None)
    anns = coco.loadAnns(annIds)
    count = 0
    for ann in anns:
        cls = CLASS[ann['category_id']]
        if(cls == "airport"):
            file_name ="{}_count_{}".format(origin_name, count)+".jpg"
            count += 1
            info[file_name] = {}
            xmin, ymin, w, h = ann["bbox"]
            poly = np.array(ann["segmentation"][0]).reshape(4,2)
            print(xmin, ymin, w, h)
            # Draw the original bbox (red) and polygon (green) on the full image.
            cv2.rectangle(origin_img, (int(xmin), int(ymin)), (int(xmin+w), int(ymin+h)), (0,0,255),3)
            cv2.polylines(origin_img, np.array([poly], np.int32), 1, (0, 255, 0), 2)
            print(file_name)
            # plt.imshow(origin_img)
            # plt.show()
            # Shift the polygon into crop-local coordinates, then grow the
            # crop by expand_factor on each side (clamped at the image edge).
            poly -= np.array([xmin, ymin])
            expand_factor = min(0.15, xmin/w, ymin/h)
            poly += np.array([expand_factor*w, expand_factor*h])
            xmin = xmin-expand_factor*w
            ymin = ymin- expand_factor*h
            xmax = xmin+(1+expand_factor*2)*w
            ymax = ymin+(1+expand_factor*2)*h
            print(xmin, ymin, xmax, ymax)
            info[file_name]["bbox"] = [0, 0, xmax-xmin, ymax - ymin]
            sub_img = origin_img[int(ymin):int(ymax), int(xmin):int(xmax),:]
            info[file_name]["seg"] = poly
            print(poly)
            cv2.polylines(sub_img, np.array([poly], np.int32), 1, (0, 255, 0), 2)
            print(file_name)
            # plt.imshow(sub_img)
            # plt.show()
            cv2.imwrite("/home/xfr/rssid/airport/"+file_name, sub_img)
mmcv.dump(info,"/home/xfr/rssid/info.pkl")
# NOTE(review): this reopens info.pkl in text ('w') mode -- truncating it --
# and then dumps to the same path again; the dump above already wrote the
# file. Confirm whether this double write is intentional.
with open("/home/xfr/rssid/info.pkl", 'w') as f:
    mmcv.dump(info,"/home/xfr/rssid/info.pkl")
# IPython line magic: prints the working directory (only valid inside IPython).
pwd
# +
import matplotlib.pyplot as plt

# Draw every annotation polygon (green) on each validation image and save
# the annotated copies under ./original_val/.
imgIds = coco.getImgIds()
for imgid in tqdm(imgIds):
    img = coco.loadImgs(imgid)[0]
    I = cv2.imread('/home/xfr/rssid/data/val/images/'+img['file_name'])
    annIds = coco.getAnnIds(imgIds=[imgid], iscrowd=None)
    anns = coco.loadAnns(annIds)
    for ann in anns:
        # Segmentation is a flat [x0, y0, x1, y1, ...] list; regroup into
        # the four (x, y) corner points.
        poly = np.array(ann["segmentation"][0])
        xx = poly[0::2]
        yy = poly[1::2]
        poly = np.array([[xx[0],yy[0]], [xx[1],yy[1]], [xx[2],yy[2]], [xx[3],yy[3]]])
        temp = np.array([poly], np.int32)
        cv2.polylines(I, temp, 1, (0, 255, 0), 1)
    cv2.imwrite("./original_val/"+img['file_name'], I)
# for
# print(annIds)
#
# -
# get all images containing given categories, select one at random
catIds = coco.getCatIds(catNms=['small-vehicle'])
print(catIds)
imgIds = coco.getImgIds(catIds=catIds)
# imgIds = coco.getImgIds(imgIds=[335328])
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
I = io.imread('/home/xfr/rssid/data/train/images/'+img['file_name'])
plt.imshow(I)
plt.axis('off')
print(img)
annIds = coco.getAnnIds(imgIds=[img['id']], iscrowd=None)
print(len(annIds))
anns = coco.loadAnns(annIds)
for ann in anns:
bbox = ann['bbox']
x = [int(x) for x in bbox]
cv2.rectangle(I, (x[0],x[1]), (x[0]+x[2],x[1]+x[3]), (0,255,0), 2)
cv2.putText(I, str(ann['category_id']), (x[0],x[1]), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
plt.imshow(I)
# Aspect ratios (w/h, rounded to one decimal) of the annotations in the
# image inspected above.
ratios = []
for ann in anns:
    xmin, ymin, w, h = ann['bbox']
    ratio = round(w/h, 1)
    ratios.append(ratio)
l = len(ratios)
ratios.sort()
# 5th and 95th percentile aspect ratios.
print(ratios[int(0.05*l)])
print(ratios[int(0.95*l)])
import numpy as np
# Sample the sorted ratios at evenly spaced quantiles starting at 0.05.
# NOTE(review): the intended endpoint appears to be 0.95 -- verify that float
# rounding in arange never includes a value > 1, which would index out of range.
quantiles = np.arange(0.05, 1.175, 0.225)
for q in quantiles:
    print(ratios[int(q*l)])
import matplotlib.pyplot as plt

CLASS=['tennis-court', 'container-crane', 'storage-tank', 'baseball-diamond', 'plane', 'ground-track-field', 'helicopter', 'airport', 'harbor', 'ship', 'large-vehicle', 'swimming-pool', 'soccer-ball-field', 'roundabout', 'basketball-court', 'bridge', 'small-vehicle', 'helipad']
#CLASS={'tennis-court', 'container-crane', 'storage-tank', 'baseball-diamond', 'plane', 'ground-track-field', 'helicopter', 'airport', 'harbor', 'ship', 'large-vehicle', 'swimming-pool', 'soccer-ball-field', 'roundabout', 'basketball-court', 'bridge', 'small-vehicle', 'helipad'}
coco=COCO("/home/xfr/rssid/rscup/annotation/annos_rscup_train.json")
class_to_ind = dict(zip(CLASS, range(len(CLASS))))
num_class = dict(zip(CLASS, [0]*len(CLASS)))
# Count annotations per class over all images that contain a small-vehicle;
# max_num gathers the per-(class, image) annotation counts.
catIds = coco.getCatIds(catNms=['small-vehicle'])
imgIds = coco.getImgIds(catIds=catIds)
print(len(imgIds))
max_num = []
for cls in tqdm(CLASS):
    for imgid in imgIds:
        annIds = coco.getAnnIds(imgIds=[imgid], catIds = [class_to_ind[cls]], iscrowd=None)
        num_class[cls] += len(annIds)
        max_num.append(len(annIds))
print(num_class)
# Inspect the first image that contains a helipad.
catIds = coco.getCatIds(catNms=['helipad'])
imgIds = coco.getImgIds(catIds=catIds)
info = coco.loadImgs(imgIds)[0]
print(info)
# for
# print(annIds)
#
sorted(max_num)[::-1]
# Bar chart of the per-class annotation totals.
x = list(num_class.keys())
y = list(num_class.values())
plt.bar(range(len(y)), y)
plt.show()
np.sum(np.array([list(num_class.values())]))
| analyse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A framework for petrophysically and geologically guided geophysical inversion (PGI)
#
# _<NAME> and <NAME>_
#
# <img src="figures/Framework.png" width=100% align="center">
#
# These notebooks were used to generate the figures shown in the article [A framework for petrophysically and geologically guided geophysical inversion](https://doi.org/10.1093/gji/ggz389). We perform PGI over various examples: a 1D MT layered-earth, a DC profile over 2 cylinders, a stitched FDEM survey for saline water intrusion
# ## Contents
#
# There are 3 notebooks in this repository:
#
# - [1_MT_PGI_Sharp_Smooth.ipynb](notebooks/MT/1_MT_PGI_Sharp_Smooth.ipynb)
#
# - Magnetotelluric data are acquired over a layered-earth that has sharp and smooth features. The PGI algorithm is provided with the true petrophysical distribution and the goal is to use it along with the MT data to find a solution that has the desired contrast features.
#
#
# - [2_DC_PGI_2cylinders.ipynb](notebooks/DC/2_DC_PGI_2cylinders.ipynb)
#
# - A DC resistivity profile is acquired over two cylinders. We illustrate the performance of this framework when no physical property mean values are available, and compared it to the result with full petrophysical information. We highlight then how geological information from borehole logs can be incorporated into this framework.
#
#
# - [3_FDEM_PGI_Bookpurnong.ipynb](notebooks/FDEM/3_FDEM_PGI_Bookpurnong.ipynb)
#
# - This example illustrates an application of the PGI approach on a field frequency-domain EM dataset, in conjunction with a structurally constraining regularization, without using extensive geological or petrophysical information. We demonstrate how to use this framework to test hypothesis, such as a recovering a specific number of distinct units, and to build confidence, or doubts, in geological features displayed by the inversions.
#
# ## Usage
#
# To setup your software environment, we recommend you use the provided conda environment
#
# ```
# conda env create -f environment.yml
# conda activate pgi-environment
# ```
#
# alternatively, you can install dependencies through pypi
#
# ```
# pip install -r requirements.txt
# ```
#
# Please [make an issue](https://github.com/simpeg-research/Astic-2019-PGI/issues/new) if you encounter any problems while trying to run the notebooks.
# ## Citation
#
#
# <NAME>., <NAME>, and <NAME>, 2021, Petrophysically and geologically guided multi-physics inversion using a dynamic Gaussian mixture model: Geophysical Journal International, 224(1), 40–68. https://doi.org/10.1093/gji/ggaa378
#
# <NAME>., and <NAME>, 2019, A framework for petrophysically and geologically guided geophysical inversion using a dynamic Gaussian mixture model prior: Geophysical Journal International, 219(3), 1989-2012. https://doi.org/10.1093/gji/ggz389
#
# <NAME>. and <NAME>, 2018, Petrophysically guided geophysical inversion using a dynamic Gaussian mixture model prior. In SEG Technical Program Expanded Abstracts 2018 (pp. 2312-2316). https://doi.org/10.1190/segam2018-2995155.1
#
#
# ```
# @article{PGI_Joint,
# author = {<NAME> and Heagy, <NAME> and Oldenburg, <NAME>},
# title = "{Petrophysically and geologically guided multi-physics inversion using a dynamic Gaussian mixture model}",
# journal = {Geophysical Journal International},
# volume = {224},
# number = {1},
# pages = {40-68},
# year = {2020},
# month = {08},
# issn = {0956-540X},
# doi = {10.1093/gji/ggaa378},
# url = {https://doi.org/10.1093/gji/ggaa378},
# eprint = {https://academic.oup.com/gji/article-pdf/224/1/40/34193255/ggaa378.pdf},
# }
#
# @article{PGI_framework,
# author = {<NAME> and Oldenburg, <NAME>},
# title = "{A framework for petrophysically and geologically guided geophysical inversion using a dynamic Gaussian mixture model prior}",
# journal = {Geophysical Journal International},
# volume = {219},
# number = {3},
# pages = {1989-2012},
# year = {2019},
# month = {08},
# issn = {0956-540X},
# doi = {10.1093/gji/ggz389},
# url = {https://doi.org/10.1093/gji/ggz389},
# eprint = {http://oup.prod.sis.lan/gji/article-pdf/219/3/1989/30144784/ggz389.pdf},
# }
#
# @inbook{Astic2018,
# author = {<NAME> and <NAME>},
# title = {Petrophysically guided geophysical inversion using a dynamic Gaussian mixture model prior},
# booktitle = {SEG Technical Program Expanded Abstracts 2018},
# chapter = {},
# pages = {2312-2316},
# year = {2018},
# doi = {10.1190/segam2018-2995155.1},
# URL = {https://library.seg.org/doi/abs/10.1190/segam2018-2995155.1},
# eprint = {https://library.seg.org/doi/pdf/10.1190/segam2018-2995155.1}
# }
# ```
#
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Parshav-Shah/ISYS5002_portfolio/blob/main/amazon_prices.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="nAphLm9Vlzo5"
# !pip install kora -q
'''load packages'''
from bs4 import BeautifulSoup
from kora.selenium import wd

# + id="Jyp2DYzImzk-"
# Establish a headless-browser session on the Amazon AU landing page.
url = 'https://www.amazon.com.au'
wd.get(url)

# + id="DCZLhnslm9Nm"
# Work out the URL pattern for a search query.
# NOTE(review): this template contains no '{}' placeholder, so the
# .format() call below is a no-op and the hard-coded query is always
# used regardless of search_term.
template = 'https://www.amazon.com.au/s?k=ultra+wide+moniter&ref=nb_sb_noss_1'
search_term = 'ultra wide moniter'
# '+' encodes spaces in the query string
search_term = search_term.replace(' ', '+')
url = template.format(search_term)
wd.get(url)
wd
# + id="AQz3cNSMnA-E"
def get_url(search_term):
    """Build an Amazon AU search URL for *search_term*.

    Spaces in the query are replaced with '+' signs, the simple
    URL encoding Amazon's search endpoint accepts.
    """
    # Bug fix: the template previously hard-coded the query and had no
    # placeholder, so str.format() was a no-op and every search returned
    # results for the same fixed term. '{}' now receives the query.
    template = 'https://www.amazon.com.au/s?k={}&ref=nb_sb_noss_1'
    search_term = search_term.replace(' ', '+')
    url = template.format(search_term)
    return url
# + id="MT7rDgkAn2u9"
# Prompt for a query, build the search URL and load it in the browser.
search_term = input('What do you want to search??')
url = get_url(search_term)
wd.get(url)

# + id="IFVLuMHtoBep"
# Parse the rendered page source with BeautifulSoup.
soup = BeautifulSoup(wd.page_source, 'html.parser')

# + [markdown] id="h4ee8kjVoYB3"
# # Extract the Collection

# + id="9FVKD7tYoaWi"
# Each search hit is a <div data-component-type="s-search-result"> element.
result = soup.find_all('div', {'data-component-type': 's-search-result'})
result[0]

# + id="anR8Mto_olFI"
# The product title is the anchor text nested inside the result's <h2>.
result[0].h2.a.text
# + id="7SbZ48oPsNFk"
def extract_description(item):
    """Return the product title text from one search-result element."""
    return item.h2.a.text
# + id="kqz-KDsxI-eU"
extract_description(result[10])
# + [markdown] id="1XWF5LHBsjlJ"
# # Prototype the record
# + id="cC0bk72XsQ-V"
# Prototype the price extraction on the first result: the displayed
# price string sits in the 'a-offscreen' span inside the 'a-price' span.
item = result[0]
price_parent = item.find('span', 'a-price')
price= price_parent.find('span', 'a-offscreen')
price.text
# + id="jESVEqVVsyuA"
def extract_price(item):
    """Return the displayed price string, or '' when no price is shown."""
    try:
        container = item.find('span', 'a-price')
        return container.find('span', 'a-offscreen').text
    except AttributeError:
        # No 'a-price' span: find() returned None and the chained
        # attribute access raised AttributeError.
        return ''
# + id="iO16HydCs3zv"
extract_price(result[0])
# + id="64GHbNF8s734"
def extract_record(item):
    """Collect the fields of interest for one search result as a dict."""
    record = {}
    record['description'] = extract_description(item)
    record['Price'] = extract_price(item)
    return record
# + id="fBB-xo7HtIsm"
extract_record(result[0])
# + id="4QZ_hBYRtxny"
type(result)

# + id="LhWHC-W7t3mH"
# Spot-check: print one record per search result.
for r in result:
    print(extract_record(r))

# + id="BfmPYjmRuMwU"
# Accumulate all records into a list of dicts.
records = []
for r in result:
    records.append(extract_record(r))
records[0]

# + id="5YQZ4-6KNa0B"
# Tabulate the scraped records.
import pandas as pd
df = pd.DataFrame.from_records(records)
df.head()
# + [markdown] id="bQK8lneQvDxO"
# # Generalize the pattern
# + id="gF3DcIRRvHnP"
# + id="QqKbxj1kwcYc"
records = []
results = soup.find_all('div', {'data-component-type': 's-search-result'})
for item in results:
records.append(extract_record(item))
# + [markdown] id="_ZRk5fnhxeRZ"
# # Error Handling
# + id="vkIT_BNUxFvl"
# + id="pYKIDlY0x64Y"
# + id="phNc9nIpyImi"
# + [markdown] id="TGU8ZMgxyaWt"
# # Find next page
# + id="7JzOCBBRyd5B"
# + [markdown] id="FanXzdQm2SHO"
# # Putting it all together
# + id="sP0evzCj2O2d"
# !pip install kora -q
'''load packages'''
from bs4 import BeautifulSoup
from kora.selenium import wd
def get_url(search_term):
    """Build an Amazon AU search URL for *search_term*.

    Spaces in the query are replaced with '+' signs, the simple
    URL encoding Amazon's search endpoint accepts.
    """
    # Bug fix: the template previously hard-coded the query and had no
    # placeholder, so str.format() was a no-op and every search returned
    # results for the same fixed term. '{}' now receives the query.
    template = 'https://www.amazon.com.au/s?k={}&ref=nb_sb_noss_1'
    search_term = search_term.replace(' ', '+')
    url = template.format(search_term)
    return url
def extract_description(item):
    """Return the product title text from one search-result element."""
    return item.h2.a.text
def extract_price(item):
    """Return the displayed price string, or '' when no price is shown."""
    try:
        container = item.find('span', 'a-price')
        return container.find('span', 'a-offscreen').text
    except AttributeError:
        # No 'a-price' span: find() returned None and the chained
        # attribute access raised AttributeError.
        return ''
def extract_record(item):
    """Collect the fields of interest for one search result as a dict."""
    record = {}
    record['description'] = extract_description(item)
    record['Price'] = extract_price(item)
    return record
# + id="SmzW9RwEASoq" outputId="cd582bb4-f67c-4ca5-acd9-2d5ebf7a6d42" colab={"base_uri": "https://localhost:8080/", "height": 221}
# End-to-end run: open a session, search, parse, and tabulate results.
# Establish Session
url = 'https://www.amazon.com.au'
wd.get(url)

search_term = input('What do you want to search??')
url = get_url(search_term)
wd.get(url)

soup = BeautifulSoup(wd.page_source, 'html.parser')

# Extract one record per search hit.
records = []
results = soup.find_all('div', {'data-component-type': 's-search-result'})
for item in results:
    records.append(extract_record(item))
#wd.close()

import pandas as pd
df = pd.DataFrame.from_records(records)
df.head()
# + id="BF-zepIwAetK"
# + id="sOFfhTFDDpto"
| amazon_prices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import math
import os
import struct
import zipfile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from read_roi import read_roi_zip
import tifffile.tifffile
from scipy.ndimage import interpolation
from skimage.transform import rotate
from scipy import stats
from scipy import misc
import csv
from scipy.optimize import curve_fit
from lmfit import Model
from lmfit import Parameter
# +
################################################################################################################################
############# INPUT ############################################################################################################
################################################################################################################################
###########################
#### FILES ################
###########################
paths = ['path1/', 'path2/', 'path3/', 'path4/']
cometfiles = ['stack1.tif', 'stack2.tif', 'stack3.tif', 'stack4.tif']
roifiles = ['RoiSet1.zip', 'RoiSet2.zip', 'RoiSet3.zip', 'RoiSet4.zip']
###########################
#### ORGANIZATION #########
###########################
#Group data based on which are the same condition
replicates = [[0, 1], [2, 3]] #So here, the first two are one condition, the second two are another condition
conditions = ['Condition1', 'Condition2'] #Same order as list of lists in "groups"
savepath = 'savepath/'
###########################
#### VARIABLES ############
###########################
# Velocity filter data
lower_range = 4.7 #um/min
upper_range = 5.7
# Microscope-based settings
upix = 0.107 #pixel size
spf = 1 #seconds per frame
# Fitting variables
sigma = 130 #SD of the psf. sigma = FWHM/2.355.
km0 = 0.24 #Initial guess of maturation rate
###############################################################################################################
###########################
### ADVANCED VARIABLES ####
###########################
#Set to 1 to rescale image. This must be done on pseudo-flatfield corrected images. Leave at 0 otherwise.
#If on, you shouldn't compare absolute intensities, only normalized intensities.
rescale_flag = 0
#Set to 1 to normalize each individual comet before creating an average.
#Leave this at 0 if you're not sure.
normalize_flag = 0
###########################
#### CONSTANTS ############
###########################
#Amount by which to interpolate the image.
#This allows for subpixel localization of peak intensities.
subfactor = 6
#x-axis; in um; length of comet to analyze (centered around this value).
#Keep large (>4 um).
comet_length = 5
#y-axis; in um; width of comet images (centered around this value).
#Not important, center of comet width defines comet profile anyway.
comet_width = 0.6
#height of cropped region; in um. Width calculated from A to B distance
imheight = 1
#space to add on the x axis when cropping comets; in um
spacex = comet_length/2 + 0.55
#Region to search relative to the location identified in a preceding frame (in um)
#(Comet should have moved forward from one frame to the next, so only search ahead.)
search_radius_x = 0.5
search_radius_y = 0.4
#Standard deviations to show in final profile, leave at 1! For troubleshooting only.
stds_final = 1
##########################
### CONVERSION FACTORS ###
##########################
#Multiply pixel values by this to get real distances.
#Divide distances by this to get pixel values.
to_um = upix/subfactor #microns
to_nm = to_um * 1000 #nanometers
imheight = int(imheight/upix) #crop is done before subfactor, no need for subfactor in this equation
spacex = int(spacex/upix)
comet_width = int(comet_width/to_um)
comet_length = int(comet_length/to_um)
searchrad_x = int(search_radius_x/to_um)
searchrad_y = int(search_radius_y/to_um)
# +
################################################################################################################################
############# FUNCTIONS ########################################################################################################
################################################################################################################################
###############################################
##### ALIGN COMETS BY PEAK EB INTENSITY #######
###############################################
def process_comets(A, B, Z):
    """Extract, peak-align and collect one comet track from the image stack.

    Parameters
    ----------
    A, B : (x, y) pixel coordinates of the track's start and end points,
        taken from the paired ImageJ ROIs along the direction of motion.
    Z : (first_frame, last_frame) 1-based slice positions of the track.

    Returns
    -------
    (allcomets_stack, peakpixels, velocity) — a list of peak-aligned comet
    crops (one per frame), the (x, y) peak location found in each frame,
    and the tip velocity in um/min (a length-1 array, since the peak
    coordinates come from ``np.where``).

    NOTE(review): relies on module-level state set in the configuration
    cell (imstack, im_size, imheight, spacex, subfactor, comet_width,
    comet_length, searchrad_x, searchrad_y, normalize_flag, to_um, spf).
    """
    #Initialize lists and arrays
    allcomets_stack = []
    aligned_comets_profs = []  # NOTE(review): never used below
    aligned_comet = np.zeros((comet_width, comet_length))
    peakpixels = []
    ###############################
    #### BASIC CALCULATIONS #######
    ###############################
    #Total number of comets
    numcomets = Z[1] - Z[0] + 1
    #Length from start point to end point
    length = int(math.sqrt((B[0]-A[0])**2+(B[1]-A[1])**2))
    #The angle from start point to end point
    angle = math.atan2(B[1]-A[1], B[0]-A[0])
    #New start location: rotate A about the image centre by -angle so the
    #track becomes horizontal in the rotated frame
    new_x = int((A[0] - im_size/2)*math.cos(-angle) - (A[1] - im_size/2)*math.sin(-angle) + im_size/2)+1
    new_y = int((A[1] - im_size/2)*math.cos(-angle) + (A[0] - im_size/2)*math.sin(-angle) + im_size/2)+1
    #Incrementor
    cometnum = 0
    ###############################
    #### EXTRACT COMETS ###########
    ###############################
    for i in np.arange(Z[0]-1, Z[1], 1):
        #Rotate image
        im_rot = rotate(imstack[i], math.degrees(angle))
        #Crop out region with comet
        comet = im_rot[new_y-imheight-1:new_y+imheight, new_x-int(spacex):new_x+length+int(spacex)]
        #Interpolate by subfactor (cubic) for sub-pixel peak localization
        comet_sub = interpolation.zoom(comet, subfactor, order=3)
        ###########################################
        #### SUBCROP BASED ON PEAK ALIGNMENT ####
        ##########################################
        #################
        #DEFINE SEARCH AREA
        #Search extends backwards on the first comet
        if cometnum == 0:
            search_x = spacex*subfactor
            search_y = np.shape(comet_sub)[0] - imheight*subfactor
            search_area = comet_sub[search_y-searchrad_y:search_y+searchrad_y, search_x-searchrad_x:search_x+searchrad_x]
        #The search area shifts forwards in subsequent comets
        #(peakpix_x/peakpix_y carry over from the previous iteration)
        else:
            search_x = peakpix_x[0]
            search_y = peakpix_y[0]
            search_area = comet_sub[search_y-searchrad_y:search_y+searchrad_y, search_x-int(searchrad_x/2):search_x+searchrad_x*2]
        #################
        #FIND PEAK PIXEL (brightest pixel of the search area, located in
        #the full sub-sampled image)
        peakpix_y, peakpix_x = np.where(comet_sub == np.amax(search_area))
        peakpixels.append((peakpix_x, peakpix_y))
        #################
        #EXTRACT COMET AROUND PEAK PIXEL (i.e. align)
        for x in np.arange(0, comet_length):
            for y in np.arange(0, comet_width):
                aroundpeak_x = peakpix_x - int(comet_length/2) + x
                aroundpeak_y = peakpix_y - int(comet_width/2) + y
                aligned_comet[y, x] = comet_sub[aroundpeak_y, aroundpeak_x]
        #################
        #Normalize if requested
        if normalize_flag == 1:
            aligned_comet = (aligned_comet - np.amin(aligned_comet))/(np.amax(aligned_comet) - np.amin(aligned_comet))
        #################
        #Store aligned comet in stack (np.array() copies, so later frames
        #do not overwrite this entry when aligned_comet is reused)
        allcomets_stack.append(np.array(aligned_comet))
        #############
        cometnum += 1
    ###############################
    ######## Find velocity ########
    ###############################
    #Peak displacement over the track, converted to um/min
    velocity = ((peakpixels[-1][0] - peakpixels[0][0]) * to_um) / (len(peakpixels)*spf / 60)
    ###############################
    ########### RETURN ############
    ###############################
    return(allcomets_stack, peakpixels, velocity)
# +
################################################################################################################################
############# GET COMET PROFILES ###############################################################################################
################################################################################################################################
#Initialize lists
avgcomet_lst = []
profile_lst = []
profile_lst_norm = []
profilestd_lst = []
profilesem_lst = []
filtvelocities_lst = []
allvelocities_lst = []
###############################
##### ITERATE CONDITIONS ######
###############################
for condition_num in np.arange(0,len(conditions)):
print("CONDITION: " + str(conditions[condition_num]))
#Initialize lists
allcomets = []
filtvelocities = []
allvelocities = []
#Incrementor
totalcomets = 0
###############################
##### ITERATE REPLICATES ######
###############################
for replicate_num, replicate in enumerate(replicates[condition_num]):
##############################
#### LOAD STACK ##############
##############################
#################
#Load file
cometpath = os.path.join(paths[replicate], cometfiles[replicate])
im = tifffile.imread(cometpath)
#################
#Store frames in list
imstack = []
for i in im:
imstack.append(i)
#################
#Rescale if requested
if rescale_flag == 1:
imstack = np.array(imstack)
imstack = (imstack - imstack.min())/(imstack.max() - imstack.min())
#################
#Get some properties of the stack
num_slices = np.shape(im)[0]
im_size = np.shape(im)[1]
##########################
##### GET ROIS ###########
##########################
#################
#Load roi file
roipath = os.path.join(paths[replicate], roifiles[replicate])
rois = read_roi_zip(roipath) #Hadrien's package
#################
#Store values in list
roi_values = [ [k,v] for k, v in rois.items() ]
comet_rois = []
for i in np.arange(0, len(roi_values)-1, 2):
comet_rois.append([(roi_values[i][1]['x'][0], roi_values[i][1]['y'][0]),
(roi_values[i+1][1]['x'][0], roi_values[i+1][1]['y'][0]),
(roi_values[i][1]['position'], roi_values[i+1][1]['position'])])
###############################
######## ANALYZE COMETS ######
###############################
#Incrementor for comets outside of requested velocity range
excluded = 0
#Go through each comet
for comet_num, (A, B, Z) in enumerate(comet_rois):
#Check how many comets there are here to print
total_repcomets = len(comet_rois)
print("Working on replicate #" + str(replicate_num+1) + ", comet #" + str(comet_num+1) + "/" + str(total_repcomets) + ", excluded " + str(excluded) + " ", end='\r', flush=True)
#################
#Call process_comets function to get average aligned comet and profile
aligned_comet_stack, peakpixels, velocity = process_comets(A, B, Z)
#################
#Store all velocities in allvelocities list
allvelocities.append(velocity[0])
#################
#Store velocities that are within requested range in filtvelocities list
if lower_range <= velocity <= upper_range:
filtvelocities.append(velocity[0])
for i in aligned_comet_stack:
allcomets.append(i)
#################
#Exclude comets that are outside of the the velocity range
else:
excluded += 1
#Compile the number of comets there are in this dataset (including those that were excluded)
totalcomets += total_repcomets
#############################################
###### AVERAGE COMET AND PLOT PROFILE ######
#############################################
#################
#Average all comets and get errors
avg_aligned_comet = np.mean(allcomets, axis = 0)
avg_aligned_comet_std = np.std(allcomets, axis = 0)
avg_aligned_comet_sem = np.divide(avg_aligned_comet_std, np.sqrt(np.shape(allcomets)[0]))
#################
#Save average comet (NOTE: scipy.misc.imsave was removed in SciPy >= 1.2;
#imageio.imwrite is the modern replacement)
misc.imsave((os.path.join(savepath, str(conditions[condition_num]) + ' Average Comet.png')), avg_aligned_comet)
#################
#Plot profile is the profile along center of comet (i.e. y=imheight/2)
centerpix = int(np.shape(avg_aligned_comet)[0]/2)
#################
#Extract profile and errors at center
avg_aligned_comet_prof = avg_aligned_comet[centerpix,:]
avg_aligned_comet_prof_std = avg_aligned_comet_std[centerpix,:]
avg_aligned_comet_prof_sem = avg_aligned_comet_sem[centerpix,:]
#################
#Store normalized profile in new list
avg_aligned_comet_prof_norm = (avg_aligned_comet_prof - np.amin(avg_aligned_comet_prof))/(np.amax(avg_aligned_comet_prof) - np.amin(avg_aligned_comet_prof))
###################
###### PLOT ######
###################
#Define y-axis tick labels for image
tickspacing_y = np.arange(0, avg_aligned_comet.shape[0], 100/to_nm).astype(int)
ticklabels_y = np.arange(0, avg_aligned_comet.shape[0]*to_nm, 100).astype(int)
fontsize = 25
fig, ax = plt.subplots(2,1, figsize=(22, 9))
ax = plt.subplot(2,1,1)
plt.imshow(avg_aligned_comet, cmap="viridis", aspect='auto', interpolation='none')
plt.yticks(tickspacing_y, ticklabels_y, fontsize = fontsize)
plt.xticks([], [], fontsize = fontsize)
plt.yticks(fontsize = fontsize)
plt.ylabel('Distance from peak (nm)', fontsize = fontsize)
ax.set_facecolor('white')
ax.grid(False)
ax = plt.subplot(2,1,2)
plt.plot(np.arange(-comet_length/2, comet_length/2), avg_aligned_comet_prof, linewidth = 2, color = 'Maroon')
plt.scatter(np.arange(-comet_length/2, comet_length/2), avg_aligned_comet_prof, linewidth = 4, s = 50, color = 'Maroon')
(_, caps, _) = plt.errorbar(np.arange(-comet_length/2, comet_length/2), avg_aligned_comet_prof, yerr = avg_aligned_comet_prof_sem, linewidth = 3, color = 'Maroon', capsize= 5, alpha = 0.7)
for cap in caps:
cap.set_markeredgewidth(1)
plt.xticks(fontsize = fontsize)
plt.xticks(fontsize = fontsize)
plt.yticks(fontsize = fontsize)
plt.xlabel('Distance from peak (nm)', fontsize = fontsize)
plt.ylabel('Intensity (a.u.)', fontsize = fontsize)
ax.set_facecolor('white')
ax.grid(False)
plt.tight_layout()
plt.show()
########################################
### ACCUMULATE FOR COMBINED PLOT ######
########################################
avgcomet_lst.append(avg_aligned_comet)
profile_lst_norm.append(avg_aligned_comet_prof_norm)
profile_lst.append(avg_aligned_comet_prof)
profilestd_lst.append(avg_aligned_comet_prof_std)
profilesem_lst.append(avg_aligned_comet_prof_sem)
filtvelocities_lst.append(filtvelocities)
allvelocities_lst.append(allvelocities)
################################
### OUTPUT INFORMATION #########
################################
# Output Profiles
x = (np.arange(-comet_length/2, comet_length/2))*to_nm
for p in np.arange(0, len(profile_lst_norm)):
rows = zip(x, avg_aligned_comet_prof[::-1], avg_aligned_comet_prof_norm[::-1])
with open(os.path.join(savepath, conditions[condition_num] + ' Raw and Normalized Profiles - mirrored.csv'), 'w', newline='') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
# Output Report
analyzedcomets = len(allcomets)
velocity_output ='\nVelocity filtered: ' + str(lower_range) + ' to ' + str(upper_range) + ' um/min' + '\nAverage velocity = ' + str(np.around(np.mean(filtvelocities),1)) + ' +/- ' + str(np.around(np.std(filtvelocities),1)) + ' um/min' + '\nUnfiltered dataset has a mean of ' + str(np.around(np.mean(allvelocities),1)) + ' +/- ' + str(np.around(np.std(allvelocities),1))
totalcomets_output = '\n\nTotal of ' + str(analyzedcomets) + ' comets analyzed out of ' + str(totalcomets) + ' \n'
textoutput = velocity_output + totalcomets_output + '\n\nRescale flag = ' + str(rescale_flag) + '\nPixel size = ' + str(upix) + '\nSeconds per frame = ' + str(spf) + '\nSubsampling = ' + str(subfactor) + ' pixels' + '\n\nimheight = ' + str(imheight) + '\nspacex = ' + str(spacex) + '\ncomet_length = ' + str(comet_length) + '\ncomet_width = ' + str(comet_width) + '\nsearch_radius_x = ' + str(search_radius_x) + '\nsearch_radius_y = ' + str(search_radius_y)
text_file = open(savepath + str(conditions[condition_num]) + " Analysis Report.txt", "w")
text_file.write(textoutput)
text_file.close()
print("Done")
# +
################################################################################################################################
######### OVERLAY THE CONDITIONS ###############################################################################################
################################################################################################################################
#####PLOT
fontsize = 25
legend = []
x_space = 400
relative2peak = int(avg_aligned_comet.shape[1]*to_nm/2)-x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space)
tickspacing_y = np.arange(0, avg_aligned_comet.shape[0], 100/to_nm).astype(int)
ticklabels_y = np.arange(0, avg_aligned_comet.shape[0]*to_nm, 100).astype(int)
tickspacing_x = np.arange(relative2peak/to_nm, avg_aligned_comet.shape[1], x_space/to_nm).astype(int)
ticklabels_x = np.arange(-x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space), x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space) + x_space, x_space).astype(int)
maxplot = 0
for condition in np.arange(0,len(conditions)):
errplot_min = profile_lst[condition] - profilesem_lst[condition]
errplot_max = profile_lst[condition] + profilesem_lst[condition]
if np.amax(errplot_max) > maxplot:
maxplot = np.amax(errplot_max)
fig, ax = plt.subplots(1,2, figsize=(24, 10))
###############################################
colors = sns.color_palette(sns.color_palette("Reds_d", n_colors=len(profile_lst)-1)) + sns.color_palette(sns.color_palette("Blues_d", n_colors=1))
colors = sns.color_palette('dark')
#colors = ['Firebrick', 'Dodgerblue']
###############################################
ax = plt.subplot(2,2,1)
minplot = np.amin(profile_lst)
for condition, color in zip(np.arange(0,len(conditions)), colors):
plt.plot(np.arange(0, comet_length), profile_lst[condition], linewidth = 2, color = color, alpha = 0.7)
plt.scatter(np.arange(0, comet_length), profile_lst[condition], linewidth = 3, s = 60, edgecolor = color, facecolor = 'White', alpha = 1, label = conditions[condition])
(_, caps, _) = plt.errorbar(np.arange(0, comet_length), profile_lst[condition], yerr = profilesem_lst[condition], fmt='o', markersize=8, capsize=4, color = color)
for cap in caps:
cap.set_markeredgewidth(1)
plt.xticks(tickspacing_x, ticklabels_x, fontsize = fontsize)
plt.xlim(tickspacing_x[0], tickspacing_x[-1])
plt.yticks(fontsize = fontsize) #np.arange(0, 1.25, 0.25),
plt.ylim(minplot, maxplot)
plt.xlabel('Distance from peak (nm)', fontsize = fontsize)
plt.ylabel('Intensity (a.u.)', fontsize = fontsize)
plt.legend(fontsize = fontsize-3, loc = 'upper right', handletextpad = 0.5, handlelength = 0.2, markerfirst = False)
plt.grid(False)
plt.axhline(y = minplot, color='k', linewidth = 4)
plt.axvline(x = tickspacing_x[0], color='k', linewidth = 4)
ax.tick_params(direction='out', length=8, width=2, colors='Black')
ax.set_facecolor('white')
###############################################
ax = plt.subplot(2,2,2)
for condition, color in zip(np.arange(0,len(conditions)), colors):
prof_norm = (profile_lst[condition] - np.amin(profile_lst[condition]))/(np.amax(profile_lst[condition]) - np.amin(profile_lst[condition]))
prof_sem = (profilesem_lst[condition])/(np.amax(profile_lst[condition]) - np.amin(profile_lst[condition]))
plt.plot(np.arange(0, comet_length), prof_norm, linewidth = 2, color = color, alpha = 0.7)
plt.scatter(np.arange(0, comet_length), prof_norm, linewidth = 3, s = 60, edgecolor = color, facecolor = 'White', alpha = 1, label = conditions[condition])
(_, caps, _) = plt.errorbar(np.arange(0, comet_length), prof_norm, yerr = prof_sem, fmt='o', markersize=8, capsize=4, color = color)
for cap in caps:
cap.set_markeredgewidth(1)
plt.xticks(tickspacing_x, ticklabels_x, fontsize = fontsize)
plt.xlim(tickspacing_x[0], tickspacing_x[-1])
plt.yticks(fontsize = fontsize) #np.arange(0, 1.25, 0.25),
plt.ylim(0, 1)
plt.xlabel('Distance from peak (nm)', fontsize = fontsize)
plt.ylabel('Intensity (a.u.)', fontsize = fontsize)
plt.legend(fontsize = fontsize-3, loc = 'upper right', handletextpad = 0.5, handlelength = 0.2, markerfirst = False)
plt.grid(False)
plt.axhline(y = 0, color='k', linewidth = 4)
plt.axvline(x = tickspacing_x[0], color='k', linewidth = 4)
ax.tick_params(direction='out', length=8, width=2, colors='Black')
ax.set_facecolor('white')
plt.tight_layout()
#fig.savefig(os.path.join(savepath, "Averaged comet plot.pdf"))
# +
from scipy.stats import norm
################################################################################################################################
############# FIT PLOT PROFILES ################################################################################################
################################################################################################################################
###########################
def normalized_data(data, data_err):
    """Min-max scale *data* to [0, 1]; scale *data_err* by the same factor."""
    lo = np.amin(data)
    span = np.amax(data) - lo
    return (data - lo) / span, data_err / span
def normalize(x):
    """Rescale *x* linearly so its minimum maps to 0 and its maximum to 1."""
    lo = np.amin(x)
    return (x - lo) / (np.amax(x) - lo)
####################################
def binding_sites(x, pf, km, vg, d, nlat):
    """Exponential maturation profile of binding sites behind the tip.

    Decay with rate km/vg starting at offset *d*, on a constant lattice
    background *nlat*; zero ahead of the tip (Heaviside step).
    """
    shifted = x - d
    decay = (pf / 8) * np.exp(-shifted * km / vg)
    return np.heaviside(shifted, 1) * (decay + nlat)
def gaussian(x, sigma, xc):
    """Normal probability density centred at *xc* with std. dev. *sigma*."""
    return norm.pdf(x, loc=xc, scale=sigma)
def convolved_profile(x, pf, km, vg, d, nlat, sigma, xc):
    """Binding-site profile blurred by the microscope PSF, min-max normalized."""
    sites = binding_sites(x, pf, km, vg, d, nlat)
    blurred = np.convolve(sites, gaussian(x, sigma, xc), mode='same')
    return normalize(blurred)
##################################
fitparams_lst = []
for p,e,v in zip(profile_lst, profilesem_lst, filtvelocities_lst):
x_data = (np.arange(-comet_length/2, comet_length/2))*to_nm
data = p[::-1]
data_err = e[::-1]
data_weights = [1/i for i in data_err]
vg = np.mean(v)*1000/60
vg_err = np.std(v)*1000/60
data, data_err = normalized_data(data, data_err)
fig, ax = plt.subplots(1,4, figsize=(15, 4))
ax = plt.subplot(1,4,1)
plt.plot(x_data, binding_sites(x_data, 13, km0, vg, 0, data[-1]), color = 'blue')
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
ax = plt.subplot(1,4,2)
plt.plot(x_data, gaussian(x_data, sigma, 0), color = 'blue')
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
ax = plt.subplot(1,4,3)
plt.plot(x_data, convolved_profile(x_data, 13, km0, vg, 0, data[-1], sigma, 0), color = 'blue')
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
ax = plt.subplot(1,4,4)
plt.plot(x_data, data, color = 'blue')
plt.errorbar(x_data, data, yerr = data_err)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.tight_layout()
plt.show()
print("nlat = " + str(data[-1]))
############################
gmodel = Model(convolved_profile)
result = gmodel.fit(data, x=x_data, weights=data_weights,
pf=Parameter('pf', value=13, vary=False, min=10, max=15),
km=Parameter('km', value=km0, vary=True, min=0.01, max=3),
vg=Parameter('vg', value=vg, vary=False),
d=Parameter('d', value=0, vary=False),
nlat=Parameter('nlat', value=data[-1], vary=True, min=data[-1]-0.05, max=data[-1]+0.05),
sigma=Parameter('sigma', value=sigma, vary=False, min=sigma),
xc=Parameter('xc', value=0, vary=True, min=-500, max=100))
#print(result.fit_report())
print(result.ci_report())
fpf = result.values['pf']
fkm = result.values['km']
fvg = result.values['vg']
fd = result.values['d']
fnlat = result.values['nlat']
fsigma = result.values['sigma']
fxc = result.values['xc']
km_lower = fkm - result.conf_interval()['km'][1][1]
km_upper = result.conf_interval()['km'][5][1] - fkm
kmci = (km_lower + km_upper)/2 #95% interval
print(vg, vg_err)
result_comet_length = fvg/fkm
result_comet_length_err = comet_length*np.sqrt((vg_err/vg)**2 + (kmci/fkm)**2)
print("\nTHE COMET LENGTH IS: " + str(result_comet_length) + " +/- " + str(result_comet_length_err))
fitparams = [fpf, fkm, fvg, fd, fnlat, fsigma, fxc]
fitparams_lst.append(fitparams)
####################################
convolved_fit = convolved_profile(x_data, fpf, fkm, fvg, fd, fnlat, fsigma, fxc)
shift = 0#int(np.where(convolved_fit == np.amax(convolved_fit))[0]-comet_length/2+1)
fig, ax = plt.subplots(1,1, figsize=(16, 4))
ax = plt.subplot(1,1,1)
plt.scatter(x_data, data, color = 'gray')
plt.plot(x_data[0:(np.shape(x_data)[0]-shift)], convolved_fit[shift:], color = 'red')
plt.ylim(0,1.1)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.grid(False)
ax.set_facecolor('white')
plt.axhline(y = 0, color='k', linewidth=4)
#plt.axvline(x = 0, color='k', linewidth=2)
ax.tick_params(direction='out', length=4, width=2, colors='Black')
plt.tight_layout()
plt.show()
##################
# +
################################################################################################################################
############# OVERLAY FIT PLOT PROFILES ########################################################################################
################################################################################################################################
x_space = 300
relative2peak = int(avg_aligned_comet.shape[1]*to_nm/2)-x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space)
tickspacing_y = np.arange(0, avg_aligned_comet.shape[0], 100/to_nm).astype(int)
ticklabels_y = np.arange(0, avg_aligned_comet.shape[0]*to_nm, 100).astype(int)
tickspacing_x = np.arange(relative2peak/to_nm, avg_aligned_comet.shape[1], x_space/to_nm).astype(int)
ticklabels_x = np.arange(-x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space), x_space*int(int(avg_aligned_comet.shape[1]*to_nm/2)/x_space) + x_space, x_space).astype(int)
x_data = (np.arange(-comet_length/2, comet_length/2))*to_nm
fig, ax = plt.subplots(2,1, figsize=(7.5, 12))
colors = sns.color_palette(sns.color_palette("Reds_d", n_colors=1)) + sns.color_palette(sns.color_palette("Blues_d", n_colors=1))
ax = plt.subplot(2,1,1)
for condition, color in zip(np.arange(0,len(conditions)), colors):
prof_norm = (profile_lst[condition] - np.amin(profile_lst[condition]))/(np.amax(profile_lst[condition]) - np.amin(profile_lst[condition]))
prof_sem = (profilesem_lst[condition])/(np.amax(profile_lst[condition]) - np.amin(profile_lst[condition]))
plt.plot(np.arange(0, comet_length), convolved_profile(x_data, *fitparams_lst[condition]), linewidth = 4, color = color, alpha = 0.7)
plt.scatter(np.arange(0, comet_length), prof_norm[::-1], linewidth = 3, s = 60, edgecolor = color, facecolor = 'White', alpha = 1, label = conditions[condition])
(_, caps, _) = plt.errorbar(np.arange(0, comet_length), prof_norm[::-1], yerr = prof_sem[::-1], fmt='o', markersize=8, capsize=5, color = color)
for cap in caps:
cap.set_markeredgewidth(2)
plt.xticks(tickspacing_x, ticklabels_x, fontsize = fontsize)
plt.xlim(tickspacing_x[int(len(tickspacing_x)/2-2)], tickspacing_x[int(len(tickspacing_x)/2+2)])
plt.yticks(fontsize = fontsize) #np.arange(0, 1.25, 0.25),
plt.ylim(0, 1)
plt.xlabel('Distance from peak (nm)', fontsize = fontsize)
plt.ylabel('Normalized Intensity (a.u.)', fontsize = fontsize)
plt.legend(fontsize = fontsize-3, loc = 'upper right', handletextpad = 0.5, handlelength = 0.2, markerfirst = False)
plt.grid(False)
plt.axhline(y = 0, color='k', linewidth = 4)
plt.axvline(x = tickspacing_x[int(len(tickspacing_x)/2-2)], color='k', linewidth = 4)
ax.tick_params(direction='out', length=8, width=2, colors='Black')
ax.set_facecolor('white')
ax = plt.subplot(2,1,2)
for condition, color in zip(np.arange(0,len(conditions)), colors):
plt.plot(np.arange(0, comet_length), normalize(binding_sites(x_data, *fitparams_lst[condition][:-2])), linewidth = 4, color = color, alpha = 1)
plt.xticks(tickspacing_x, ticklabels_x, fontsize = fontsize)
plt.xlim(tickspacing_x[int(len(tickspacing_x)/2-2)], tickspacing_x[int(len(tickspacing_x)/2+2)])
plt.yticks(fontsize = fontsize) #np.arange(0, 1.25, 0.25),
plt.ylim(0, 1)
plt.xlabel('Distance from peak (nm)', fontsize = fontsize)
plt.ylabel('Normalized Probability', fontsize = fontsize)
plt.legend(fontsize = fontsize-3, loc = 'upper right', handletextpad = 0.5, handlelength = 0.2, markerfirst = False)
plt.grid(False)
plt.axhline(y = 0, color='k', linewidth = 4)
plt.axvline(x = tickspacing_x[int(len(tickspacing_x)/2-2)], color='k', linewidth = 4)
ax.tick_params(direction='out', length=8, width=2, colors='Black')
ax.set_facecolor('white')
plt.tight_layout()
#fig.savefig(os.path.join(savepath, "Normalized plots with fitted curves.pdf"))
# -
| EB_Comet/Comet Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import h5py
from tqdm import trange
from preprocessing import *
# Which Enrichr-style gene-set library file to preprocess. Known options:
# MGI_Mammalian_Phenotype_Level_4_2019.txt, GO_Biological_Process_2018.txt, KEGG_2019_Human.txt
file_name = "KEGG_2019_Human.txt"
# Prefix used to name the datasets derived from this library in the HDF5 files.
curr_name = "kegg_"
# # Preprocessing
# Convert the tab-separated library file into a dictionary mapping each
# phenotype/term name to the list of its associated genes.
d = {}
with open(file_name) as library_file:
    for raw_line in library_file:
        # Column 0 is the term name, column 1 is a (skipped) description
        # field, and the remaining columns are the member genes.
        fields = raw_line.strip().split("\t")
        d[fields[0]] = fields[2:]
# Invert the library: map each gene to the list of phenotypes it appears in.
gene_dict = {}
for pheno, genes in d.items():
    for g in genes:
        gene_dict.setdefault(g, []).append(pheno)
# Create index-ordered lists of phenotypes and genes for the gene set matrix:
# a binary matrix with genes as rows and phenotypes as columns, where cell
# (g, p) is 1 iff gene g is associated with phenotype p.
# Python dicts preserve insertion order (3.7+), so a list of keys reflects the
# order entries were added and can serve as a stable index.
curr_pheno = list(d.keys())
curr_genes = list(gene_dict.keys())
# Map each phenotype to its column index so every gene's phenotype list can be
# scattered directly into its row. This is O(total associations) instead of
# the original O(genes x phenotypes) scan with an O(k) list-membership test
# per cell.
pheno_col = {pheno: col for col, pheno in enumerate(curr_pheno)}
binary_matrix = np.zeros((len(curr_genes), len(curr_pheno)))
for row, gene in enumerate(curr_genes):
    for pheno in gene_dict[gene]:
        binary_matrix[row, pheno_col[pheno]] = 1
binary_matrix.shape
pd.DataFrame(binary_matrix)
# Build a lookup from phenotype column index to the row indices (genes)
# flagged for that phenotype, for use in later computations.
# Transposed view: phenotypes as rows, genes as columns.
rev = np.transpose(binary_matrix)
pheno_to_gene = {col: np.where(rev[col] == 1)[0] for col in range(len(curr_pheno))}
rev.shape
print(len(pheno_to_gene))
# # Mouse gene set library
#
# To compute this new matrix (which is not a Pearson correlation matrix), I will compare each gene to every other gene in a given set, get the average correlation and save that in a matrix that has genes as rows and phenotypes as columns.
# Start by getting the correlation matrix so that the average correlation for each gene for each set is easier to compute, by just taking the average of the rest of the correlations found associated with the given set.
cor = np.corrcoef(binary_matrix)
# Filling the float diagonal with None stores NaN, so each gene's
# self-correlation is excluded from the per-set means taken later
# (pandas DataFrame.mean skips NaN by default).
np.fill_diagonal(cor, None)
# Should be a square matrix M x M in which M = # of genes
cor.shape
# Rebuild the phenotype -> gene-row-indices lookup (duplicates the earlier
# cell so this section can be re-run independently).
rev = binary_matrix.T
pheno_to_gene = {idx: np.flatnonzero(rev[idx] == 1) for idx in range(len(curr_pheno))}
# Wrap the correlation matrix in a DataFrame so rows/columns can be selected
# with .iloc below.
cor = pd.DataFrame(cor)
# For each phenotype, compute every gene's mean correlation to the genes in
# that phenotype's set. NaN self-correlations are skipped by DataFrame.mean,
# and any all-NaN column result is zero-filled at the end.
# (Removed: an unused `count = []` local and a redundant
# `curr_gslib = pd.DataFrame()` initializer that was immediately overwritten.)
preds = []
for j in trange(len(curr_pheno)):
    # Columns = the genes belonging to phenotype j; rows = all genes.
    indices = pheno_to_gene[j]
    preds.append(cor.iloc[:, indices].mean(axis=1))
# Assemble all phenotype columns at once: genes x phenotypes.
curr_gslib = pd.concat(preds, axis=1)
curr_gslib = curr_gslib.fillna(0)
# # Making predictions
# In this section, I will be making predictions about the TCGA dataset based on the following formula:
#
# 
#
# In other words, G is the TCGA correlation matrix and GF is the matrix of genes and phenotypes that have the mean correlations of a given gene to every other gene in a given set (the mouse gene set library). The goal is to be able to get the new gene set library GF' to make predictions about the TCGA dataset.
# ## Gathering variables
# Start by reloading the mouse gene set library, phenotypes and genes, and the TCGA correlation matrix and genes.
# Open the TCGA HDF5 file read/write and grab handles to its groups.
tcga = h5py.File("tcga.hdf5", "r+")
list(tcga.keys())
# h5py Dataset handle: lazy, stays on disk until indexed/converted.
tcga_cor = tcga['full correlation matrix']
data = tcga['data']
meta = tcga['meta']
# Gene names are stored as bytes; str(g[0])[2:-1] strips the b'...' repr.
# Works for ASCII names; NOTE(review): non-ASCII bytes would be left as
# escape sequences - consider .decode() instead. TODO confirm all-ASCII.
tcga_genes = [ str(g[0])[2:-1] for g in meta['genes'] ]
# +
# mat = np.matrix(tcga_cor)
# +
# pd.DataFrame(mat)
# +
# Restore symmetric matrix
# for i in trange(len(mat)):
# row = mat[i].T
# mat[:, i] = row
# +
# pd.DataFrame(mat)
# +
# tcga.create_dataset("full correlation matrix", data=mat)
# -
# # Filling in new gene set library
# 
# For each gene in TCGA genes for each phenotype, sum the correlation with every other gene multiplied by its correlation to the mouse gene set function, and divide the total by the correlations of the genes to the gene set function. This can be calculated by creating a new mouse gene set library that had all of the TCGA genes as rows in order, so that we may perform a dot product for the numerator and a summation in the denominator. Since self-correlations had been set to 0 in the correlation matrix, we can still dot each row of the correlation matrix with each col of the gene set library matrix to get the numerator, and the self-correlation would negate its product. For the denominator, we can sum the gene set library row but subtract the entry for the current gene.
# Open the AUC results file; NOTE: this rebinds `data`/`meta`, which
# previously pointed into tcga.hdf5.
fil = h5py.File("auc_data.hdf5", "r+")
data = fil['data']
meta = fil['meta']
list(data.keys())
# One-time write of the phenotype names; left commented out once the dataset existed.
# meta.create_dataset(curr_name + "pheno", data=pd.DataFrame(curr_pheno).astype("S"))
curr_pheno = meta[curr_name + "pheno"]
# Bytes -> str via the b'...'-repr-stripping trick (see note above).
curr_pheno = [ str(p[0])[2:-1] for p in curr_pheno ]
# From here on these names refer to on-disk h5py datasets, not the in-memory
# objects computed earlier in the notebook.
curr_gslib = data[curr_name + "gslib"]
binary_matrix = data[curr_name + "bin_mat"]
# +
# Expanded mouse gene set library with the same number of genes (rows) as the
# TCGA gene set. Mouse genes not found in the TCGA gene set are simply
# ignored, and TCGA genes absent from the mouse library keep all-zero rows.
ex_mgsl = np.zeros((len(tcga_genes), len(curr_pheno)))
# -
# Lookup from TCGA gene name to its row index, used to fill ex_mgsl below.
tcga_to_idx = {idx_pos: None for idx_pos in ()} or {}
for row_idx, gene_name in enumerate(tcga_genes):
    tcga_to_idx[gene_name] = row_idx
"""
Loop through the current mouse gene names. If the mouse gene name is found in the tcga_to_idx
dictionary, we find its index according to the TCGA gene list and replace the ex_mgsl row of
zeros with the row found in the previous mouse gene set library. All of the genes found in the
TCGA library but not in the mouse gene set library will be left as zero for phenotype correlations.
"""
for m in range(len(curr_genes)):
curr_gene = curr_genes[m]
if curr_gene in tcga_to_idx:
idx = tcga_to_idx[curr_gene]
ex_mgsl[idx] = curr_gslib[m] # replace expanded mgsl row with the prev mgsl row of correlations
ex_mgsl.shape
# +
"""
We can compute the numerator part of the matrix by multiplying matrices together.
Use Numpy rather than go through matrix manually b/c np probably has some speedy
magic we don't know about.
"""
# tcga_cor is an h5py dataset; np.matmul converts it to an in-memory array
# first, so this step loads the full (genes x genes) correlation matrix.
gslib = np.matmul(tcga_cor, ex_mgsl)
# -
# Check to get a new matrix with TCGA genes as rows and phenotypes of columns
gslib.shape
# +
# To finish the gene set library, each entry gslib[i][j] is divided by the sum
# of the correlations in phenotype j's set minus gene i's own correlation for
# that set. Computed vectorized: pheno_sums (one value per phenotype)
# broadcasts across rows and ex_mgsl supplies the per-gene correction. This
# replaces the original O(genes x phenotypes) Python double loop with one
# NumPy broadcast division (identical math up to float summation order).
pheno_sums = np.asarray(curr_gslib).sum(axis=0)
# In-place elementwise divide; zero denominators produce inf/nan with a
# runtime warning, matching the original element-by-element behavior.
gslib /= pheno_sums - ex_mgsl
# -
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score
from scipy.stats import norm
from matplotlib import pyplot
# Sort so the common-gene order (and every list/matrix derived from it below)
# is deterministic across runs; bare set iteration order depends on the
# per-process string hash seed.
common_genes = sorted(set(tcga_genes) & set(curr_genes))
# Per-gene AUC on the mouse library itself: how well a gene's row of mean set
# correlations recovers its true set memberships across all phenotypes.
auc_list = [
    roc_auc_score(binary_matrix[row], curr_gslib[row])
    for row in range(len(curr_genes))
]
np.mean(auc_list)
# Per-phenotype AUC on the mouse library: one score per phenotype column.
auc_list = []
for col in trange(len(curr_pheno)):
    auc_list.append(roc_auc_score(binary_matrix[:, col], curr_gslib[:, col]))
np.mean(auc_list)
# Per-gene AUC for the TCGA-derived predictions, restricted to genes present
# in both gene lists.
# Build the mouse-library gene -> row lookup once. setdefault keeps the FIRST
# occurrence, matching the original np.where(...)[0][0], but turns the O(n)
# full-array scan per common gene into an O(1) dict lookup.
mouse_gene_row = {}
for row, gene in enumerate(curr_genes):
    mouse_gene_row.setdefault(gene, row)
auc_list = []
for g in common_genes:
    y_true = binary_matrix[mouse_gene_row[g]]
    y_probs = gslib[tcga_to_idx[g]]
    # calculate AUC
    auc_list.append(roc_auc_score(y_true, y_probs))
np.unique(auc_list, return_counts=True)[0]
np.mean(auc_list)
# Per-phenotype AUC restricted to the common genes: align predictions and
# ground truth on the same gene rows, then score each phenotype column.
common_idx = [ tcga_to_idx[g] for g in common_genes ]
# First-occurrence gene -> row dict (matches np.where(...)[0][0]) built once,
# replacing an O(n) array scan per common gene.
mouse_gene_row = {}
for row, gene in enumerate(curr_genes):
    mouse_gene_row.setdefault(gene, row)
common_binary_idx = [ mouse_gene_row[g] for g in common_genes ]
smaller_gslib = pd.DataFrame(gslib).iloc[common_idx]
smaller_binary = pd.DataFrame(binary_matrix).iloc[common_binary_idx]
pheno_auc = []
for p in trange(len(curr_pheno)):
    y_true = smaller_binary.loc[:, p]
    y_probs = smaller_gslib.loc[:, p]
    pheno_auc.append(roc_auc_score(y_true, y_probs))
np.mean(pheno_auc)
# Re-open the AUC results file to persist/inspect the computed matrices.
fil = h5py.File("auc_data.hdf5", "r+")
data = fil['data']
meta = fil['meta']
# +
# One-time persistence of the gene lists and matrices; left commented out
# after the datasets were written (re-running would raise "name already
# exists" errors from h5py).
# meta.create_dataset("tcga_genes", data=pd.DataFrame(tcga_genes).astype("S"))
# meta.create_dataset(curr_name + "genes", data=pd.DataFrame(curr_genes).astype("S"))
# del data["tcga_" + curr_name + "gslib"]
# data.create_dataset(curr_name + "gslib", data=curr_gslib)
# data.create_dataset(curr_name + "bin_mat", data=binary_matrix)
# data.create_dataset("tcga_" + curr_name + "gslib", data=gslib)
# fil.close()
# -
print(list(data.keys()))
print(list(meta.keys()))
# Reload the GO-BP results. BUG FIX: the original kept a lazy h5py Dataset
# handle and then read from it AFTER fil.close() below, which raises on a
# closed file. Materialize into memory with [:] before closing.
tcga_go_bp_gslib = data['tcga_go_bp_gslib'][:]
tcga_genes = [ str(g[0])[2:-1] for g in meta['tcga_genes'] ]
curr_genes = [ str(g[0])[2:-1] for g in meta['go_bp_genes'] ]
curr_pheno = [ str(p[0])[2:-1] for p in meta['go_bp_pheno'] ]
fil.close()
common = list(set(tcga_genes) & set(curr_genes))
# np.matrix is deprecated; np.asarray yields the same values for display.
pd.DataFrame(np.asarray(tcga_go_bp_gslib))
common[0]
| auc_pipeline.ipynb |