code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''pyUdemy'': conda)'
# name: python3
# ---
from multiprocessing import Process
import multiprocessing as mp
import time
import math
# # Processes
# <div style="font-size:1.5em">
#
# The GIL wanted to protect shared data.
#
# Data is only shared between Threads but not Processes.
#
# Multi-Processing could speed up our program but:
#
# - Creating a process is slower than creating a thread
# - Each process has its own memory (copies are needed)
#
# </div>
# Large integers used as CPU-bound workloads for the primality checks below.
numbers = [
    1102023313711321,
    2102023313556361,
    2133132415198713,
    2456241020233131,
    2102124102331313,
    2102023151513137,
    2102023313112511,
]
def is_prime(n):
    """Return True when *n* is a prime number (trial division)."""
    if n < 2:
        return False
    small_primes = (2, 3, 5, 7)
    if n in small_primes:
        return True
    if any(n % p == 0 for p in small_primes):
        return False
    # Only odd candidates from 11 up to floor(sqrt(n)) can still divide n.
    for candidate in range(11, int(math.sqrt(n)) + 1, 2):
        if n % candidate == 0:
            return False
    return True
# One worker process per number (note: the variable name is misspelled; kept as-is).
procceses = [Process(target=is_prime, args=(n,)) for n in numbers]
# +
start = time.perf_counter_ns()
# NOTE(review): list comprehensions are used purely for their side effects here;
# plain for-loops would be more idiomatic.
[p.start() for p in procceses]
[p.join() for p in procceses]
end = time.perf_counter_ns()
print(f"time: {(end - start) / 1000000.0} ms")
# -
# Release the resources held by the finished Process objects.
[p.close() for p in procceses]
| Chapter13_ParallelCode/processes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import netCDF4 as nc
import pandas as pd
import numpy as np
# ### Purpose
# Grabbing hourly weather forecast data for 2019 for Florida, Bergen from MET THREDDS: https://thredds.met.no/thredds/catalog.html
# ### Define netCDF file names by specifying date and time
# +
# Build one OPeNDAP URL per 60-hour forecast cycle covering 2019.
thredds_urls = list()
for nc_dt in pd.date_range(start='2019-01-01 00:00', end='2019-12-31 23:00', freq='60H'):
    # Zero-padded month/day/hour are embedded twice in each archive path.
    current_url = 'https://thredds.met.no/thredds/dodsC/meps25epsarchive/2019/' + str(nc_dt.month).zfill(2) + '/' + \
        str(nc_dt.day).zfill(2) + '/meps_mbr0_pp_2_5km_2019' + str(nc_dt.month).zfill(2) + str(nc_dt.day).zfill(2) + \
        'T' + str(nc_dt.hour).zfill(2) + 'Z.nc'
    thredds_urls.append(current_url)
# -
# Peek at the first few generated URLs.
thredds_urls[:3]
# ### Find closest weather model point
# +
# Modified code from https://stackoverflow.com/questions/41336756/find-the-closest-latitude-and-longitude
from math import cos, asin, sqrt
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two (lat, lon) points.

    Haversine formula; 0.01745... converts degrees to radians and
    12742 km is the Earth's diameter.
    """
    deg2rad = 0.017453292519943295
    lat_term = 0.5 - cos((lat2 - lat1) * deg2rad) / 2
    lon_term = cos(lat1 * deg2rad) * cos(lat2 * deg2rad) * (1 - cos((lon2 - lon1) * deg2rad)) / 2
    return 12742 * asin(sqrt(lat_term + lon_term))
def closest(data, v):
    """Return (point, index) of the entry in *data* nearest to point *v*.

    *data* is a sequence of {'lat': ..., 'lon': ...} dicts, *v* a single
    such dict.  Distances are computed once per point; the original
    computed every distance twice (once for min(), once for np.argmin()).
    Ties resolve to the first occurrence, matching both min() and argmin().
    """
    distances = [distance(v['lat'], v['lon'], p['lat'], p['lon']) for p in data]
    closest_idx = int(np.argmin(distances))
    return data[closest_idx], closest_idx
# -
weather_station = {'lat': 60.383, 'lon': 5.3327} # Florida, Bergen (Norway)
# Grab the model grid coordinates from the first archive file.
nc_data = nc.Dataset(thredds_urls[0])
lats = nc_data.variables['latitude'][:]
lons = nc_data.variables['longitude'][:]
# +
# Flatten the 2-D grid into a list of {'lat', 'lon'} dicts for the nearest-point search.
lat_lon_model_points = list()
for lat, lon in zip(lats.flatten(), lons.flatten()):
    lat_lon_model_points.append({'lat': lat, 'lon': lon})
# -
lat_lon_model_points[0:5]
closest_point, closest_idx = closest(lat_lon_model_points, weather_station)
print('Closest point:', closest_point,
      '\nClosest index:', closest_idx)
print('Shape of lat and lon arrays:', lats.shape, lons.shape)
# Map the flat index back to (row, col); 869 is assumed to be the grid's
# column count — TODO confirm against lons.shape.
closest_row_idx = closest_idx // 869
closest_col_idx = closest_idx - (closest_row_idx * 869)
print(lats[closest_row_idx, closest_col_idx], lons[closest_row_idx, closest_col_idx])
# Check that model point is consistent throughout period (2019)
for url in thredds_urls:
    try:
        nc_data = nc.Dataset(url)
        lat = nc_data.variables['latitude'][closest_row_idx, closest_col_idx]
        lon = nc_data.variables['longitude'][closest_row_idx, closest_col_idx]
        # Expected coordinates were read off the first file; '|' is bitwise
        # but behaves like 'or' here on two scalar booleans.
        if (lat != 60.37710907397123) | (lon != 5.330741772326141):
            print('Inconsistent weather model point found:', lat, lon)
    except OSError as e:
        # Some archive files are missing/unreadable on THREDDS; report and continue.
        print(e)
# ### Get first 60 hours of temperature forecasts for every file
# +
df_list = list()
for url in thredds_urls:
    try:
        nc_data = nc.Dataset(url)
        # Repeat the single reference timestamp for all 60 forecast hours.
        forecast_ref_time = pd.to_datetime([nc_data['forecast_reference_time'][:].tolist()] * 60, unit='s')
        time = pd.to_datetime(nc_data['time'][0:60].tolist(), unit='s')
        # First 60 lead hours at the fixed grid point; convert Kelvin to Celsius.
        temp_kelvin = nc_data.variables['air_temperature_2m'][0:60, 0, closest_row_idx, closest_col_idx].tolist()
        temp_celsius = pd.Series([temp - 273.15 for temp in temp_kelvin])
        df_list.append(pd.DataFrame({'forecast_ref_time_utc': forecast_ref_time,
                                     'datetime_utc': time,
                                     'air_temperature_2m': temp_celsius}))
    except OSError as e:
        # Missing/unreadable files are skipped; gaps are filled later.
        print(e)
# -
# Stack all per-file forecasts into one frame.
df_weather_forecast = pd.concat(df_list, axis=0, ignore_index=True)
df_weather_forecast.head()
# Reindex onto a complete hourly 2019 range so gaps (missing files) become NaN rows.
df_weather_forecast = df_weather_forecast.set_index('datetime_utc').reindex(pd.date_range(start='2019-01-01 00:00',
                                                                                          end='2019-12-31 23:00', freq='H'))
df_weather_forecast.reset_index(inplace=True, drop=False)
df_weather_forecast[df_weather_forecast.air_temperature_2m.isna()].head()
# Fill each missing hour with the value from the same hour one day earlier.
for idx, _ in df_weather_forecast[df_weather_forecast.air_temperature_2m.isna()].iterrows():
    df_weather_forecast.at[idx, 'air_temperature_2m'] = df_weather_forecast.at[idx - 24, 'air_temperature_2m']
df_weather_forecast.iloc[1440:1445]
df_weather_forecast.rename({'index': 'datetime_utc'}, axis=1, inplace=True)
df_weather_forecast.head()
import os  # Fix: `os` was never imported in this notebook; without it the next line raises NameError.
os.listdir('../../data/weather/florida/csv')
df_weather_forecast.to_csv('../../data/weather/florida/csv/weather_forecast_florida_2019.csv', index=False)
| notebooks/get_weather_data/get_weather_forecasts_bergen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing Script
# In this notebook, we create the testing script for a trained model. This script is stored alone in a `scripts` directory both for ease of reference and because the Azure ML SDK limits the contents of this directory to at most 300 MB.
#
# The notebook cells are each appended in turn in the training script, so it is essential that you run the notebook's cells _in order_ for the script to run correctly. If you edit this notebook's cells, be sure to preserve the blank lines at the start and end of the cells, as they prevent the contents of consecutive cells from being improperly concatenated.
#
# The script sections are
# - [import libraries](#import),
# - [define utility functions and classes](#utility),
# - [define the script input parameters](#parameters),
# - [load and prepare the testing data](#data),
# - [load the trained pipeline](#pipeline),
# - [score the test data](#score), and
# - [compute the trained pipeline's performance](#performance).
#
# Start by creating the `scripts` directory, if it does not already exist.
# !mkdir -p scripts
# ## Load libraries <a id='import'></a>
# +
# %%writefile scripts/TestClassifier.py
from __future__ import print_function
import os
import argparse
import pandas as pd
from itertools import groupby
from sklearn.externals import joblib
from azureml.core import Run
import azureml.core
# -
# ## Define utility functions and classes <a id='utility'></a>
# +
# %%writefile --append scripts/TestClassifier.py
def cumulative_gain(y_true, y_pred, groups, max_gain=1.0, score_at=1):
    """
    Compute the normalized cumulative gain.
    This function assumes the data are sorted by groups.
    """
    gain = 0
    for _, group_rows in groupby(zip(groups, y_pred, y_true), key=lambda row: row[0]):
        # Rank the group's rows by predicted score, best first, and credit
        # the true labels of the top `score_at` rows.
        ranked = sorted(group_rows, key=lambda row: row[1], reverse=True)
        gain += sum(true_label for _, _, true_label in ranked[:score_at])
    return gain / max_gain
# -
# ## Define the input parameters <a id='parameters'></a>
# +
# %%writefile --append scripts/TestClassifier.py
if __name__ == '__main__':
    print('azureml.core.VERSION={}'.format(azureml.core.VERSION))
    # CLI: paths and names for the data, model, and evaluation depth.
    parser = argparse.ArgumentParser(description='Test a model.')
    parser.add_argument('--data-folder', help='the path to the data',
                        dest='data_folder', default='.')
    parser.add_argument('--inputs', help='the inputs directory',
                        default='data')
    parser.add_argument('--test', help='the test dataset name',
                        default='balanced_pairs_test.tsv')
    parser.add_argument('--outputs', help='the outputs directory',
                        default='outputs')
    parser.add_argument('--model', help='the model file base name',
                        default='model')
    parser.add_argument("--rank", help="the maximum rank of a correct match",
                        type=int, default=3)
    args = parser.parse_args()
# -
# ## Load and prepare the testing data <a id='data'></a>
# %%writefile --append scripts/TestClassifier.py
# Get a run logger.
run = Run.get_context()
# What to name the metric logged
metric_name = "accuracy"
print('Prepare the testing data.')
# Paths to the input data.
data_path = args.data_folder
inputs_path = os.path.join(data_path, args.inputs)
test_path = os.path.join(inputs_path, args.test)
# Define the input data columns.
feature_columns = ['Text_x', 'Text_y']
label_column = 'Label'
group_column = 'Id_x'
dupes_answerid_column = 'AnswerId_x'
questions_answerid_column = 'AnswerId_y'
score_column = 'score'
# Load the testing data.
print('Reading {}'.format(test_path))
test = pd.read_csv(test_path, sep='\t', encoding='latin1')
# Sort the data by groups, as cumulative_gain() requires.
test.sort_values(group_column, inplace=True)
# Report on the dataset.
print('test: {:,} rows with {:.2%} matches'
      .format(test.shape[0], test[label_column].mean()))
# Select and format the testing data.
test_X = test[feature_columns]
test_y = test[label_column]
# ## Load the trained model<a id='pipeline'></a>
# %%writefile --append scripts/TestClassifier.py
print('Load the model pipeline.')
# Paths for the model data.
outputs_path = args.outputs
model_path = os.path.join(outputs_path, '{}.pkl'.format(args.model))
print('Loading the model from {}'.format(model_path))
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# the standalone `joblib` package is the replacement — verify the pinned version.
model = joblib.load(model_path)
# ## Score the test data using the model <a id='score'></a>
# %%writefile --append scripts/TestClassifier.py
# Collect the model predictions.
print('Scoring the test data.')
# Probability of the positive class (column 1) becomes the ranking score.
test[score_column] = model.predict_proba(test_X)[:, 1]
# ## Report the model's performance statistics on the test data <a id='performance'></a>
# %%writefile --append scripts/TestClassifier.py
print("Evaluating the model's performance on the test data.")
metric_name = "gain"
# Best achievable gain: every true match credited.
max_gain = test[label_column].sum()
# Evaluate gain@1 .. gain@rank.
for i in range(1, args.rank+1):
    gain = cumulative_gain(y_true=test[label_column].values,
                           y_pred=test[score_column].values,
                           groups=test[group_column].values,
                           max_gain=max_gain,
                           score_at=i)
    print('{}@{} = {:.2%}'.format(metric_name, i, gain))
    # Log the gain@rank
    run.log("{}@{}".format(metric_name, i), gain)
# ## Run the script to see that it works <a id='run'></a>
# This will take about a minute.
# %run -t scripts/TestClassifier.py --rank 5
# In [the next notebook](03_Run_Locally.ipynb), we set up and use the AML SDK to run the training script.
| architectures/Python-ML-Training/02_Testing_Script.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Зависимости
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import ColumnTransformer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, plot_tree
from sklearn.metrics import mean_squared_error, f1_score
# -
# Generate a unique per-student seed from the author's name (kept within the 32-bit seed range)
my_code = "Olgashov"
seed_limit = 2 ** 32
my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit
# Read the data from file
example_data = pd.read_csv("datasets/Fish.csv")
example_data.head()
# Determine the size of the validation and test sets (20% of the data each)
val_test_size = round(0.2*len(example_data))
print(val_test_size)
# Create the training, validation and test sets
random_state = my_seed
train_val, test = train_test_split(example_data, test_size=val_test_size, random_state=random_state)
train, val = train_test_split(train_val, test_size=val_test_size, random_state=random_state)
print(len(train), len(val), len(test))
# +
# Rescale the numeric columns to the [0, 1] interval.
# Fit the scaler on the training set only, to avoid leaking test data.
num_columns = ['Weight', 'Length1', 'Length2', 'Length3', 'Height', 'Width']
ct = ColumnTransformer(transformers=[('numerical', MinMaxScaler(), num_columns)], remainder='passthrough')
ct.fit(train)
# -
# Transform each split; wrap the results back into DataFrames
sc_train = pd.DataFrame(ct.transform(train))
sc_test = pd.DataFrame(ct.transform(test))
sc_val = pd.DataFrame(ct.transform(val))
# Restore the column names (scaled numerics first, then the passthrough 'Species')
column_names = num_columns + ['Species']
sc_train.columns = column_names
sc_test.columns = column_names
sc_val.columns = column_names
sc_train
# +
# Задание №1 - анализ деревьев принятия решений в задаче регрессии
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
# criterion : {“mse”, “friedman_mse”, “mae”, “poisson”}, default=”mse”
# splitter : {“best”, “random”}, default=”best”
# max_depth : int, default=None
# min_samples_split : int or float, default=2
# min_samples_leaf : int or float, default=1
# +
# Pick 4 numeric variables at random: three predictors and one dependent variable
n = 4
labels = random.sample(num_columns, n)
y_label = labels[0]
x_labels = labels[1:]
print(x_labels)
print(y_label)
# +
# Slice out the chosen predictors/target from each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
# -
x_train
# Task: create 4 models with different split criteria: 'mse', 'friedman_mse', 'mae', 'poisson'.
# Solve the resulting regression problem with each model and compare their performance.
# Apply splitter, max_depth, min_samples_split, min_samples_leaf where necessary.
# State which model solves the problem best.
# NOTE(review): the 'mse' and 'mae' criterion names are deprecated/removed in newer
# scikit-learn ('squared_error' / 'absolute_error') — confirm the installed version.
criterion_list = ['mse', 'friedman_mse', 'mae', 'poisson']
r_model_list = []
for i in range(len(criterion_list)):
    r_model_list.append(DecisionTreeRegressor(criterion=criterion_list[i], random_state=random_state))
    r_model_list[i].fit(x_train, y_train)
    # R^2 score on the validation split
    print(i, r_model_list[i].score(x_val, y_val))
# Evaluate the chosen model (index 1: 'friedman_mse') on the held-out test set
test_pred = r_model_list[1].predict(x_test)
print(mean_squared_error(y_test, test_pred), r_model_list[1], sep='\n')
# +
# Задание №2 - анализ деревьев принятия решений в задаче классификации
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
# criterion : {“gini”, “entropy”}, default=”gini”
# splitter : {“best”, “random”}, default=”best”
# max_depth : int, default=None
# min_samples_split : int or float, default=2
# min_samples_leaf : int or float, default=1
# +
# Pick 2 numeric variables at random to serve as the features;
# the class label is always 'Species'
n = 2
x_labels = random.sample(num_columns, n)
y_label = 'Species'
print(x_labels)
print(y_label)
# +
# Slice out the chosen features/label from each split
x_train = sc_train[x_labels]
x_test = sc_test[x_labels]
x_val = sc_val[x_labels]
y_train = sc_train[y_label]
y_test = sc_test[y_label]
y_val = sc_val[y_label]
# -
x_train
# Task: create 4 models combining criterion ('gini', 'entropy') with
# splitter ('best', 'random'), solve the classification problem with each,
# compare their performance, and state which model is best.
# Apply max_depth, min_samples_split, min_samples_leaf where necessary.
import itertools
criterion = list(itertools.product(['gini', 'entropy'], ['best', 'random']))
c_model_list = []
for i in range(len(criterion)):
    # Pass criterion/splitter by keyword: positional use raises a TypeError
    # in modern scikit-learn, where estimator parameters are keyword-only.
    c_model_list.append(DecisionTreeClassifier(criterion=criterion[i][0],
                                               splitter=criterion[i][1],
                                               random_state=random_state))
    c_model_list[i].fit(x_train, y_train)
    # Accuracy on the validation split
    print(i, c_model_list[i].score(x_val, y_val))
# Evaluate the model at index 1 ('gini' + 'random') on the held-out test set
test_pred = c_model_list[1].predict(x_test)
f1 = f1_score(y_test, test_pred, average='weighted')
print(f1, c_model_list[1], sep='\n')
| 2021 Весенний семестр/Практическое задание 3/Олгашов.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
__author__ = 'aqeel'
# Note: remove this line to get the figure in a separate window
# %matplotlib inline
import matplotlib.pyplot as plt
import csv
import numpy as np
# Read the whole CSV into memory; 'rb' mode with csv.reader is Python 2 style
# (this notebook runs on a Python 2 kernel).
with open ('ncmp_1415_final_non_disclosive.csv','rb') as datafile:
    spamreader = csv.reader(datafile)
    data = list(spamreader)
# First row holds the column names; the rest becomes a string matrix.
col = np.array(data[0])
data = np.matrix(data[1:])
col
# Draw 5000 random row indices (with replacement) as a sample.
samplerows= np.random.randint(data.shape[0],size=5000)
sampledata =np.array( data[samplerows,:])
sampledata.shape
def ColIndex(colname):
    """Return the position of *colname* in the module-level ``col`` header array."""
    matches, = np.where(col == colname)
    return matches[0]
# NOTE(review): this indexes ROWS by the 'height' column position rather than
# selecting the column — it likely should be
# sampledata[sampledata[:, ColIndex('height')] < 0, ]; also the values are
# strings read from CSV, so the < 0 comparison probably needs a numeric cast. Verify.
negativedata = sampledata[sampledata[ColIndex('height')]<0,]
type(sampledata)
sampledata[:,0]
| HW04/ipythoncodeQ2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Advent of Code - after my previous solution, I wanted to set up a class.
# <NAME> 2021-12-04
import numpy as np
#
# Structure of l:list of strings. First line is a sequence of numbers to call.
# Line 2: newline
# After this, next five lines is a bingo board. Subsequent boards are separated by a newline
# Line 3-7 bingo board
def reset_boards(l, skip=5, start_line = 2):
    """Parse the raw input lines into a fresh list of BingoCard objects.

    Boards are `skip` lines tall and separated by one blank line; the first
    board starts at index `start_line`.  Board ids count up from 0.
    """
    return [BingoCard(l[start:start + skip], board_id)
            for board_id, start in enumerate(range(start_line, len(l), skip + 1))]
import numpy as np
class BingoCard:
    """A bingo board that marks called numbers as NaN and detects wins."""

    # Value a fully-marked row/column/diagonal nansum()s to.
    BINGO = 0

    def __init__(self, v, i = 0):
        # v: list of whitespace-separated number strings, one per board row.
        self.board = np.array([i.split() for i in v]).astype(float)
        self.boardsize = len(v)
        self.bingo = False
        self.diagonal_allowed = False
        self.boardid = i
        self.last_matched = np.nan
        self.number_for_bingo = np.nan

    def called_number(self, v):
        """Mark every cell equal to *v* (as NaN) and update the bingo state."""
        for i in range(self.boardsize):
            for j in range(self.boardsize):
                if self.board[i][j] == v:
                    self.last_matched = v
                    self.board[i][j] = np.nan
                    self.IsBingo()
                    if self.bingo:
                        self.number_for_bingo = v
        return

    def IsBingo(self):
        """Set self.bingo when any row, column, or (optionally) diagonal is fully marked."""
        if self.BINGO in [np.nansum(i) for i in self.board]:
            self.bingo = True
            return
        if self.BINGO in [np.nansum(i) for i in self.board.T]:
            self.bingo = True
            return
        if self.diagonal_allowed:
            diagonal_bingo = np.nansum(np.diag(self.board))
            diagonal1_bingo = np.nansum(np.diag(np.fliplr(self.board)))
            # Bug fix: the original compared against the bare name BINGO,
            # which raised NameError whenever diagonals were enabled.
            if diagonal_bingo == self.BINGO or diagonal1_bingo == self.BINGO:
                self.bingo = True
        return self.bingo

    def print_board(self):
        """Dump the board state.

        NOTE(review): the %d formats raise if last_matched / number_for_bingo
        are still NaN (i.e. before any match) — only call after a match.
        """
        print("Board ID %d" %self.boardid)
        print("\tBingo?\t%s" % self.bingo)
        print("\tLast number matched:\t%d" % self.last_matched)
        print("\tNumber for the bingo:\t%d" % self.number_for_bingo)
        print("\tSum:\t%d" % np.nansum(self.board))
        print(self.board)
        return
# Split a string into a list of ints; the call sequence uses a comma delimiter,
# the board rows use the default whitespace split.
def s(l, d=None):
    """Return the integers obtained by splitting *l* on delimiter *d*."""
    return list(map(int, l.split(d)))
file = "input_files/day04.txt"
with open(file,"r") as fp:
    tmp = fp.read()
# First line: comma-separated numbers to call; boards follow, blank-line separated.
l = tmp.split("\n")
sequence = s(l[0],',')
boards = reset_boards(l)
# part 1 - find the first board that's a winner.
Done = False
for i in sequence:
    for b in boards:
        b.called_number(i)
        if b.bingo:
            # Answer = winning number times the sum of the still-unmarked cells.
            print("Answer is %f" % (b.number_for_bingo *np.nansum(b.board)))
            # b.print_board()
            Done = True
            break
    if Done:
        break
# Part 2 - we want to find the last board that's a winner.
boards = reset_boards(l)
winners = set()
Done = False
for i in sequence:
    # print(i,np.sum([k.bingo for k in boards]))
    for b in boards:
        # Boards that already won are skipped so their state stays frozen.
        if b.boardid in winners:
            continue
        b.called_number(i)
        if b.bingo:
            # Squid wins
            winners.add(b.boardid)
            # Stop once every board has won; b is then the last winner.
            if len(boards) - len(winners) == 0:
                Done = True
                break
    if Done:
        print("Answer is %f" %
              (b.number_for_bingo *np.nansum(b.board)))
        b.print_board()
        break
| day04.ipynb |
# ---
# title: "Function Basics"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "Function basics in Python."
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Create Function Called print_max
def print_max(x, y):
    """Print which of *x* and *y* is the maximum, or that they are equal."""
    if x == y:
        print(x, 'is equal to', y)
    else:
        print(max(x, y), 'is maximum')
# ## Run Function With Two Arguments
print_max(3,4)
# Note: By default, variables created within functions are local to the function. But with the `global` keyword a function can rebind a variable that IS defined outside the function.
# ## Create Variable
x = 50
# ## Create Function Called Func
def func():
    """Rebind the module-level variable ``x`` and report it before and after."""
    global x
    print(f'x is {x}')
    x = 2
    print(f'Changed global x to {x}')
# ## Run func()
func()
# ## Print x
# After func(), the module-level x has been rebound to 2.
x
# ## Create Function Say() Displaying x with default value of 1
# +
# Create function
def say(x, times = 1, times2 = 3):
    """Print *x* repeated *times* and then *times2* times, space-separated."""
    print(times * x, times2 * x)

# Run the function say() with the default values
say('!')
# Run the function say() with the non-default values of 5 and 10
say('!', 5, 10)
# -
# ## VarArgs Parameters (i.e. unlimited number of parameters)
# - \* denotes that all positonal arguments from that point to next arg are used
# - \** dnotes that all keyword arguments from that point to the next arg are used
# +
# Create a function called total() with three kinds of parameters
def total(initial=5, *numbers, **keywords):
    """Return *initial* plus the sum of all positional and keyword-argument values."""
    return initial + sum(numbers) + sum(keywords.values())

# Run function
total(10, 1, 2, 3, vegetables=50, fruits=100)
| docs/python/basics/function_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## PiFace Digital 2
# Zorg dat je een Raspberry Pi gebruikt met de PiFace Digital 2 aangesloten en volg de onderstaande stappen één voor één.
#
# Eerst moeten we de Python interpreter vertellen dat we met de PiFace gaan werken en dat hij dus de (reeds geïnstalleerde) PiFace bibliotheek moet inladen.
#
# Zet hiervoor je cursor in de cel hieronder en druk op Shift + Enter om de code die erin staat uit te voeren.
import pifacedigitalio as pf
# Om ons wat typwerk te besparen hebben we bij het importeren de bibliotheek ook meteen hernoemd naar "pf" en zo zullen we ze vanaf nu ook aanroepen.
# Voor we het device zelf kunnen gebruiken, moeten we het initialiseren, zodat de bibliotheek alle beginparameters kan goedzetten.
#
# Druk opnieuw op Shift + Enter in de onderstaande cel.
# Initialise the PiFace board; must run before any digital_read/digital_write call.
pf.init()
# Nu begint het echte werk :-) De groene en oranje schroefterminals laten we nog even terzijde. We concentreren ons enkel op de knoppen en ledjes die al op het bord gesoldeerd zijn (en die weliswaar op dezelfde Raspberry Pi poorten aangesloten zijn als de terminals).
#
# Wat gebeurt er als je de onderstaande code uitvoert?
# Read digital input 0 (button S0): 0 when released, 1 when pressed.
pf.digital_read(0)
# Inderdaad, niet veel :-D
#
# Maar we krijgen wel een antwoord. De "Out[]" lijn is een manier voor IPython om ons te vertellen dat de functie die we uitgevoerd hebben een resultaat teruggeeft en in dit geval is dat "0".
# In dit geval is het de PiFace Digital 2 bibliotheek die ons vertelt dat het resultaat van het digitaal ("digital") uitlezen ("read") van de eerste knop (input "0") ook 0 is.
#
# Houd nu knop 0 (S0, het knopje het dichtst bij de scheiding tussen de groene en gele schroefterminals) ingedrukt en voer tegelijkertijd hieronder nogmaals dezelfde code uit:
# Read input 0 again — returns 1 while S0 is held down.
pf.digital_read(0)
# 1?
#
# Proficiat! Dit was je eerste succesvolle manipulatie van programmeerbare electronica!
#
# Dan kunnen we nu een stapje verder gaan en een functie met 2 variabelen aanroepen. Probeer eens te bedenken wat de volgende functie zou kunnen doen, voor je ze aanroept.
# Drive output 7 high: turns on the LED wired to the eighth output port.
pf.digital_write(7, 1)
# Met deze functie zetten we dus de waarde van output "7" op "1". Oftewel, we zetten de LED, aangesloten op de achtste poort (computers beginnen altijd van nul te tellen) op "AAN".
#
# Wat hier eigenlijk gebeurt, is dat we spanning zetten op poort 7 en er dus stroom begint te vloeien door de LED die erop is aangesloten.
#
# Zetten we de spanning weer op 0, dan stopt de stroom en de LED gaat uit:
# Drive output 7 low again, switching the LED off.
pf.digital_write(7, 0)
# Door bibliotheken, functies en structuren samen te voegen, kunnen we programma's schrijven; van heel eenvoudig tot complex. Hieronder een eenvoudig voorbeeld :-)
#
# De functie sleep kunnen we inladen vanuit de time bibliotheek (die onderdeel is van de standaard Python installatie). Met deze functie kan je de computer een aantal seconden (in dit geval 0.4 seconden) laten wachten.
#
# Probeer eens in te schatten wat de code juist doet en voer ze dan uit.
# +
from time import sleep
# Blink the LED on output 7 (~0.8 s per cycle) until button S3 is pressed.
while(True):
    pf.digital_write(7, 1)
    sleep(0.4)
    pf.digital_write(7, 0)
    sleep(0.4)
    # Poll button 3 once per cycle; a press (1) ends the loop.
    if (pf.digital_read(3) == 1):
        break
# -
# Tip: houd knop S3 (die het dichtst bij de netwerkaansluiting) even ingedrukt (meer dan 0.8 seconden is niet nodig)
# Release the PiFace resources acquired by pf.init().
pf.deinit()
# +
# pf.InputEventListener?
# +
# Fix: the original `def button_3_pressed` line was a syntax error (missing
# parentheses, colon, and body), the listener used the full module name
# `pifacedigitalio` although it was imported as `pf`, and it registered an
# undefined callback `print_flag`.
def button_3_pressed(event):
    """Callback fired when button S3 generates an ON edge."""
    print(event)

button_3_listener = pf.InputEventListener()
button_3_listener.register(3, pf.IODIR_ON, button_3_pressed)
button_3_listener.activate()
| notebooks/nl-be/PiFace Digital 2 - Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description
# Can we forecast using the Pythagorean Expectation?
#
# 1. Divide a season into 2 halves
# 2. Take Pyth-Expec from 1st half, and see how well it fits with the win percentage in the 2nd half.
# 3. Control: how well does the wpc for the 1st half fit for the 2nd half?
# +
# # %load ./imports.py
# # %load /Users/bartev/dev/github-bv/sporty/notebooks/imports.py
## Where am I
# !echo $VIRTUAL_ENV
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# magics
# %load_ext blackcellmagic
# start cell with `%%black` to format using `black`
# %load_ext autoreload
# start cell with `%autoreload` to reload module
# https://ipython.org/ipython-doc/stable/config/extensions/autoreload.html
# reload all modules when running
# %autoreload 2
# imports
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import seaborn as sns
from importlib import reload
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# https://plotnine.readthedocs.io/en/stable/
import plotnine as p9
from plotnine import ggplot, aes, facet_wrap
from src.utils import lower_case_col_names
# -
# # Read data
# Directory holding the raw week-1 baseball files.
data_dir = Path('../data/raw/wk1-baseball/')
# Retrosheet game logs ship without a header row; column names live in a separate file.
with open(data_dir / 'retrosheet-gamelog-header.txt', 'r') as f:
    retro_cols = f.read().split(',')
MLB = pd.read_csv(data_dir / 'GL2018.csv', names=retro_cols)
print(MLB.shape)
MLB.head()
# Keep only the columns needed; count=1 gives a per-game counter for later group sums.
mlb18 = (
    MLB[["VisitingTeam", "HomeTeam", "VisitorRunsScored", "HomeRunsScore", "Date"]]
    .rename(columns={"VisitorRunsScored": "VisR", "HomeRunsScore": "HomR"})
    .assign(count=1)
)
mlb18
# + active=""
# # Performance when home team
# mlb18.melt(value_vars=['VisitingTeam', 'HomeTeam'], id_vars=['VisR', 'HomR', 'Date', 'count'])
# -
# Performance when home team: R = runs scored (home), RA = runs against.
mlb_home = (
    mlb18[["HomeTeam", "HomR", "VisR", "count", "Date"]]
    .assign(home=1)
    .rename(columns={"HomeTeam": "team", "VisR": "RA", "HomR": "R"})
)
mlb_home
# Performance when away team: R/RA swap relative to the home view.
mlb_away = (
    mlb18[["VisitingTeam", "VisR", "HomR", "count", "Date"]]
    .assign(home=0)
    .rename(columns={"VisitingTeam": "team", "VisR": "R", "HomR": "RA"})
)
mlb_away
# Stack both views so each game contributes one row per team; win = scored more runs.
mlb = (pd.concat([mlb_home, mlb_away])
       .assign(win=lambda x: np.where(x['R'] > x['RA'], 1, 0)))
print(mlb.shape)
mlb.head()
# ## Split season midway (at the date of the All Star game)
#
# 2018-07-17
# Split the season at the All-Star break (2018-07-17).
half1 = mlb.query("Date < 20180717")
half2 = mlb.query("Date >= 20180717")
half1.describe()
half2.describe()
# Per-team totals for each half, with win percentage and Pythagorean
# expectation R^2 / (R^2 + RA^2).
# Fix: column selection after groupby must be a list; the original passed a
# bare tuple ('count', 'win', ...), which modern pandas rejects.
half1_perf = (half1.groupby('team')[['count', 'win', 'R', 'RA']].sum().reset_index()
              .rename(columns={'count': 'count1', 'win': 'win1', 'R': 'R1', 'RA': 'RA1'})
              .assign(wpc1=lambda x: x['win1'] / x['count1'],
                      pyth1=lambda x: x['R1']**2 / (x['R1']**2 + x['RA1']**2)))
half1_perf
half2_perf = (half2.groupby('team')[['count', 'win', 'R', 'RA']].sum().reset_index()
              .rename(columns={'count': 'count2', 'win': 'win2', 'R': 'R2', 'RA': 'RA2'})
              .assign(wpc2=lambda x: x['win2'] / x['count2'],
                      pyth2=lambda x: x['R2']**2 / (x['R2']**2 + x['RA2']**2)))
half2_perf
# Join the two halves on team so first-half metrics can predict second-half wpc.
half2_predictor = pd.merge(half1_perf, half2_perf, on='team')
half2_predictor.head()
# # Plot performance
# Scatter: first-half Pythagorean expectation vs. first-half win percentage.
sns.relplot(x='pyth1', y='wpc1', data=half2_predictor)
sns.relplot(x='wpc1', y='wpc2', data=half2_predictor)
# # Correlation between key variables
keyvars = half2_predictor[['team', 'wpc2', 'wpc1', 'pyth1', 'pyth2']]
keyvars.corr()
# ## Interpretation
#
# * variable of interest on row index
# * what's the correlation between the wpc2 and the wpc1, and between wpc2 and pyth1?
# * correlation between wpc2 and pyth1 is HIGHER than that between wpc2 and wpc1.
#
#
# This is why the Pythagorean expectation (proposed by Bill James) is a better predictor than win percentage for future performance
keyvars.sort_values(by=['wpc2'], ascending=False)
sns.relplot(y='wpc2', x='pyth1', data=keyvars)
| notebooks/05-Pythagorean-Expectation-as-predictor-in-MLB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Repository Classifier - SpaCy
#
# ### Author: <NAME>
# #### June 2021
#
# ## Obtaining the Readme Files
# ## Loading the Data
# +
import pandas as pd
import numpy as np
import json
# Load py2neo
import py2neo
from py2neo import Graph
from py2neo.matching import *
# Self created functions
import config as cfg
# turn off warnings
np.seterr(divide = 'ignore')
# Decoder
import base64
# API
import requests
# -
# Connect to the local Neo4j instance (credentials come from config.py).
graph = Graph("bolt://localhost:7687", auth=(cfg.neo4j['auth']), bolt=True, password=(cfg.neo4j['password']))
graph
# Fetch data-category and code-repository properties annotated under subject 314.
sub_data = graph.run('''MATCH (s:SUBJECT)<-[:hasSubject]-(a:ANNOTATION)-[]->(dc:dataCat)\
WHERE s.id = 314 \
MATCH (dc)<-[:Target]-(:ANNOTATION)-[:Target]->(cr:codeRepo)\
RETURN distinct properties(dc), properties(cr), s.id''').data()
# Column accumulators for the flattened query results.
helper_dict = {'dacat': [],
               'dacat_name': [],
               'meta': [],
               'cr_item': [],
               'cr_name': [],
               'repo_url': [],
               'repo_readme': []
               }
# Fix: the original looped over range(0, len(sub_data) - 1), silently dropping
# the last query result; it also appended 'meta' before parsing it, so a
# KeyError mid-record left the columns with unequal lengths.
for record in sub_data:
    helper_dict['dacat'].append(record['properties(dc)']['id'])
    helper_dict['dacat_name'].append(record['properties(dc)']['name'])
    try:
        meta = record['properties(cr)']['meta']
        json_data = json.loads(meta)
        row = (meta,
               json_data['id'],
               json_data['name'],
               json_data['url'],
               json_data['readme']['readme']['readme'])
    except KeyError:
        # Records without (complete) repository metadata get placeholders so
        # every column stays the same length.
        row = ("Missing",) * 5
    for key, value in zip(('meta', 'cr_item', 'cr_name', 'repo_url', 'repo_readme'), row):
        helper_dict[key].append(value)
data = pd.DataFrame(helper_dict)
# Drop rows without repository metadata and keep a tidy column order.
data = data[data['cr_item'] != "Missing"]
data = data[['dacat', 'dacat_name', 'cr_item', 'cr_name', 'repo_url', 'repo_readme']].reset_index(drop=True)
data.head()
# NOTE(review): this keeps only rows whose 'repo_readme' value is literally
# True (an availability flag from the metadata) — verify that intent.
data = data[data['repo_readme'] == True]
token = cfg.github_api['secret']
def get_readme(url, token = token):
    """Fetch and decode the README.md of a GitHub repository.

    Parameters
    ----------
    url : str
        Repository URL of the form https://github.com/<owner>/<repo>.
    token : str
        GitHub API token used for authentication (defaults to the
        module-level token loaded from config.py).

    Returns
    -------
    bytes or str
        Decoded README content, or the string "Missing" when the request
        fails, the repo has no README, or the payload cannot be decoded.
    """
    url_to_api_endpoint = url.replace('https://github.com/', '')
    new_url = 'https://api.github.com/repos/' + url_to_api_endpoint + '/contents/README.md'
    headers = {'Authorization': f'token {token}', 'accept': 'application/JSON'}
    # FIX: the original bare `except` also swallowed KeyboardInterrupt and
    # SystemExit. Only the expected failures are caught now: network errors,
    # a missing 'content' key (e.g. 404 response), and bad JSON/base64
    # payloads (json() and b64decode raise ValueError subclasses).
    try:
        readme = requests.get(new_url, headers=headers).json()
        readme = base64.b64decode(readme['content'])
    except (requests.RequestException, KeyError, ValueError):
        readme = "Missing"
    return readme
# Download the readme for every repository (one GitHub API call per row).
data['readme_content']= data['repo_url'].apply(lambda x: get_readme(x))
data.head()
# Persist the enriched table for the downstream classifier.
data.to_csv('data/repository_readme_files.csv')
| Repo_Classifier_Readme_Getter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.016709, "end_time": "2020-08-21T13:45:56.609582", "exception": false, "start_time": "2020-08-21T13:45:56.592873", "status": "completed"} tags=[]
# %%writefile test.py
import pandas as pd
import numpy as np
import cv2
import os
import re
import albumentations as A
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.sampler import SequentialSampler
from PIL import Image
from albumentations.pytorch.transforms import ToTensorV2
from matplotlib import pyplot as plt
from tqdm import tqdm
# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
DIR_INPUT = '../input/rsna-pneumonia-detection-2018/input'
DIR_TEST = f"{DIR_INPUT}/samples"
test_images = os.listdir(DIR_TEST)
print(f"Validation instances: {len(test_images)}")
# load a model; pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True, min_size=1024)
num_classes = 2 # 1 class (pneumonia) + background
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
os.makedirs('../validation_predictions', exist_ok=True)
# Load the fine-tuned weights produced by the companion training notebook.
model.load_state_dict(torch.load('../input/rsna-pytorch-hackathon-fasterrcnn-resnet-training/fasterrcnn_resnet50_fpn.pth'))
model.to(device)
def format_prediction_string(boxes, scores):
    """Return space-separated "score x y w h" tokens for one image."""
    parts = []
    for score, box in zip(scores, boxes):
        parts.append("{0:.4f} {1} {2} {3} {4}".format(
            score,
            int(box[0]), int(box[1]),
            int(box[2]), int(box[3])))
    return " ".join(parts)
detection_threshold = 0.9  # keep only detections scoring at least this
results = []
cpu_device = torch.device("cpu")

model.eval()
with torch.no_grad():
    for i, image_name in tqdm(enumerate(test_images), total=len(test_images)):
        # Load and normalise the image to a (1, C, H, W) float tensor in [0, 1].
        orig_image = cv2.imread(f"{DIR_TEST}/{image_name}", cv2.IMREAD_COLOR)
        image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        # FIX: np.float was removed in NumPy 1.24; float32 matches the model input.
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        # FIX: use the `device` selected above instead of hard-coded .cuda(),
        # so the script also runs on CPU-only machines.
        image = torch.tensor(image, dtype=torch.float).to(device)
        image = torch.unsqueeze(image, 0)
        outputs = model(image)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        if len(outputs[0]['boxes']) != 0:
            # FIX: the original wrapped all of this in a `for counter` loop,
            # re-running the post-processing and appending a duplicate
            # submission row once per raw detection; run it once per image.
            boxes = outputs[0]['boxes'].data.cpu().numpy()
            scores = outputs[0]['scores'].data.cpu().numpy()
            keep = scores >= detection_threshold
            boxes = boxes[keep].astype(np.int32)
            # FIX: also filter the scores so they stay aligned with the boxes.
            scores = scores[keep]
            draw_boxes = boxes.copy()
            # Convert [x1, y1, x2, y2] to [x, y, width, height] for the submission.
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
            # Save a visualisation with the kept detections drawn in red.
            for box in draw_boxes:
                cv2.rectangle(orig_image,
                              (int(box[0]), int(box[1])),
                              (int(box[2]), int(box[3])),
                              (0, 0, 255), 3)
            plt.imshow(cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB))
            plt.axis('off')
            plt.savefig(f"{image_name}")
            plt.close()
            result = {
                'patientId': image_name.split('.')[0],
                'PredictionString': format_prediction_string(boxes, scores)
            }
            results.append(result)
        else:
            # No detections at all for this image: emit an empty prediction row.
            result = {
                'patientId': image_name.split('.')[0],
                'PredictionString': None
            }
            results.append(result)
sub_df = pd.DataFrame(results, columns=['patientId', 'PredictionString'])
print(sub_df.head())
sub_df.to_csv('submission.csv', index=False)
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 737.939886, "end_time": "2020-08-21T13:58:14.554455", "exception": false, "start_time": "2020-08-21T13:45:56.614569", "status": "completed"} tags=[]
# !python test.py
# + papermill={"duration": 1.041041, "end_time": "2020-08-21T13:58:16.614028", "exception": false, "start_time": "2020-08-21T13:58:15.572987", "status": "completed"} tags=[]
# + papermill={"duration": 1.294133, "end_time": "2020-08-21T13:58:19.078096", "exception": false, "start_time": "2020-08-21T13:58:17.783963", "status": "completed"} tags=[]
| image/4. RSNA Pneumonia Detection Challenge/kaggle/rsna-pytorch-hackathon-fasterrcnn-resnet-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import datetime
from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt
# Monthly ration-distribution data: one row per mandal, one column per month.
data = pd.read_csv(r"C:\Users\Bhargava\Downloads\Ration.csv")
sns.set_context('notebook')
sns.set_style('ticks')
# Shared plot colours.
red='#D62728'
blue='#1F77B4'
# %matplotlib inline
data.head()
# Month labels for the 16 data columns (Nov-2017 .. Feb-2019). Generating
# them replaces the 16 hand-written datetime.date literals the original
# spelled out one by one, and makes the range trivial to extend.
month_columns = [ts.date() for ts in pd.date_range('2017-11-01', periods=16, freq='MS')]
#data = data.drop(['Sl No.'],axis=1)
data.columns = ['Sl No.', 'Mandal'] + month_columns
data.head()
# Transpose so each column is one mandal's time series (rows = months).
data1 = data.transpose()
data1.head()
# Series for the mandal at positional index 1.
# NOTE(review): after the transpose, the first two rows of y1 are the
# 'Sl No.' and 'Mandal' label values — confirm they were meant to be
# included in the training window below.
y1 = data1[1].copy()
y1.head()
y1.plot.line()
plt.show()
# Hold out everything after the first 11 points for evaluation.
train = y1.iloc[:11]
test = y1.iloc[11:]
train_data = np.asarray(train)
test_data = np.asarray(test)
y1_data = np.asarray(y1)
test.index
# Simple exponential smoothing on the training series: one fit with an
# optimised alpha plus two fits with fixed smoothing levels for comparison.
model = SimpleExpSmoothing(train_data)
model._index = pd.to_datetime(train.index)
fit1 = model.fit()
pred1 = fit1.forecast(5)
fit1.summary()
fit2 = model.fit(smoothing_level=.2)
pred2 = fit2.forecast(5)
# FIX: the original repeated fit1.summary() here; show fit2's summary.
fit2.summary()
fit3 = model.fit(smoothing_level=.5)
pred3 = fit3.forecast(5)
# Plot training data, held-out data (gray), and for each fit its in-sample
# fitted values plus its 5-step forecast.
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(train.index, train.values)
ax.plot(test.index, test.values, color="gray")
for p, f, c in zip((pred1, pred2, pred3), (fit1, fit2, fit3), ('#ff7823', '#3c763d', 'c')):
    ax.plot(train.index, f.fittedvalues, color=c)
    ax.plot(test.index, p, label="alpha="+str(f.params['smoothing_level'])[:3], color=c)
plt.title("Simple Exponential Smoothing")
plt.legend();
# Holt's linear-trend smoothing on the same train/test split.
model = Holt(np.asarray(train_data))
model._index = pd.to_datetime(train.index)
# FIX: removed the stray, syntactically invalid line `train_index.` that
# made this cell fail before any fitting could run.
fit1 = model.fit(smoothing_level=.3, smoothing_slope=.05)
pred1 = fit1.forecast(4)
fit2 = model.fit(optimized=True)
pred2 = fit2.forecast(4)
fit3 = model.fit(smoothing_level=.3, smoothing_slope=.2)
pred3 = fit3.forecast(4)
# Plot training data, held-out data (gray), and each fit's fitted values
# plus its 4-step forecast.
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(train.index, train.values)
ax.plot(test.index, test.values, color="gray")
for p, f, c in zip((pred1, pred2, pred3),(fit1, fit2, fit3),('#ff7823','#3c763d','c')):
    ax.plot(train.index, f.fittedvalues, color=c)
    ax.plot(test.index, p, label="alpha="+str(f.params['smoothing_level'])[:4]+", beta="+str(f.params['smoothing_slope'])[:4], color=c)
plt.title("Holt's Exponential Smoothing")
plt.legend();
# +
# Compare an undamped vs. a damped multiplicative-trend model.
model = ExponentialSmoothing(train_data, trend='mul', seasonal=None)
model2 = ExponentialSmoothing(train_data, trend='mul', seasonal=None, damped=True)
model._index = pd.to_datetime(train.index)
fit1 = model.fit()
fit2 = model2.fit()
pred1 = fit1.forecast(4)
pred2 = fit2.forecast(5)
# NOTE(review): pred1 forecasts 4 steps while pred2 forecasts 5, so at most
# one of the RMSE lines below can line up with len(test) — confirm the
# intended horizon and use the same value for both forecasts.
sse1 = np.sqrt(np.mean(np.square(test.values - pred1)))
sse2 = np.sqrt(np.mean(np.square(test.values - pred2)))
# Two panels: full view on top, intended zoomed view below.
fig, ax = plt.subplots(2, figsize=(12, 12))
ax[0].plot(train.index, train.values)
ax[0].plot(test.index, test.values, color="gray", label="truth")
ax[1].plot(train.index, train.values)
ax[1].plot(test.index, test.values, color="gray", label="truth")
for p, f, c in zip((pred1, pred2),(fit1, fit2),('#ff7823','#3c763d')):
    ax[0].plot(train.index, f.fittedvalues, color=c)
    ax[1].plot(train.index, f.fittedvalues, color=c)
    ax[0].plot(test.index, p, label="alpha="+str(f.params['smoothing_level'])[:4]+", beta="+str(f.params['smoothing_slope'])[:4]+ ", damping="+str(True if f.params['damping_slope']>0 else False), color=c)
    ax[1].plot(test.index, p, label="alpha="+str(f.params['smoothing_level'])[:4]+", beta="+str(f.params['smoothing_slope'])[:4]+ ", damping="+str(True if f.params['damping_slope']>0 else False), color=c)
ax[0].set_title("Damped Exponential Smoothing");
ax[1].set_title("Damped Exponential Smoothing - zoomed");
plt.legend();
# -
from matplotlib import pyplot
# Reload the raw CSV for the ARIMA analysis below.
series = pd.read_csv(r'C:\Users\Bhargava\Downloads\Ration.csv')
print(series.head())
series.plot()
pyplot.show()
#series = series.drop(['Sl No.'],axis=1)
series.head()
# Time series for the mandal in row 1: transpose and drop the name label row.
m1 = series.iloc[[1]]
m1 = m1.transpose()
m1 = m1.drop(['Mandal'])
m1.head()
m1.plot()
pyplot.show()
# FIX: pandas.tools.plotting was removed in pandas 0.25; the function now
# lives in pandas.plotting.
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(m1)
pyplot.show()
# NOTE(review): statsmodels.tsa.arima_model.ARIMA is deprecated and was
# removed in statsmodels 0.13; newer code should use
# statsmodels.tsa.arima.model.ARIMA — confirm the installed version.
from statsmodels.tsa.arima_model import ARIMA
#m1 = m1.dropna(inplace=True)
m1_data = np.asarray(m1)
m1.head()
# Fit an ARIMA(5,1,0) on the full series and inspect the residuals.
model = ARIMA(m1_data, order=(5,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
pyplot.show()
residuals.plot(kind='kde')
pyplot.show()
print(residuals.describe())
from sklearn.metrics import mean_squared_error
# Walk-forward validation: refit on the growing history and forecast one
# step ahead for every point in the ~34% hold-out.
X = m1.values
size = int(len(X) * 0.66)
train, test = X[0:size], X[size:len(X)]
history = [x for x in train]
predictions = list()
for t in range(len(test)):
    model = ARIMA(history, order=(5,1,0))
    # start_params seeds the optimiser; presumably chosen to aid
    # convergence on this short series — TODO confirm.
    model_fit = model.fit(disp=0, start_params=[.1, .1, .1,.1,.1,.1])
    output = model_fit.forecast()
    yhat = output[0]
    predictions.append(yhat)
    obs = test[t]
    history.append(obs)
    print('predicted=%f, expected=%f' % (yhat, obs))
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
# plot
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show()
| Notebooks/Inventory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numpy
# + tags=[]
import numpy as np
# -
# The core of the `numpy` package is the `array` class. Let's examine that first. We can make an array out of a sequence, like a list.
d = [1, 2, 3, 4, 5]
np.array(d)
# ### data types
#
# Unlike lists, arrays must be homogeneous, in that the data types of each element must be the same. The data type of the array is upcast to be able to represent all of the data. So, if only one element is a float, all elements will be converted to floats.
d = [1, 2, 3.1415, 4, 5]
np.array(d)
# You can query the datatype by examining the `dtype` attribute of the array.
d = [1, 2, 3.1415, 4, 5]
arr = np.array(d)
arr.dtype
# Array types may be defined explicity in the call
arr = np.array([1, 2, 3, 4, 5], dtype='float32')
arr
# Complex numbers are noted with a lowercase `j` or uppercase `J`, like this
cmplx = np.array([1.0+2.0j, 3.0])
print(cmplx)
cmplx.dtype
# As we have seen before, arrays are like multidimensional sequences. We can create a 2D array by supplying a list of lists as the argument.
arr = np.array([[1., 2., 3.,], [4., 5., 6.]])
arr
# ### Array attributes
#
# Arrays have a few other important attributes. Note attributes never have parentheses after them. Methods always do.
arr.size # The number of elements in the array
arr.shape # The shape of the array (i.e., the size of each dimension)
arr.ndim # The number of dimensions of the array
# ### Setting array shape
#
# You can set the `array.shape` attribute to change the shape of the array. This attribute does not change the elements of the array, or how it is stored in memory, just how it is seen.
arr.shape = (3, 2)
arr
arr.shape = (6,)
arr
# Singleton dimensions add to the dimensionality of an array. The last example was a 1D array (also called a vector), the next are 2D arrays.
arr.shape = (1, 6)
arr # Note that there are *two* square brackets in the output sequence. This is a row vector.
arr.shape = (6, 1)
arr # this is also a 2D array, like a column vector
# ## Array indexing
#
# Arrays are indexed in a similar way to sequences, with `start:stop:stride` notation, except that this is used for each dimension in the array. Colons denote all the values in a particular dimension, slices indicate some particular subset of the data in that particular dimension.
#
# A common use case is to get a single row or column from a 2D array (a table of data).
arr = np.arange(60).reshape(6, 10)
arr
arr[:, 4] # the 5th column
arr[2, :] # the 3rd row
arr[2] # Trailing colons do not need to be explicitly typed. This is equivalent to the last example.
arr[4, 7] # an individual element in the table
# ---
# ### *Exercise*
#
# > Slices can be combined in any way. Define a new array or use array `arr` and grab out every other row and the 4th column and beyond.
#
# ---
# + tags=[]
arr = np.arange(60).reshape(6,10)
arr[:,4:]
# -
# ### Conventions concerning arrays containing spatio-temporal information
#
# Generally, you will want to think of arrays as representing dimensions in space and time. The conventional way to think of this is that the dimensions are $(t, z, y, x)$; missing dimensions are omitted. This will help make plotting and analysis easier. Some examples might be:
#
# temp[:, :, :, :] # A 4D array (time, height, latitude, longitude)
# press[:, :] # A 2D array (time, height)
# humid[:, :] # A 2D array (latitude, longitude)
# ## Array methods
#
# Arrays have a number of methods. Let's take a look at the `mean` method as an example.
# +
arr = np.array([[1., 2., 3.,], [4., 5., 6.]]) # reset the array to our 2x3 array.
arr.mean() # The mean of all of the elements in the array
# -
# `Mean` takes the optional argument `axis` that can be used to take the mean along a single axis of the array. Just like with indexing, the axes are reference in a zero-based system; `axis=0` means the first dimension.
arr.mean(axis=0) # The mean
# In this case, there are two rows in the first dimension, and `arr.mean(axis=0)` takes the average in the 'row' direction, resulting in a 1D array that is the average across the rows.
# ---
# ### *Exercise*
#
# > Find the mean of the array in the 'column' direction, along `axis=1`.
#
# > Use the `sum` method of the array class to get the sum of the numbers in each column. The result should be a 1D array with three elements.
#
# ---
# + tags=[]
print(arr.mean(axis=1))
print(arr.sum())
# -
# You can also use the `reshape` method to change the shape of an array.
arr
arr.reshape(3, 2)
# You can find the minimum and maximum of an array with the `min` and `max` methods. Sometimes it is useful to find the indices of these minima and maxima. For this use `argmin` and `argmax`, like
x = np.random.rand(10)
imax = x.argmax()
print(imax, x[imax], x.max())
# ## Creating standard arrays
#
# There are a few standard arrays, for example, arrays filled with zeros or ones (or empty). Here are some examples of creating arrays.
# +
o = np.ones((3, 4, 5)) # The argument is a shape, so is a tuple with the length of each dimension as an argument
b = np.ones((2, 3), dtype=bool)
z = np.zeros((2, 3), dtype=np.float32)
b
# -
# You can also create these arrays with the same shape and datatype of the input array using `np.ones_like` and `np.zeros_like`.
zb = np.zeros_like(b)
ob = np.ones_like(o)
ob
# You can also create a diagonal array with a given vector along the diagonal. These can be offset with an optional argument `k` (default=0). This example creates a tri-diagonal array similar to that used for finite difference calculations
np.diag(-2*np.ones(6)) + np.diag(np.ones(5), k=-1) + np.diag(np.ones(5), k=1)
# There are also a number of ways to generate sequences of numbers.
# - `np.arange([start,] stop [[, stride]])` Create a sequence of numbers, similar to `range`
# - `np.linspace(min, max, length)` Create a uniform series of specified `length` between `min` and `max`, inclusive.
# - `np.logspace(minpow, maxpow, length)` Create a uniform series in logspace of specified `length` between `10**minpow` and `10**maxpow`, inclusive.
#
np.arange(10.)
np.arange(2, 10, 2.3)
np.linspace(2, 4, 17)
# You can create arrays of random numbers easily with methods in `np.random`.
#
# * `np.random.rand(d0, d1, ..., d2)`: Create an array of the given shape `d0, ..., dn` and populate it with random samples from a uniform distribution over [0,1).
# * `np.random.randint(low, high=None, size=None)`: Return random integers from `low` (inclusive) to `high` (exclusive). If `high` is None then return integers from [0, `low`). `size` is an int or tuple of ints to give the output shape.
# * `np.randon.randn(d0, d1, ..., d2)`: Create an array of the given shape `d0, ..., dn` and populate it with random samples from the "standard normal" distribution.
# * `np.random.random(size=None)`: Return random floats of `size` (int or tuple of ints) in the interval [0, 1).
np.random.rand(2, 4)
np.random.randint(1, 50, (2, 4))
# ---
# ### *Exercise*
#
# > Create an array of random floats between 0 and 1 that has dimension 5 x 3. Calculate the standard deviation of the columns of the array. Then add to this a `linspace` array of the appropriate size that contains numbers between 10 and 15.
#
# ---
# + tags=[]
# -
# ## Combining and splitting arrays
#
# Generally, arrays can be combined with the `np.concatenate` function. The arguments are a sequence of arrays to join, and the axis along which to join them (default=0).
#
#
#
# +
x = np.random.rand(4, 5, 6)
y = np.random.rand(4, 5, 6)
print(np.concatenate((x, y)).shape)
print(np.concatenate((x, y), axis=0).shape)
print(np.concatenate((x, y), axis=1).shape)
print(np.concatenate((x, y), axis=2).shape)
# -
# There are a number of convenience functions that act like concatenate for specific axes:
#
# - `np.vstack` – vertical stack (stack along axis=0)
# - `np.hstack` – horizontal stack (stack along axis=1)
# - `np.dstack` – depth stack (stack along axis=2)
#
print(np.vstack((x, y)).shape)
print(np.hstack((x, y)).shape)
print(np.dstack((x, y)).shape)
# Likewise, arrays can be split with `np.split` or `np.array_split`. There are also convenience functions to split horizontally, vertically, and with depth.
x = np.random.rand(12, 2, 5)
[a.shape for a in np.split(x, 4, axis=0)]
# ---
# ### *Exercise*
#
# > Create an array, A, of shape (40, 50, 60). The array slices for first ten entries in the axis=1 direction of A should be filled with 1's, for the next ten filled with 2's, and on up to 5's.
#
# > Split it along axis=1 into five sections.
#
# > Concatenate two of these back together along axis 1.
#
# > What is the resulting shape of each array? _[Advanced: can you calculate this on one line?]_
#
# ---
# ## Finding values
#
# There are a number of ways to find values in an array. The simplest is always to create a boolean array, like
x = np.random.rand(5, 5)
print(x)
ind = x > 0.5
print(ind)
# The boolean array can be used as an index to other arrays. Note this will return a 1D array, no matter what dimension the original arrays are, because there is no way to know what structure the `True` values have.
# +
x = np.random.rand(5, 5)
y = np.sin(x)
y[x > 0.5]
# or, equivalently, as two lines
idx = x > 0.5
y[idx]
# -
# To get the indices of the places where the conditional is true (i.e., the locations of the `True` values in the boolean array), use the `np.where` command.
x = np.random.rand(5, 5)
idx = np.where(x > 0.5)
idx
# Note that `np.where` always returns a tuple of indices for each dimension. This is a little strange for 1D arrays, but is done for consistency across all input values. Often, you will want to explicitly pull out the (single) array of indices from the tuple, like
# + jupyter={"outputs_hidden": true}
x = np.random.rand(10)
idx = np.where(x>0.5)[0]
print(idx)
# -
# _What happens with the [0] is missing behind the call to `where`?_
# ---
# ### *Exercise*
#
# > You can also use these calculated indices, or boolean matrices on the left hand side for assignment.
#
# > Create a 10x10 random array, with values between 0 and 1. Replace all of the numbers smaller than 0.5 with zero.
#
# > Do this first not using `where` and then do it using `where`.
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# ## Array views
#
# The data for an array may be stored in memory using `C` or `FORTRAN` ordered memory. Typically, there is no need to think about this, some details can be found [here](http://docs.scipy.org/doc/numpy-1.10.0/reference/internals.html).
#
# However, it is important to remember that subsets of an array can produce a different 'view' of the array that addresses the same memory as the original array. This can lead to some unexpected behaviors. One way to think of this is that assignment in Python is more like a C-pointer (i.e., a reference to a memory location) than an actual value.
a = np.arange(10.0)
b = a[::2]
print(a)
print(b)
a[4] = -999 # this will modify b as well...
print(a)
print(b)
b[-1] = -888 # this will modify a as well...
print(a)
print(b)
# Normally, this will not be a problem, but if you need to make sure that a subset of an array has it's own memory, make sure you make a `copy` of the array, like
a = np.arange(10.0)
b = a.copy()[::2] # or np.copy(a)
a[4] = -999 # this will NOT modify b now
print(a)
print(b)
# ## Array broadcasting
#
# (Largely taken from [SciPy docs](https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html))
#
# Generally arrays should be the same shape for them to be multiplied together.
a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 2.0, 2.0])
a * b
# The term broadcasting describes how `numpy` treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. Broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. It does this without making needless copies of data and usually leads to efficient algorithm implementations.
#
# For example, the simplest broadcasting example occurs when an array and a scalar value are combined in an operation:
a = np.array([1.0, 2.0, 3.0])
b = 2.0
a * b
# The result is equivalent to the previous example where b was an array. We can think of the scalar b being stretched during the arithmetic operation into an array with the same shape as a. The new elements in b are simply copies of the original scalar. The stretching analogy is only conceptual. NumPy is smart enough to use the original scalar value without actually making copies, so that broadcasting operations are as memory and computationally efficient as possible.
#
# The code in the second example is more efficient than that in the first because broadcasting moves less memory around during the multiplication (b is a scalar rather than an array).
# ### General Broadcasting Rules
#
# When operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when
#
# 1. they are equal, or
# 1. one of them is 1
#
# If these conditions are not met, a ValueError: frames are not aligned exception is thrown, indicating that the arrays have incompatible shapes. The size of the resulting array is the maximum size along each dimension of the input arrays.
#
# Arrays do not need to have the same number of dimensions. For example, if you have a 256x256x3 array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with 3 values. Lining up the sizes of the trailing axes of these arrays according to the broadcast rules, shows that they are compatible:
#
# Image (3d array): 256 x 256 x 3
# Scale (1d array): 3
# Result (3d array): 256 x 256 x 3
#
# When either of the dimensions compared is one, the other is used. In other words, dimensions with size 1 are stretched or “copied” to match the other.
#
# In the following example, both the A and B arrays have axes with length one that are expanded to a larger size during the broadcast operation:
#
# A (4d array): 8 x 1 x 6 x 1
# B (3d array): 7 x 1 x 5
# Result (4d array): 8 x 7 x 6 x 5
#
# Here are some more examples:
#
# A (2d array): 5 x 4
# B (1d array): 1
# Result (2d array): 5 x 4
#
# A (2d array): 5 x 4
# B (1d array): 4
# Result (2d array): 5 x 4
#
# A (3d array): 15 x 3 x 5
# B (3d array): 15 x 1 x 5
# Result (3d array): 15 x 3 x 5
#
# A (3d array): 15 x 3 x 5
# B (2d array): 3 x 5
# Result (3d array): 15 x 3 x 5
#
# A (3d array): 15 x 3 x 5
# B (2d array): 3 x 1
# Result (3d array): 15 x 3 x 5
#
# Let's create an example with arrays of random numbers.
# +
A = np.random.rand(15, 3, 5)
B = np.random.rand(3, 1)
print(A.shape, B.shape)
Result = A * B
print(Result.shape)
# -
# Here are examples of shapes that do not broadcast:
#
# A (1d array): 3
# B (1d array): 4 # trailing dimensions do not match
#
# A (2d array): 2 x 1
# B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
# ---
# ### *Exercise*
#
# a = np.random.rand(5, 7, 1, 8)
# b = np.random.rand(8)
# c = np.random.rand(7, 3, 8)
# d = np.random.rand(5, 1, 3, 1)
#
# > Experiment with multiplying combinations of the arrays above together. Try to predict the resulting shape beforehand.
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# Notice that the rules for broadcasting are based on the location of singleton dimensions. Singleton dimensions are implied forward (to the left), but not backward (to the right). So, the first example here works but not the second:
#
# A (2d array): 5 x 4
# B (1d array): 4
# Result (2d array): 5 x 4
#
# A (2d array): 5 x 4
# B (1d array): 5
#
#
# *Compare with large set of examples above. How can the bottom example here be fixed?*
# This problem can be fixed by creating new singleton dimensions in arrays. This can be done by putting `np.newaxis` in the appropriate space when indexing the array. For example:
A = np.random.rand(5, 4)
B = np.random.rand(5)
A*B
print(B.shape)
print(B[:,np.newaxis].shape)
(A*B[:,np.newaxis]).shape
# ---
# ### *Exercise*
#
# > Multiply `b = np.random.rand(8)` and `c = np.random.rand(8, 3, 7)`. What is another way you could accomplish this calculation besides using `newaxis`?
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# ---
# ### *Exercise*
#
# b = np.random.rand(2)
# c = np.random.rand(2, 3)
#
# > Concatenate arrays `b` and `c`. Along which axis would it make sense to concatenate, given the arrays dimensions? Do you need to make any changes to the arrays to get this to work?
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# ## Flattening arrays with `a.flat` and `a.flatten()`
#
# There are two basic ways to turn any array into a 1D array. They are slightly different.
#
# `a.flatten()` returns a copy of an array, in one dimension.
a = np.arange(12).reshape(3, 4)
print(a)
b = a.flatten()
print(b)
# the `flat` attribute on the other hand gives a view of the array in 1D. It looks like an iterator object (like `range` and `zip`). This allows
a.flat[6] = -999
print(a)
# In contrast, this does not work as expected. _WHY?_
a.flatten()[5] = -888
print(a)
# Other operations can be done to the array first. For example, we can take a transpose of the array before we flatten it.
a.T.flat[6] = -998
print(a)
# Here, the `T` attribute (equivalent to the `a.transpose()` method) gives a view of the array transposed (similar to MATLAB's tick notation).
print(a.T)
# ## Masked arrays
#
# Masked arrays are ways to create arrays with missing values. MATLAB™ uses NaNs (NaN stands for 'Not a Number'), and the NaNs are the values of the arrays at those points. This approach also works in Python. Masked arrays are preferred since they retain the masked array values, and also some plotting routines require masked arrays when plotting arrays with missing values. Masked arrays are usually created through some condition, like
# +
arr = np.random.randn(7, 8)
cond = arr > 0.1 # `cond` is True for the random values greater than 0.1
marr = np.ma.masked_where(cond, arr) # mask (hide) the entries where cond is True
print(marr)
# -
marr.mean(axis=0)
# The mask can also be supplied explicity when creating the masked array,
# + jupyter={"outputs_hidden": true}
marr = np.ma.masked_array([1, 2, 3, 4, 5], mask=[True, True, False, False, True])
marr
# -
# ## Importing data
#
# One of the basic commands in `numpy` for loading in data is the `loadtxt` command. There are other ways to do this, such as the [`genfromtxt`](http://docs.scipy.org/doc/numpy-dev/user/basics.io.genfromtxt.html) command, but `loadtxt` is sufficient for most purposes, and is easy to use.
# + jupyter={"outputs_hidden": true} tags=[]
data = np.loadtxt('../data/CTD.txt', comments='*')
data[:,2] # a column of data representing temperature
data
# -
# ---
# ### *Exercise*
#
# > Read in the oceanographic data file '../data/CTD.txt' into an array. You can look at the data file itself to see what variables are stored in each column.
#
# > Using this data, write a function to calculate the linear equation of state. This is an approximation of the density of water, as it depends on salinity, temperature, and some empirical constants. We will use the following form for the linear equation of state:
#
# > $\rho = 1027[1+7.6\times 10^{-4}(S-35) -1.7\times 10^{-4}(T-25)]$
#
# > where $\rho$ is the density, $S$ is the salinity, and $T$ is the temperature.
#
# > This is more free form than the homework, so you should set up all of the associated code to call the function, and write out the function yourself. Don't forget docstrings! For a check, the first value of your density array in order should equal 1021.7519981630001 and the last should equal 1028.0471353619998.
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# ## Polynomial fitting
#
# The basic function for fitting a polynomial (e.g., a straight line) is `np.polyfit(x, y, deg)`. There are a number of other functions that let you add (`np.polyadd`), multiply (`np.polymul`), find zeros (`np.roots`), and do other operations to polynomials.
# +
x = np.random.rand(100)
y = 5 + 3*x + 0.1*np.random.randn(100) # A straight line with some noise
p = np.polyfit(x, y, 1) # fit a straight line (order is 1)
print(p) # The coefficients of the polynomial, with highest order first. (i.e,. [slope, intercept])
# -
# Let's plot it to make sure this makes sense:
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# plot the raw (x, y) samples as dots
plt.plot(x, y, '.')
# plot the fitted line, p[0]*x + p[1], evaluated at the sample x values
plt.plot(x, p[0]*x + p[1])
plt.legend(('Data', 'Fitted line'))  # labels correspond to the two plot calls in order
# -
# Once you have the fit, you can use it to find other useful things, like the value of the fitted line at $x=1$:
# Evaluate the fitted polynomial p at x = 1.
np.polyval(p, 1)
# You can also use the `np.polynomial.Polynomial` class to work with polynomials. Note, these define polynomials the opposite way, with the _lowest_ order first. The Polynomial class gives an excellent example of operator overloading, and the flexibility of classes.
# + jupyter={"outputs_hidden": true}
p1 = np.polynomial.Polynomial([5, 3])  # y = 5 + 3 x  (coefficients given lowest order first)
p2 = np.polynomial.Polynomial([3, 6, 8, 2])  # y = 3 + 6 x + 8 x**2 + 2 x**3
# -
# You can use the Polynomial object to evaluate the value of the polynomial at various input values:
print('Evaluation')
print('p1(0.0) = ', p1(0))  # calling a Polynomial object evaluates it at that point
print('p2(5.0) = ', p2(5))
# We can use this to make a plot to see the function:
x = np.linspace(0,10)  # evenly spaced sample points on [0, 10]
plt.plot(x, p1(x), x, p2(x))
plt.legend(['p1', 'p2'])
# Other things we can do:
print('Roots')
print('Roots of p2 = ', p2.roots())
print()
print('Operations')
print('p1 + p2 = ', p1 + p2)  # operator overloading: polynomial arithmetic
print('p1 * p2 = ', p1 * p2)
print()
print('Calculus')
print('Derivative of p1', p1.deriv(1))  # first derivative
print('Integral of p2', p2.integ(4, k=[4, 3, 2, 1]))  # 4th antiderivative, with integration constants k
# ## Vectorization
#
# Vectorization and array broadcasting are two big reasons that `numpy` can be efficient and fast. With these tools, you can avoid writing for loops (which are slow).
#
# The best way to do mathematical operations using `numpy` arrays is to do `vector` operations. That is, mathematical operations are defined to be element by element, and this is done much faster than looping. As a rule of thumb, you should be very concerned if your code has more than one significant `for` loop in the numerical analysis section.
#
# Here is a way to do multiply 2 big arrays using for loops, which is not how you should do it. The sum at the end is included for comparison with the subsequent approach.
# +
a = np.arange(102400.0).reshape(4, 8, 1600, 2) # a 4D array using sequential numbers
b = np.random.rand(4, 8, 1600, 2) # a 4D array using random numbers
li, lj, lk, lm = b.shape # size of b in each dimension
sol = np.zeros(b.shape)
# Deliberately slow: element-by-element multiply with explicit Python loops (the anti-pattern).
for i in range(li):
    for j in range(lj):
        for k in range(lk):
            for m in range(lm):
                sol[i,j,k,m] = a[i,j,k,m]*b[i,j,k,m]
print(sol.sum())  # checksum, for comparison with the vectorized version
# -
# The better way is to directly multiply the arrays together, taking advantage of C code that Python has in the background.
sol = a * b # element-by-element multiplication. This operation is about as fast as it can be on your computer.
print(sol.sum())  # same checksum as the loop version
# ## Basic performance evaluation
#
# We can do some very basic perfomance testing using the `%time` special function in jupyter notebooks. Lets use this to examine the time it takes to do a singular value decomposition for different sized matrices.
b = np.random.randn(5000, 2000)  # a large matrix, so the SVD takes a measurable amount of time
# %time u, s, v = np.linalg.svd(b)
# `%time` runs the line once and gives the time required. However, calculation times vary depending on many things including the numbers involved and the state of your computer at the moment. In this case, the `%timeit` function can be used to perform the test a number of times to get an average calculation time.
# %timeit b = np.random.randn(50, 20); u, s, v = np.linalg.svd(b)
# For statements that are longer than a single line, the `time.time` function can be used.
# +
import time
t_start = time.time()  # wall-clock time before the work
time.sleep(0.25) # Do nothing for 0.25 seconds
t_stop = time.time()  # wall-clock time after the work
print('{:6.4f} seconds have passed.'.format(t_stop-t_start))
# -
# ---
# ### *Exercise*
#
# > Earlier, we discussed using array operations instead of looping because it is faster. Let's compare.
#
# > Calculate the time it takes to calculate the $a$ and $b$ arrays with dimensions [4, 8, 1600, 2] by both methods demonstrated: using a series of 4 `for` loops, one for each dimension of the arrays and using array operations. Compare the times by calculating a ratio.
#
# ---
# + jupyter={"outputs_hidden": true}
# -
# ## Linear algebra
#
# One of the key elements of the `numpy` package is the `numpy.linalg` subpackage that contains a number of linear algebra functions that work efficiently on arrays.
# + jupyter={"outputs_hidden": true}
a = np.random.randn(100, 100)
e, v = np.linalg.eig(a)  # eigenvalues e and eigenvectors v (as columns of v)
b = np.random.randn(500, 200)
u, s, v = np.linalg.svd(b)  # singular value decomposition of b
# -
# Matrix multiplication is done using the `np.dot` function. In this case, matrices do _not_ need to be the same shape, but must follow the rules of matrix multiplication. E.g., the operation dot(<4x5 array>, <5x12 array>) results in a 4x12 array; i.e., the inner dimensions must match (technically last and second-to-last, for arrays with more than two dimensions).
# +
x = np.random.rand(4, 5)
y = np.random.rand(5, 12)
res = np.dot(x, y)  # matrix product: the inner dimensions (both 5) must match
print(res.shape)  # outer dimensions remain: (4, 12)
# np.dot(y, x) # This gives an error -- order is important.
# -
# ---
# ---
# ### *Exercise*
#
# > Output from a numerical model of the northwestern Gulf of Mexico are saved in a file `../data/model.npz`. Read in this file using `np.load`. Among other things, it contains `h`, the depths within the numerical domain, and `ssh`, the sea surface heights at two time steps. The sea surface height gives the deviation above and below sea level from a reference water level (which changes in time as the water moves), and the depths of the seabed are also given with respect to that reference water level.
#
# > Find the full water column depth, between the seabed and the sea surface, for the two given times.
#
# > You can use as a comparison that at the first time step the [0,0] value of this array should be 3007.6088347392124, and at the second time step the [0,-1] value should be 605.25282427018749. Note that there are differences between the two time steps, though they are generally quite small, since they reflect the change between consecutive time steps in the numerical circulation model.
#
# > Don't forget array conventions, repeated here for convenience:
#
# Generally, you will want to think of arrays as representing dimensions in space and time. The conventional way to think of this is that the dimensions are $(t, z, y, x)$; missing dimensions are omitted. This will help make plotting and analysis easier. Some examples might be:
#
# temp[:, :, :, :] # A 4D array (time, height, latitude, longitude)
# press[:, :] # A 2D array (time, height)
# humid[:, :] # A 2D array (latitude, longitude)
# ---
# + jupyter={"outputs_hidden": true}
| python basics/2_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# # Code Coverage
#
# In the [previous chapter](Fuzzer.ipynb), we introduced _basic fuzzing_ – that is, generating random inputs to test programs. How do we measure the effectiveness of these tests? One way would be to check the number (and seriousness) of bugs found; but if bugs are scarce, we need a _proxy for the likelihood of a test to uncover a bug._ In this chapter, we introduce the concept of *code coverage*, measuring which parts of a program are actually executed during a test run. Measuring such coverage is also crucial for test generators that attempt to cover as much code as possible.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
# **Prerequisites**
#
# * You need some understanding of how a program is executed.
# * You should have learned about basic fuzzing in the [previous chapter](Fuzzer.ipynb).
# + [markdown] slideshow={"slide_type": "skip"}
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.Coverage import <identifier>
# ```
#
# and then make use of the following features.
#
#
# This chapter introduces a `Coverage` class allowing you to measure coverage for Python programs. Its typical usage is in conjunction with a `with` clause:
#
# ```python
# >>> with Coverage() as cov:
# >>> cgi_decode("a+b")
# ```
# The `trace()` method returns the coverage as a list of locations covered. Each location comes as a pair (`function name`, `line`).
#
# ```python
# >>> print(cov.trace())
# [('cgi_decode', 9), ('cgi_decode', 10), ('cgi_decode', 11), ('cgi_decode', 12), ('cgi_decode', 8), ('cgi_decode', 15), ('cgi_decode', 16), ('cgi_decode', 17), ('cgi_decode', 18), ('cgi_decode', 19), ('cgi_decode', 21), ('cgi_decode', 30), ('cgi_decode', 31), ('cgi_decode', 17), ('cgi_decode', 18), ('cgi_decode', 19), ('cgi_decode', 20), ('cgi_decode', 31), ('cgi_decode', 17), ('cgi_decode', 18), ('cgi_decode', 19), ('cgi_decode', 21), ('cgi_decode', 30), ('cgi_decode', 31), ('cgi_decode', 17), ('cgi_decode', 32), ('__exit__', 25)]
#
# ```
# The `coverage()` method returns the set of locations executed at least once:
#
# ```python
# >>> print(cov.coverage())
# {('cgi_decode', 11), ('cgi_decode', 17), ('cgi_decode', 30), ('cgi_decode', 20), ('cgi_decode', 10), ('cgi_decode', 19), ('cgi_decode', 32), ('cgi_decode', 16), ('cgi_decode', 12), ('cgi_decode', 9), ('cgi_decode', 15), ('cgi_decode', 31), ('__exit__', 25), ('cgi_decode', 18), ('cgi_decode', 8), ('cgi_decode', 21)}
#
# ```
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## A CGI Decoder
#
# We start by introducing a simple Python function that decodes a CGI-encoded string. CGI encoding is used in URLs (i.e., Web addresses) to encode characters that would be invalid in a URL, such as blanks and certain punctuation:
#
# * Blanks are replaced by `'+'`
# * Other invalid characters are replaced by '`%xx`', where `xx` is the two-digit hexadecimal equivalent.
#
# In CGI encoding, the string `"Hello, world!"` would thus become `"Hello%2c+world%21"` where `2c` and `21` are the hexadecimal equivalents of `','` and `'!'`, respectively.
#
# The function `cgi_decode()` takes such an encoded string and decodes it back to its original form. Our implementation replicates the code from \cite{Pezze2008}. (It even includes its bugs – but we won't reveal them at this point.)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
def cgi_decode(s):
    """Decode the CGI-encoded string `s` and return the result:
       * "+" is replaced by " " (space);
       * "%xx" is replaced by the character with hexadecimal code xx.
       Raises `ValueError` when '%' is followed by non-hex digits."""
    # Mapping of hex digits to their integer values
    hex_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
    }
    t = ""  # decoded result, built character by character
    i = 0  # current index into s
    while i < len(s):
        c = s[i]
        if c == '+':
            t += ' '
        elif c == '%':
            digit_high, digit_low = s[i + 1], s[i + 2]  # NOTE(review): IndexError if s ends in '%' or '%x' -- confirm intended
            i += 2  # skip over the two hex digits
            if digit_high in hex_values and digit_low in hex_values:
                v = hex_values[digit_high] * 16 + hex_values[digit_low]
                t += chr(v)
            else:
                raise ValueError("Invalid encoding")
        else:
            t += c  # all other characters pass through unchanged
        i += 1
    return t
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Here is an example of how `cgi_decode()` works:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cgi_decode("Hello+world")
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# If we want to systematically test `cgi_decode()`, how would we proceed?
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
# The testing literature distinguishes two ways of deriving tests: _Black-box testing_ and _White-box testing._
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Black-Box Testing
#
# The idea of *black-box testing* is to derive tests from the _specification_. In the above case, we thus would have to test `cgi_decode()` by the features specified and documented, including
#
# * testing for correct replacement of `'+'`;
# * testing for correct replacement of `"%xx"`;
# * testing for non-replacement of other characters; and
# * testing for recognition of illegal inputs.
#
# Here are four assertions (tests) that cover these four features. We can see that they all pass:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
assert cgi_decode('+') == ' '  # '+' decodes to a space
assert cgi_decode('%20') == ' '  # %xx hex escape (0x20 is a space)
assert cgi_decode('abc') == 'abc'  # ordinary characters pass through unchanged
try:
    cgi_decode('%?a')  # '?' is not a hex digit
    assert False  # must not be reached: the call above should raise
except ValueError:
    pass  # invalid encodings raise ValueError, as specified
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# The advantage of black-box testing is that it finds errors in the _specified_ behavior. It is independent of a given implementation, and thus allows tests to be created even before the implementation exists. The downside is that _implemented_ behavior typically covers more ground than _specified_ behavior, and thus tests based on specification alone typically do not cover all implementation details.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## White-Box Testing
#
# In contrast to black-box testing, *white-box testing* derives tests from the _implementation_, notably the internal structure. White-Box testing is closely tied to the concept of _covering_ structural features of the code. If a statement in the code is not executed during testing, for instance, this means that an error in this statement cannot be triggered either. White-Box testing thus introduces a number of *coverage criteria* that have to be fulfilled before the test can be said to be sufficient. The most frequently used coverage criteria are
#
# * *Statement coverage* – each statement in the code must be executed by at least one test input.
# * *Branch coverage* – each branch in the code must be taken by at least one test input. (This translates to each `if` and `while` decision once being true, and once being false.)
#
# Besides these, there are far more coverage criteria, including sequences of branches taken, loop iterations taken (zero, one, many), data flows between variable definitions and usages, and many more; \cite{Pezze2008} has a great overview.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Let us consider `cgi_decode()`, above, and reason what we have to do such that each statement of the code is executed at least once. We'd have to cover
#
# * The block following `if c == '+'`
# * The two blocks following `if c == '%'` (one for valid input, one for invalid)
# * The final `else` case for all other characters.
#
# This results in the same conditions as with black-box testing, above; again, the assertions above indeed would cover every statement in the code. Such a correspondence is actually pretty common, since programmers tend to implement different behaviors in different code locations; and thus, covering these locations will lead to test cases that cover the different (specified) behaviors.
#
# The advantage of white-box testing is that it finds errors in _implemented_ behavior. It can be conducted even in cases where the specification does not provide sufficient details; actually, it helps in identifying (and thus specifying) corner cases in the specification. The downside is that it may miss _non-implemented_ behavior: If some specified functionality is missing, white-box testing will not find it.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Tracing Executions
#
# One nice feature of white-box testing is that one can actually automatically assess whether some program feature was covered. To this end, one _instruments_ the execution of the program such that during execution, a special functionality keeps track of which code was executed. After testing, this information can be passed to the programmer, who can then focus on writing tests that cover the yet uncovered code.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# In most programming languages, it is rather difficult to set up programs such that one can trace their execution. Not so in Python. The function `sys.settrace(f)` allows you to define a *tracing function* `f()` that is called for each and every line executed. Even better, it gets access to the current function and its name, current variable contents, and more. It is thus an ideal tool for *dynamic analysis* – that is, the analysis of what actually happens during an execution.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# To illustrate how this works, let us again look into a specific execution of `cgi_decode()`.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cgi_decode("a+b")
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# To track how the execution proceeds through `cgi_decode()`, we make use of `sys.settrace()`. First, we define the _tracing function_ that will be called for each line. It has three parameters:
#
# * The `frame` parameter gets you the current _frame_, allowing access to the current location and variables:
# * `frame.f_code` is the currently executed code with `frame.f_code.co_name` being the function name;
# * `frame.f_lineno` holds the current line number; and
# * `frame.f_locals` holds the current local variables and arguments.
# * The `event` parameter is a string with values including `"line"` (a new line has been reached) or `"call"` (a function is being called).
# * The `arg` parameter is an additional _argument_ for some events; for `"return"` events, for instance, `arg` holds the value being returned.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We use the tracing function for simply reporting the current line executed, which we access through the `frame` argument.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
coverage = []  # line numbers executed during the most recent traced run
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
def traceit(frame, event, arg):
    """Tracing callback for `sys.settrace()`.
    On every "line" event, append the executed line number (from
    `frame.f_lineno`) to the global `coverage` list; all other events
    are ignored.  Returns itself so that tracing continues in newly
    entered frames."""
    if event == "line":
        # The list is mutated in place, so no `global` declaration is needed.
        coverage.append(frame.f_lineno)
    return traceit
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We can switch tracing on and off with `sys.settrace()`:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import sys
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
def cgi_decode_traced(s):
    """Run `cgi_decode(s)` with line tracing enabled, recording every
    executed line number in the global `coverage` list."""
    global coverage
    coverage = []  # start with a fresh log for this run
    sys.settrace(traceit)  # Turn on
    cgi_decode(s)
    # NOTE(review): if cgi_decode() raises, tracing stays enabled -- consider try/finally
    sys.settrace(None)  # Turn off
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# When we compute `cgi_decode("a+b")`, we can now see how the execution progresses through `cgi_decode()`. After the initialization of `hex_values`, `t`, and `i`, we see that the `while` loop is taken three times – one for every character in the input.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cgi_decode_traced("a+b")
print(coverage)
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Which lines are these, actually? To this end, we get the source code of `cgi_decode_code` and encode it into an array `cgi_decode_lines`, which we will then annotate with coverage information. First, let us get the source code of `cgi_encode`:
# + slideshow={"slide_type": "skip"}
import inspect
# + slideshow={"slide_type": "fragment"}
cgi_decode_code = inspect.getsource(cgi_decode)
# + [markdown] slideshow={"slide_type": "fragment"}
# `cgi_decode_code` is a string holding the source code. We can print it out with Python syntax highlighting:
# + slideshow={"slide_type": "skip"}
from bookutils import print_content, print_file
# + slideshow={"slide_type": "subslide"}
print_content(cgi_decode_code[:300] + "...", ".py")
# + [markdown] slideshow={"slide_type": "fragment"}
# Using `splitlines()`, we split the code into an array of lines, indexed by line number.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
cgi_decode_lines = [""] + cgi_decode_code.splitlines()
# + [markdown] slideshow={"slide_type": "fragment"}
# `cgi_decode_lines[L]` is line L of the source code.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
cgi_decode_lines[1]
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We see that the first line (9) executed is actually the initialization of `hex_values`...
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cgi_decode_lines[9:13]
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# ... followed by the initialization of `t`:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cgi_decode_lines[15]
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# To see which lines actually have been covered at least once, we can convert `coverage` into a set:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
covered_lines = set(coverage)  # deduplicate: a line counts once, no matter how often it ran
print(covered_lines)
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Let us print out the full code, annotating lines not covered with '#':
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Print each source line, prefixed with '#' if it was never executed.
for lineno in range(1, len(cgi_decode_lines)):
    if lineno not in covered_lines:
        print("# ", end="")
    else:
        print("  ", end="")
    print("%2d " % lineno, end="")
    print_content(cgi_decode_lines[lineno], '.py')
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We see that a number of lines (notably comments) have not been executed, simply because they are not executable. However, we also see that the lines under `if c == '%'` have _not_ been executed yet. If `"a+b"` were our only test case so far, this missing coverage would now encourage us to create another test case that actually covers these lines.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## A Coverage Class
#
# In this book, we will make use of coverage again and again – to _measure_ the effectiveness of different test generation techniques, but also to _guide_ test generation towards code coverage. Our previous implementation with a global `coverage` variable is a bit cumbersome for that. We therefore implement some functionality that will help us measuring coverage easily.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# The key idea of getting coverage is to make use of the Python `with` statement. The general form
#
# ```python
# with OBJECT [as VARIABLE]:
# BODY
# ```
#
# executes `BODY` with `OBJECT` being defined (and stored in `VARIABLE`). The interesting thing is that at the beginning and end of `BODY`, the special methods `OBJECT.__enter__()` and `OBJECT.__exit__()` are automatically invoked; even if `BODY` raises an exception. This allows us to define a `Coverage` object where `Coverage.__enter__()` automatically turns on tracing and `Coverage.__exit__()` automatically turns off tracing again. After tracing, we can make use of special methods to access the coverage. This is what this looks like during usage:
#
# ```python
# with Coverage() as cov:
# function_to_be_traced()
# c = cov.coverage()
# ```
#
# Here, tracing is automatically turned on during `function_to_be_traced()` and turned off again after the `with` block; afterwards, we can access the set of lines executed.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Here's the full implementation with all its bells and whistles. You don't have to get everything; it suffices that you know how to use it:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
class Coverage(object):
    """Measure which lines are executed within a `with` block.
    Typical usage:
        with Coverage() as cov:
            function_to_be_traced()
        cov.coverage()  # set of (function_name, line_number) pairs
    """
    # Trace function
    def traceit(self, frame, event, arg):
        """Tracing callback: chain to any previously installed trace
        function, then record each executed (function, line) pair."""
        if self.original_trace_function is not None:
            self.original_trace_function(frame, event, arg)
        if event == "line":
            function_name = frame.f_code.co_name
            lineno = frame.f_lineno
            self._trace.append((function_name, lineno))
        return self.traceit  # keep tracing in newly entered frames
    def __init__(self):
        self._trace = []  # chronological log of (function_name, line_number) pairs
    # Start of `with` block
    def __enter__(self):
        self.original_trace_function = sys.gettrace()  # save for chaining and restore
        sys.settrace(self.traceit)
        return self
    # End of `with` block
    def __exit__(self, exc_type, exc_value, tb):
        # Restore the previous trace function; returning None lets exceptions propagate.
        sys.settrace(self.original_trace_function)
    def trace(self):
        """The list of executed lines, as (function_name, line_number) pairs"""
        return self._trace
    def coverage(self):
        """The set of executed lines, as (function_name, line_number) pairs"""
        return set(self.trace())
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Let us put this to use:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
with Coverage() as cov:
    cgi_decode("a+b")
print(cov.coverage())  # set of (function_name, line_number) pairs executed
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# As you can see, the `Coverage()` class not only keeps track of lines executed, but also of function names. This is useful if you have a program that spans multiple files.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Comparing Coverage
#
# Since we represent coverage as a set of executed lines, we can also apply _set operations_ on these. For instance, we can find out which lines are covered by individual test cases, but not others:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
with Coverage() as cov_plus:
    cgi_decode("a+b")
with Coverage() as cov_standard:
    cgi_decode("abc")
cov_plus.coverage() - cov_standard.coverage()  # lines executed only for the '+' input
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# This is the single line in the code that is executed only in the `'a+b'` input.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# We can also compare sets to find out which lines still need to be covered. Let us define `cov_max` as the maximum coverage we can achieve. (Here, we do this by executing the "good" test cases we already have. In practice, one would statically analyze code structure, which we introduce in [the chapter on symbolic testing](SymbolicFuzzer.ipynb).)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import bookutils
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# Run all four "good" test cases under one Coverage object to get the maximum coverage.
with Coverage() as cov_max:
    cgi_decode('+')
    cgi_decode('%20')
    cgi_decode('abc')
    try:
        cgi_decode('%?a')  # invalid escape: exercises the error-handling branch
    except:
        pass
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# Then, we can easily see which lines are _not_ yet covered by a test case:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cov_max.coverage() - cov_plus.coverage()
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# Again, these would be the lines handling `"%xx"`, which we have not yet had in the input.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Coverage of Basic Fuzzing
#
# We can now use our coverage tracing to assess the _effectiveness_ of testing methods – in particular, of course, test _generation_ methods. Our challenge is to achieve maximum coverage in `cgi_decode()` just with random inputs. In principle, we should _eventually_ get there, as eventually, we will have produced every possible string in the universe – but exactly how long is this? To this end, let us run just one fuzzing iteration on `cgi_decode()`:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
from Fuzzer import fuzzer
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
sample = fuzzer()
sample
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Here's the invocation and the coverage we achieve. We wrap `cgi_decode()` in a `try...except` block such that we can ignore `ValueError` exceptions raised by illegal `%xx` formats.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
with Coverage() as cov_fuzz:
    try:
        cgi_decode(sample)
    except:
        pass  # ignore exceptions raised by illegal %xx escapes in the random input
cov_fuzz.coverage()
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Is this already the maximum coverage? Apparently, there are still lines missing:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
cov_max.coverage() - cov_fuzz.coverage()
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Let us try again, increasing coverage over 100 random inputs. We use an array `cumulative_coverage` to store the coverage achieved over time; `cumulative_coverage[0]` is the total number of lines covered after input 1,
# `cumulative_coverage[1]` is the number of lines covered after inputs 1–2, and so on.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
trials = 100
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
def population_coverage(population, function):
    """Run `function` on every input in `population`, measuring coverage.
    Returns a pair (all_coverage, cumulative_coverage): the set of all
    (function_name, line_number) pairs ever executed, and a list whose
    k-th entry is the number of distinct lines covered by the first
    k+1 inputs."""
    covered_so_far = set()
    growth = []
    for inp in population:
        with Coverage() as cov:
            try:
                function(inp)
            except:
                pass  # exceptions from the function under test are ignored
        covered_so_far.update(cov.coverage())
        growth.append(len(covered_so_far))
    return covered_so_far, growth
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Let us create a hundred inputs to determine how coverage increases:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
def hundred_inputs():
    """Produce `trials` random fuzz inputs as a list of strings."""
    return [fuzzer() for _ in range(trials)]
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Here's how the coverage increases with each input:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
all_coverage, cumulative_coverage = population_coverage(
hundred_inputs(), cgi_decode)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
# %matplotlib inline
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import matplotlib.pyplot as plt
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
plt.plot(cumulative_coverage)
plt.title('Coverage of cgi_decode() with random inputs')
plt.xlabel('# of inputs')
plt.ylabel('lines covered')
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# This is just _one_ run, of course; so let's repeat this a number of times and plot the averages.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# Average the coverage-growth curve over `runs` repetitions to smooth
# out the randomness of any single fuzzing run.
runs = 100
# Create an array with TRIALS elements, all zero
sum_coverage = [0] * trials
for run in range(runs):
    # Each run fuzzes cgi_decode() with a fresh batch of `trials` inputs.
    all_coverage, coverage = population_coverage(hundred_inputs(), cgi_decode)
    assert len(coverage) == trials
    for i in range(trials):
        # Accumulate the cumulative-coverage curve point-wise across runs.
        sum_coverage[i] += coverage[i]
# Point-wise mean of the cumulative coverage curves over all runs.
average_coverage = []
for i in range(trials):
    average_coverage.append(sum_coverage[i] / runs)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
plt.plot(average_coverage)
plt.title('Average coverage of cgi_decode() with random inputs')
plt.xlabel('# of inputs')
plt.ylabel('lines covered')
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# We see that on average, we get full coverage after 40–60 fuzzing inputs.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Getting Coverage from External Programs
#
# Of course, not all the world is programming in Python. The good news is that the problem of obtaining coverage is ubiquitous, and almost every programming language has some facility to measure coverage. Just as an example, let us therefore demonstrate how to obtain coverage for a C program.
# + [markdown] slideshow={"slide_type": "fragment"}
# Our C program (again) implements `cgi_decode`; this time as a program to be executed from the command line:
#
# ```shell
# $ ./cgi_decode 'Hello+World'
# Hello World
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# Here comes the C code, first as a Python string. We start with the usual C includes:
# + slideshow={"slide_type": "fragment"}
cgi_c_code = """
/* CGI decoding as C program */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
"""
# + [markdown] slideshow={"slide_type": "subslide"}
# Here comes the initialization of `hex_values`:
# + slideshow={"slide_type": "fragment"}
cgi_c_code += r"""
int hex_values[256];
void init_hex_values() {
for (int i = 0; i < sizeof(hex_values) / sizeof(int); i++) {
hex_values[i] = -1;
}
hex_values['0'] = 0; hex_values['1'] = 1; hex_values['2'] = 2; hex_values['3'] = 3;
hex_values['4'] = 4; hex_values['5'] = 5; hex_values['6'] = 6; hex_values['7'] = 7;
hex_values['8'] = 8; hex_values['9'] = 9;
hex_values['a'] = 10; hex_values['b'] = 11; hex_values['c'] = 12; hex_values['d'] = 13;
hex_values['e'] = 14; hex_values['f'] = 15;
hex_values['A'] = 10; hex_values['B'] = 11; hex_values['C'] = 12; hex_values['D'] = 13;
hex_values['E'] = 14; hex_values['F'] = 15;
}
"""
# + [markdown] slideshow={"slide_type": "subslide"}
# Here's the actual implementation of `cgi_decode()`, using pointers for input source (`s`) and output target (`t`):
# + slideshow={"slide_type": "fragment"}
cgi_c_code += r"""
int cgi_decode(char *s, char *t) {
while (*s != '\0') {
if (*s == '+')
*t++ = ' ';
else if (*s == '%') {
int digit_high = *++s;
int digit_low = *++s;
if (hex_values[digit_high] >= 0 && hex_values[digit_low] >= 0) {
*t++ = hex_values[digit_high] * 16 + hex_values[digit_low];
}
else
return -1;
}
else
*t++ = *s;
s++;
}
*t = '\0';
return 0;
}
"""
# + [markdown] slideshow={"slide_type": "subslide"}
# Finally, here's a driver which takes the first argument and invokes `cgi_decode` with it:
# + slideshow={"slide_type": "fragment"}
cgi_c_code += r"""
int main(int argc, char *argv[]) {
init_hex_values();
if (argc >= 2) {
char *s = argv[1];
char *t = malloc(strlen(s) + 1); /* output is at most as long as input */
int ret = cgi_decode(s, t);
printf("%s\n", t);
return ret;
}
else
{
printf("cgi_decode: usage: cgi_decode STRING\n");
return 1;
}
}
"""
# + [markdown] slideshow={"slide_type": "subslide"}
# Let us create the C source code: (Note that the following commands will overwrite the file `cgi_decode.c`, if it already exists in the current working directory. Be aware of this, if you downloaded the notebooks and are working locally.)
# + slideshow={"slide_type": "fragment"}
with open("cgi_decode.c", "w") as f:
f.write(cgi_c_code)
# + [markdown] slideshow={"slide_type": "fragment"}
# And here we have the C code with its syntax highlighted:
# + slideshow={"slide_type": "skip"}
from bookutils import print_file
# + slideshow={"slide_type": "subslide"}
print_file("cgi_decode.c")
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now compile the C code into an executable. The `--coverage` option instructs the C compiler to instrument the code such that at runtime, coverage information will be collected. (The exact options vary from compiler to compiler.)
# + slideshow={"slide_type": "fragment"}
# !cc --coverage -o cgi_decode cgi_decode.c
# + [markdown] slideshow={"slide_type": "subslide"}
# When we now execute the program, coverage information will automatically be collected and stored in auxiliary files:
# + slideshow={"slide_type": "fragment"}
# !./cgi_decode 'Send+mail+to+me%40fuzzingbook.org'
# + [markdown] slideshow={"slide_type": "subslide"}
# The coverage information is collected by the `gcov` program. For every source file given, it produces a new `.gcov` file with coverage information.
# + slideshow={"slide_type": "fragment"}
# !gcov cgi_decode.c
# + [markdown] slideshow={"slide_type": "subslide"}
# In the `.gcov` file, each line is prefixed with the number of times it was called (`-` stands for a non-executable line, `#####` stands for zero) as well as the line number. We can take a look at `cgi_decode()`, for instance, and see that the only code not executed yet is the `return -1` for an illegal input.
# + slideshow={"slide_type": "fragment"}
lines = open('cgi_decode.c.gcov').readlines()
for i in range(30, 50):
print(lines[i], end='')
# + [markdown] slideshow={"slide_type": "subslide"}
# Let us read in this file to obtain a coverage set:
# + slideshow={"slide_type": "fragment"}
def read_gcov_coverage(c_file):
    """Parse the gcov report for `c_file` into a set of (file, line) pairs.

    Only lines executed at least once are included; `-` (non-executable)
    and `#####` (never executed) entries are skipped.
    """
    executed = set()
    with open(c_file + ".gcov") as report:
        for record in report:
            count_field, line_field = record.split(':')[:2]
            marker = count_field.strip()
            lineno = int(line_field.strip())
            if not (marker.startswith('-') or marker.startswith('#')):
                executed.add((c_file, lineno))
    return executed
# + slideshow={"slide_type": "subslide"}
coverage = read_gcov_coverage('cgi_decode.c')
# + slideshow={"slide_type": "fragment"}
list(coverage)[:5]
# + [markdown] slideshow={"slide_type": "fragment"}
# With this set, we can now do the same coverage computations as with our Python programs.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Finding Errors with Basic Fuzzing
#
# Given sufficient time, we can indeed cover each and every line within `cgi_decode()`, whatever the programming language would be. This does not mean that they would be error-free, though. Since we do not check the result of `cgi_decode()`, the function could return any value without us checking or noticing. To catch such errors, we would have to set up a *results checker* (commonly called an *oracle*) that would verify test results. In our case, we could compare the C and Python implementations of `cgi_decode()` and see whether both produce the same results.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Where fuzzing is great at, though, is in finding _internal errors_ that can be detected even without checking the result. Actually, if one runs our `fuzzer()` on `cgi_decode()`, one quickly finds such an error, as the following code shows:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
from ExpectError import ExpectError
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
with ExpectError():
for i in range(trials):
try:
s = fuzzer()
cgi_decode(s)
except ValueError:
pass
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# So, it is possible to cause `cgi_decode()` to crash. Why is that? Let's take a look at its input:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
s
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# The problem here is at the end of the string. After a `'%'` character, our implementation will always attempt to access two more (hexadecimal) characters, but if these are not there, we will get an `IndexError` exception.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# This problem is also present in our C variant, which inherits it from the original implementation \cite{Pezze2008}:
#
# ```c
# int digit_high = *++s;
# int digit_low = *++s;
# ```
#
# Here, `s` is a pointer to the character to be read; `++` increments it by one character.
# In the C implementation, the problem is actually much worse. If the `'%'` character is at the end of the string, the above code will first read a terminating character (`'\0'` in C strings) and then the following character, which may be any memory content after the string, and which thus may cause the program to fail uncontrollably. The somewhat good news is that `'\0'` is not a valid hexadecimal character, and thus, the C version will "only" read one character beyond the end of the string.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
# Interestingly enough, none of the manual tests we had designed earlier would trigger this bug. Actually, neither statement nor branch coverage, nor any of the coverage criteria commonly discussed in literature would find it. However, a simple fuzzing run can identify the error with a few runs – _if_ appropriate run-time checks are in place that find such overflows. This definitely calls for more fuzzing!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Synopsis
#
# This chapter introduces a `Coverage` class allowing you to measure coverage for Python programs. Its typical usage is in conjunction with a `with` clause:
# + slideshow={"slide_type": "fragment"}
with Coverage() as cov:
cgi_decode("a+b")
# + [markdown] slideshow={"slide_type": "fragment"}
# The `trace()` method returns the coverage as a list of locations covered. Each location comes as a pair (`function name`, `line`).
# + slideshow={"slide_type": "fragment"}
print(cov.trace())
# + [markdown] slideshow={"slide_type": "fragment"}
# The `coverage()` method returns the set of locations executed at least once:
# + slideshow={"slide_type": "subslide"}
print(cov.coverage())
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Lessons Learned
#
# * Coverage metrics are a simple and fully automated means to approximate how much functionality of a program is actually executed during a test run.
# * A number of coverage metrics exist, the most important ones being statement coverage and branch coverage.
# * In Python, it is very easy to access the program state during execution, including the currently executed code.
# + [markdown] slideshow={"slide_type": "skip"}
# At the end of the day, let's clean up: (Note that the following commands will delete all files in the current working directory that fit the pattern `cgi_decode.*`. Be aware of this, if you downloaded the notebooks and are working locally.)
# + slideshow={"slide_type": "skip"}
import os
import glob
# + slideshow={"slide_type": "skip"}
for file in glob.glob("cgi_decode") + glob.glob("cgi_decode.*"):
os.remove(file)
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
# ## Next Steps
#
# Coverage is not only a tool to _measure_ test effectiveness, but also a great tool to _guide_ test generation towards specific goals – in particular uncovered code. We use coverage to
#
# * [guide _mutations_ of existing inputs towards better coverage in the chapter on mutation fuzzing](MutationFuzzer.ipynb)
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Background
#
# Coverage is a central concept in systematic software testing. For discussions, see the books in the [Introduction to Testing](Intro_Testing.ipynb).
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Exercises
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true
# ### Exercise 1: Fixing cgi_decode
#
# Create an appropriate test to reproduce the `IndexError` discussed above. Fix `cgi_decode()` to prevent the bug. Show that your test (and additional `fuzzer()` runs) no longer expose the bug. Do the same for the C variant.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden"
# **Solution.** Here's a test case:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
with ExpectError():
assert cgi_decode('%') == '%'
# + slideshow={"slide_type": "fragment"} solution2="hidden"
with ExpectError():
assert cgi_decode('%4') == '%4'
# + slideshow={"slide_type": "fragment"} solution2="hidden"
assert cgi_decode('%40') == '@'
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# Here's a fix:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
def fixed_cgi_decode(s):
    """Decode the CGI-encoded string `s`:
    * replace "+" by " "
    * replace "%xx" by the character with hex number xx.

    A "%" not followed by two characters is copied through verbatim.
    Return the decoded string. Raise `ValueError` for invalid inputs.
    """
    # Map every valid hexadecimal digit character to its integer value.
    hex_values = {digit: int(digit, 16) for digit in "0123456789abcdefABCDEF"}
    decoded = []
    pos = 0
    length = len(s)
    while pos < length:
        ch = s[pos]
        if ch == '+':
            decoded.append(' ')
        elif ch == '%' and pos + 2 < length:  # "%xx" needs two more chars
            high, low = s[pos + 1], s[pos + 2]
            pos += 2
            if high in hex_values and low in hex_values:
                decoded.append(chr(hex_values[high] * 16 + hex_values[low]))
            else:
                raise ValueError("Invalid encoding")
        else:
            decoded.append(ch)
        pos += 1
    return "".join(decoded)
# + slideshow={"slide_type": "subslide"} solution2="hidden"
assert fixed_cgi_decode('%') == '%'
# + slideshow={"slide_type": "fragment"} solution2="hidden"
assert fixed_cgi_decode('%4') == '%4'
# + slideshow={"slide_type": "fragment"} solution2="hidden"
assert fixed_cgi_decode('%40') == '@'
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# Here's the test:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
for i in range(trials):
try:
s = fuzzer()
fixed_cgi_decode(s)
except ValueError:
pass
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden"
# For the C variant, the following will do:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
cgi_c_code = cgi_c_code.replace(
r"if (*s == '%')", # old code
r"if (*s == '%' && s[1] != '\0' && s[2] != '\0')" # new code
)
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# Go back to the above compilation commands and recompile `cgi_decode`.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# ### Exercise 2: Branch Coverage
#
# Besides statement coverage, _branch coverage_ is one of the most frequently used criteria to determine the quality of a test. In a nutshell, branch coverage measures how many different _control decisions_ are made in code. In the statement
#
# ```python
# if CONDITION:
# do_a()
# else:
# do_b()
# ```
#
# for instance, both the cases where `CONDITION` is true (branching to `do_a()`) and where `CONDITION` is false (branching to `do_b()`) have to be covered. This holds for all control statements with a condition (`if`, `while`, etc.).
#
# How is branch coverage different from statement coverage? In the above example, there is actually no difference. In this one, though, there is:
#
# ```python
# if CONDITION:
# do_a()
# something_else()
# ```
#
# Using statement coverage, a single test case where `CONDITION` is true suffices to cover the call to `do_a()`. Using branch coverage, however, we would also have to create a test case where `do_a()` is _not_ invoked.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# Using our `Coverage` infrastructure, we can simulate branch coverage by considering _pairs of subsequent lines executed_. The `trace()` method gives us the list of lines executed one after the other:
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"}
with Coverage() as cov:
cgi_decode("a+b")
trace = cov.trace()
trace[:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# #### Part 1: Compute branch coverage
#
# Define a function `branch_coverage()` that takes a trace and returns the set of pairs of subsequent lines in a trace – in the above example, this would be
#
# ```python
# set(
# (('cgi_decode', 9), ('cgi_decode', 10)),
# (('cgi_decode', 10), ('cgi_decode', 11)),
# # more_pairs
# )
# ```
#
# Bonus for advanced Python programmers: Define `BranchCoverage` as a subclass of `Coverage` and make `branch_coverage()` as above a `coverage()` method of `BranchCoverage`.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden"
# **Solution.** Here's a simple definition of `branch_coverage()`:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
def branch_coverage(trace):
    """Return the set of pairs of locations executed one after another
    in `trace` (a simple approximation of branch coverage)."""
    steps = list(trace)
    # Pair each executed location with its successor.
    return set(zip(steps, steps[1:]))
# + slideshow={"slide_type": "subslide"} solution2="hidden"
branch_coverage(trace)
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# Here's a definition as a class:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
class BranchCoverage(Coverage):
    """Coverage variant whose unit of coverage is a pair of
    consecutively executed locations rather than a single line."""

    def coverage(self):
        """The set of executed line pairs"""
        steps = self.trace()
        # Pair each executed location with its successor.
        return set(zip(steps, steps[1:]))
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# #### Part 2: Comparing statement coverage and branch coverage
#
# Use `branch_coverage()` to repeat the experiments in this chapter with branch coverage rather than statement coverage. Do the manually written test cases cover all branches?
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# **Solution.** Let's repeat the above experiments with `BranchCoverage`:
# + slideshow={"slide_type": "fragment"} solution2="hidden"
with BranchCoverage() as cov:
cgi_decode("a+b")
print(cov.coverage())
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
with BranchCoverage() as cov_plus:
cgi_decode("a+b")
with BranchCoverage() as cov_standard:
cgi_decode("abc")
cov_plus.coverage() - cov_standard.coverage()
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
with BranchCoverage() as cov_max:
cgi_decode('+')
cgi_decode('%20')
cgi_decode('abc')
try:
cgi_decode('%?a')
except:
pass
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
cov_max.coverage() - cov_plus.coverage()
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
sample
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
with BranchCoverage() as cov_fuzz:
try:
cgi_decode(s)
except:
pass
cov_fuzz.coverage()
# + slideshow={"slide_type": "subslide"} solution2="hidden"
cov_max.coverage() - cov_fuzz.coverage()
# + slideshow={"slide_type": "subslide"} solution2="hidden"
def population_branch_coverage(population, function):
    """Run `function` on every input in `population`, measuring branch
    coverage (pairs of consecutively executed locations).

    Returns a pair (all_coverage, cumulative_coverage): the set of all
    pairs covered by any input, and a list whose i-th entry is the
    number of pairs covered by the first i+1 inputs.
    """
    pairs_seen = set()
    growth = []
    for sample in population:
        with BranchCoverage() as cov:
            try:
                function(sample)
            except:
                pass  # crashing inputs still contribute their coverage
        pairs_seen.update(cov.coverage())
        growth.append(len(pairs_seen))
    return pairs_seen, growth
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
all_branch_coverage, cumulative_branch_coverage = population_branch_coverage(
hundred_inputs(), cgi_decode)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution2="hidden"
# %matplotlib inline
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution2="hidden"
import matplotlib.pyplot as plt
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} solution2="hidden"
plt.plot(cumulative_branch_coverage)
plt.title('Branch coverage of cgi_decode() with random inputs')
plt.xlabel('# of inputs')
plt.ylabel('line pairs covered')
# + slideshow={"slide_type": "subslide"} solution2="hidden"
len(cov_max.coverage())
# + slideshow={"slide_type": "fragment"} solution2="hidden"
all_branch_coverage - cov_max.coverage()
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden"
# The additional coverage comes from the exception raised via an illegal input (say, `%g`).
# + slideshow={"slide_type": "fragment"} solution2="hidden"
cov_max.coverage() - all_branch_coverage
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden"
# This is an artefact coming from the subsequent execution of `cgi_decode()` when computing `cov_max`.
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# #### Part 3: Average coverage
#
# Again, repeat the above experiments with branch coverage. Does `fuzzer()` cover all branches, and if so, how many tests does it take on average?
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden"
# **Solution.** We repeat the experiments we ran with line coverage with branch coverage.
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} solution2="hidden"
runs = 100
# Create an array with TRIALS elements, all zero
sum_coverage = [0] * trials
for run in range(runs):
all_branch_coverage, coverage = population_branch_coverage(
hundred_inputs(), cgi_decode)
assert len(coverage) == trials
for i in range(trials):
sum_coverage[i] += coverage[i]
average_coverage = []
for i in range(trials):
average_coverage.append(sum_coverage[i] / runs)
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden"
plt.plot(average_coverage)
plt.title('Average branch coverage of cgi_decode() with random inputs')
plt.xlabel('# of inputs')
plt.ylabel('line pairs covered')
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden"
# We see that achieving branch coverage takes longer than statement coverage; it simply is a more difficult criterion to satisfy with random inputs.
| docs/beta/notebooks/Coverage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jgrdn-srn/Linear-Algebra_2nd-Sem/blob/main/Lab5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="F7JH-Oc2O9Sw"
# # Linear Algebra for ChE
# ## Laboratory 5 : Linear Combination and Vector Spaces
# + [markdown] id="aTacEIfvO9S0"
# Now that you have a fundamental knowledge about linear combination, we'll try to visualize it using scientific programming.
# + [markdown] id="5Rk-2NPqO9S1"
# ### Objectives
# At the end of this activity you will be able to:
# 1. Be familiar with representing linear combinations in the 2-dimensional plane.
# 2. Visualize spans using vector fields in Python.
# 3. Perform vector fields operations using scientific programming.
# + [markdown] id="PJYUg0_dO9S1"
# ## Discussion
# + id="ZQYRaKtkO9S1"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="REOM226CO9S2"
# ## Linear Combination
# + [markdown] id="mXycshXOO9S3"
# A linear combination is the combination of linear scaling and addition of a vector's bases/components.
# + [markdown] id="_-14ifj_O9S3"
# We will try to visualize the vectors and their linear combinations by plotting a sample of real number values for the scalars for the vectors. Let's first try the vectors below:
# + [markdown] id="A1p9s0DvO9S4"
# $$dan = \begin{bmatrix} 5\\9 \\\end{bmatrix} , nyx = \begin{bmatrix} 10\\18 \\\end{bmatrix} $$
# + id="V0jYnfAaO9S4"
dan = np.array([5,9])
nyx = np.array([10,18])
# + [markdown] id="ZnweVCl3O9S4"
# #### Span of single vectors
# + [markdown] id="I1is-SWEO9S5"
# As discussed in the lecture, the span of individual vectors can be represented by a line span. Let's take vector $j$ as an example.
# + [markdown] id="1TuPwTkVO9S5"
# $$j = c\cdot \begin{bmatrix} 5\\9 \\\end{bmatrix} $$
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="ttPQ0Ti4O9S5" outputId="050f601a-ecc3-42dc-b451-3d9d853c6284"
c = np.arange(-8,8,0.2)
plt.scatter(c*dan[0],c*dan[1])
plt.xlim(-11,11)
plt.ylim(-11,11)
plt.axhline(y=0, color='b')
plt.axvline(x=0, color='b')
plt.grid()
plt.show()
# + [markdown] id="pf_rVm8FV0Jy"
# $$Y = c\cdot \begin{bmatrix} 10\\18 \\\end{bmatrix} $$
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="OjoN2KdzV5s2" outputId="d3b05783-c035-4b0e-f440-c38d5a8c1505"
c = np.arange(-18,18,0.18)
plt.scatter(c*nyx[0],c*nyx[1])
plt.xlim(-22,22)
plt.ylim(-22,22)
plt.axhline(y=0, color='r')
plt.axvline(x=0, color='r')
plt.grid()
plt.show()
# + [markdown] id="BXNrnuFLO9S6"
# ### Span of a linear combination of vectors
# + [markdown] id="5gYQM__tO9S6"
# So what if we are to plot the span of a linear combination of vectors? We can visualize as a plane on the 2-dimensional coordinate system. Let's take the span of the linear combination below:
# + [markdown] id="7Tc0vPm-O9S6"
# $$S = \begin{Bmatrix} c_1 \cdot\begin{bmatrix} 2\\1 \\\end{bmatrix},
# c_2 \cdot \begin{bmatrix} 0\\-2 \\\end{bmatrix}\end{Bmatrix} $$
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Z8UT7UNUO9S6" outputId="ae2b9163-0029-4ec0-cefb-be6d3e3ff2db"
J = np.array([2,1])
R = np.array([0,-2])
S = np.arange(-15,15,1.5)
c1, c2 = np.meshgrid(S,S)
O = J + R
spanSx = c1*J[0] + c2*R[0]
spanSy = c1*J[1] + c2*R[1]
#plt.scatter(S*J[0],S*J[1])
#plt.scatter(S*R[0],S*R[1])
plt.scatter(spanSx,spanSy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
# + id="REIskplJad3G" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="b46018b5-8bca-4eae-9bda-2d1c3a1a9b47"
# Same span visualization for the vectors ony = [3, 4] and jgiordy = [8, 8].
ony = np.array([3,4])
jgiordy = np.array([8,8])
K = np.arange(-20,20,3)
c1, c2 = np.meshgrid(K,K)
rhyth = ony + jgiordy
# x and y coordinates of every sampled combination c1*ony + c2*jgiordy.
spanKx = c1*ony[0] + c2*jgiordy[0]
spanKy = c1*ony[1] + c2*jgiordy[1]
#plt.scatter(K*ony[0],K*ony[1])
#plt.scatter(K*jgiordy[0],K*jgiordy[1])
plt.scatter(spanKx,spanKy, s=5, alpha=0.75)
plt.axhline(y=0, color='r')
plt.axvline(x=0, color='r')
plt.grid()
plt.show()
# + [markdown] id="N9nhJvXFO9S7"
# Take note that if vectors are seen to be as a 2-dimensional span we can say it has a Rank of 2 or $\mathbb{R}^2$. But if the span of the linear combination of vectors are seen to be like a line, they are said to be <b> linearly dependent </b> and they have a rank of 1 or $\mathbb{R}^1$.
# + [markdown] id="2OVPBMoSO9S7"
# # Activity
# + [markdown] id="iT7GLOf6O9S7"
# ### Task 1
# + [markdown] id="As7P0lcrO9S7"
# Try different linear combinations using different scalar values. In your methodology, discuss the different functions that you have used, the linear equation and vector form of the linear combination, and the flowchart for declaring and displaying linear combinations. Please make sure that your flowchart has only a few words and does not contain the entire code, as that is bad practice. In your results, display and discuss the linear combination visualization you made. You should use the cells below for displaying the equation markdowns using LaTeX and your code.
# + [markdown] id="8Usvs9WoO9S7"
# $$
# M = \left\{
# \begin{array}\
# ax + 8ay - 9az \\
# 10\hat{i} - 8\hat{j} + 2\hat{k}
# \end{array}
# \right\} \\
# $$
# + [markdown] id="peZ6TepdO9S7"
# $$M = \begin{Bmatrix} c_1 \cdot\begin{bmatrix} 1\\8 \\ -9\\\end{bmatrix},
# c_2 \cdot \begin{bmatrix} 10\\-8\\2 \\\end{bmatrix}\end{Bmatrix} $$
# + id="0zDr8Wt0O9S8" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="6834260b-822b-44cf-f8c8-df009bd5024b"
### TYPE YOUR CODE FOR TASK 1 HERE
# Span of the linear combination c1*L + c2*N, sampled on a scalar grid.
# NOTE(review): L and N have three components, but only the first two are
# combined and plotted here -- confirm the 2-D projection is intended.
L = np.array([1,8,-9])
N = np.array([10,-8, 2])
M = np.arange(-20,20,3)
c1, c2 = np.meshgrid(M,M)
spanMx = c1*L[0] + c2*N[0]
spanMy = c1*L[1] + c2*N[1]
# Fix: plot this cell's span (spanMx/spanMy), not the spanKx/spanKy
# arrays left over from the previous cell.
plt.scatter(spanMx,spanMy, s=5, alpha=0.75)
plt.axhline(y=0, color='green')
plt.axvline(x=0, color='green')
plt.grid()
plt.show()
# + [markdown] id="FyEYibVfO9S9"
# ## Conclusion guide
# + [markdown] id="smmACaxLO9S9"
# For your conclusion, synthesize the concept and application of the laboratory. Briefly discuss what you have learned and achieved in this activity. At the end of your conclusion, try to answer the question: "How can you apply the concept of linear combination in engineering or real-life situations?".
| Lab5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
# +
# This is in regards to quantifying the error for continuous
# problems like trying to predict an age instead of a gender.
# -
# Parameters of the underlying linear process y = m*x + b.
m = 2
b = 1


def y(x):
    """Return the true (noise-free) value of the linear process at `x`."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return m*x + b
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
# +
# Steps:
# -> Use 'y' to generate 'y_actual'
# -> Use 'y_actual' plus some err to generate 'y_prediction'
# -> Plot the differences.
# -> Walk through various metrics and plot some of them.
# -
def data(x, m=2, b=1, e=None, s=10):
    """Return y = m*x + b, optionally with an additive error term.

    Args:
        x: The x value(s); an array when e is True.
        m: slope
        b: The y intercept
        e: Error term. None (default) adds no error, True adds random
           Gaussian noise, and any other value is added as-is.
        s: Standard deviation of the noise generated when e is True.
    """
    if e is None:
        e_i = 0
    elif e is True:
        # Fix: size the noise from the argument `x`, not the global `xs`
        # (the original read `len(xs)`, breaking calls with other inputs).
        e_i = np.random.normal(0, s, len(x))
    else:
        e_i = e
    return x * m + b + e_i
# now define 'y_hat' and 'y_actual'
from functools import partial
# +
N = 100
# N sorted random x values in [0, 100)
xs = np.sort(np.random.rand(N)*100)
# partial() pre-binds the x argument of data():
#   y_pred_gen -> noisy values (e=True), standing in for model predictions
#   y_true_gen -> exact line values, the underlying process
y_pred_gen = partial(data, x=xs, e=True)
y_true_gen = partial(data, x=xs)
y_pred = y_pred_gen()
y_true = y_true_gen()
# -
# Overlay the noisy "predictions" on the noise-free line they were drawn from.
f, ax = plt.subplots(figsize=(7,5))
ax.set_title("Plotting the fit vs the underlying process.")
ax.scatter(xs, y_pred, label=r'$\hat{y}$')
ax.plot(xs, y_true, label=r'$y$')
ax.legend(loc='best')
# Residuals: prediction minus truth (pure noise by construction here).
e_hat = y_pred - y_true
f, ax = plt.subplots(figsize=(7,5))
ax.set_title("Residuals")
ax.hist(e_hat, color='r', alpha=.5, histtype='stepfilled')
# How does it work?
# A metric is just the mean squared error:
# MSE = ERROR((y_true - y_pred)**2)
metrics.mean_squared_error(y_true, y_pred)
# +
# The above code will penalize large errors more than small errors
# It is important to remember that all that's happening here is
# applying what (probably) was the cost function for the model
# on the test data.
# +
# A different option would be the absolute deviation. Take the
# absolute value of the difference.
# MAD = ERROR(|y_true - y_pred|)
# +
# The final option is the R^2, this is 1 minus the ration of the
# squared errors for the overall mean and the fit model. As the
# ratio tends to 0, the R^2 tends to 1.
metrics.r2_score(y_true, y_pred)
# +
# R2 is deceptive though, as it cannot give the clearest sense
# of the accuracy of the model.
# -
| Chapter 5/5.0.8 Regression Model Evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Air passengers
# * 참고
# * https://www.kaggle.com/rakannimer/air-passengers
# * https://www.kaggle.com/abhishekmamidi/air-passengers
# +
import pandas as pd
# %matplotlib inline
# -
df = pd.read_csv("data/AirPassengers.csv")
df.shape
df.head()
df.info()
df.describe()
df.plot()
df.dtypes
# 데이터타입을 변환해서 사용할 수 있도록 datetime 형태로 바꾼다.
df['Month'] = pd.to_datetime(df['Month'])
df.dtypes
indexedData = df.set_index('Month')
indexedData.head()
indexedData.plot()
from statsmodels.tsa.seasonal import seasonal_decompose
decomp = seasonal_decompose(indexedData)
decomp.plot()
# ### 평균과 분산이 일정하지 않고, 계절 성분도 남아있음
# * 분산 안정화: 루트, 역수, 로그 변환
# * 평균 안정화&계절 성분: 차분
| air-passengers/time-series-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import statsmodels.tsa.stattools as stats
import numpy as np
import glob
filesPath = glob.glob('../Time Series/Testes em 100/*.csv')
# +
# Granger-causality tests for the first file pair (subjects A and B).
# Each file is tested with both column orderings so both causal directions
# are covered: [X, Y] asks "does X help predict Y" and vice versa.
timeSeriesM = pd.read_csv(filesPath[0])
timeSeriesS = pd.read_csv(filesPath[1])
Z1 = timeSeriesM[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z2 = timeSeriesM[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
Z3 = timeSeriesS[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z4 = timeSeriesS[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
result1 = stats.grangercausalitytests(Z1, 15, addconst=True, verbose=True)
result2 = stats.grangercausalitytests(Z2, 15, addconst=True, verbose=True)
result3 = stats.grangercausalitytests(Z3, 15, addconst=True, verbose=True)
result4 = stats.grangercausalitytests(Z4, 15, addconst=True, verbose=True)
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# collect the rows in a plain list and build the frame once at the end.
# The nested loop reproduces the original row order (per lag: A->B/M,
# A->B/S, B->A/M, B->A/S). The old f = ssr_ftest[0] was never used.
rows = []
for i in range(7, 15):
    lag = i + 1  # lags 8..15
    for direction, modo, result in [('A->B', 'M', result1),
                                    ('A->B', 'S', result3),
                                    ('B->A', 'M', result2),
                                    ('B->A', 'S', result4)]:
        tests_at_lag = result[lag][0]
        rows.append({'windowSize (ms)': 100,
                     'Modo': modo,
                     'Direction': direction,
                     'Lag': lag,
                     'Chi2': tests_at_lag['ssr_chi2test'][0],  # chi2 statistic
                     'p': tests_at_lag['ssr_ftest'][1]})       # F-test p-value
Granger = pd.DataFrame(rows, columns=['windowSize (ms)', 'Modo', 'Direction', 'Lag', 'Chi2', 'p'])
Granger1 = Granger.copy()
# +
# Granger-causality tests for the second file pair (subjects C and D);
# same procedure as the first pair.
timeSeriesM = pd.read_csv(filesPath[2])
timeSeriesS = pd.read_csv(filesPath[3])
Z1 = timeSeriesM[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z2 = timeSeriesM[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
Z3 = timeSeriesS[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z4 = timeSeriesS[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
result1 = stats.grangercausalitytests(Z1, 15, addconst=True, verbose=True)
result2 = stats.grangercausalitytests(Z2, 15, addconst=True, verbose=True)
result3 = stats.grangercausalitytests(Z3, 15, addconst=True, verbose=True)
result4 = stats.grangercausalitytests(Z4, 15, addconst=True, verbose=True)
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# collect rows in a list and build the frame once, keeping the original
# row order (per lag: C->D/M, C->D/S, D->C/M, D->C/S).
rows = []
for i in range(7, 15):
    lag = i + 1  # lags 8..15
    for direction, modo, result in [('C->D', 'M', result1),
                                    ('C->D', 'S', result3),
                                    ('D->C', 'M', result2),
                                    ('D->C', 'S', result4)]:
        tests_at_lag = result[lag][0]
        rows.append({'windowSize (ms)': 100,
                     'Modo': modo,
                     'Direction': direction,
                     'Lag': lag,
                     'Chi2': tests_at_lag['ssr_chi2test'][0],  # chi2 statistic
                     'p': tests_at_lag['ssr_ftest'][1]})       # F-test p-value
Granger = pd.DataFrame(rows, columns=['windowSize (ms)', 'Modo', 'Direction', 'Lag', 'Chi2', 'p'])
Granger2 = Granger.copy()
# +
# Granger-causality tests for the third file pair (subjects E and F);
# same procedure as the first pair.
timeSeriesM = pd.read_csv(filesPath[4])
timeSeriesS = pd.read_csv(filesPath[5])
Z1 = timeSeriesM[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z2 = timeSeriesM[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
Z3 = timeSeriesS[['Pulse X', 'Pulse Y']].to_numpy(dtype = bool)
Z4 = timeSeriesS[['Pulse Y', 'Pulse X']].to_numpy(dtype = bool)
result1 = stats.grangercausalitytests(Z1, 15, addconst=True, verbose=True)
result2 = stats.grangercausalitytests(Z2, 15, addconst=True, verbose=True)
result3 = stats.grangercausalitytests(Z3, 15, addconst=True, verbose=True)
result4 = stats.grangercausalitytests(Z4, 15, addconst=True, verbose=True)
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# collect rows in a list and build the frame once, keeping the original
# row order (per lag: E->F/M, E->F/S, F->E/M, F->E/S).
rows = []
for i in range(7, 15):
    lag = i + 1  # lags 8..15
    for direction, modo, result in [('E->F', 'M', result1),
                                    ('E->F', 'S', result3),
                                    ('F->E', 'M', result2),
                                    ('F->E', 'S', result4)]:
        tests_at_lag = result[lag][0]
        rows.append({'windowSize (ms)': 100,
                     'Modo': modo,
                     'Direction': direction,
                     'Lag': lag,
                     'Chi2': tests_at_lag['ssr_chi2test'][0],  # chi2 statistic
                     'p': tests_at_lag['ssr_ftest'][1]})       # F-test p-value
Granger = pd.DataFrame(rows, columns=['windowSize (ms)', 'Modo', 'Direction', 'Lag', 'Chi2', 'p'])
Granger3 = Granger.copy()
# -
Granger1
Granger2
# +
Granger3
# FIX: the two lines below were raw notes ("A->B", "C->") accidentally left
# in a code cell; they are not valid Python and made this cell raise a
# SyntaxError. Kept as comments for reference.
# A->B
# C->
# -
pd.read_csv(filesPath[0])
pd.read_csv(filesPath[2])
pd.read_csv(filesPath[4])
| code/Python/Analysis2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%time
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# %%time
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from imblearn.over_sampling import SMOTE,RandomOverSampler
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from itertools import combinations
from mlxtend.classifier import StackingClassifier
from sklearn import model_selection
df=pd.read_csv('60s_window_wrist_chest.csv',index_col=0)
df['label'].value_counts()[0]
print(df['label'].value_counts()[1]/(len(list(df['label']))))
# +
# Oversample minority classes with SMOTE, snap the synthetic "subject"
# values back to real subject ids, and merge in the personal-detail table.
df=pd.read_csv('60s_window_wrist_chest.csv',index_col=0)
# feature           -> all columns except 'label' and 'subject'
# features_with_sub -> all columns except 'label' (keeps 'subject')
features=df.columns.tolist()
features.remove('label')
features_with_sub = list(features)
features.remove('subject')
feature=features
print(len(feature))
len(features_with_sub)
sm = SMOTE(random_state=2)
# FIX: SMOTE.fit_sample() was deprecated and later removed from
# imbalanced-learn; fit_resample() is the supported name.
X, y = sm.fit_resample(df[features_with_sub], df['label'])
df_new=pd.concat([pd.DataFrame(X,columns=features_with_sub),pd.DataFrame(y,columns=['label'])],axis=1)
# SMOTE interpolates between samples, so synthetic 'subject' values are
# fractional; map each to the nearest real subject id.
# FIX: the original mutated via chained indexing (df_new['subject'][i] = ...),
# which raises SettingWithCopyWarning and can silently fail to write;
# use .apply on the column instead. min() keeps the same tie-breaking
# (first candidate wins on equal distance).
valid_subjects = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17]
df_new['subject'] = df_new['subject'].apply(
    lambda s: min(valid_subjects, key=lambda v: abs(v - s)))
df_new['subject']=df_new['subject'].astype(int)
p_d=pd.read_csv('personal_detail.csv',index_col=0)
df_new_1=df_new.merge(p_d,on='subject')
# Recompute the feature lists on the merged frame (now includes the
# personal-detail columns).
features=df_new_1.columns.tolist()
features.remove('label')
features_with_sub = list(features)
features.remove('subject')
feature=features
print(len(feature))
len(features_with_sub)
# -
train=df_new_1[df_new_1['subject']<=9]
test=df_new_1[df_new_1['subject']>9]
et = ExtraTreesClassifier(n_estimators=100,n_jobs=10,random_state=56 ,)
scaler = Normalizer()
scaled_data_train = scaler.fit_transform(train[feature])
scaled_data_test = scaler.transform(test[feature])
et.fit(scaled_data_train,train['label'])
y_pred=et.predict(scaled_data_test)
print(classification_report(test['label'],y_pred))
#activation : {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}
#solver : {‘lbfgs’, ‘sgd’, ‘adam’}
model = MLPClassifier(verbose=2,max_iter=1000,hidden_layer_sizes=(500,200),activation='identity',solver='lbfgs')
model.fit(scaled_data_train,train['label'])
y_pred=model.predict(scaled_data_test)
print(classification_report(test['label'],y_pred))
| User Independence Analysis/ipynb/Random Modelling for 4 class.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="vHLp7VeYByB4" executionInfo={"status": "ok", "timestamp": 1605766350933, "user_tz": -540, "elapsed": 5930, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="404e6a24-01f3-4332-bae8-b423c4b2ab80"
from numpy import array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, SimpleRNN
# 1. Data
# Character-level sequence task: given a 4-char window of "hihello",
# predict the integer code of the next character.
input_string="hihello"
char_set=sorted(set(input_string))
# char -> integer index, and the inverse mapping for decoding predictions
encoder={k:v for v,k in enumerate(char_set)}
decoder={v:k for v,k in enumerate(char_set)}
print('# Encoder')
print(encoder)
encoded_string= [encoder[c] for c in input_string ]
print('# Encoded string')
print(encoded_string)
string_width=len(input_string)
input_w=4                         # sliding-window length (model input)
output_w=string_width-input_w     # number of windows / training samples
# Build (window, next-code) training pairs with a sliding window.
x=[]
y=[]
for i in range(output_w):
    x.append( encoded_string[i:i+input_w] )
    y.append( encoded_string[input_w+i] )
x=array(x)
y=array(y)
# RNN layers expect (samples, timesteps, features); here features = 1.
x = x.reshape((x.shape[0], x.shape[1], 1))
print(x,y)
# 2. Keras Model
model = Sequential()
# model.add(LSTM(10, activation = 'relu', input_shape=(input_w,1)))
model.add(SimpleRNN(10, activation = 'relu', input_shape=(input_w,1)))
# Usage is the same as Dense, except input_shape=(timesteps, features per step)
model.add(Dense(5))
model.add(Dense(1))
model.summary()
# 3. Training
# Regression on the integer character codes (hence mse, not cross-entropy).
model.compile(optimizer='adam', loss='mse')
model.fit(x, y, epochs=100, batch_size=1)
# 4.Test
test_string="hihe"
x_input = array([encoder[c] for c in test_string ] )
x_input = x_input.reshape((1,input_w,1))
print(f"# test string\n {test_string}")
yhat = model.predict(x_input)
# Round the regression output to the nearest integer code and decode it.
# NOTE(review): assumes the prediction lands inside the valid code range;
# an out-of-range value would raise KeyError here — confirm if hardening.
out=decoder[round(yhat[0][0])]
print(f"# test output\n {out}")
# + id="7rcDKEffB1yj"
| 8.Long_Short-Term_Memory/SimpleRNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Что сегодня сделаем
# Рекомендательный алгоритм сопутствующих товаров для пользователя
# # Numpy и матричная алгебра
import numpy as np
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
x
# у переменной x тип, отличный от list
type(x)
# дополнительные методы
x.mean()
# ### Поэлементные операции
5 * 5
5 + 5
'5' + '5'
[1, 2] + [3, 4]
x, y
x + y
x - y
x * y
x / y
x, y
# элементы массива x возводятся в соответствующие степени элементов массива y
x ** y
# FIX: "10 = 3*3 + 1" was a bare statement assigning to a literal, which is a
# SyntaxError. The fact it illustrates (10 == 3*3 + 1, i.e. 10 % 3 == 1) is
# kept as this comment to motivate the modulo demo below.
# +
# остаток от деления
# обратите внимание, что для удобства данного примера x и y идут в другом порядке
y % x
# -
# ### Автоматическое наполнение массивов
# +
# аналог range
np.arange(0, 10)
# -
list(range(10))
for i in range(10**20):
print(i)
break
# создать массив из 10 чисел, заполненных единицами
np.full(10, 1)
# создать матрицу 2х5 из нулей
np.full([2, 5], 0)
[1, 2, 3] + [4, 5, 6]
# +
# объединить массивы в один
a = np.arange(10)
b = np.arange(10, 20)
# обратите внимание на двойные скобки
np.concatenate((a, b))
# +
# перемешать элементы массива
import random
nums = np.arange(10)
# random.shuffle(nums)
nums
# -
# ### Изменение размерности
x = np.arange( 0, 10 )
x
# +
# 10 - количество строк
x.shape
# +
# первый аргумент - количество строк
# второй - столбцов
x.reshape(5, 2)
# -
x.reshape(3, 3)
# +
# транспонирование матриц
np.array(
[
[1, 2],
[3, 4],
[5, 6]
]
).T
# -
# склеивание набора списков
[1, 2, 3] + [4, 5, 6]
# +
# склеивание массива из списков
x = np.array( [ [1, 2, 3], [4, 5, 6] ] )
x.ravel()
# +
# можно и так
x.reshape(6)
# +
# результат разный, если добавить 1 в качестве количества строк
x.reshape(1, 6)
# -
# ### Создание матриц
# +
# нулевой вектор заданной размерности
np.zeros(10)
# +
# единичная матрица
np.eye(5)
# +
# более общий случай диагональной матрицы
np.diag(np.arange(2, 50, 15), k=1)
# +
# матрица со случайными значениями
np.random.random(10)
# +
# более универсальный вариант создания матриц
# ?np.linspace
# -
np.linspace(5, 25, 30)
# ### Более сложные распределения
#
# https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.gaussian.html
# # Скалярное произведение векторов
# \begin{equation*}
# \LARGE
# \vec{a} \dot{} \vec{b} = |\vec{a}| \space |\vec{b}| \space cos(\vec{a}, \vec{b})
# \end{equation*}
# Пусть
# \begin{equation*}
# \LARGE
# \vec{a} = (a_1, a_2, a_3) \\
# \LARGE
# \vec{b} = (b_1, b_2, b_3)
# \end{equation*}
# Тогда скалярное произведение векторов равно
# \begin{equation*}
# \LARGE
# \vec{a} \dot{} \vec{b} = a_1 b_1 + a_2 b_2 + a_3 b_3
# \end{equation*}
a = np.array( [4, 3] )
b = np.array( [2, 1] )
a, b
4*2 + 3*1
# Пример расчета скалярного произведения векторов
np.dot( a, b )
# Можно посчитать и таким образом
# +
# первый шаг
for pair in zip( a, b ):
print( pair )
# +
# второй шаг
[ pair[0] * pair[1] for pair in zip( a, b ) ]
# +
# итоговый результат
sum( [ pair[0] * pair[1] for pair in zip( a, b ) ] )
# -
# # Косинусное сходство между векторами
# \begin{equation*}
# \LARGE
# cos(\vec{a}, \vec{b}) = \frac{\vec{a} \dot{} \vec{b}}{|\vec{a}| \space |\vec{b}|}
# \end{equation*}
import matplotlib.pyplot as plt
a, b
# +
ax = plt.axes()
plt.xlim( [0, 5] )
plt.ylim( [0, 4] )
ax.arrow( 0, 0, a[0], a[1], head_width=0.1, head_length=0.2, fc='k', ec='k' )
ax.arrow( 0, 0, b[0], b[1], head_width=0.1, head_length=0.2, fc='k', ec='k' )
plt.show()
# -
def cosine(a, b):
    """Compute the cosine of the angle between vectors a and b.

    Uses the identity cos(a, b) = (a . b) / (|a| * |b|).
    """
    # product of the Euclidean lengths of both vectors
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
# +
# длины векторов можно было посчитать и так
aLength = np.sqrt( (a*a).sum() )
bLength = np.sqrt( (b*b).sum() )
# -
cosine( a, b )
# +
# угол между векторами в радианах
np.arccos( cosine( a, b ) )
# +
# угол между векторами в градусах
np.arccos( cosine( a, b ) ) * 360 / 2 / np.pi
# -
# ## Задача 4 домашнего задания
# Имеется матрица покупок в интернет-магазине. Столбец А - ID пользователя. Остальные столбцы - количество покупок категорий товаров этим пользователем:
from IPython.display import Image
Image("user_matrix.JPG")
# Матрица в виде numpy array
users_stats = np.array(
[
[2, 1, 0, 0, 0, 0],
[1, 1, 2, 1, 0, 0],
[2, 0, 1, 0, 0, 0],
[1, 1, 2, 1, 0, 1],
[0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 5],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 3],
[1, 0, 0, 2, 1, 4]
],
np.int32
)
# На сайт заходит очередной посетитель, о покупках которого известно следующее:
next_user_stats = np.array([0, 1, 2, 0, 0, 0])
# Найдите самого похожего пользователя. Т. е. посчитайте косинусное сходство между этим пользователем и всеми пользователями из массива user_stats
# # Перемножение матриц
# **Определение**
#
# Пусть даны две матрицы a и b размером l x m и m x n соответственно. l - количество строк, n - количество столбцов.
#
# \begin{equation*}
# \LARGE
# a =
# \begin{bmatrix}
# a_{11} & a_{12} \dots a_{1m} \\
# a_{21} & a_{22} \dots a_{2m} \\
# \vdots & \vdots \dots \vdots \\
# a_{l1} & a_{l2} \dots a_{lm}
# \end{bmatrix}
# \end{equation*}
#
# <br>
#
# \begin{equation*}
# \LARGE
# b =
# \begin{bmatrix}
# b_{11} & b_{12} \dots b_{1n} \\
# b_{21} & b_{22} \dots b_{2n} \\
# \vdots & \vdots \dots \vdots \\
# b_{m1} & b_{m2} \dots b_{mn}
# \end{bmatrix}
# \end{equation*}
# Тогда произведением матриц a и b будет матрица c размерностью l x n:
#
# \begin{equation*}
# \LARGE
# c =
# \begin{bmatrix}
# c_{11} & c_{12} \dots c_{1n} \\
# c_{21} & c_{22} \dots c_{2n} \\
# \vdots & \vdots \dots \vdots \\
# c_{l1} & c_{l2} \dots c_{ln}
# \end{bmatrix}
# \end{equation*}
#
# <br>
#
# \begin{equation*}
# \LARGE
# c_{ij} = \sum_{k=1}^m a_{ik} b_{kj}
# \end{equation*}
# <img src = 'https://wikimedia.org/api/rest_v1/media/math/render/svg/1f96c71f0a99eac3ee872e7baf22e84324d7b4c9' style="width: 80%"></img>
a = np.array(
[
[1, 2],
[3, 4]
]
)
b = np.array(
[
[5, 6],
[7, 8]
]
)
c = np.dot( a, b )
c
# В numpy есть специальный тип matrix, который отличается от ndarray
a * b
aMatrix = np.matrix( [ [1, 2], [3, 4] ] )
bMatrix = np.matrix( [ [5, 6], [7, 8] ] )
type(aMatrix)
aMatrix * bMatrix
type( aMatrix ), type( a )
np.mat( a ) * np.mat( b )
# # Линейные уравнения
# Дана система линейных уравнений
#
# \begin{equation*}
# \LARGE
# x + 3*y = 9 \\
# \LARGE
# 2*x - 4*y = 8
# \end{equation*}
# +
# коэффициенты при переменных в левой части уравнения
a = np.array( [ [1, 3], [2, -4] ] )
# +
# значения в правой части уравнения
b = np.array( [9, 8] )
# +
# решение
from numpy import linalg
# -
linalg.solve(a, b)
# +
# проверка верности
np.allclose( np.dot(a, linalg.solve(a, b)), b )
| numpy_basics/Python_9_Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="padding: -5px;
# text-align: center;
# color: white;
# font-size: 15px;">
# <img src="images/banner.jpg" alt="MINE-Seminario de programación" style="width:100%;">
# <h1 style="
# position: absolute;
# top: 5%;
# left: 50%;">Caso de Estudio</h1>
# </div>
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Exploración-datos" data-toc-modified-id="Exploración-datos-1"><span class="toc-item-num">1 </span>Exploración datos</a></span></li><li><span><a href="#Nivel-de-obesidad" data-toc-modified-id="Nivel-de-obesidad-2"><span class="toc-item-num">2 </span>Nivel de obesidad</a></span></li><li><span><a href="#Exploración-Previa---Solo-cuantitativa" data-toc-modified-id="Exploración-Previa---Solo-cuantitativa-3"><span class="toc-item-num">3 </span>Exploración Previa - Solo cuantitativa</a></span></li><li><span><a href="#Exploración-Total" data-toc-modified-id="Exploración-Total-4"><span class="toc-item-num">4 </span>Exploración Total</a></span></li><li><span><a href="#Aprendizaje-Supervisado" data-toc-modified-id="Aprendizaje-Supervisado-5"><span class="toc-item-num">5 </span>Aprendizaje Supervisado</a></span></li><li><span><a href="#Infome-Final" data-toc-modified-id="Infome-Final-6"><span class="toc-item-num">6 </span>Infome Final</a></span></li></ul></div>
# -
# # Nivel de obesidad
# En el siguiente enlace encontrarán una base de datos que permiten estimarlos niveles de obesidad en individuos de los países de México, Perú y Colombia, con base en sus hábitos alimenticios y condición física. Los datos contienen 17 atributos y 2111 registros, además, hay una variable `NObesidad` (Nivel de obesidad), que permite clasificar los datos utilizando los valores de Peso Insuficiente, Peso Normal, Nivel de Sobrepeso I, Nivel de Sobrepeso II, Obesidad Tipo I , Obesidad tipo II y Obesidad tipo III. El 77% de los datos se generaron sintéticamente utilizando la herramienta Weka y el filtro SMOTE, el 23% de los datos se recopilaron directamente de los usuarios a través de una plataforma web, la información acerca de los datos y los paper relevantes que se han creado a partir de este ejercicio lo pueden encontrar en:
#
# [ Estimation of obesity levels based on eating habits and physical condition Data Set ](https://archive.ics.uci.edu/ml/datasets/Estimation+of+obesity+levels+based+on+eating+habits+and+physical+condition+)
#
#
# Este ejercicio guiado pretende que ustedes hagan una exploración completa a la base, identifiquen grupos en la población y desarrolen modelos que permitan determinar el nivel de obesidad para cualquier individuo.
#
# Después de descargar, cargamos la base:
import pandas as pd
import matplotlib.pyplot as plt
DataO=pd.read_csv('../data/ObesityDataSet_raw_and_data_sinthetic.csv')
# Hagan una exploración manual de la base, identifiquen las variables y los tipos de variables a las que nos enfretamos y, con su grupo, plantee unas hipótesis frente a las tareas planteadas. En lo que sigue se sugeriran algunos puntos a desarrollar en este trabajo.
# ## Exploración de datos
#
# La idea en esta sección es hacer un EDA sobre la base. Es muy importante identificar datos atípicos, nulos y relaciones entre variables (colinealidad, alta correlación, etc.).
# ## Nivel de obesidad
#
# Es claro que la altura y el peso son determinantes en el Nivel de obesidad, como lo muestra el siguiente gráfico:
import seaborn as sns
sns.scatterplot(data=DataO,x='Weight',y='Height',hue='NObeyesdad')
# Extraiga esas columnas de la base y haga los análisis a continuación sin tener en cuenta esas variables. Responda:
#
# **¿Tiene sentido haber extraido esas variables? ¿Cómo afectará a mi investigación no tenerlas en cuenta?**
# ## Exploración Previa - Solo cuantitativa
# 1. Tomen las variables cuantitativas y determinen agrupamientos en la base de datos. No olviden fijar una semilla para la revisión del trabajo. No duden en usar métodos que permitan determinar un valor adecuado de agrupamientos.
# 2. Es importante generar visualizaciones adecuadas de los datos luego tendrán que poner cada registro en un plano cartesiano. Usen reducciones de dimensionalidad.
# 3. Concluyan a partir de los resultados.
# ## Exploración Total
#
# 1. Crearemos Clusters usando también las variables cualitativas, el ejercicio consiste en poder medir distancias incluso entre variables/componentes cualitativas. Lo haremos con la distancia de Gower (qué es y como se calcula en Python), con esta distancia se monta una matriz de distancias entre los datos.
# **Solución:**
#
# La distancia de Gower se puede utilizar para medir qué tan diferentes son dos registros. Los registros pueden contener una combinación de datos lógicos, categóricos, numéricos o de texto. La distancia es siempre un número entre 0 (idéntico) y 1 (máximamente diferente).
#
# Las métricas utilizadas para cada tipo de datos se describen a continuación:
#
# - Cuantitativo (intervalo): distancia de Manhattan normalizada por rango.
#
# - Ordinal: la variable se clasifica primero, luego la distancia de Manhattan se usa con un ajuste especial para los empates.
#
# - Nominal: las variables de k categorías se convierten primero en k columnas binarias y luego se usa el coeficiente de Dice.
#
#
# En esencia, la distancia de Gower es calculada como el promedio de disimilaridades parciales entre individuos. De manera general, este coeficiente se calcula como sigue:
#
# $$D_{Gow}(x_1,x_2)=1-\frac{1}{p}\sum\limits_{j=1}^p s_j(x_1,x_2).$$
#
# Aquí $s_j(x_1,x_2)$ se conoce como la similaridad parcial de $j$-ésimo descriptor.
#
#
# Para descriptores cuantitativos tenemos:
#
# $$s_j(x_1,x_2)=1-\cfrac{|y_{1j}-y_{2j}|}{R_j}$$
#
# Para descriptores cualitativos se usa el coeficiente de Sorensen-Dice, se calcula de la siguiente forma:
#
# $$s_j(x_1,x_2)=1-\frac{NNEQ}{NTT+NNZ}$$
#
# donde (tomado de scikit-learn):
#
# ***
#
# N : number of dimensions
#
# NTT : number of dims in which both values are True
#
# NTF : number of dims in which the first value is True, second is False
#
# NFT : number of dims in which the first value is False, second is True
#
# NFF : number of dims in which both values are False
#
# NNEQ : number of non-equal dimensions, NNEQ = NTF + NFT
#
# NNZ : number of nonzero dimensions, NNZ = NTF + NFT + NTT
# ***
#
#
# |reg|Color|orientacion|
# |:--:|:--:|:--:|
# |1|Amarillo|izquierda|
# |2|Amarillo|derecha|
#
# |reg|Amarillo|Azul|Rojo|Derecha|izquierda|
# |:--:|:--:|:--:|:--:|:--:|:-:|
# |1|1|0|0|0|1|
# |2|1|0|0|1|0|
#
# Ahora calculemos la distancia de Gower:
#
#
#
# +
# #!pip install gower
# -
columns=['Gender', 'Age', 'family_history_with_overweight', 'FAVC', 'FCVC',
'NCP', 'CAEC', 'SMOKE', 'CH2O', 'SCC', 'FAF', 'TUE', 'CALC', 'MTRANS']
DataO[columns]
import gower as gw
distance_matrix=gw.gower_matrix(DataO[columns])
distance_matrix
# 2. Es importante generar visualizaciones adecuadas de los datos luego tendrán que poner cada registro en un plano cartesiano. Usar reducciones de dimensionalidad.
# 3. Concluyan a partir de los resultados.
# ## Aprendizaje Supervisado
#
# En esta sección crearemos modelos que permitan responder la pregunta inicial:
# 1. Particionan la base en 2 (Entrenamiento y testeo) (80/20) de nuevo con semilla fija.
# 2. Usando la base de entrenamiento crearán algunos modelos de clasificación con los siguientes métodos:
#
# * Regresión Logística
# * Clasificador Naive Bayes
# * Arboles de decisión
# * Random Forest
# * SVM
# 3. Usando la base de testeo validar cada modelo (Uso matrices de confusión, estadísticos apropiados, curvas ROC, AUC...)
# 4. De acuerdo con los resultados identifico problemas como sobre ajuste y mal desempeño del modelo.
# 5. Si lo anterior resulta cierto aplico técnicas de regularización para estimar un modelo de clasificación adecuado.
# 6. Visualizamos el modelo (si es posible).
#
# ## Infome Final
#
# Elaboramos un informe como cuaderno de jupyter que responda la pregunta planteada en esta investigación ¿Como afectan la condición física y los hábitos alimenticios a la obesidad?
| Cuadernos/Caso Obesidad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# # Air Pressure System Failure Challenge
# data from: [here](https://archive.ics.uci.edu/ml/datasets/APS+Failure+at+Scania+Trucks)
#
# scores to beat:
#
# <NAME> and <NAME> | 9920 | 542 | 9
#
# <NAME>, <NAME> and <NAME> | 10900 | 490 | 12
#
# <NAME>, <NAME>, <NAME> and <NAME> | 11480 | 398 | 15
# ## The Problem
#
# The Air Pressure System (APS) generates pressurised air that is utilized in various functions in a truck, such as braking and gear changes. Malfunction of the APS is costly, as it renders the truck stranded immediately, leading to costly delays and towing operations.
#
# Scheduled preventative maintenance is historically the main tool to combat this. It is, however, costly and impractical to sample the entire fleet to prevent malfunctions. With the array of sensor data available to assess the performance and operating conditions of the APS, however, we can attempt to predict imminent system failure, leading to more efficient resource allocation.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
import joblib
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids, RandomUnderSampler
from imblearn.over_sampling import SMOTE, RandomOverSampler
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import make_scorer, confusion_matrix, plot_confusion_matrix, classification_report
# -
df_train = pd.read_csv("./data/aps_failure_training_set.csv")
df_test = pd.read_csv("./data/aps_failure_test_set.csv")
df_train.head()
# +
# set na to proper NaNs for later imputation
df_train.replace("na", np.nan, inplace=True)
df_test.replace("na", np.nan, inplace=True)
# encode class
df_train['class'].replace(['pos','neg'],[1,0], inplace=True)
df_test['class'].replace(['pos','neg'],[1,0], inplace=True)
# change dtypes to numbers
df_train = df_train.astype(np.float64)
df_test = df_test.astype(np.float64)
# drop duplicates
# df_train = df_train.drop_duplicates()
# df_train = df_train.T.drop_duplicates().T
# -
# check for class imbalance
df_train["class"].value_counts(normalize=True)
# checking for nullity
missingno.matrix(df_train)
# +
# # drop any column with over 50 % values missing
# # percent_rows_not_nan = 0.5
# # mask = (pd.isna(df_train).sum() <= len(df_train) * percent_rows_not_nan).values
# # df_train_drop = df_train.loc[:, mask].copy()
# # print(f"{np.around((1 - len(df_train_drop.columns) / len(df_train.columns)) * 100, 2)} % of columns dropped.")
# # drop any row with over 50 % values missing
# # percent_cols_not_nan = 0.5
# # tresh_columns = np.floor(len(df_train.columns) * percent_cols_not_nan).astype(int)
# df_train_drop.dropna(thresh=tresh_columns, inplace=True)
# print(f"{np.around((1 - len(df_train_drop) / len(df_train)) * 100, 2)} % of rows dropped.")
# # reset index
# df_train.reset_index(drop=True, inplace=True)
# +
def evaluator(y_true, y_pred):
    """Misclassification cost: 10 per false positive plus 500 per false negative."""
    conf = confusion_matrix(y_true, y_pred)
    false_pos, false_neg = conf[0, 1], conf[1, 0]
    return false_pos * 10 + false_neg * 500
scorer = make_scorer(evaluator, greater_is_better=False)
# +
def evaluator_norm(y_true, y_pred):
    """Per-sample misclassification cost: (10 * FP + 500 * FN) / number of samples."""
    conf = confusion_matrix(y_true, y_pred)
    total_cost = conf[0, 1] * 10 + conf[1, 0] * 500
    return total_cost / len(y_true)
scorer_norm = make_scorer(evaluator_norm, greater_is_better=False)
# +
# Separate features from the target in both splits.
target_var = "class"
y = df_train[target_var]
X = df_train.drop(target_var, axis=1)
y_test = df_test[target_var]
X_test = df_test.drop(target_var, axis=1)
# +
# dummy classifier for doing nothing: always predicts the majority class,
# giving a baseline cost to beat
no_action = DummyClassifier(strategy="most_frequent", random_state=42)
no_action.fit(X, y)
y_dummy_pred = no_action.predict(X)
plot_confusion_matrix(no_action, X_test, y_test)
# score the baseline on the test set with the custom cost scorer
scorer(no_action, X_test, y_test)
# +
# test performance of different classifiers
classifiers = [
    LogisticRegression(max_iter=1_000)
    , DecisionTreeClassifier()
    , RandomForestClassifier()
    , SVC()
    , KNeighborsClassifier()
    , GaussianNB()
    , MLPClassifier(max_iter=500)
]
impute = SimpleImputer(strategy="median")
over = RandomOverSampler(sampling_strategy=0.5, random_state=42)
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42)
def producer(clf):
    """Cross-validate one classifier (undersample -> impute -> scale -> clf).

    GridSearchCV with an empty grid is used purely to get CV scoring plus the
    refit time.  Returns (classifier repr, best CV score, refit time).
    """
    pipe = make_pipeline(under, impute, StandardScaler(), clf)
    search = GridSearchCV(pipe, {}, scoring=scorer_norm, n_jobs=-1, cv=StratifiedKFold())
    search.fit(X, y)
    return (str(clf), search.best_score_, search.refit_time_)
# evaluate all candidates in parallel
clf_perf = joblib.Parallel(n_jobs=-1)(joblib.delayed(producer)(clf) for clf in classifiers)
scores = pd.DataFrame(clf_perf, columns=["clf", "score", "time"])
scores.sort_values(by="score", inplace=True)
# -
scores
# +
# test over/under sampling: oversample minority to 10 % of majority,
# then undersample majority down to a 2:1 ratio
over = RandomOverSampler(sampling_strategy=0.1, random_state=42)
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42)
over_under = make_pipeline(over, under)
X_res, y_res = over_under.fit_resample(X, y)
# inspect the resulting sample count and class balance
print(len(pd.concat((X_res, y_res), axis=1)))
pd.concat((X_res, y_res), axis=1)["class"].value_counts()
# +
# select features via recursive feature elimination with cross-validation
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42, replacement=True)
impute = SimpleImputer(strategy="median")
clf = RandomForestClassifier(n_estimators=48, n_jobs=-1)
selector = RFECV(clf, cv=StratifiedKFold(), scoring="recall", min_features_to_select=10, step=2)
selector_pipe = make_pipeline(impute, under, selector)
selector_pipe.fit(X, y)
# boolean mask of the features RFECV kept (selector is pipeline step 2)
feature_mask = selector_pipe[2].support_
# -
len(feature_mask)
# +
# weighing trial: let the forest compensate for imbalance via class_weight
# instead of resampling
impute = SimpleImputer(strategy="median")
# NOTE(review): `under` is defined here but not included in the pipeline below.
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42, replacement=True)
clf = RandomForestClassifier(n_jobs=-1, random_state=42, class_weight="balanced")
pipe = make_pipeline(impute, clf)
search_rf = GridSearchCV(pipe, {}, scoring=scorer, cv=StratifiedKFold())
search_rf.fit(X.loc[:,feature_mask], y)
print(search_rf.best_score_)
# +
# set up pipeline: undersample -> impute -> random forest on selected features
impute = SimpleImputer(strategy="median")
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42, replacement=True)
clf = RandomForestClassifier(n_jobs=-1, random_state=42)
pipe = make_pipeline(under, impute, clf)
search_rf = GridSearchCV(pipe, {}, scoring=scorer, cv=StratifiedKFold())
search_rf.fit(X.loc[:,feature_mask], y)
print(search_rf.best_score_)
# +
# tune hyperparameters with successive halving over forest size and leaf size
impute = SimpleImputer(strategy="median")
under = RandomUnderSampler(sampling_strategy=0.5, random_state=42, replacement=True)
clf = RandomForestClassifier(n_jobs=-1, random_state=42, warm_start=True)
pipe = make_pipeline(under, impute, clf)
param_grid = {
    "randomforestclassifier__n_estimators": np.around(np.linspace(10, 500, 20),0).astype(int)
    , "randomforestclassifier__min_samples_leaf": np.arange(1, 5)
}
hyper_search = HalvingGridSearchCV(pipe, param_grid, scoring=scorer_norm, cv=StratifiedKFold())
hyper_search.fit(X.loc[:,feature_mask], y)
print(hyper_search.best_score_)
print(hyper_search.best_params_)
# +
# misclassification cost of the tuned model on the training data
y_pred = search_rf.predict(X.loc[:, feature_mask])
evaluator(y, y_pred)
# +
# find the probability threshold that gives the best (lowest) cost
y_pred_proba = search_rf.predict_proba(X.loc[:, feature_mask])

score_lst = []
p_to_try = np.linspace(0.5, 1, 51)
for p in p_to_try:
    # vectorized thresholding of the positive-class probability replaces the
    # original element-wise append loop
    y_pred = (y_pred_proba[:, 1] >= p).astype(int)
    score_lst.append(evaluator(y, y_pred))

# one row per candidate threshold, lowest cost first
pd.DataFrame(list(zip(p_to_try, score_lst)), columns=["p", "score"]).sort_values(by="score")
# +
# score the held-out test set at the chosen probability threshold
y_pred_proba = search_rf.predict_proba(X_test.loc[:, feature_mask])
p = 0.5
# vectorized thresholding replaces the original element-wise append loop
y_pred = (y_pred_proba[:, 1] >= p).astype(int)
evaluator(y_test, y_pred)
# -
len(df_test)
# baseline cost for comparison with the tuned model above
scorer(no_action, X_test, y_test)
# per-class precision/recall for the tuned model vs. the do-nothing baseline
print(classification_report(y_test, y_pred))
print(classification_report(y_test, no_action.predict(X_test)))
| next-gen-check-engine/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:larval_gonad]
# language: python
# name: conda-env-larval_gonad-py
# ---
# # Prototype Chromosome Permutation Test
# Replicate 1 seems to work well with this type of permutation test, however replicates 2 and 3 end up calling everything significant.
#
# As a reminder the permutation test that I am doing is as follows.
#
# For each cell I am calculating the median ratio of (X / Autosome). I then permute X and Autosome labels generating random cells and compare the median ratio. I use this to generate a null distribution and calculate an empirical p-value based on the number of permuted median ratios that are more extreme than the observed ratio.
#
# After talking with Cameron, he thinks that this algorithm is a little strange. Instead he thinks I was on the right track earlier when I was permuting cell type labels and keeping the cells whole. I will explore his suggested algorithm later.
#
# Here I show that while rep 1 behaves as hypothesized, reps 2 and 3 show everything is significant.
# +
import os
import sys
import re
from pathlib import Path
from itertools import combinations
import re
from IPython.display import display, HTML, Markdown
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Project level imports
from larval_gonad.notebook import Nb
from larval_gonad.stats import permutation_test_chrom1_lt_chrom2
# -
# Setup notebook
nbconfig = Nb.setup_notebook(seurat_dir='../output/scrnaseq-wf/scrnaseq_combine_force')
# +
# Shorten cluster names for nicer plots
cluster_annot = {
    0: 'LS',
    1: 'MC',
    2: 'MS',
    3: 'ES',
    4: 'LC',
    5: 'EC',
    6: 'SP',
    7: 'TE',
    8: 'PC',
}
cluster_order = ['SP', 'ES', 'MS', 'LS', 'EC', 'MC', 'LC', 'TE', 'PC']
# Get cell to cluster mapping, keeping only annotated clusters as an ordered
# categorical so downstream groupby/plots follow cluster_order
clusters = nbconfig.seurat.get_clusters('res.0.6')
clusters = clusters[(clusters != 9) & (clusters != 10) & (clusters != 11)].copy() # drop Unknown clusters
clusters = clusters.map(cluster_annot)
clusters = pd.Series(pd.Categorical(clusters.values, categories=cluster_order, ordered=True), index=pd.Index(clusters.index, name='cell_id'), name='cluster').to_frame()
# Pull the replicate id (e.g. "rep1") out of the cell_id prefix
clusters['rep'] = clusters.index.str.extract('(rep\d)_').values.flatten()
clusters.head()
# -
# Get fbgn to chromosome mappings
chroms = nbconfig.fbgn2chrom.query('chrom != "chrM"').copy() # Drop mitochondrion
autosomes = ['chr2L', 'chr2R', 'chr3L', 'chr3R']
chroms.chrom = chroms.chrom.astype('category')
chroms.chrom = chroms.chrom.cat.reorder_categories(nbconfig.chrom_order)
# Chromosome pairs to test: chrom1 is tested for lower counts than chrom2.
# 'chrA' stands for the pooled autosomes and is expanded downstream.
chrom_cbns = [
    ('chrX', 'chr2L'),
    ('chrX', 'chr2R'),
    ('chrX', 'chr3L'),
    ('chrX', 'chr3R'),
    ('chrX', 'chrA'),
    ('chr4', 'chr2L'),
    ('chr4', 'chr2R'),
    ('chr4', 'chr3L'),
    ('chr4', 'chr3R'),
    ('chr4', 'chrA'),
    ('chrY', 'chr2L'),
    ('chrY', 'chr2R'),
    ('chrY', 'chr3L'),
    ('chrY', 'chr3R'),
    ('chrY', 'chrA'),
]
# ## Normalized Chromosome level counts
# Chromosome arm sizes for dmel r6-16, reordered to match nbconfig.chrom_order
fname = '/data/LCDB/lcdb-references/dmel/r6-16/fasta/dmel_r6-16.chromsizes'
chrom_lengths = pd.read_csv(fname, sep='\t', index_col=0, header=None, names=['chrom', 'chrom_size'])
chrom_lengths = chrom_lengths.reindex(nbconfig.chrom_order)
chrom_lengths = chrom_lengths.chrom_size
def read_chrom_cnt(rep_num, chrom_lengths):
    """Read per-cell chromosome-level UMI counts for one testis replicate.

    Returns a wide DataFrame (cell_id x chrom) of UMI counts normalized by
    reads per cell (per 1e3 reads) and by chromosome length (per 1e7 bp).
    """
    fname = f'../output/scrnaseq-wf/scrnaseq_samples/testis{rep_num}_force/outs/possorted_genome_bam.bam_counts'
    chrom_cnt = pd.read_csv(fname, sep='\t', index_col=0, header=0, names=['cell_id', 'chrom', 'UMI'])
    # Add `rep#_` and `chr` prefixes so cell ids and chromosome names match
    # the cluster table and fbgn2chrom conventions
    chrom_cnt.index = pd.Index([f'rep{rep_num}_{cell_id}' for cell_id in chrom_cnt.index], name='cell_id')
    chrom_cnt.chrom = [f'chr{chrom}' for chrom in chrom_cnt.chrom]
    # Pivot long -> wide: one row per cell, one column per chromosome
    chrom_cnt_wide = chrom_cnt.set_index('chrom', append=True).unstack().fillna(0)
    chrom_cnt_wide.columns = chrom_cnt_wide.columns.droplevel(0)
    num_reads_per_cell = chrom_cnt_wide.sum(axis=1)
    #chrom_cnt_wide_norm = chrom_cnt_wide.div(num_reads_per_cell / 1e3, axis='index').div(chrom_lengths / 1e7)
    # Normalize by per-cell sequencing depth, then by chromosome length
    chrom_cnt_wide_norm = (
        chrom_cnt_wide
        .div(num_reads_per_cell / 1e3, axis='index')
        .div(chrom_lengths / 1e7)
    )
    return chrom_cnt_wide_norm
# ### Rep 1
# +
# get rep 1 chromosome level counts by cell
cnt1 = read_chrom_cnt(1, chrom_lengths)
cnt1 = cnt1.reindex(clusters.index).dropna() # Only keep cells that have cluster calls
grps = cnt1.join(clusters).groupby('cluster')
# Permutation test per cluster and chromosome pair: is chrom1 lower than chrom2?
results = []
for c, dd in grps:
    for c1, c2 in chrom_cbns:
        chrom1 = dd[c1]
        if c2 == 'chrA':
            # pooled autosomes: per-cell median across the four major arms
            chrom2 = dd[['chr2L', 'chr2R', 'chr3L', 'chr3R']].median(axis=1)
        else:
            chrom2 = dd[c2]
        pval = permutation_test_chrom1_lt_chrom2(chrom1, chrom2)
        results.append((c, c1, c2, pval))
# -log10 p-values; +.0001 guards against log10(0) for empirical p == 0
dat = -np.log10(pd.DataFrame(results, columns=['cluster', 'chrom1', 'chrom2', 'p_value']).set_index(['cluster', 'chrom1', 'chrom2']) + .0001)
# +
# Bar plot of -log10(p) per cluster/chromosome pair, with significance lines,
# cluster-colored background bands, and readable tick labels.
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
dat.plot.bar(ax=ax, legend=False)
ax.set_xlabel('Cluster Chrom1-Chrom2')
ax.set_ylabel('-log10(p-value)')
ax.set_ylim(0, None)
ax.axhline(-np.log10(0.05), color='r', ls=':', label='0.05')
ax.axhline(-np.log10(0.01), color='r', ls='-.', label='0.01')
plt.legend()
# FIX: corrected the "chromosoMe" typo in the title
ax.set_title('Rep 1 Reads Mapping to Chromosome Arm / (chromosome length / 1e7)')
# Re-label ticks as "<cluster> <chrom1>-<chrom2>" instead of the tuple repr
# (raw string avoids invalid-escape warnings; the original f-string had no
# placeholders)
new_labels = []
for l in ax.get_xticklabels():
    txt = l.get_text()
    clus, c1, c2 = re.match(r"\((\w\w), chr([\w\d]+), chr([\w\d]+)\)", txt).groups()
    new_labels.append(f'{clus} {c1:<2}-{c2:<2}')
ax.set_xticklabels(new_labels, fontsize=8, fontdict=dict(family='Monospace'))
# Thin separators between each group of 5 chromosome comparisons
for i in range(26):
    ax.axvline(4.5 + 5 * i, color='k', alpha=0.3)
# Shade each cluster's 15-bar span with its cluster color (first band starts
# at x=-1 and is half a bar wider; the rest are 15 wide)
for i in range(9):
    x0 = -1 if i == 0 else 15 * i - 0.5
    width = 15.5 if i == 0 else 15
    ax.add_patch(plt.Rectangle([x0, -1], width=width, height=10, zorder=0, alpha=.4, color=nbconfig.colors['clusters'][i]))
# -
# ### Rep 2
cnt = read_chrom_cnt(2, chrom_lengths)
grp = cnt.join(clusters).groupby('cluster')
# Look at one cluster (EC) in detail
dat = grp.get_group('EC')
# Density of log10 normalized counts per chromosome for the EC cluster
ax = np.log10(dat[autosomes + ['chrX']]).plot(kind='kde')
# Vertical lines mark medians: dashed = individual autosome arms,
# dotted = chrX, dash-dot = pooled autosome median
ax.axvline(np.log10(dat['chr2L'].median()), color='blue', ls='--', alpha=.5)
ax.axvline(np.log10(dat['chr2R'].median()), color='orange', ls='--', alpha=.5)
ax.axvline(np.log10(dat['chr3L'].median()), color='green', ls='--', alpha=.5)
ax.axvline(np.log10(dat['chr3R'].median()), color='red', ls='--', alpha=.5)
ax.axvline(np.log10(dat['chrX'].median()), color='purple', ls=':', alpha=.5)
ax.axvline(np.log10(dat[autosomes].median(axis=1).median()), color='k', ls='-.', alpha=.5)
# +
# get rep 2 chromosome level counts by cell (same procedure as rep 1)
cnt2 = read_chrom_cnt(2, chrom_lengths)
cnt2 = cnt2.reindex(clusters.index).dropna() # Only keep cells that have cluster calls
grps = cnt2.join(clusters).groupby('cluster')
# Permutation test per cluster and chromosome pair: is chrom1 lower than chrom2?
results = []
for c, dd in grps:
    for c1, c2 in chrom_cbns:
        chrom1 = dd[c1]
        if c2 == 'chrA':
            # pooled autosomes: per-cell median across the four major arms
            chrom2 = dd[['chr2L', 'chr2R', 'chr3L', 'chr3R']].median(axis=1)
        else:
            chrom2 = dd[c2]
        pval = permutation_test_chrom1_lt_chrom2(chrom1, chrom2)
        results.append((c, c1, c2, pval))
# -log10 p-values; +.0001 guards against log10(0) for empirical p == 0
dat = -np.log10(pd.DataFrame(results, columns=['cluster', 'chrom1', 'chrom2', 'p_value']).set_index(['cluster', 'chrom1', 'chrom2']) + .0001)
# +
# Bar plot of -log10(p) per cluster/chromosome pair for rep 2 (same layout
# as the rep 1 figure).
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
dat.plot.bar(ax=ax, legend=False)
ax.set_xlabel('Cluster Chrom1-Chrom2')
ax.set_ylabel('-log10(p-value)')
ax.set_ylim(0, None)
ax.axhline(-np.log10(0.05), color='r', ls=':', label='0.05')
ax.axhline(-np.log10(0.01), color='r', ls='-.', label='0.01')
plt.legend()
ax.set_title('Rep 2 Reads Mapping to Chromosome Arm / (chromosome length / 1e7)')
# Re-label ticks as "<cluster> <chrom1>-<chrom2>" instead of the tuple repr
# (raw string avoids invalid-escape warnings; the original f-string had no
# placeholders)
new_labels = []
for l in ax.get_xticklabels():
    txt = l.get_text()
    clus, c1, c2 = re.match(r"\((\w\w), chr([\w\d]+), chr([\w\d]+)\)", txt).groups()
    new_labels.append(f'{clus} {c1:<2}-{c2:<2}')
ax.set_xticklabels(new_labels, fontsize=8, fontdict=dict(family='Monospace'))
# Thin separators between each group of 5 chromosome comparisons
for i in range(26):
    ax.axvline(4.5 + 5 * i, color='k', alpha=0.3)
# Shade each cluster's 15-bar span with its cluster color (first band starts
# at x=-1 and is half a bar wider; the rest are 15 wide)
for i in range(9):
    x0 = -1 if i == 0 else 15 * i - 0.5
    width = 15.5 if i == 0 else 15
    ax.add_patch(plt.Rectangle([x0, -1], width=width, height=10, zorder=0, alpha=.4, color=nbconfig.colors['clusters'][i]))
# -
# ### Rep 3
# +
# get rep 3 chromosome level counts by cell
# FIX: the original called read_chrom_cnt(3) without the required
# chrom_lengths argument (TypeError) and then re-divided by an undefined
# `chrom_sizes`. read_chrom_cnt already normalizes by chromosome length,
# so rep 3 is now handled exactly like reps 1 and 2.
cnt3 = read_chrom_cnt(3, chrom_lengths)
cnt3 = cnt3.reindex(clusters.index).dropna() # Only keep cells that have cluster calls
grps = cnt3.join(clusters).groupby('cluster')
# Permutation test per cluster and chromosome pair: is chrom1 lower than chrom2?
results = []
for c, dd in grps:
    for c1, c2 in chrom_cbns:
        chrom1 = dd[c1]
        if c2 == 'chrA':
            # pooled autosomes: per-cell median across the four major arms
            chrom2 = dd[['chr2L', 'chr2R', 'chr3L', 'chr3R']].median(axis=1)
        else:
            chrom2 = dd[c2]
        pval = permutation_test_chrom1_lt_chrom2(chrom1, chrom2)
        results.append((c, c1, c2, pval))
# -log10 p-values; +.0001 guards against log10(0) for empirical p == 0
dat = -np.log10(pd.DataFrame(results, columns=['cluster', 'chrom1', 'chrom2', 'p_value']).set_index(['cluster', 'chrom1', 'chrom2']) + .0001)
# +
# Bar plot of -log10(p) per cluster/chromosome pair for rep 3 (same layout
# as the rep 1 figure).
fig, ax = plt.subplots(1, 1, figsize=(20, 8))
dat.plot.bar(ax=ax, legend=False)
ax.set_xlabel('Cluster Chrom1-Chrom2')
ax.set_ylabel('-log10(p-value)')
ax.set_ylim(0, None)
ax.axhline(-np.log10(0.05), color='r', ls=':', label='0.05')
ax.axhline(-np.log10(0.01), color='r', ls='-.', label='0.01')
plt.legend()
ax.set_title('Rep 3 Reads Mapping to Chromosome Arm / (chromosome length / 1e7)')
# Re-label ticks as "<cluster> <chrom1>-<chrom2>" instead of the tuple repr
# (raw string avoids invalid-escape warnings; the original f-string had no
# placeholders)
new_labels = []
for l in ax.get_xticklabels():
    txt = l.get_text()
    clus, c1, c2 = re.match(r"\((\w\w), chr([\w\d]+), chr([\w\d]+)\)", txt).groups()
    new_labels.append(f'{clus} {c1:<2}-{c2:<2}')
ax.set_xticklabels(new_labels, fontsize=8, fontdict=dict(family='Monospace'))
# Thin separators between each group of 5 chromosome comparisons
for i in range(26):
    ax.axvline(4.5 + 5 * i, color='k', alpha=0.3)
# Shade each cluster's 15-bar span with its cluster color (first band starts
# at x=-1 and is half a bar wider; the rest are 15 wide)
for i in range(9):
    x0 = -1 if i == 0 else 15 * i - 0.5
    width = 15.5 if i == 0 else 15
    ax.add_patch(plt.Rectangle([x0, -1], width=width, height=10, zorder=0, alpha=.4, color=nbconfig.colors['clusters'][i]))
# -
| notebook/2019-01-16_prototype_chrom_perm_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project Name: ANALYZING DATASET OF MULTINATIONAL FOOD COMPANY Zomato
# <NAME> 19BCE2090
#
# <NAME> 19BCE2093
#
# ROYAL 19BCE2692
# ## 1. Importing required libraries
#Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import r2_score
# ## 2. Collection / Load the Dataset
# Read the CSV file containing the Zomato restaurant dataset
df_original = pd.read_csv("Zomato.csv")
df_original.head()
df_original.columns
# ## 3. Preprocessing the data
# Delete columns that are not used in the analysis
df_original = df_original.drop(['url','dish_liked','phone'],axis=1)
# Remove duplicate records
df_original.duplicated().sum()
df_original.drop_duplicates(inplace=True)
# Remove rows with NaN values from the dataset
df_original.isnull().sum()
df_original.dropna(how='any',inplace=True)
# Rename the awkward column names to short, usable ones
df_original.columns
df_original = df_original.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type',
                                          'listed_in(city)':'city'})
df_original.columns
# Convert the cost column from object dtype to numeric
df_original['cost'] = df_original['cost'].astype(str)
# FIX: ',' is a thousands separator in this column (e.g. "1,200"), so it must
# be removed; the original replace(',', '.') silently turned 1,200 into 1.2.
df_original['cost'] = df_original['cost'].apply(lambda x: x.replace(',',''))
df_original['cost'] = df_original['cost'].astype(float)
df_original.info()
# Removing '/5' from the rate column and parsing it as a float
df_original['rate'].unique()
# Drop unrated rows ("NEW" and "-") before parsing
df_original = df_original.loc[df_original.rate !='NEW']
df_original = df_original.loc[df_original.rate !='-'].reset_index(drop=True)
# FIX: np.str was a deprecated alias for the builtin str and was removed in
# NumPy 1.24 (AttributeError on current NumPy); use isinstance(x, str).
remove_slash = lambda x: x.replace('/5', '') if isinstance(x, str) else x
df_original.rate = df_original.rate.apply(remove_slash).str.strip().astype('float')
df_original['rate'].head()
# Adjust the column values
df_original.name = df_original.name.apply(lambda x:x.title())
# Encode the Yes/No flags as booleans
df_original.online_order.replace(('Yes','No'),(True, False),inplace=True)
df_original.book_table.replace(('Yes','No'),(True, False),inplace=True)
df_original.cost.unique()
df_original.head()
# Work on a copy so df_original keeps the human-readable columns for the EDA
df = df_original.copy()
df.columns
# Drop free-text / redundant columns before modeling
df = df.drop(['address','name','reviews_list','type', 'city'], axis=1)
df.head(2)
from sklearn.preprocessing import LabelEncoder
# Label-encode the remaining categorical columns in place
# (one LabelEncoder instance is re-fit per column in the loop)
cat_to_num = df[['online_order','book_table','location','rest_type','cuisines','menu_item']]
le = LabelEncoder()
for i in cat_to_num:
    df[i] = le.fit_transform(cat_to_num[i])
df.head(2)
df["rate"] = pd.to_numeric(df["rate"])
# Discretizing the ratings into a categorical target with 4 classes:
# (0, 3.0], (3.0, 3.5], (3.5, 4.0], (4.0, 5.0] -> "0".."3"
df["rate"] = pd.cut(df["rate"], bins = [0, 3.0, 3.5, 4.0, 5.0], labels = ["0", "1", "2", "3"])
df['rate']
# Checking the number of restaurants in each rating class
np.unique(df["rate"], return_counts = True)
# Defining the independent variables (all columns except rate) and the target
x = df.iloc[:,[0,1,3,4,5,6,7,8]]
y = df['rate']
# 80/20 train/test split with a fixed seed for reproducibility
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.2,random_state=353)
x_train.head()
y_train.head()
# +
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training split only, then apply it to both splits
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# -
# ## 1. Logistic Regression Classifier
from sklearn.linear_model import LogisticRegression
# Linear baseline classifier
my_model = LogisticRegression()
result = my_model.fit(x_train, y_train)
predictions = result.predict(x_test)
predictions
from sklearn.metrics import accuracy_score
# FIX: corrected the "Accracy" typo in the printed label
print('Accuracy of Logistic Regression Classifier :',accuracy_score(y_test, predictions))
from sklearn.metrics import confusion_matrix
# Confusion matrix as a labeled DataFrame (rows = actual, columns = predicted)
confusion_mat = confusion_matrix(y_test, predictions)
confusion_df = pd.DataFrame(confusion_mat, index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# ## 2. Decision Tree Classifier
# +
from sklearn.tree import DecisionTreeClassifier
my_model = DecisionTreeClassifier(random_state=0)
result = my_model.fit(x_train,y_train)
predictions = result.predict(x_test)
predictions
from sklearn.metrics import mean_absolute_error,accuracy_score
mean_absolute_error(y_test, predictions)
# FIX: corrected the "Decision Tress" typo in the printed label
print("Accuracy Score for Decision Trees=",accuracy_score(y_test,predictions))
from sklearn.metrics import confusion_matrix
# Confusion matrix as a labeled DataFrame (rows = actual, columns = predicted)
matrix=confusion_matrix(y_test,predictions)
confusion_df = pd.DataFrame(matrix, index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# -
# ## 3. KNN Classifier
from sklearn.neighbors import KNeighborsClassifier
my_model = KNeighborsClassifier(n_neighbors=1)
result = my_model.fit(x_train,y_train)
predictions = result.predict(x_test)
from sklearn.metrics import classification_report,confusion_matrix
matrix = confusion_matrix(y_test,predictions)
print("Accuracy of KNN Classifier:",accuracy_score(y_test, predictions))
from sklearn import metrics
confusion_df = pd.DataFrame(matrix, index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# ## 4. Random Forest classifier
from sklearn.ensemble import RandomForestClassifier
my_model = RandomForestClassifier(n_estimators = 50, criterion = 'entropy', random_state = 42)
result=my_model.fit(x_train, y_train)
predictions = result.predict(x_test)
predictions
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, predictions))
from sklearn.metrics import confusion_matrix
conf_matrix =confusion_matrix(predictions,y_test)
confusion_df = pd.DataFrame(conf_matrix, index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# ## 5. Support Vector Machine Classifier
from sklearn.svm import SVC
my_model = SVC(kernel = 'rbf', random_state = 0)
result = my_model.fit(x_train, y_train)
predictions = result.predict(x_test)
predictions
from sklearn import metrics
print("Accuracy of Support Vector Machine Classifier:",accuracy_score(y_test, predictions))
from sklearn.metrics import confusion_matrix
conf_matrix =confusion_matrix(predictions,y_test)
confusion_df = pd.DataFrame(conf_matrix ,index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# ## 6. Gradient Boosting Classifier
from sklearn.ensemble import GradientBoostingClassifier
my_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,max_depth=1,random_state=0)
result = my_model.fit(x_train, y_train)
predictions = result.predict(x_test)
from sklearn import metrics
print("Accuracy of Support Gradient Boosting Classifier:",round(metrics.accuracy_score(y_test, predictions)*100,2))
from sklearn.metrics import confusion_matrix
conf_matrix =confusion_matrix(predictions,y_test)
confusion_df = pd.DataFrame(conf_matrix ,index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# ## 7. Naive Bayes Classifier
from sklearn.naive_bayes import GaussianNB
my_model = GaussianNB()
result = my_model.fit(x_train,y_train)
predictions = result.predict(x_test)
predictions
from sklearn import metrics
print("Accuracy of Naive bayes Classifier id: ",round(accuracy_score(y_test,predictions)*100,2))
from sklearn.metrics import confusion_matrix
matrix = confusion_matrix(predictions,y_test,)
confusion_df = pd.DataFrame(matrix, index=['Actual Label 0','Actual Label 1','Actual Label 2','Actual Label 3'], columns=['Predicted Label 0','Predicted Label 1','Predicted Label 2','Predicted Label 3'])
confusion_df
# +
# libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): accuracies are hard-coded from earlier runs and can drift
# from the values the cells above actually print -- verify before reuse.
accuracy = [59.29, 93.67, 89.81, 94.90, 59.25, 69.18, 45]
bars = ('Logistic Regression', 'Decision Tree', 'KNN', 'Random Forest','Support Vector Machine', 'Gradient Boosting', 'Naive bayes')
plt.figure(figsize=(15,10))
y_pos = np.arange(len(bars))
# `bars` is rebound here from label tuple to the BarContainer
bars = plt.bar(bars,height=accuracy, color=['black', 'red', 'green', 'blue', 'cyan', 'yellow', 'magenta'])
# Annotate each bar with its accuracy value
for bar in bars:
    yval = bar.get_height()
    plt.text(bar.get_x(), yval + 1,yval)
#plt.xticks(y_pos, bars)
# NOTE(review): fontsize=2 is nearly invisible -- likely meant a larger size
plt.xlabel("Ml Algorithms",fontsize=2)
plt.ylabel("Accuracy in Percentage")
plt.title("Accuracy of Machine Learning Algorithms")
plt.show()
# -
# ## Exploratory Data Analysis for Zomato Dataset
# ### Correlation between different variables
# Kendall rank correlation between the numeric columns, shown as a heatmap
corr = df_original.corr(method='kendall')
plt.figure(figsize=(15,8))
sns.heatmap(corr, annot=True)
df_original.columns
# ### Restaurants delivering Online or not
# Count of restaurants that do / do not take online orders
sns.countplot(df_original['online_order'])
fig = plt.gcf()
fig.set_size_inches(5,5)
plt.title('Restaurants delivering online or Not')
df_original.columns
# ### Restaurants allowing table booking or not
# Count of restaurants that do / do not allow table booking
sns.countplot(df_original['book_table'])
fig = plt.gcf()
fig.set_size_inches(7,7)
plt.title('Restaurants allowing table booking or not')
# ### Types of Services
# Count of restaurants per service type, with rotated tick labels.
# FIX: the original called sns.countplot three times on the same column,
# drawing the bars repeatedly just to rotate the labels; plot once and reuse
# the returned Axes instead.
ax = sns.countplot(df_original['type'])
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="right")
fig = plt.gcf()
fig.set_size_inches(15,15)
plt.title('Types of Service')
# ### Type and Rating
# Stacked bar chart of service type counts per rating value
type_plt=pd.crosstab(df_original['rate'],df_original['type'])
type_plt.plot(kind='bar',stacked=True);
plt.title('Type - Rating',fontsize=15,fontweight='bold')
plt.ylabel('Type',fontsize=10,fontweight='bold')
plt.xlabel('Rating',fontsize=10,fontweight='bold')
plt.xticks(fontsize=10,fontweight='bold')
plt.yticks(fontsize=10,fontweight='bold');
# Scatter of cost vs. rating
# NOTE(review): figsize=(90,50) is enormous -- probably meant (9,5); confirm.
plt.figure(figsize=(90,50))
plt.scatter(df_original['cost'], df_original['rate'])
plt.xlabel("cost")
plt.ylabel("rate")
plt.title("Distribution of ratings and cost")
plt.show()
# ### Histogram of distribution of restaurant ratings
# Plotting the distribution of restaurant ratings across 10 bins
plt.figure(figsize = (10, 5))
plt.hist(df_original.rate, bins = 10, color = "g")
plt.xlabel("Ratings")
plt.ylabel("Count")
plt.title("Histogram of distribution of restaurant ratings")
plt.show()
# ### Top 15 Restro with maximum number of outlets
# Top 15 restaurant names by record count, annotated with the count per bar
ax=df_original['name'].value_counts().head(15).plot.bar(figsize =(12,6))
ax.set_title("Top 15 Restarurents with maximum outlets")
for i in ax.patches:
    # annotate each bar with its height, slightly offset from the bar top
    ax.annotate(i.get_height(), (i.get_x() * 1.005, i.get_height() * 1.005))
# ### Most famous restaurant chains in Bengaluru
# Horizontal bars of the 20 most frequent restaurant names
plt.figure(figsize=(15,7))
chains=df_original['name'].value_counts()[:20]
sns.barplot(x=chains,y=chains.index,palette='Set1')
plt.title("Most famous restaurant chains in Bangaluru",size=20,pad=20)
plt.xlabel("Number of outlets",size=15)
# ### Restaurant Count by Location
# Restaurant Count by Location
df_original.location.value_counts().nlargest(10).plot(kind="barh")
plt.title("Restaurants by Location")
plt.xlabel("Count")
plt.ylabel("Number of restaurants by location")
plt.show()
| project (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Generating GRCh37 Medical Genes Benchmark
# This notebook details the steps to generate the challenging medically relevant genes benchmark. All paths are relative to the top-level directory of the repository; large dependency files are not stored in the repository. External data dependencies are downloaded from the GIAB FTP site as needed.
#
# ## Steps
#
# 1. Look up coordinates for gene symbols in ENSEMBL GRCh37 Human Genes v101 of the union of Mandelker et al Supplementary Table 13, COSMIC Cancer Gene Census, and <NAME> Medical Gene Lists -- `scripts/GRCh37_lookup_MRG_symbol_coordinates_ENSEMBL.R`
#
# 2. Find overlap of genes with HG002 v4.2.1, then add slop and find overlap with HG002 trio-hifiasm (v0.11) diploid assembly confident regions `HG002v11-align2-GRCh37.dip.bed`
#
# 3. Find genes with < 90% included bases in v4.2.1 HG002 small variant benchmark and fully covered with overlapping segdups and flanking sequence in HG002 hifiasm v0.11 GRCh37 dip.bed, find union of GRCh37 and GRCh38 MRG lists, then add genes that are unique to GRCh37 but still fully covered with overlapping segdups and flanking sequence in HG002 hifiasm v0.11 GRCh37 dip.bed -- `scripts/find_coordinates_of_MRG_GRCh37_GRCh38_union.R`
#
# 4. Use coordinates for benchmark then remove
# - homopolymers and imperfect homopolymers > 20
# - SVs with 50bp flanking and overlapping tandem repeats
# - hifiasm error
# - GRCh37 GAPs
# - Remove partially covered tandem repeats
# - Remove MHC region
#
# 5. Generate stratification files for Complex Variants in Tandem Repeats
# - GRCh37_MRG_stratification_ComplexVar_in_TR.bed
# # Downloading Data Dependencies
# ## hifiasm Variants and Diploid Regions
# Assembly-based inputs: dip.bed = assembly-covered confident regions,
# dip.vcf.gz = dipcall variants from the HG002 trio-hifiasm v0.11 assembly.
mkdir -p data/hifiasm_dipcall_output
wget -O data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.bed \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/hifiasm_v0.11/HG002v11-align2-GRCh37.dip.bed
wget -O data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/AshkenazimTrio/HG002_NA24385_son/CMRG_v1.00/GRCh37/SupplementaryFiles/HG002v11-align2-GRCh37/HG002v11-align2-GRCh37.dip.vcf.gz
# ## Reference Genome
# hs37d5 (GRCh37 + decoy); decompressed and faidx-indexed for the bcftools norm steps below.
wget -O resources/hs37d5.fa.gz \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/references/GRCh37/hs37d5.fa.gz
gunzip resources/hs37d5.fa.gz
samtools faidx resources/hs37d5.fa
# ## Genomic Stratifications
# +
# GIAB v2.0 genome stratifications used to exclude difficult regions.
mkdir -p resources/giab_stratifications
wget -O resources/giab_stratifications/GRCh37_segdups.bed.gz \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/genome-stratifications/v2.0/GRCh37/SegmentalDuplications/GRCh37_segdups.bed.gz
wget -O resources/giab_stratifications/GRCh37_MHC.bed.gz \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/genome-stratifications/v2.0/GRCh37/OtherDifficult/GRCh37_MHC.bed.gz
wget -O resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
ftp://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/genome-stratifications/v2.0/GRCh37/LowComplexity/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz
# NOTE(review): the URL below serves gzipped content but the -O target drops the
# .gz suffix, so the saved file is gzip data named .bed — confirm downstream
# readers expect that, or rename the target to .bed.gz.
wget -O resources/giab_stratifications/GRCh37_AllTandemRepeats_gt100bp_slop5.bed \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/genome-stratifications/v2.0/GRCh37/LowComplexity/GRCh37_AllTandemRepeats_gt100bp_slop5.bed.gz
# -
# ## GIAB Benchmark Sets
# ### CMRG Draft Benchmarks
# +
mkdir -p data/manually_created_files/cmrg_draft_benchmarks
## v0.02.03 small variant benchmark
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz.tbi \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz.tbi
## v0.01 SV benchmark
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_SV_benchmark_v0.01/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_SV_benchmark_v0.01/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz
wget -O data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz.tbi \
https://ftp-trace.ncbi.nlm.nih.gov/giab/ftp/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_SV_benchmark_v0.01/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz.tbi
# -
# ### V4.2.1 GRCh37 Benchmark
# v4.2.1 small-variant benchmark regions, used when assessing gene inclusion.
mkdir -p data/v4.2.1_benchmark_regions
wget -O data/v4.2.1_benchmark_regions/HG002_GRCh37_1_22_v4.2.1_benchmark_noinconsistent.bed \
https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/AshkenazimTrio/HG002_NA24385_son/NISTv4.2.1/GRCh37/HG002_GRCh37_1_22_v4.2.1_benchmark_noinconsistent.bed
# # From these genes to be benchmarked remove the following regions that we exclude from the diploid assembly based variant calls:
#
# - homopolymers and imperfect homopolymers > 20
# - SVs with 50bp flanking and overlapping tandem repeats
# - hifiasm error
# - GRCh37 GAPs
# - Remove partially covered tandem repeats
#
#
# ## Remove homopolymers > 20bp
# Exclude homopolymer runs >20bp (with 5bp slop) from the candidate regions.
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_bedtools_merge.bed \
-b data/giab_stratifications/GRCh37/GRCh37_SimpleRepeat_homopolymer_gt20_slop5.bed.gz \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_SimpleRepeat_homopolymer_gt20_slop5.bed
# ## Remove imperfect homopolymers > 20bp
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_SimpleRepeat_homopolymer_gt20_slop5.bed \
-b data/giab_stratifications/GRCh37/GRCh37_SimpleRepeat_imperfecthomopolgt20_slop5.bed.gz \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_SimpleRepeat_imperfecthomopolgt20_slop5.bed
# ## SVs with 50bp flanking and overlapping tandem repeats
# `GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz` is from ftp://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/release/genome-stratifications/v2.0/GRCh37/LowComplexity/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz
# Pull SVs (REF or ALT length >= 50bp) out of the dipcall VCF as BED intervals.
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
| awk 'length($4)>49 || length($5)>49' \
| awk '{FS=OFS="\t"} {print $1,$2-1,$2+length($4)}' \
> workflow/smallvar_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp.bed
# Expand each SV interval to fully cover any overlapping tandem repeat or
# homopolymer, add 50bp slop, and merge intervals within 1kb of each other.
intersectBed \
-wa -a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b workflow/smallvar_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp.bed \
| multiIntersectBed -i stdin workflow/smallvar_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp.bed \
| awk '{FS=OFS="\t"} {print $1,$2-50,$3+50}' \
| mergeBed -i stdin -d 1000 \
> workflow/smallvar_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000.bed
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_SimpleRepeat_imperfecthomopolgt20_slop5.bed \
-b workflow/smallvar_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000.bed \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_SVsgt49bp_repeatexpanded_slop50_merge1000.bed
# ## Remove hifiasm error on chr21
# `GRCh37_hifiasm_error.bed`
# was created through manual curation of clusters of errors identified during evaluation steps of benchmark development
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_SVsgt49bp_repeatexpanded_slop50_merge1000.bed \
-b data/manually_created_files/GRCh37_hifiasm_error.bed \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_hifiasm_error.bed
# ## Remove GRCh37 GAPs
# `GRCh37_MRG_GAPs.bed` was created through manual curation of clusters of errors identified during evaluation steps of benchmark development
#
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_hifiasm_error.bed \
-b data/manually_created_files/GRCh37_MRG_GAPs.bed \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_MRG_GAPs.bed
# ## Sort
# NOTE(review): numeric sort on the chromosome column assumes b37-style names;
# non-numeric names (X, Y) compare as 0 under -k1,1n — confirm intended.
cat workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_MRG_GAPs.bed \
| sort -k1,1n -k2,2n \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_MRG_GAPs_sorted.bed
# ## Remove partially covered tandem repeats
# Tandem repeats that also overlap the complement of the current regions are
# only partially covered, so drop such repeats entirely from the regions.
complementBed \
-i workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_MRG_GAPs_sorted.bed \
-g resources/human.b37.genome \
| intersectBed \
-wa -a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b stdin \
| subtractBed \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_GRCh37_MRG_GAPs_sorted.bed \
-b stdin \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_partial_tandem_repeats.bed
# ## Prepare Small Variant benchmark VCF
#
# __Notes__
# 1. `workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.vcf.gz` is https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz after updates to the headers
#
# 2. `workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.bed` is https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/AshkenazimTrio/analysis/NIST_HG002_medical_genes_benchmark_v0.02/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed
# +
# Freeze the fully-filtered regions as the draft small-variant benchmark BED
# and report its total size in bases.
cp workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_subtract_partial_tandem_repeats.bed \
workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.bed
cat workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.bed \
| awk '{sum+=$3-$2} END {print sum}'
# Benchmark VCF: dipcall variants within the unfiltered candidate gene
# coordinates (the merged coordinate BED, not the filtered benchmark BED).
bedtools intersect \
-a data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
-b workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_bedtools_merge.bed \
-header \
> workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.vcf
bgzip -f workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.vcf
tabix -f workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark.vcf.gz
# -
# ## SV Benchmark
# Find SVs MRG benchmark gene coordinates
mkdir -p workflow/SV_benchmark/GRCh37/
# Expanded SV intervals that fall within the candidate gene coordinates.
bedtools intersect \
-a workflow/smallvar_benchmark/GRCh37//HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000.bed \
-b workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_bedtools_merge.bed \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates.bed
# +
# Find SVs MRG benchmark gene coordinates
# (same command as the previous cell, repeated so this cell is self-contained)
bedtools intersect \
-a workflow/smallvar_benchmark/GRCh37//HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000.bed \
-b workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_bedtools_merge.bed \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates.bed
# Subset to SVs only gt49bp
# NOTE(review): FS splits on tab AND comma, so $5/$6 cover the first two ALT
# alleles of multiallelic records; for biallelic records $6 is QUAL — confirm
# the length() test on QUAL is benign here.
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
| awk '{FS="\t|,"} {if($1 ~ /^#/ || length($4)-length($5)>49 || length($5)-length($4)>49 || length($6)-length($4)>49) print}' \
| intersectBed \
-a workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates.bed \
-b stdin -c \
| awk '$4>0' \
| cut -f1-3 \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp.bed
# Find isolated SVs
# Intervals containing exactly one indel >9bp (the SV itself).
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
| awk '{FS="\t|,"} {if($1 ~ /^#/ || length($4)-length($5)>9 || length($5)-length($4)>9 || length($6)-length($4)>9) print}' \
| intersectBed -a workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp.bed \
-b stdin -c \
| awk '$4==1' \
| cut -f1-3 \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp_isolated.bed
# Find complex SVs
# Intervals containing more than one indel >9bp.
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
| awk '{FS="\t|,"} {if($1 ~ /^#/ || length($4)-length($5)>9 || length($5)-length($4)>9 || length($6)-length($4)>9) print}' \
| intersectBed \
-a workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp.bed \
-b stdin -c \
| awk '$4>1' \
| cut -f1-3 \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp_complexSVs.bed
# Remove complex SVs from MRG gene candidate coordinates and remove GAPs
bedtools subtract \
-a workflow/smallvar_benchmark/GRCh37/HG002_GRCh37_CMRG_coordinates_temp_bedtools_merge.bed \
-b workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp_complexSVs.bed \
| bedtools subtract \
-a stdin \
-b data/manually_created_files/GRCh37_MRG_GAPs.bed \
> workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark_temp.bed
#HG002v11-align2-GRCh37.dip_complexindelsgt9bpinRepeats.bed from the SV benchmark bed:
# Find tandem repeats and homopolymers that have multiple indels >9bp, since these can add up to >49bp and should be subtracted from the benchmark SV bed
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
| awk '{FS="\t|,"} {if($1 ~ /^#/ || length($4)-length($5)>9 || length($5)-length($4)>9 || length($6)-length($4)>9) print}' \
| intersectBed \
-a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b stdin -c \
| awk '$4>1' \
| cut -f1-3 \
| intersectBed \
-v -a stdin \
-b workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_SVsgt49bp_repeatexpanded_slop50_merge1000_intersect_MRG_benchmark_coordinates_onlygt49bp.bed \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_complexindelsgt9bpinRepeats.bed
bedtools subtract \
-a workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark_temp.bed \
-b workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37.dip_complexindelsgt9bpinRepeats.bed \
> workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark.bed
# Report the total draft SV benchmark size in bases.
cat workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark.bed \
| awk '{sum+=$3-$2} END {print sum}'
# Decompose for truvari comparison
#vt decompose -s HG002v11-align2-GRCh37.dip.vcf -o HG002v11-align2-GRCh37.dip_decomposed.vcf
#python script to remove ambiguous (non-ACTGN) REF
gunzip \
-c data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf.gz \
> data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf
## remove ambiguous (non-ACGTN) in REF field. Adjust path to where you keep this file
python scripts/fix_reference_allele.py \
--input_vcf_file data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf \
--output_file workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig.dip.vcf
rm data/hifiasm_dipcall_output/HG002v11-align2-GRCh37.dip.vcf
## zip for bcftools
bgzip \
-c workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig.dip.vcf \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig.dip.vcf.gz
tabix workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig.dip.vcf.gz
# +
# Normalize the ambiguity-cleaned VCF with bcftools: split multiallelics,
# left-align against hs37d5, then drop exact-duplicate records.
## split multiallelic to biallelic
bcftools norm -m- \
-Oz workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig.dip.vcf.gz \
-o workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_m.vcf.gz
## left align and normalize indels.
bcftools norm \
-f resources/hs37d5.fa \
-Oz -o workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mf.vcf.gz \
workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_m.vcf.gz
## remove duplicate records
bcftools norm -d none \
-Oz workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mf.vcf.gz \
-o workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd.vcf.gz
# -
## remove MHC region
bedtools subtract \
-header \
-a workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd.vcf.gz \
-b resources/giab_stratifications/GRCh37_MHC.bed.gz \
| bgzip -c \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC.vcf.gz
# +
## intersect w/ benchmark bed and subset to >39bp in REF or ALT fields.
#intersect w/ MRG target regions and subset >39 bp
bedtools intersect -header \
-a workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC.vcf.gz \
-b workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark.bed \
| awk '$1 ~ /^#/ || length($4)>39 || length($5)>39' \
| bgzip -c \
> workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC_intersectBenchBED_gt39bp.vcf.gz
## index vcf, required by truvari
tabix -p vcf workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC_intersectBenchBED_gt39bp.vcf.gz
# -
## Find benchmark variants between 35 and 49 base pairs in size and exclude overlapping tandem repeats plus slop 50bp on either side. Remove these from the benchmark regions bed so that it includes only SVs that are greater than 49 base pairs
## (gunzip resolves the .gz suffix itself; note it removes the compressed copy,
## leaving the .tbi index created above without its VCF)
gunzip workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC_intersectBenchBED_gt39bp.vcf
## Output extension fixed from ".vcF" to ".vcf" so the vcf2bed step below reads
## the file that was actually written (matters on case-sensitive filesystems).
python scripts/SVs_between_35_50bp.py \
--input workflow/SV_benchmark/GRCh37/HG002v11-align2-GRCh37_noambig_norm_mfd_noMHC_intersectBenchBED_gt39bp.vcf \
--output workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_SVs_gt34_and_lt_50bp.vcf
# vcf2bed from bedops
vcf2bed < workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_SVs_gt34_and_lt_50bp.vcf \
> workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_SVs_gt34_and_lt_50bp.bed
# Expand the 35-49bp variant intervals over any overlapping tandem repeats,
# add 50bp slop, and subtract them so the SV benchmark keeps only >49bp SVs.
intersectBed -wa \
-a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_SVs_gt34_and_lt_50bp.bed \
| multiIntersectBed \
-i stdin workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_SVs_gt34_and_lt_50bp.bed \
| awk '{FS=OFS="\t"} {print $1,$2-50,$3+50}' \
> workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_gt34_and_lt_50bp_repeatexpanded_slop50.bed
bedtools subtract \
-a workflow/SV_benchmark/GRCh37/HG002_GRCh37_MRG_draft_SV_benchmark.bed \
-b workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.02_gt34_and_lt_50bp_repeatexpanded_slop50.bed \
> workflow/SV_benchmark/GRCh37/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed
# ## Prepare release benchmark files
#
# Steps for creating excluding errors found in curation of the MRG benchmark, in order to create v1.0 bed files for small variants and SVs
#
# Updated 4/27/21 to exclude 50bp on either side of errors and unsure variants found in curation, because sometimes a deletion and overlapping SNVs outside a repeat weren't excluded from benchmark stats. Also, remove ".;" at beginning of INFO field introduced by svanalyzer svwiden. Produces v1.00.01 small variant and structural variant MRG benchmarks
#
# __used NCBI remap to generate__ GRCh38_curation_medicalgene_SV_errorsorunsure_repeatexpanded.bed
#
##errors to exclude found in curation of SVs
#only found 2 errors so manually created bed GRCh37_curation_medicalgene_SV_errorsorunsure.bed
#expand bed coordinates to completely cover any overlapping homopolymers and tandem repeats
mkdir -p workflow/release_benchmark_generation
intersectBed -wa \
-a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b data/manually_created_files/GRCh37_curation_medicalgene_SV_errorsorunsure.bed \
| multiIntersectBed -i stdin data/manually_created_files/GRCh37_curation_medicalgene_SV_errorsorunsure.bed \
| mergeBed -i stdin \
> workflow/release_benchmark_generation/GRCh37_curation_medicalgene_SV_errorsorunsure_repeatexpanded.bed
# +
#create bed with sites curated as unsure or incorrect in the benchmark in GRCh37 coordinates
# NOTE(review): the cut column indices (3,6,8,12,20) assume the combined
# curation TSV layout — confirm they select (assembly, chrom, pos, ref, call).
# Sites are padded by 50bp each side and merged when within 100bp.
cut -f3,6,8,12,20 \
data/manually_created_files/combined\ curation\ responses\ from\ benchmarking\ with\ sm\ variant\ v0.02.03\ -\ GRCh37andGRCh38.tsv \
| grep 'sure\|o' \
| grep -v ^ref \
| awk '{FS=OFS="\t"} {print $2, $3-50, $3+length($4)+50}' \
| sort -k1,1n -k2,2n -k3,3n \
| mergeBed -i stdin -d 100 \
> workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure.bed
intersectBed -wa \
-a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure.bed \
| multiIntersectBed \
-i stdin workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure.bed \
| mergeBed -i stdin \
> workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_repeatexpanded.bed
# +
cd data/manually_created_files/
##for manuscript, identify number of errors/unsure identified in curation and the number of these that would have been excluded by curating common FPs/FNs first
#create bed with sites curated as unsure or incorrect in the benchmark in GRCh37 coordinates
grep ^Common combined\ curation\ responses\ from\ benchmarking\ with\ sm\ variant\ v0.02.03\ -\ GRCh37andGRCh38.tsv \
| cut -f3,6,8,12,20 \
| grep 'sure\|o' \
| grep -v ^ref \
| awk '{FS=OFS="\t"} {print $2, $3-50, $3+length($4)+50}' \
| sort -k1,1n -k2,2n -k3,3n \
| mergeBed -i stdin -d 100 \
> ../../workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_Commononly.bed
# Evaluation-only (non-Common) GRCh37 sites; no mergeBed here, so each output
# line is one curated site and the wc -l below counts sites directly.
grep -v ^Common combined\ curation\ responses\ from\ benchmarking\ with\ sm\ variant\ v0.02.03\ -\ GRCh37andGRCh38.tsv \
| cut -f3,6,8,12,20 \
| grep 'sure\|o' \
| grep -v ^ref \
| grep ^GRCh37 \
| awk '{FS=OFS="\t"} {print $2, $3-50, $3+length($4)+50}' \
| sort -k1,1n -k2,2n -k3,3n \
> ../../workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_evaluationonly.bed
cd ../..
# -
## Sanity check - expected value 50
wc -l workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_evaluationonly.bed
#expand bed coordinates to completely cover any overlapping homopolymers and tandem repeats
## Sanity Check - expected value 6
intersectBed -wa \
-a resources/giab_stratifications/GRCh37_AllTandemRepeatsandHomopolymers_slop5.bed.gz \
-b workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_Commononly.bed \
| multiIntersectBed \
-i stdin workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_Commononly.bed \
| mergeBed -i stdin \
| intersectBed -v \
-a workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_evaluationonly.bed \
-b stdin \
| wc -l
# exclude errors/unsure sites found in SV curation, small variant curation, small variant complex TR curation, and FPs from complex TR comparison
#
# Build the v1.00.01 small-variant release BED by excluding all curated
# error/unsure regions (SV, small-variant, complex-TR, and HiCanu FP sets)
# from the v0.02.03 draft BED.
subtractBed \
-a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed \
-b workflow/release_benchmark_generation/GRCh37_curation_medicalgene_SV_errorsorunsure_repeatexpanded.bed \
| subtractBed \
-a stdin \
-b workflow/release_benchmark_generation/GRCh37_curation_medicalgene_smallvar_errorsorunsure_repeatexpanded.bed \
| subtractBed \
-a stdin \
-b data/manually_created_files/GRCh37_curation_medicalgene_smallvar_complexrepeat_errorsorunsure_repeatexpanded.bed \
| subtractBed \
-a stdin \
-b data/manually_created_files/HiCanu_2.1_HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03_intersected_FPs_repeatexpanded_slop50.bed \
> workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v1.00.01.bed
# __Sanity Checks__ Make sure the bed size doesn't decrease more than expected
## Sanity Check - expected value 11712171
awk '{sum+=$3-$2} END {print sum}' \
data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed
## Sanity Check - expected value 11679803
awk '{sum+=$3-$2} END {print sum}' \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v1.00.01.bed
# Make sure the number of variants in the bed don't decrease more than expected
## Sanity check - expected number of lines 21907
intersectBed -a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz \
-b data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.bed \
| wc -l
## Sanity check - expected number of lines 21591
intersectBed -a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz \
-b workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v1.00.01.bed | wc -l
# Using v0.02.03 vcf as v1.00.01 vcf
cp data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v1.00.01.vcf.gz
cp data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v0.02.03.vcf.gz.tbi \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_smallvar_benchmark_v1.00.01.vcf.gz.tbi
# Excluding errors/unsure sites found in SV curation
# (produces the v0.02.00 SV BED; promoted to v1.00.01 at the end of this section)
subtractBed \
-a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed \
-b workflow/release_benchmark_generation/GRCh37_curation_medicalgene_SV_errorsorunsure_repeatexpanded.bed \
> workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.bed
# __Sanity Check__ Make sure the bed size doesn't decrease more than expected
#
## Sanity Check - expected value 11966919
awk '{sum+=$3-$2} END {print sum}' \
data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed
## Sanity Check - expected value 11966249
awk '{sum+=$3-$2} END {print sum}' \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.bed
# __Sanity Check__ Make sure the number of variants in the bed don't decrease more than expected
## Sanity Check - expected value 205
intersectBed \
-a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz \
-b data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.bed \
| wc -l
## Sanity check - expected value 204
intersectBed \
-a data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz \
-b workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.bed \
| wc -l
# Using v0.01 variant calls as v0.02 variant calls
cp data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.vcf.gz
cp data/manually_created_files/cmrg_draft_benchmarks/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.01.vcf.gz.tbi \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.vcf.gz.tbi
# +
#remove ".;" at beginning of INFO field introduced by svanalyzer svwiden
gunzip -c workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.vcf.gz \
| sed 's/\.;REPTYPE/REPTYPE/' \
| bgzip -c \
> workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v1.00.01.vcf.gz
tabix workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v1.00.01.vcf.gz
# -
cp workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v0.02.00.bed \
workflow/release_benchmark_generation/HG002_GRCh37_difficult_medical_gene_SV_benchmark_v1.00.01.bed
# ### Software versions
#
# See `environment.yml` for dependencies.
#
| GRCh37/GenomeSpecific/GRCh37_HG002_medical_genes_benchmark_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kaggleEnv
# language: python
# name: kaggleenv
# ---
import os
# Project-relative imports below (config, utils, models) expect cwd to be src/.
os.chdir('../src')
# +
import datetime
import pandas as pd
import numpy as np
import rasterio
import cv2
from config import _C as cfg
from utils import create_tile_v2, to_mask, rle2mask
from models.loss import dice_coefficient
# Identity affine: read the TIFF in plain pixel coordinates (no georeferencing).
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
# +
# Sanity-check against the tiles created by the tiling procedure
# +
df = pd.read_csv('/home/giorgio/Scrivania/Kaggle/hubmap/data/train.csv')
# First validation image id from the configured split.
valid_id = cfg.DATASET.VALID_ID[0]
# Tile the validation image (and its mask) with the project tiling procedure.
res = create_tile_v2(
    valid_id,
    df,
    cfg
)
# +
path_img = os.path.join(
    cfg.DATA_DIR, 'train', valid_id+'.tiff'
)
dataset = rasterio.open(path_img, transform=identity, num_threads = 'all_cpus')
h, w = dataset.shape
# Ground-truth mask: decode the RLE annotation for this image.
encoding = df[df['id']==valid_id]['encoding'].values[0]
mask = rle2mask(encoding, (w, h))
# -
# Downscale the decoded mask to the working resolution used by the tiles.
mask = cv2.resize(mask, dsize=None, fx=cfg.DATASET.IMG_SCALE, fy=cfg.DATASET.IMG_SCALE, interpolation = cv2.INTER_AREA)
h, w = mask.shape
# Stitch the ground-truth mask tiles back into a full mask and compare against
# the decoded RLE mask; dice near 1 confirms the tiling round-trips correctly.
mask_pred = to_mask(
    res['mask_tile'],
    res['coord'],
    h,
    w,
    cfg.DATASET.TRAIN_TILE_SIZE
)
dice_coefficient(mask, mask_pred)
from models.model import build_model
from data_builder.transforms import get_valid_transform
import albumentations as A
import torch
# Restore the trained U-Net weights; drop the checkpoint dict to free memory.
model = build_model(cfg)
ckpt = torch.load('/home/giorgio/Scrivania/Kaggle/hubmap/experiments/resnet34/2020-12-29/unet_best.ckpt')
model.load_state_dict(ckpt['model_state_dict'])
del ckpt
model = model.to(cfg.DEVICE)
model = model.eval()
# Shape check: stack tiles, reverse channel order (presumably BGR->RGB from
# cv2 loading — confirm), then move channels first (NHWC -> NCHW).
# NOTE(review): tile_image is unused after the prints below; the normalized
# tile_image2 built in the next cell is what actually feeds the model.
tile_image = res['img_tile']
tile_image = np.stack(tile_image)[..., ::-1]
print(tile_image.shape)
tile_image = np.ascontiguousarray(tile_image.transpose(0,3,1,2))
print(tile_image.shape)
norm = A.Normalize()
# +
# Same pipeline with normalization applied before the channel transpose.
tile_image2 = res['img_tile']
tile_image2 = np.stack(tile_image2)[..., ::-1]
tile_image2 = norm(image=tile_image2)['image']
tile_image2 = np.ascontiguousarray(tile_image2.transpose(0,3,1,2))
print(tile_image2.shape)
# +
batch = np.array_split(tile_image2, len(tile_image2)//4)
tile_prob = []
#itero per tutti i batch
for num, imgs in enumerate(batch):
imgs = torch.from_numpy(imgs).to(cfg.DEVICE)
with torch.no_grad():
y_hat = model(imgs)
prob = torch.sigmoid(y_hat)
tile_prob.append(prob.detach().cpu().numpy())
# -
tile_probz = np.concatenate(tile_prob).squeeze()
mask_pred = to_mask(
tile_probz,
res['coord'],
h,
w,
cfg.DATASET.TRAIN_TILE_SIZE
)
mask_pred.shape
for thr in np.linspace(0, 1, 21):
predict = (mask_pred>thr).astype(np.float32)
print(thr, dice_coefficient(predict, mask))
| notebooks/check_tile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## matplotlib (plotting) and SciPy (scientific algorithms) libraries
# The SciPy library provides an emormous variety of scientific algorithms, exposed in the Python language,
# but often written in low-level languages like C and Fortran to empower us with the fastest performance.
# Many of these algorithms are too "heavy" to be implemented in the basic NumPy library; however, SciPy
# is built upon the data structures and operations enabled by NumPy and the two libraries are often
# used side-by-side.
#
# Let's start with a common problem in many scientific disciplines -- calculating the Voronoi diagram
# for a set of data points. Since this problem falls into the category of "computational geometry,"
# the SciPy developers have placed it in the scipy.spatial submodule with other spatial algorithms
# and data structures.
import numpy as np
import scipy
import scipy.spatial
import matplotlib
# %matplotlib inline
# NOTE: can also use %matplotlib notebook to gain access to interactive plots in notebook
import matplotlib.pyplot as plt
# we start off with a set of random points, which may for example represent
# standing trees in a forest after a forest fire event
# (15 standard-normal draws, one (x, y) row per tree)
tree_positions = np.random.randn(15, 2)
# +
# at this point, it is already helpful to first take a look at the positions of the trees on the 2D "map"
# let's start using matplotlib for this work
# then create an axis object (which can be used for adding the data and labels)
f, ax = plt.subplots(1, 1)
# scatter plot the 2D tree position data
ax.scatter(tree_positions[...,0], # x coordinates
           tree_positions[...,1]) # y coordinates
ax.set_title("Trees Remaining After Forest Fire")
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_xlim(-3, 3)
ax.set_ylim(-3, 3);
# +
# The Voronoi diagram will tell us which parts of the forest are closest to which tree
# This can be used i.e., as an estimate of the amount of area occupied by each tree (the area around it)
vor = scipy.spatial.Voronoi(tree_positions)
# it is such a common operation to plot the Voronoi diagram of a set of 2D generators
# that SciPy can plot the diagram (using matplotlib under the hood) directly
# from the vor object above
fig_vor = scipy.spatial.voronoi_plot_2d(vor)
# grab the axis voronoi_plot_2d drew on so we can match the limits above
axis = fig_vor.get_axes()[0]
axis.set_xlim(-3, 3)
axis.set_ylim(-3, 3);
# +
# let's take a step back though & see if we can generate a similar plot using the
# vor object and matplotlib (instead of using voronoi_plot_2d directly), as an
# exercise in learning about matplotlib and SciPy
# the blue points above are the "generators," while the orange points are the Voronoi
# vertices bounding the Voronoi regions
f, ax_manual = plt.subplots(1, 1)
# there's a convenient way to access the Voronoi vertices in SciPy
vor_vertices = vor.vertices
ax_manual.scatter(vor_vertices[...,0], # x coords
                  vor_vertices[...,1], # y coords
                  color='orange')
# to connect the Voronoi vertices into the Voronoi edges (the polygon
# edges that enclose Voronoi regions) we can use the "ridges:""
vor_ridges = vor.ridge_vertices
# the above ridges are actually indices of Voronoi vertices, so we
# will iterate through and plot accordingly
for edge in vor_ridges:
    if -1 in edge:
        # some edges can continue to infinity (SciPy marks the missing
        # endpoint with index -1)
        # those are dashed lines in voronoi_plot_2d, but let's
        # ignore them here
        continue
    edge_start = vor_vertices[edge[0]]
    edge_end = vor_vertices[edge[1]]
    ax_manual.plot([edge_start[0], edge_end[0]], # the two x coords
                   [edge_start[1], edge_end[1]], # the two y coords
                   color='black')
ax_manual.set_xlim(-3, 3)
ax_manual.set_ylim(-3, 3)
# let's add the generators back in as well, to facilitate comparison
# with plot above
ax_manual.scatter(tree_positions[...,0], # x coordinates
                  tree_positions[...,1], # y coordinates
                  color='blue');
# -
# So, the plots look pretty similar whether we use matplotlib manually in conjunction with SciPy or if we use the built-in convenience function voronoi_plot_2d()
# +
# if we instead wanted to calculate the area of the entire forest we could do that quite easily
# with SciPy as well by wrapping all the trees with an "elastic band" (the Convex Hull)
hull = scipy.spatial.ConvexHull(tree_positions)
# NOTE: for a 2-D hull SciPy's `area` attribute is the hull's *perimeter*;
# the enclosed 2-D area is exposed as `volume`, so use that for the forest area.
forest_area = hull.volume
forest_area
# +
# to confirm the elastic band nature of the Convex Hull, let's plot it using
# matplotlib as usual
hull_fig, hull_ax = plt.subplots(1, 1)
# each simplex of a 2-D hull is one boundary segment (a pair of point indices)
for simplex in hull.simplices:
    hull_ax.plot(hull.points[simplex, 0],
                 hull.points[simplex, 1],
                 '-',
                 lw=6)
# and restore scatter of the tree positions as well
hull_ax.scatter(tree_positions[...,0], # x coordinates
                tree_positions[...,1], # y coordinates
                color='black',
                s=200);
# -
# Now, perhaps we've discovered that the region affected by the forest fire can actually be estimated as the area between
# a curve defined by a function and a roughly straight line ocean / coastal boundary.
#
# Let's say this may be expressed as the following definite integral:
#
# $$\int_{-3}^{3} (x^2 + 5x + 30) \,dx$$
# +
# if we want to estimate the numerical result of that definite integral (area affected by forest fire)
# we'll want to use scipy.integrate.quad()
import scipy.integrate
# start by defining the function of interest
def forest_func(x):
    """Boundary curve of the burned region: f(x) = x**2 + 5*x + 30.

    Works element-wise on NumPy arrays as well as on plain scalars.
    """
    quadratic_term = x ** 2
    linear_term = 5 * x
    return quadratic_term + linear_term + 30
# call quad() using the function name and the limits of the definite integral
# quad returns (integral estimate, upper bound on the absolute error)
area_estimate, upper_bound_error = scipy.integrate.quad(forest_func, -3, 3)
area_estimate, upper_bound_error
# +
# let's plot the function over the limits of integration and shade in the
# area we just estimated
from matplotlib.patches import Polygon
fig_integrate, ax_integrate = plt.subplots(1, 1)
# plot the function over a slightly wider limit range for clarity
x = np.linspace(-5, 5)
y = forest_func(x)
ax_integrate.plot(x, y, 'black')
# for the shaded region
# see: https://matplotlib.org/examples/showcase/integral_demo.html
ix = np.linspace(-3, 3)
iy = forest_func(ix)
# polygon vertices: anchor at (-3, 0), trace the curve, close at (3, 0)
verts = [(-3, 0)] + list(zip(ix, iy)) + [(3, 0)]
poly = Polygon(verts, facecolor='blue', edgecolor='blue', alpha=0.3)
ax_integrate.add_patch(poly);
# -
# If we want to find the point on the curve (forest region) that is closest to the ocean / coastal boundary we might
# want to find the minimum of the function we just integrated. There are various ways to do this, but for demonstration
# purposes let's try to *minimize* our function using SciPy. Specifically, we'll use `scipy.optimize.minimize`
# +
import scipy.optimize
# this is a pretty naive optimization (I rarely use scipy.optimize)
# we haven't specified the algorithm to use and so on
# but maybe that's a good thing for clarity anyway
optimization_result = scipy.optimize.minimize(fun=forest_func,
                                              x0=250) # perhaps we're really bad at guessing the solution!
optimization_result
# -
# `x` is the solution of the minimization / optimization, and `x = -2.5` looks about right for the minimum of our function above
#
# likewise, `fun` is the `y` value of our "objective function" at the minimum `x` value; again, the value of `23.75` looks about right based on visual inspection of this quadratic
# Now, let's say we want to take a close look at how our forest boundary is defined based on the set of discrete data points we have access to.
# Perhaps we've discovered that we actually only have about 10 data points, which isn't very many
# +
# coarse sampling of the boundary curve: only 10 points over [-5, 5]
x = np.linspace(-5, 5, num=10)
y = forest_func(x)
# let's try two types of interpolation to connect our boundary points
import scipy.interpolate
# we first generate interpolation functions, which can then
# operate on i.e., a denser range of x values
f_linear = scipy.interpolate.interp1d(x, y)
f_cubic = scipy.interpolate.interp1d(x, y, kind='cubic')
x_new = np.linspace(-5, 5, num=15) # denser range for the interpolation plots
# plot the interpolations
# see: https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
fig_interp, ax_interp = plt.subplots(1, 1, figsize=(8, 8))
ax_interp.plot(x, y, 'o',
               x_new, f_linear(x_new), '-',
               x_new, f_cubic(x_new), '--')
ax_interp.legend(['data', 'linear', 'cubic'], loc='best', fontsize=20)
# focus on minimum of the quadratic to emphasize interpolation differences
ax_interp.set_ylim(20, 32);
# -
# So, perhaps cubic interpolation does a slightly better job of approximating the function in this case.
# Let's imagine we've now been directed to study the behavioral / environmental impact of the forest fire in the affected area.
#
# Since the forest in question normally has two primary animals that generate noise at regular intervals during nighttime hours,
# we've gone ahead and recorded some audio data from several evenings. We'd now like to convert this periodic / time-domain
# data to frequency-based data so that we can confirm the species of animals generating the noises.
# +
# for this particular application (converting periodic time domain data to frequency domain data, Fourier transforms are appropriate)
import scipy.fftpack
# example inspired by: https://docs.scipy.org/doc/scipy/reference/tutorial/fftpack.html
n_samples = 600
sample_spacing = 1.0 / 800.0
time_values = np.linspace(0.0, n_samples * sample_spacing, n_samples)
# synthetic recording: a 50 Hz sine plus a half-amplitude 80 Hz sine
recorded_values = np.sin(50.0 * 2.0*np.pi*time_values) + 0.5*np.sin(80.0 * 2.0*np.pi*time_values)
# convert to frequency domain data using a Fast Fourier Transform
y_freq = scipy.fftpack.fft(recorded_values)
x_freq = np.linspace(0.0, 1.0/(2.0 * sample_spacing), n_samples // 2)
fig_fft, ax_fft = plt.subplots(1, 1)
# typically only the positive fft values are plotted
ax_fft.plot(x_freq,
            2.0 / n_samples * np.abs(y_freq[0:n_samples//2]))
ax_fft.grid()
ax_fft.set_xlabel('Frequency', fontsize=20)
ax_fft.set_ylabel('Intensity', fontsize=20);
# -
# Perhaps we could identify the two animal species that remain after the forest fire based on the audio frequency data above.
# ### Windowing
plt.plot(time_values, recorded_values);
# the first and last samples differ, i.e. the signal does not wrap smoothly
print(recorded_values[0], recorded_values[-1])
# Note that there is a discontinuity at the end of our signal. The FFT is used to model periodic signals, so this discontinuity introduces artifacts in the spectrum. We can multiply the signal with a windowing function, that forces it to zero at the ends, which gets rid of this problem, at the cost of some signal magnitude.
# +
import scipy.signal
window = scipy.signal.windows.hann(len(recorded_values))
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 5))
ax0.plot(window)
ax0.set_title('Hann window')
ax1.plot(time_values, window * recorded_values);
ax1.set_title('Windowed signal');
# +
# recompute the spectrum on the windowed signal
y_freq = scipy.fftpack.fft(window * recorded_values)
x_freq = np.linspace(0.0, 1.0/(2.0 * sample_spacing), n_samples // 2)
fig_fft, ax_fft = plt.subplots(1, 1)
# typically only the positive fft values are plotted
ax_fft.plot(x_freq,
            2.0 / n_samples * np.abs(y_freq[0:n_samples//2]))
ax_fft.grid()
ax_fft.set_xlabel('Windowed spectrum', fontsize=20)
ax_fft.set_ylabel('Intensity', fontsize=20);
# -
# Note, in the figure above, that you get better localisation in the spectrum, but that the peak magnitudes are lower.
# ---
#
# It turns out that a "citizen scientist" has managed to capture a photo of one of these suspected animal species, and while
# we think we know what it is, we'd eventually like to automate the process of classifying images submitted by citizens who live
# nearby. Perhaps we have a colleague who would like the image data processed through edge filtering, so that their special
# algorithm can just work on the edge data for classification.
#
# So, we will try to edge filter our data using some functionality from the SciPy signal processing submodule, `scipy.signal`
# +
# inspired by SciPy signal processing tutorial content: https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html
import scipy.misc
import scipy.signal
# here is the image we are working with:
# NOTE(review): scipy.misc.face is removed in recent SciPy releases (moved to
# scipy.datasets.face) -- confirm against the pinned SciPy version
image = scipy.misc.face(gray=True).astype(np.float32)
fig_image, ax_image = plt.subplots(1, 1)
ax_image.imshow(image, cmap='gray');
# -
# determine the B-spline interpolation coefficients to be used for edge filtering
coefs = scipy.signal.cspline2d(image,
                               8.0) # lambda specifies the amount of "smoothing"
coefs.shape, image.shape
# +
# define the so-called "separation filter"
derfilt = np.array([1.0, -2, 1.0], dtype=np.float32)
# we now effectively calculate a second derivative to get the important / transition "edges" from the original image
# (sepfir2d applies the derivative filter once along rows and once along columns)
deriv = (scipy.signal.sepfir2d(coefs, derfilt, [1]) + scipy.signal.sepfir2d(coefs, [1], derfilt))
fig_image, ax_image = plt.subplots(1, 1)
ax_image.imshow(deriv, cmap='gray');
# -
# Now we can send the `deriv` array to our colleague for processing (perhaps by pickling it, or using `np.savetxt` -- which may be more portable)
# Now, let's say that we have a dataset containing the number of animal sightings reported per day, and we hypothesize that
# the number of reported sightings is more or less randomly distributed. There are probably various ways to analyze the data,
# but let's focus on an initial visual assessment that combines `scipy.stats` with `matplotlib`
# +
import scipy.stats
fig_stats, ax_stats = plt.subplots(1, 1)
# start off by generating the distribution of random animal sightings reported
animal_sightings = scipy.stats.norm.rvs(loc=5, size=1000) # mean is 5 animal sightings reported per day
# plot histogram of the normally distributed random data
ax_stats.hist(animal_sightings, density=True, histtype='stepfilled', alpha=0.2)
# let's see if the probability density of the animal sightings matches the probability density function
# for random variates
x = np.linspace(0, 10, 300)
ax_stats.plot(x, scipy.stats.norm.pdf(x, loc=5), 'k-', lw=2, label='frozen pdf');
# -
# Looks like a sensible match!
# ## 3D plotting
# +
# here's a small example of 3d plotting in matplotlib
# matplotlib is generally best-suited to 2d plots, but does have some
# 3d support
# initial set up
from mpl_toolkits.mplot3d import Axes3D
fig_3d = plt.figure()
ax_3d = fig_3d.add_subplot(111, projection='3d')
# try scattering some points in 3D
# seed the generator so the scatter is reproducible across runs
np.random.seed(123)
vals = np.random.randn(20, 3)
ax_3d.scatter(vals[...,0],
              vals[...,1],
              vals[...,2],
              c = 'black')
fig_3d.set_size_inches(8, 8)
# -
# The above gave a taste of some of the things that SciPy offers, and the way that matplotlib can be used to generate plots in conjunction with SciPy workflows. Let's now turn our attention to a practice exercise to try applying what we've just learned about SciPy.
# ## Practice Exercise
#
# You're an engineer tasked with assessing the quality of ball bearings and you know that perfect spheres are ideal. To assess sphericity,
# you can use the following formula:
#
# $\psi = \frac{{\pi}^{1/3}{(6V_{p})}^{2/3}}{A_{p}}$
#
# where $\psi$ is sphericity (perfect sphere has a value of `1`; all other polyhedra have values $<1$)
#
# $V_p$ is the volume of the polyhedron (ball bearing), and $A_p$ is its surface area
#
# Given the array of 3D surface coordinates for the ball bearing provided below,
# determine the value for its sphericity.
# **NOTE**: this should have clarified that some of the points are not on the surface, but that the surface
# has been well-sampled
#
# If you manage to calculate $\psi$, try plotting the 3D polyhedron (ball bearing surface coords)
# to see if the sphericity value matches with a visual inspection.
#
# Hint: for 3D plots use `from mpl_toolkits.mplot3d import Axes3D` and `add_subplot(111, projection='3d')`
# load the sampled 3-D surface coordinates of the ball bearing (one x, y, z row per point)
ball_bearing_surface_arr = np.loadtxt('sphericity_exercise_vertices.out')
# ## Solution:
# +
# we assume that the convex hull is a reasonable representation of the outer surface of the object
hull = scipy.spatial.ConvexHull(ball_bearing_surface_arr)
# luckily, ConvexHull has area and volume attributes:
# (for a 3-D hull, `area` is the surface area and `volume` the enclosed volume)
area = hull.area
volume = hull.volume
# let's define a function to calculate sphericity using the Volume and Area values calculated from above
def calc_sphericity(area, volume):
    """Return the sphericity psi of a polyhedron.

    psi = pi**(1/3) * (6 * V)**(2/3) / A, where V is the polyhedron's volume
    and A its surface area. A perfect sphere gives psi == 1; every other
    polyhedron gives psi < 1.
    """
    return (np.pi ** (1. / 3.)) * ((6 * volume) ** (2. / 3.)) / area
# use the new function to determine the sphericity of the ball bearing
sphericity = calc_sphericity(area=area,
                             volume=volume)
# display the result
sphericity
# +
# the above result for sphericity suggests a relatively round object, but certainly not a perfect sphere
# let's produce a 3D scatter plot as a crude visual confirmation of this finding
# set up for 3D plotting with matplotlib
from mpl_toolkits.mplot3d import Axes3D
fig_solution = plt.figure()
ax_solution = fig_solution.add_subplot(111, projection='3d')
# scatter plot the ball bearing coordinates
ax_solution.scatter(ball_bearing_surface_arr[...,0], # all x values
                    ball_bearing_surface_arr[...,1], # all y values
                    ball_bearing_surface_arr[...,2], # all z values
                    color='black')
# add a title and some axis labels
# shared font size for the title and all three axis labels
fontsize=20
ax_solution.set_title('Ball Bearing Coordinates',fontsize=fontsize)
ax_solution.set_xlabel('x', fontsize=fontsize)
ax_solution.set_ylabel('y', fontsize=fontsize)
ax_solution.set_zlabel('z', fontsize=fontsize)
# increase the size of the figure
fig_solution.set_size_inches(10, 10)
# -
# -
| notebooks/matplotlib_SciPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Algorithm for deriving moment equations
# $$
# \def\n{\mathbf{n}}
# \def\x{\mathbf{x}}
# \def\N{\mathbb{\mathbb{N}}}
# \def\X{\mathbb{X}}
# \def\NX{\mathbb{\N_0^\X}}
# \def\C{\mathcal{C}}
# \def\Jc{\mathcal{J}_c}
# \def\DM{\Delta M_{c,j}}
# \newcommand\diff{\mathop{}\!\mathrm{d}}
# \def\Xc{\mathbf{X}_c}
# \newcommand{\muset}[1]{\dot{\{}#1\dot{\}}}
# $$
# For a compartment population $\n \in \NX$ evolving stochastically according to stoichiometric equations from transition classes $\C$, we want to find an expression for
# $$
# \frac{\diff}{\diff t}\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>
# $$
# in terms of expectations of population moments $M^\alpha, M^{\beta}, \ldots$
# ### (1)
# From the definition of the compartment dynamics, we have
# $$
# \diff M^\gamma = \sum_{c \in \C} \sum_{j \in \Jc} \DM^\gamma \diff R_{c,j}
# $$
# We apply Ito's rule to derive
# $$
# \diff f(M^\gamma, M^{\gamma'}, \ldots) = \sum_{c \in \C} \sum_{j \in \Jc}
# \left(
# f(M^\gamma + \DM^\gamma, M^{\gamma'} + \DM^{\gamma'}, \ldots)
# - f(M^\gamma, M^{\gamma'}, \ldots)
# \right) \diff R_{c,j}
# $$
# Assume, that $f(M^\gamma, M^{\gamma'}, \ldots)$ is a polynomial in $M^{\gamma^i}$ with $\gamma^i \in \N_0^D$.
#
# Then $\diff f(M^\gamma, M^{\gamma'}, \ldots)$ is a polynomial in $M^{\gamma^k}, \DM^{\gamma^l}$ with $\gamma^k, \gamma^l \in \N_0^D$, that is,
# $$
# \diff f(M^\gamma, M^{\gamma'}, \ldots) = \sum_{c \in \C} \sum_{j \in \Jc}
# \sum_{q=1}^{n_q} Q_q(M^{\gamma^k}, \DM^{\gamma^l})
# \diff R_{c,j}
# $$
# where $Q_q(M^{\gamma^k}, \DM^{\gamma^l})$ are monomials in $M^{\gamma^k}, \DM^{\gamma^l}$.
# ### (2)
# Let's write $Q_q(M^{\gamma^k}, \DM^{\gamma^l})$ as
# $$
# Q_q(M^{\gamma^k}, \DM^{\gamma^l}) = k_q \cdot \Pi M^{\gamma^k} \cdot \Pi \DM^{\gamma^l}
# $$
# where $k_q$ is a constant,
# $\Pi M^{\gamma^k}$ is a product of powers of $M^{\gamma^k}$, and
# $\Pi \DM^{\gamma^l}$ is a product of powers of $\DM^{\gamma^l}$.
#
# Analogous to the derivation in SI Appendix S.3, we arrive at the expected moment dynamics
# $$
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t} =
# \sum_{c \in \C} \sum_{q=1}^{n_q} \left<
# \sum_{j \in \Jc} k_q \cdot \Pi M^{\gamma^k} \cdot \Pi \DM^{\gamma^l} \cdot h_{c,j}(\n)
# \right>
# $$
# ### (3)
# Analogous to SI Appendix S.4, the contribution of class $c$, monomial $q$ to the expected dynamics of $f(M^\gamma, M^{\gamma'}, \ldots)$ is
# $$
# \begin{align}
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t}
# &= \left<
# {\large\sum_{j \in \Jc}} k_q \cdot \Pi M^{\gamma^k} \cdot \Pi \DM^{\gamma^l} \cdot h_{c,j}(\n)
# \right>
# \\
# &= \left<
# {\large\sum_{\Xc}} w(\n; \Xc) \cdot k_c \cdot k_q \cdot \Pi M^{\gamma^k} \cdot g_c(\Xc) \cdot
# \left<
# \Pi \DM^{\gamma^l} \;\big|\; \Xc
# \right>
# \right>
# \end{align}
# $$
#
# ### (4)
# Let's consider the expression $A = \sum_{\Xc} w(\n; \Xc) \cdot l(\n; \Xc)$ for the following cases of reactant compartments:
# $\Xc = \emptyset$,
# $\Xc = \muset{\x}$, and
# $\Xc = \muset{\x, \x'}$.
#
# (1) $\Xc = \emptyset$:
#
# Then $w(\n; \Xc) = 1$, and
# $$
# A = l(\n)
# $$
#
# (2) $\Xc = \muset{\x}$:
#
# Then $w(\n; \Xc) = \n(\x)$, and
# $$
# A = \sum_{\x \in \X} \n(\x) \cdot l(\n; \muset{\x})
# $$
#
# (3) $\Xc = \muset{\x, \x'}$:
#
# Then
# $$
# w(\n; \Xc) = \frac{\n(\x)\cdot(\n(\x')-\delta_{\x,\x'})}
# {1+\delta_{\x,\x'}},
# $$
# and
# $$
# \begin{align}
# A &= \sum_{\x \in \X} \sum_{\x' \in \X}
# \frac{1}{2-\delta_{\x,\x'}}
# \cdot w(\n; \Xc) \cdot l(\n; \muset{\x, \x'}) \\
# &= \sum_{\x \in \X} \sum_{\x' \in \X}
# \frac{\n(\x)\cdot(\n(\x')-\delta_{\x,\x'})}{2}
# \cdot l(\n; \muset{\x, \x'}) \\
# &= \sum_{\x \in \X} \sum_{\x' \in \X}
# \n(\x)\cdot\n(\x') \cdot \frac{1}{2}l(\n; \muset{\x, \x'})
# \: - \:
# \sum_{\x \in \X}
# \n(\x) \cdot \frac{1}{2}l(\n; \muset{\x, \x})
# \end{align}
# $$
# ### (5)
# Now let
# $$
# l(\n; \Xc) = k_c \cdot k_q \cdot \Pi(M^{\gamma^k}) \cdot g_c(\Xc) \cdot
# \left<
# \Pi \DM^{\gamma^l} \;\big|\; \Xc
# \right>
# $$
#
# Plugging in the concrete $\gamma^l$ and expanding, $l(\n; \Xc)$ is a polynomial in $\Xc$.
#
# Monomials are of the form $k \x^\alpha$ or $k \x^\alpha \x'^\beta$ with $\alpha, \beta \in \N_0^D$.
# (Note that occurences of $\Pi M^{\gamma^k}$ are part of the constants $k$.)
#
# Consider again the different cases of reactant compartments $\Xc$:
#
# (1) $\Xc = \emptyset$:
# $$
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t}
# = \left<l(\n)\right>
# $$
#
# (2) $\Xc = \muset{\x}$:
# $$
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t}
# = \left<R(l(\n; \muset{\x}))\right>
# $$
# where $R$ replaces all $k \x^\alpha$ by $k M^\alpha$.
#
# (3) $\Xc = \muset{\x, \x'}$:
# $$
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t}
# = \frac{1}{2}\left<R'(l(\n; \muset{\x, \x'}))\right>
# \: - \:
# \frac{1}{2}\left<R(l(\n; \muset{\x, \x}))\right>
# $$
# where $R'$ replaces all $k \x^\alpha \x'^\beta$ by $k M^\alpha M^\beta$,
# and again $R$ replaces all $k \x^\alpha$ by $k M^\alpha$.
#
#
#
# ### (6)
# Finally, sum over contributions from all $c$, $q$ for the total
# $$
# \frac{\diff\left< f(M^\gamma, M^{\gamma'}, \ldots) \right>}{\diff t}
# $$
| (EXTRA) Algorithm overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="m09hVT9BGjZn"
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# + id="WLKjoGlIWUSs"
# Kaggle House Prices data: training set (has SalePrice) and test set.
data=pd.read_csv('train.csv')
test=pd.read_csv('test.csv')
# + colab={"base_uri": "https://localhost:8080/"} id="Vaq0s_K9U5EZ" outputId="9a5aa748-68ac-4c80-d1c1-b1cc1d0072df"
# Regression target.
y=data.SalePrice
y.shape
# + colab={"base_uri": "https://localhost:8080/"} id="MhkQ4KNdY8P5" outputId="e51c5bd0-c170-4aee-b9ff-e78902893a2e"
test.shape
# + id="J771jVu_kDp8"
# Feature matrix: everything except the target.
x=data.drop(labels=['SalePrice'],axis=1,)
# + colab={"base_uri": "https://localhost:8080/", "height": 352} id="wAZB0GX_mxm9" outputId="cf103081-9280-40ea-dfd3-b00e084f932e"
# Visualize missing values (bright cells = NaN).
import seaborn as sns
sns.heatmap(x.isnull())
# + colab={"base_uri": "https://localhost:8080/"} id="SIFL-7Olmxrq" outputId="705a15f0-9b22-4d52-ca2a-69be6c4b94c9"
# Drop columns that are mostly missing.
cdrop=['Alley','FireplaceQu','PoolQC','MiscFeature','Fence']
x.drop(cdrop,axis=1,inplace=True)
x.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="rkLwOwF8h5bs" outputId="ab31a2ac-fa9f-49b7-ca7c-56a05607dafe"
import seaborn as sns
sns.heatmap(x.isnull())
# + id="8CiosMZ7uADo"
# Impute the remaining numeric NaNs with the column mean.
cmean=['GarageYrBlt', 'MasVnrArea','LotFrontage']
for a in cmean:
    x[a]=x[a].fillna(x[a].mean())
# + id="8fwpFUnLvZnD"
sns.heatmap(x.isnull())
# + id="SHhJVMeVvz8x"
x.info()
# + id="ZtVRHwI8evu9"
# + id="E24Dj_7GwPOq"
# Impute the categorical NaNs with the column mode.
cmode=['BsmtQual','MasVnrType','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','Electrical','GarageType','GarageFinish','GarageQual','GarageCond']
for a in cmode:
    x[a]=x[a].fillna(x[a].mode()[0])
# + id="w8S4utk1flGv"
x.info()
sns.heatmap(x.isnull())
# + id="2iXpiMQtfwrp"
# Collect the names of the numeric feature columns of the design matrix.
# NOTE(review): the original comprehension read
#   [colunm for column in x.columns if type(column) is (int or float)]
# `int or float` evaluates to `int`, and `type(column)` is the type of the
# column *name* (a str), so the condition was always False and the result was
# always an empty list -- which also masked the `colunm` typo (a NameError
# had the condition ever matched). Select by column dtype instead.
cfeature = [column for column in x.columns
            if pd.api.types.is_numeric_dtype(x[column])]
# + colab={"base_uri": "https://localhost:8080/"} id="f15SyFVshXPa" outputId="ef031809-78ec-497f-b23a-12c82e05dfb3"
# Print the values of one sample row for a quick eyeball check.
for a in x.iloc[1,:]:
    print (a)
# + colab={"base_uri": "https://localhost:8080/"} id="1I3kEZE-pCMC" outputId="ebb1a495-725f-42a2-a1db-31c38ce501d0"
x.shape
# + colab={"base_uri": "https://localhost:8080/"} id="AoYhAhLEvgK2" outputId="cc83c620-0c12-40cd-e1ba-1d8cecde9e70"
# Drop the same mostly-missing columns from the test set.
cdrop=['Alley','FireplaceQu','PoolQC','MiscFeature','Fence']
test.drop(cdrop,axis=1,inplace=True)
test.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="toh6sNZCuaQO" outputId="68665a74-0b81-434c-da8a-21c3c7e9a9b8"
sns.heatmap(test.isnull())
test.info()
# + id="_K2930KzvSmG"
# Mean-impute the numeric test columns.
cmean=['LotFrontage','GarageYrBlt']
for a in cmean:
    test[a]=test[a].fillna(test[a].mean())
# + id="lYSeanfExsrf"
# Mode-impute every remaining test column that still has NaNs.
for a in test.columns:
    test[a]=test[a].fillna(test[a].mode()[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="lcRQyVjJyBdW" outputId="05f7bc87-1d74-4b0c-a70f-e143254e173a"
sns.heatmap(test.isnull())
# + colab={"base_uri": "https://localhost:8080/"} id="L9PYNDn6yHkL" outputId="0345f090-f0cc-439b-dcd4-bb6977360e63"
test.shape
# + id="FsVhAjmDrC6g"
# Categorical columns to one-hot encode.
columns=['MSZoning','Street','LotShape','LandContour','Utilities','LotConfig','LandSlope','Neighborhood',
         'Condition2','BldgType','Condition1','HouseStyle','SaleType',
        'SaleCondition','ExterCond',
         'ExterQual','Foundation','BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2',
        'RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType','Heating','HeatingQC',
         'CentralAir',
         'Electrical','KitchenQual','Functional',
         'GarageType','GarageFinish','GarageQual','GarageCond','PavedDrive']
# + id="ugMzgsP1y3vH"
# Keep an untouched copy of the training features.
xmain=x.copy()
# + id="VqNEqJaLzKDE"
# Stack train and test so get_dummies produces identical columns for both.
x=pd.concat([x,test],axis=0)
# + colab={"base_uri": "https://localhost:8080/"} id="yAUN5nZ8Esp4" outputId="f254f9ac-bcfe-46ef-b861-1c7c8a152d16"
test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Btmj8iykzY4A" outputId="1a8fb107-ace4-4a65-e53f-862726891df0"
x.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="lYJ_EExXFH_a" outputId="f1cc384b-ca80-4f90-f8cf-f99799f96154"
sns.heatmap(test.isnull())
# + id="q6uqO7ISDRro"
# One-hot encode each categorical column, dropping the original column.
temp=x
for col in columns:
    col_temp=pd.get_dummies(x[col],prefix=col)
    temp=pd.concat([temp,col_temp],axis=1).drop(col,axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="RAaLpsThGenf" outputId="dfc83559-35df-4b2a-dc1a-2cf05341b7da"
temp.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 461} id="Xb0NKnKlJCC-" outputId="51321106-04ba-4069-a1aa-9a866f759e9c"
temp
# + id="73r59-QVN9sJ"
# Split back into train (first 1460 rows) and test (the rest).
xtrain=temp.iloc[:1460,:]
xtest=temp.iloc[1460:,:]
# + colab={"base_uri": "https://localhost:8080/"} id="r0d2aetnT36a" outputId="02b4ef69-2bd9-4aa1-f57e-7657c799bdd8"
xtrain.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 461} id="hclAm934VHX1" outputId="a05a847e-ec78-49b7-f5d5-ce4ca562fc20"
xtrain.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="4rgxpB-BVSxW" outputId="fc8dbb46-9d7c-44e8-92ad-2062212439e5"
import seaborn as sns
corrmat = xtrain.corr()
sns.heatmap(corrmat)
# + id="fIC7jTVWWI8D"
def correlation(dataset, threshold):
    """Return the set of column names whose absolute pairwise correlation
    with any *earlier* column of ``dataset`` exceeds ``threshold``.

    Only the later column of each highly correlated pair is flagged, so the
    returned names can be dropped while keeping one column per pair.
    """
    corr_matrix = dataset.corr()
    flagged = set()
    for i, name in enumerate(corr_matrix.columns):
        # compare column i against every column that precedes it
        if (corr_matrix.iloc[i, :i].abs() > threshold).any():
            flagged.add(name)
    return flagged
# + colab={"base_uri": "https://localhost:8080/"} id="HG315aLtV9We" outputId="bb062c91-718a-43f0-9393-35d3cd60ddc3"
# Columns with |corr| > 0.9 against an earlier column.
corr_features = correlation(xtrain, 0.9)
len(set(corr_features))
# + colab={"base_uri": "https://localhost:8080/", "height": 461} id="n8-K1J3PWY0I" outputId="82103247-de61-42c1-89f5-61c4a0b4cc88"
# NOTE(review): this drop is neither assigned nor inplace, so it only affects
# the displayed output -- `xtrain` still contains the correlated columns when
# the model is fit below. If dropping is intended, the same columns must also
# be removed from `xtest` before predicting, or the column sets will mismatch.
xtrain.drop(corr_features,axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="2B9h2batXVkZ" outputId="897c5cde-f7f2-4651-c8c7-80c41b8dc9ed"
sns.heatmap(xtrain.isnull())
# + colab={"base_uri": "https://localhost:8080/"} id="dSANXqDaXfTm" outputId="aa29f3a7-24e9-4000-fcbe-f330b3d27812"
xtrain.info()
# + colab={"base_uri": "https://localhost:8080/"} id="aAU3E0y5WdVD" outputId="4dd8b246-6e91-4a59-b83f-59eceb1ba5fa"
# Fit a random forest regressor on the one-hot-encoded training matrix.
from sklearn.ensemble import RandomForestRegressor
regression = RandomForestRegressor()
regression.fit(xtrain,y)
# + id="RQjlLYnxYXbi"
predictions = regression.predict(xtest)
# + colab={"base_uri": "https://localhost:8080/"} id="5f1CRiYPY1Z1" outputId="56591bac-f10a-4078-f9c3-526c56a87833"
predictions.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="NgpS5fffYyVW" outputId="6c228473-fa1f-4dc6-f87f-e521507c9577"
# Write the Kaggle submission file (Id + predicted SalePrice).
output = pd.DataFrame({'Id': test.Id,
                       'SalePrice': predictions})
output.to_csv('submission.csv', index=False)
output
| Python/house_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook objective
# This notebook's objective is to explore the dataset provided by the Whereismytransport team, in order to understand how to use this to complete the exercise.
# Data source: https://drive.google.com/drive/folders/1akT7jbXYy2-zLHEtnK3q6vqcQ-JdRO9H
# # Preparation
# ## Import required libraries
import numpy as np
import pandas as pd
import os
import seaborn as sns
# ## Folder structure
# Raw-data source and figure destination, relative to the notebooks/ directory.
input_folder = '../data/raw/'
output_folder = '../reports/figures/'
# ## Data import
# One row per alert message sent to customers.
alerts_df = pd.read_csv(input_folder + 'alerts.csv')
alerts_df.head(2)
# +
def dateparse(x):
    """Parse a YYYYMMDD string into a timestamp.

    Uses pandas instead of datetime.strptime: the previous lambda referenced
    `datetime`, which is never imported in this notebook, so calling it raised
    NameError. pd.Timestamp subclasses datetime.datetime, so callers that
    expect a datetime still work.
    """
    return pd.to_datetime(x, format='%Y%m%d')

# NOTE(review): 'notfications.csv' looks misspelled but may match the actual
# file name in the raw-data folder — verify before renaming.
notifications_df = pd.read_csv(input_folder + 'notfications.csv', parse_dates=['event_date'])
notifications_df.head(2)
# -
# # Exploratory analysis
# ## alerts
# Calculate the missing data % in training data
missing_data = alerts_df.isna().sum() / alerts_df.isna().count() * 100
missing_data[missing_data > 0].sort_values(ascending=False)
# Inspect the rows whose publish timestamp is missing.
alerts_df[alerts_df['published_at'].isnull()]
# The data shows that the column 'published_at' is missing 0.47% of values. Considering the small sample without these values, I will likely drop them from this analysis.
#
# If the objective of this exercise was to create a system that can be applied to customers, I would've recommended an internal investigation to find out why those values are missing.
# Visualise missing data
sns.heatmap(alerts_df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
alerts_df.dtypes
alerts_df['document_id'].nunique()
# Based on how the data looks, document_id is the unique message identifier. Based on this assumption, there are 8564 unique messages.
alerts_df['description'].nunique()
# Interestingly, when looking at the messages, there are only 8515 unique messages. This could mean that there are 46 message ids with a non-unique message.
dups_df = alerts_df[['document_id', 'description']].copy()
# concatenate messages and id
# NOTE(review): this assumes document_id is a string — if it is numeric the
# addition below raises a TypeError. Confirm the dtype from alerts_df.dtypes.
dups_df['concat'] = dups_df['document_id'] + dups_df['description']
# keep unique combinations
dups_df = dups_df[dups_df.duplicated(subset='concat', keep='first')]
# Select most occurring message as example
most_occuring = dups_df['description'].value_counts().index.tolist()[0]
dups_df[dups_df['description'] == most_occuring][['document_id', 'description']]
# There are quite a few duplicate messages with different document_id. Depending on the required outcome, it may be useful to recode the document id into unique values.
alerts_df['published_at'].describe()
# The datetime field is not in a format that is easy for Python to recognise and currently considered a string type. For Python to recognise the column as a date field, a data transformation needs to happen.
# Make date field a pandas date type
# Keep only the first 19 characters (the 'YYYY-MM-DD HH:MM:SS' part) before parsing.
alerts_df['published_at'] = alerts_df['published_at'].str[:19]
alerts_df['published_at'] = pd.to_datetime(alerts_df['published_at'])
alerts_df['published_at']
# Describe the date field
alerts_df['published_at'].describe()
# Looking at the datetime column, the data in this dataset contains six months, from Jan 1st until July 7th 2021.
# # Data exploration
notifications_df.head()
# Having a quick look at this dataset shows that it contains data on actions which took place after the message was sent. This table can likely be joined with the 'join_key_value' column.
notifications_df.shape
# The dataset contains 6 columns and ~2.3 million rows.
# Calculate the missing data % in training data
missing_data = notifications_df.isna().sum() / notifications_df.isna().count() * 100
missing_data[missing_data > 0].sort_values(ascending=False)
notifications_df[notifications_df['join_key_value'].isnull()]
# This data shows that the column 'join_key_value' is missing 2% of values. Considering the small sample without these values, I will likely drop them from this analysis.
#
# If the objective of this exercise would be to create a system that can be applied to customers, I would've recommended an internal investigation to find out why those values are missing.
# Visualise missing data
sns.heatmap(notifications_df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
notifications_df['event_date'].describe()
# This dataset contains one more day than the alerts dataset (2021-07-08). This will likely be useful, as it may take some time for customers to take any action on the message they receive.
# Recode similar event names
# Collapse past-tense variants into a single canonical event name per action.
notifications_df['event_name'].replace('notification_received',
                                       'notification_receive', inplace=True)
notifications_df['event_name'].replace('notification_opened',
                                       'notification_open', inplace=True)
notifications_df['event_name'].value_counts()
# Looking at the event type column, there are quite a few similar events. According to the exercise description, similar events can be combined.
notifications_df['event_name'].value_counts()
# After recoding, we can see 5 types of notifications. The receive notification is the most common category, which makes sense as most users will receive a message.
#
# Also, not all users who received a notification have any follow-up event. Some users may not have interacted based on the message they received or they may not have received any messages.
# Looking at the notifications per user, 8ac62 is the most active one with 5750 notifications.
notifications_df[notifications_df['user_id'] == '8ac62']['event_name'].value_counts()
# When looking at the notifications for that user, interestingly only 52 messages were opened and most were dismissed (93%).
# # Next
# Now that we have a fairly good idea on which data this dataset contains, the next step is to create the customer segments as per the requirements.
| notebooks/1 - Exploratory analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from PIL import Image
import numpy as np
import os
import cv2
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout, GaussianNoise
import pandas as pd
import sys
# %matplotlib inline
import matplotlib.pyplot as plt
import plotly.express as px
def readData(filepath, label):
    """Load every image under *filepath*, resize it to 50x50 RGB, and tag it.

    Returns (images, labels) as numpy arrays; unreadable files are skipped
    with a console message.
    """
    images, tags = [], []
    for entry in os.listdir(filepath):
        try:
            raw = cv2.imread(filepath + entry)
            resized = Image.fromarray(raw, 'RGB').resize((50, 50))
        except AttributeError as e:
            # cv2.imread returns None for unreadable files; Image.fromarray
            # then raises AttributeError, which we treat as "skip this file".
            print('Skipping file: ', entry, e)
        else:
            images.append(np.array(resized))
            tags.append(label)
    print(len(images), ' Data Points Read!')
    return np.array(images), np.array(tags)
def reg_train(file):
    """Train the baseline CNN (no noise layer) on the malaria cell images.

    Parameters
    ----------
    file : str
        Root directory containing 'Parasitized/' and 'Uninfected/' training
        image folders.

    Returns
    -------
    float
        Test-set accuracy. Side effect: saves the trained model to
        './output.h5' (gaussian_train writes to the same path, so successive
        runs overwrite each other's saved model).
    """
    print('Reading Training Data')
    ParasitizedCells, ParasitizedLabels = readData(file + '/Parasitized/', 1)
    UninfectedCells, UninfectedLabels = readData(file + '/Uninfected/', 0)
    Cells = np.concatenate((ParasitizedCells, UninfectedCells))
    Labels = np.concatenate((ParasitizedLabels, UninfectedLabels))
    # NOTE(review): the test-set location is hard-coded while the training
    # location is a parameter — confirm this asymmetry is intentional.
    print('Reading Testing Data')
    TestParasitizedCells, TestParasitizedLabels = readData('./input/fed/test/Parasitized/', 1)
    TestUninfectedCells, TestUninfectedLabels = readData('./input/fed/test/Uninfected/', 0)
    TestCells = np.concatenate((TestParasitizedCells, TestUninfectedCells))
    TestLabels = np.concatenate((TestParasitizedLabels, TestUninfectedLabels))
    # Shuffle each split; images and labels share one permutation so pairs
    # stay aligned.
    s = np.arange(Cells.shape[0])
    np.random.shuffle(s)
    Cells, Labels = Cells[s], Labels[s]
    sTest = np.arange(TestCells.shape[0])
    np.random.shuffle(sTest)
    TestCells, TestLabels = TestCells[sTest], TestLabels[sTest]
    num_classes = len(np.unique(Labels))
    print(len(Cells), ' Data Points')
    # Normalize pixel values to [0, 1] before feeding the network.
    x_train = Cells.astype('float32') / 255
    x_test = TestCells.astype('float32') / 255
    # One-hot encode the labels for categorical cross-entropy.
    y_train = keras.utils.to_categorical(Labels, num_classes)
    y_test = keras.utils.to_categorical(TestLabels, num_classes)
    # Three conv/pool stages followed by a dense classifier head.
    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=2, padding="same", activation="relu", input_shape=(50, 50, 3)))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(filters=32, kernel_size=2, padding="same", activation="relu"))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(filters=64, kernel_size=2, padding="same", activation="relu"))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(500, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation="softmax"))  # 2 output neurons: infected / uninfected
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=100, epochs=5, verbose=1)
    scores = model.evaluate(x_test, y_test)
    print("Loss: ", scores[0])
    print("Accuracy: ", scores[1])
    model.save("./output.h5")
    return scores[1]
def gaussian_train(file, g):
    """Train the CNN with a GaussianNoise layer after the first convolution.

    Identical to reg_train except for the added noise layer; the duplication
    between the two functions is a candidate for consolidation.

    Parameters
    ----------
    file : str
        Root directory containing 'Parasitized/' and 'Uninfected/' training
        image folders.
    g : float
        Noise amount passed to the keras GaussianNoise layer (its stddev).

    Returns
    -------
    float
        Test-set accuracy. Side effect: saves the trained model to
        './output.h5' (same path as reg_train — runs overwrite each other).
    """
    print('Reading Training Data')
    ParasitizedCells, ParasitizedLabels = readData(file + '/Parasitized/', 1)
    UninfectedCells, UninfectedLabels = readData(file + '/Uninfected/', 0)
    Cells = np.concatenate((ParasitizedCells, UninfectedCells))
    Labels = np.concatenate((ParasitizedLabels, UninfectedLabels))
    # NOTE(review): the test-set location is hard-coded while the training
    # location is a parameter — confirm this asymmetry is intentional.
    print('Reading Testing Data')
    TestParasitizedCells, TestParasitizedLabels = readData('./input/fed/test/Parasitized/', 1)
    TestUninfectedCells, TestUninfectedLabels = readData('./input/fed/test/Uninfected/', 0)
    TestCells = np.concatenate((TestParasitizedCells, TestUninfectedCells))
    TestLabels = np.concatenate((TestParasitizedLabels, TestUninfectedLabels))
    # Shuffle each split; images and labels share one permutation so pairs
    # stay aligned.
    s = np.arange(Cells.shape[0])
    np.random.shuffle(s)
    Cells, Labels = Cells[s], Labels[s]
    sTest = np.arange(TestCells.shape[0])
    np.random.shuffle(sTest)
    TestCells, TestLabels = TestCells[sTest], TestLabels[sTest]
    num_classes = len(np.unique(Labels))
    print(len(Cells), ' Data Points')
    # Normalize pixel values to [0, 1] before feeding the network.
    x_train = Cells.astype('float32') / 255
    x_test = TestCells.astype('float32') / 255
    # One-hot encode the labels for categorical cross-entropy.
    y_train = keras.utils.to_categorical(Labels, num_classes)
    y_test = keras.utils.to_categorical(TestLabels, num_classes)
    # Same architecture as reg_train plus GaussianNoise(g) right after the
    # first convolution (active during training only).
    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=2, padding="same", activation="relu", input_shape=(50, 50, 3)))
    model.add(GaussianNoise(g))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(filters=32, kernel_size=2, padding="same", activation="relu"))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(filters=64, kernel_size=2, padding="same", activation="relu"))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(500, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation="softmax"))  # 2 output neurons: infected / uninfected
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=100, epochs=5, verbose=1)
    scores = model.evaluate(x_test, y_test)
    print("Loss: ", scores[0])
    print("Accuracy: ", scores[1])
    model.save("./output.h5")
    return scores[1]
# Baseline run followed by three Gaussian-noise runs with decreasing noise
# amount. Each run overwrites ./output.h5.
reg_train('./input/cell_images')
gaussian_train('./input/cell_images', 0.2)
gaussian_train('./input/cell_images', 0.1)
gaussian_train('./input/cell_images', 0.05)
| Jupyter Simulations/1. Malaria Image CNN Simulations/Gaussian Noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 2 Exercise 2: Creating Denormalized Tables
#
# <img src="images/postgresSQLlogo.png" width="250" height="250">
# ## Walk through the basics of modeling data from normalized form to denormalized form. We will create tables in PostgreSQL, insert rows of data, and do simple JOIN SQL queries to show how these multiple tables can work together.
#
# #### Where you see ##### you will need to fill in code. This exercise will be more challenging than the last. Use the information provided to create the tables and write the insert statements.
#
# #### Remember the examples shown are simple, but imagine these situations at scale with large datasets, many users, and the need for quick response time.
#
# ### Import the library
# Note: An error might pop up after this command has executed. If it does, read it carefully before ignoring it.
import psycopg2
# ### Create a connection to the database, get a cursor, and set autocommit to true
# Open the database connection once; every later cell reuses `conn` and `cur`.
try:
    conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=<PASSWORD>")
except psycopg2.Error as e:
    # NOTE(review): on failure this only prints — the cursor call below would
    # then raise NameError on `conn`. Confirm this is acceptable for the
    # exercise notebook.
    print("Error: Could not make connection to the Postgres database")
    print(e)
try:
    cur = conn.cursor()
except psycopg2.Error as e:
    print("Error: Could not get cursor to the Database")
    print(e)
# Autocommit so each CREATE/INSERT statement is committed immediately, with no
# explicit conn.commit() calls in the exercise cells.
conn.set_session(autocommit=True)
# #### Let's start with our normalized (3NF) database set of tables we had in the last exercise, but we have added a new table `sales`.
#
# `Table Name: transactions2
# column 0: transaction Id
# column 1: Customer Name
# column 2: Cashier Id
# column 3: Year `
#
# `Table Name: albums_sold
# column 0: Album Id
# column 1: Transaction Id
# column 3: Album Name`
#
# `Table Name: employees
# column 0: Employee Id
# column 1: <NAME> `
#
# `Table Name: sales
# column 0: Transaction Id
# column 1: Amount Spent
# `
# <img src="images/table16.png" width="450" height="450"> <img src="images/table15.png" width="450" height="450"> <img src="images/table17.png" width="350" height="350"> <img src="images/table18.png" width="350" height="350">
#
# ### TO-DO: Add all Create statements for all Tables and Insert data into the tables
# +
# TO-DO: Add all Create statements for all tables
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
# TO-DO: Insert data into the tables
try:
cur.execute("INSERT INTO ##### (transaction_id, customer_name, cashier_id, year) \
VALUES (%s, %s, %s, %s)", \
(1, "Amanda", 1, 2000))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (transaction_id, customer_name, cashier_id, year) \
VALUES (%s, %s, %s, %s)", \
(2, "Toby", 1, 2000))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (transaction_id, customer_name, cashier_id, year) \
VALUES (%s, %s, %s, %s)", \
(3, "Max", 2, 2018))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (album_id, transaction_id, album_name) \
VALUES (%s, %s, %s)", \
(1, 1, "Rubber Soul"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (album_id, transaction_id, album_name) \
VALUES (%s, %s, %s)", \
(2, 1, "Let It Be"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (album_id, transaction_id, album_name) \
VALUES (%s, %s, %s)", \
(3, 2, "My Generation"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (album_id, transaction_id, album_name) \
VALUES (%s, %s, %s)", \
(4, 3, "Meet the Beatles"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (album_id, transaction_id, album_name) \
VALUES (%s, %s, %s)", \
(5, 3, "Help!"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (employee_id, employee_name) \
VALUES (%s, %s)", \
(1, "Sam"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (employee_id, employee_name) \
VALUES (%s, %s)", \
(2, "Bob"))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (transaction_id, amount_spent) \
VALUES (%s, %s)", \
(1, 40))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (transaction_id, amount_spent) \
VALUES (%s, %s)", \
(2, 19))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (transaction_id, amount_spent) \
VALUES (%s, %s)", \
(3, 45))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
# -
# #### TO-DO: Confirm using the Select statement the data were added correctly
# +
print("Table: #####\n")
try:
cur.execute("SELECT * FROM #####;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
print("\nTable: #####\n")
try:
cur.execute("SELECT * FROM #####;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
print("\nTable: #####\n")
try:
cur.execute("SELECT * FROM #####;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
print("\nTable: #####\n")
try:
cur.execute("SELECT * FROM #####;")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
# -
# ### Let's say you need to do a query that gives:
#
# `transaction_id
# customer_name
# cashier name
# year
# albums sold
# amount sold`
#
# ### TO-DO: Complete the statement below to perform a 3 way `JOIN` on the 4 tables you have created.
# +
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
# -
# #### Great we were able to get the data we wanted.
#
# ### But, we had to do a 3-way `JOIN` to get there. While it's great we had that flexibility, we need to remember that `JOINS` are slow, and if we have a read-heavy workload that requires low-latency queries we want to reduce the number of `JOINS`. Let's think about denormalizing our normalized tables.
# ### With denormalization you want to think about the queries you are running and how to reduce the number of JOINS even if that means duplicating data. The following are the queries you need to run.
# #### Query 1 : `select transaction_id, customer_name, amount_spent FROM <min number of tables>`
# It should generate the amount spent on each transaction
# #### Query 2: `select cashier_name, SUM(amount_spent) FROM <min number of tables> GROUP BY cashier_name`
# It should generate the total sales by cashier
# ### Query 1: `select transaction_id, customer_name, amount_spent FROM <min number of tables>`
#
# One way to do this would be to do a JOIN on the `sales` and `transactions2` table but we want to minimize the use of `JOINS`.
#
# To reduce the number of tables, first add `amount_spent` to the `transactions` table so that you will not need to do a JOIN at all.
#
# `Table Name: transactions
# column 0: transaction Id
# column 1: Customer Name
# column 2: Cashier Id
# column 3: Year
# column 4: amount_spent`
#
# <img src="images/table19.png" width="450" height="450">
#
# ### TO-DO: Add the tables as part of the denormalization process
# +
# TO-DO: Create all tables
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
#Insert data into all tables
try:
cur.execute("INSERT INTO transactions (#####) \
VALUES (%s, %s, %s, %s, %s)", \
(#####))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO transactions (#####) \
VALUES (%s, %s, %s, %s, %s)", \
(#####))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO transactions (#####) \
VALUES (%s, %s, %s, %s, %s)", \
(#####))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
# -
# ### Now you should be able to do a simplified query to get the information you need. No `JOIN` is needed.
# +
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
# -
# #### Your output for the above cell should be the following:
# (1, 'Amanda', 40)<br>
# (2, 'Toby', 19)<br>
# (3, 'Max', 45)
# ### Query 2: `select cashier_name, SUM(amount_spent) FROM <min number of tables> GROUP BY cashier_name`
#
# To avoid using any `JOINS`, first create a new table with just the information we need.
#
# `Table Name: cashier_sales
# col: Transaction Id
# Col: Cashier Name
# Col: Cashier Id
# col: Amount_Spent
# `
#
# <img src="images/table20.png" width="350" height="350">
#
# ### TO-DO: Create a new table with just the information you need.
# +
# Create the tables
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: Issue creating table")
print (e)
#Insert into all tables
try:
cur.execute("INSERT INTO ##### (#####) \
VALUES (%s, %s, %s, %s)", \
(##### ))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (#####) \
VALUES (%s, %s, %s, %s)", \
(##### ))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
try:
cur.execute("INSERT INTO ##### (#####) \
VALUES (%s, %s, %s, %s)", \
(#####))
except psycopg2.Error as e:
print("Error: Inserting Rows")
print (e)
# -
# ### Run the query
# +
try:
cur.execute("#####")
except psycopg2.Error as e:
print("Error: select *")
print (e)
row = cur.fetchone()
while row:
print(row)
row = cur.fetchone()
# -
# #### Your output for the above cell should be the following:
# ('Max', 225L)<br>
# ('Sam', 295L)
#
# #### We have successfully taken normalized tables and denormalized them in order to speed up our performance and allow for simpler queries to be executed.
# ### Drop the tables
try:
cur.execute("DROP table ####")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table #####")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table #####")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table #####")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
try:
cur.execute("DROP table #####")
except psycopg2.Error as e:
print("Error: Dropping table")
print (e)
# ### And finally close your cursor and connection.
cur.close()
conn.close()
| lesson2/.ipynb_checkpoints/Lesson 2 Exercise 2 Creating Denormalized Tables-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
#
# The :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data
# =====================================================================
#
#
# +
import os.path as op
import mne
# -
# The :class:`Evoked <mne.Evoked>` data structure is mainly used for storing
# averaged data over trials. In MNE the evoked objects are created by averaging
# epochs data with :func:`mne.Epochs.average`. Here we read the evoked dataset
# from a file.
#
#
# Locate the MNE sample dataset and read all evoked responses from the
# averaged file, baseline-corrected on the pre-stimulus interval and with
# projections applied (proj=True).
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evokeds)
# Notice that the reader function returned a list of evoked instances. This is
# because you can store multiple categories into a single file. Here we have
# categories of
# ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``.
# We can also use the ``condition`` parameter to read in only one category.
#
#
# Read a single condition; baseline correction and projection are applied
# explicitly here rather than via reader arguments.
evoked = mne.read_evokeds(fname, condition='Left Auditory')
evoked.apply_baseline((None, 0)).apply_proj()
print(evoked)
# If you've gone through the tutorials of raw and epochs datasets, you're
# probably already familiar with the :class:`Info <mne.Info>` attribute.
# There is nothing new or special with the ``evoked.info``. All the relevant
# info is still there.
#
#
print(evoked.info)
print(evoked.times)
# The evoked data structure also contains some new attributes easily
# accessible:
#
#
print(evoked.nave)  # Number of averaged epochs.
print(evoked.first)  # First time sample.
print(evoked.last)  # Last time sample.
print(evoked.comment)  # Comment on dataset. Usually the condition.
print(evoked.kind)  # Type of data, either average or standard_error.
# The data is also easily accessible. Since the evoked data arrays are usually
# much smaller than raw or epochs datasets, they are preloaded into the memory
# when the evoked object is constructed. You can access the data as a numpy
# array.
#
#
data = evoked.data
print(data.shape)
# The data is arranged in an array of shape `(n_channels, n_times)`. Notice
# that unlike epochs, evoked object does not support indexing. This means that
# to access the data of a specific channel you must use the data array
# directly.
#
#
print('Data from channel {0}:'.format(evoked.ch_names[10]))
print(data[10])
# If you want to import evoked data from some other system and you have it in a
# numpy array you can use :class:`mne.EvokedArray` for that. All you need is
# the data and some info about the evoked data. For more information, see
# `tut_creating_data_structures`.
#
#
# Round-trip the data through EvokedArray and plot the result.
evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])
evoked.plot()
# To write an evoked dataset to a file, use the :meth:`mne.Evoked.save` method.
# To save multiple categories to a single file, see :func:`mne.write_evokeds`.
#
#
| 0.14/_downloads/plot_object_evoked.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Outputs you can update by name
#
# This notebook demonstrates the new name-based display functionality in the notebook. Previously, notebooks could only attach output to the cell that was currently being executed:
#
#
print("typical output")
# There was no simple way to make code in one cell to write output to another cell. Now there is!
#
# This feature will only work on IPython `>= 5.4`, so upgrade if you need to with `pip install --upgrade ipython`. Feel free to update with
#
# ```
# pip install --upgrade ipython
# ```
#
# then restart the kernel with `Language -> Restart Running Kernel`. We'll wait here...
# ## The fun stuff
#
# You made it _to the future_! Pat yourself on the back and take a deep breath, the scariest part is over. The `display` function now has an optional `display_id` parameter. Let's give our next display the boring name and call it *some_destination*.
# Create a named output area; later calls with the same display_id can target it.
h1 = display('initial display', display_id='some_destination')
# Ok, so far, nothing earth shattering. But what happens if you call display with the same `display_id` again?
#
#
#
#
# Same display_id: a new output area is created AND the earlier one is updated.
h2 = display('spoiler alert: output updated in both', display_id='some_destination')
# Fantastic! We have a way of mirroring output in multiple places. But what if you only want to update the previously named displays, without creating a new one? Just call `display` with `update=True`, like this:
# update=True: refresh the existing areas only, without creating a new one here.
h3 = display('no output here, update above', display_id='some_destination', update=True)
# Though we have been working with text so far, this also works for the all other output types. Let's make an HTML-based progress bar!
# + inputHidden=false outputHidden=false
import os
from binascii import hexlify
class ProgressBar(object):
    """An HTML ``<progress>`` bar that can be redrawn in place.

    Each instance gets a random display id so several bars can be shown and
    refreshed independently via IPython's named-display mechanism.
    """

    def __init__(self, capacity):
        # The id must be set before `progress`: a subclass setter may call
        # update(), which reads self._display_id.
        self._display_id = os.urandom(8).hex()
        self.capacity = capacity
        self.progress = 0

    def _repr_html_(self):
        template = "<progress style='width:100%' max='{}' value='{}'></progress>"
        return template.format(self.capacity, self.progress)

    def display(self):
        # Draw a new output area tied to this bar's display id.
        display(self, display_id=self._display_id)

    def update(self):
        # Redraw every output area previously created with this display id.
        display(self, display_id=self._display_id, update=True)
# Draw a bar with capacity 100; it starts at progress 0.
bar = ProgressBar(100)
bar.display()
# -
# The progress bar is drawn and it starts off at `0`. Fill it up half way and call its `update` method to get a redraw.
bar.progress = 50
bar.update()
# Now go half-way again
bar.progress = 75
bar.update()
# Our original bar is kind of far away now, let's get another view of it below.
# Both views share the same display id, so future updates refresh both.
bar.display()
# This is good, but it would be awesome to have a progress bar that would automatically update whenever its progress was modified - that would be truly *progress*ive. We subclass `ProgressBar` and now we make `progress` into a [Python property](https://docs.python.org/3/library/functions.html#property), which will allow us to set it and get it like an attribute, but do that using methods. In particular, whenever we assign a new value to `progress`, we also call `update`.
class AutoupdatingProgressBar(ProgressBar):
    """A ProgressBar whose displays refresh whenever ``progress`` is assigned."""
    @property
    def progress(self):
        # Backing attribute written by the setter below; the first assignment
        # happens in ProgressBar.__init__ via `self.progress = 0`.
        return self._progress
    @progress.setter
    def progress(self, value):
        self._progress = value
        # Redraw every named display of this bar on each assignment.
        self.update()
better_bar = AutoupdatingProgressBar(100)
better_bar.display()
# Plain attribute assignment now triggers a redraw via the property setter.
better_bar.progress = 40
# Much better. No more pesky `update` calls. Let's make a little animation that [Zeno](https://en.wikipedia.org/wiki/Zeno%27s_paradoxes#Dichotomy_paradox) would be proud of:
import time
better_bar.progress = 0
for _ in range(10):
    time.sleep(.5)
    # Advance half of the remaining distance each step (never quite reaches 100).
    better_bar.progress += (better_bar.capacity - better_bar.progress) / 2
# You might have noticed that each `ProgressBar` autogenerates a random `display_id` which is handy if you want to have several of them.
#
#
num_bars = 5
bars = [AutoupdatingProgressBar(100) for _ in range(num_bars)]
for b in bars:
    b.display()
import random
# Randomly nudge one random bar per step; each bar redraws itself on assignment.
for x in range(40):
    time.sleep(.1)
    idx = random.randrange(num_bars)
    bars[idx].progress += random.randint(-2, 10)
for b in bars:
    b.display()
# + inputHidden=false outputHidden=false
| python/display-updates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demonstrate the use of OpSimOutput
# This demo notebook demonstrates the following:
# - recommended way of reading in from an OpSim database
# - zeroDDFdither = True or False options
import opsimsummary as oss
from opsimsummary import OpSimOutput
import os
# We use a small opsim output package for demonstration that is shipped with the package.
# Path to the small example OpSim database bundled with the package.
opsimdb = os.path.join(oss.__path__[0], 'example_data', 'enigma_1189_micro.db')
# Recommended read: unique visits across all proposals; by default DDF visits
# get zeroed dithers (contrast with zeroDDFDithers=False below).
opsout = OpSimOutput.fromOpSimDB(opsimdb, subset='unique_all')
opsout.propIDDict
# Dither offset = dithered pointing minus the nominal field centre.
opsout.summary['ditherInRA'] = opsout.summary.ditheredRA -opsout.summary.fieldRA
opsout.summary['ditherInDec'] = opsout.summary.ditheredDec -opsout.summary.fieldDec
# Inspect the distinct dither offsets for two proposal ids.
opsout.summary.query('propID == 366').ditherInRA.unique()
opsout.summary.query('propID == 366').ditherInDec.unique()
opsout.summary.query('propID == 364').ditherInRA.unique()
opsout.summary.query('propID == 364').ditherInDec.unique()
# Same read with zeroDDFDithers=False, matching MAF-style behaviour.
opsout_likemaf = OpSimOutput.fromOpSimDB(opsimdb, subset='unique_all', zeroDDFDithers=False)
opsout_likemaf.summary['ditherInRA'] = opsout_likemaf.summary.ditheredRA -opsout_likemaf.summary.fieldRA
opsout_likemaf.summary['ditherInDec'] = opsout_likemaf.summary.ditheredDec -opsout_likemaf.summary.fieldDec
opsout_likemaf.summary.query('propID == 366').ditherInRA.unique()
| example/Demo_OpSimOutput.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import print_function, division #compatibility py2 - py3
import random, math, numpy
import matplotlib.pyplot as plt
# +
V = 2e-6
DT = 0.2
L = 100e-6
P1 = 0.9
P2 = 0.5
N = 10
def get_density(x, y):  # version A
    """Attractant density field: largest at the domain centre (L/2, L/2)."""
    dx = x - L / 2.
    dy = y - L / 2.
    return 1. / (1. + math.hypot(dx, dy))
##def get_density(x,y): #version B
## return float(math.hypot(x-L/2.,y-L/2.) < 15e-6)
def draw(b_list, n, t):
    """Render the density field on an n x n grid, overlay the bacteria as
    saturated pixels, and save the frame to bacteria<t>.png."""
    m = numpy.zeros((n,n))
    # Sample the attractant density at each grid node.
    for x in range(n):
        for y in range(n):
            m[x,y] = get_density(x*L/n,y*L/n)
    # Mark every bacterium's cell with the maximum value (1.0).
    for bacteria in b_list:
        x,y = int(bacteria.x*n/L), int(bacteria.y*n/L)
        m[x,y] = 1.
    plt.imshow(m) #add interpolation='None' for non-smoothed image
    plt.savefig("bacteria"+str(t)+".png")
## plt.show() #directly show the image
class Bacteria(object):
    """One bacterium doing a run-and-tumble random walk on the periodic L x L domain.

    Relies on the module-level constants V (speed), DT (time step), L (domain
    size), P1/P2 (probabilities of keeping the current heading) and on the
    module-level function get_density() for the attractant field.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.vx = None
        self.vy = None
        self.randomize_velocity()
        # Density at the starting position; update() compares against it to
        # decide whether the last step moved up or down the gradient.
        self.old_density = get_density(self.x, self.y)

    def randomize_velocity(self):
        """Tumble: pick a uniformly random heading, keeping the speed fixed at V."""
        alpha = random.random()*math.pi*2
        self.vx = math.cos(alpha) * V
        self.vy = math.sin(alpha) * V
        # Bug fix: the original asserted (hypot - V) < eps without abs(), so a
        # too-small speed (negative difference) could never trip the assertion.
        assert abs(math.hypot(self.vx, self.vy) - V) < 0.0000001

    def update(self):
        """Advance one time step: decide whether to tumble, then move and wrap."""
        current_density = get_density(self.x, self.y)
        go_forward = False
        if current_density > self.old_density:
            # Climbing the gradient: keep the current heading with probability P1.
            if random.random() < P1:
                go_forward = True
        else:
            # Descending (or flat): keep the heading only with probability P2.
            if random.random() < P2:
                go_forward = True
        if not go_forward:
            self.randomize_velocity()
        self.x += self.vx * DT
        self.y += self.vy * DT
        #domain periodicity:
        self.x %= L
        self.y %= L
        self.old_density = current_density
# Scatter N bacteria uniformly at random over the periodic L x L domain.
b_list = [Bacteria(random.random()*L, random.random()*L) for i in range(N)]
# Run 200 time steps, saving a 100x100 snapshot every 40 steps.
for t in range(200):
    if t%40 == 0:
        draw(b_list, 100, t)
    for bacteria in b_list:
        bacteria.update()
# -
| modelling_simulation/Multi-agents model - bacterium chemitaxy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apple Mobility Trends Report
#
# Data from https://covid19.apple.com/mobility
import pandas as pd
import numpy as np
import pycountry
import json
from datetime import datetime
from functools import reduce
import requests
from io import StringIO
import re
# + tags=["parameters"]
# papermill parameters
output_folder = "../output/"  # destination directory for APPLE_MOBILITY.csv
# +
# get today's path: the index.json advertises the current data version
# ("basePath") and the per-locale CSV location ("csvPath").
prefix = "https://covid19-static.cdn-apple.com"
response = requests.get(prefix + "/covid19-mobility-data/current/v3/index.json")
assert response.status_code == 200
body = json.loads(response.text)
# get data version
version = body["basePath"]
csv_path = body["regions"]["en-us"]["csvPath"]
path = prefix + version + csv_path
file_response = requests.get(path)
# -
# Wide-format table: one row per place, one column per date.
df = pd.read_csv(StringIO(file_response.content.decode("utf-8")))
# Cache of {alpha-2 country code: {subdivision name: ISO 3166-2 code}},
# filled lazily by resolve_region().
subdivisions = {}
def resolve_region(geo_type, region, sub_region, country):
    """Fill in ISO 3166-1/-2 codes on the module-level ``df`` for one place.

    Arguments are one distinct (geo_type, region, sub-region, country)
    combination from the Apple CSV (empty strings where missing).  Writes the
    "ISO3166-1" and "ISO3166-2" columns of ``df`` in place and caches
    subdivision lookups in the module-level ``subdivisions`` dict.
    """
    # --- Resolve the country to an ISO 3166-1 alpha-2 code. ---------------
    # pycountry's search_fuzzy raises LookupError (instead of returning
    # None) when nothing matches, so the fuzzy fallback is guarded.
    country_code = None
    if geo_type == "country/region":
        # For country-level rows the country name is in the "region" column.
        try:
            pycountry_object = pycountry.countries.get(name=region) or pycountry.countries.search_fuzzy(region)
        except LookupError:
            pycountry_object = None
        if pycountry_object:
            if isinstance(pycountry_object, list):
                # search_fuzzy returns a ranked list; take the best match.
                pycountry_object = pycountry_object[0]
            country_code = pycountry_object.alpha_2
            country_name = pycountry_object.name
            df.loc[(df["geo_type"] == geo_type) & (df["region"] == region), "ISO3166-1"] = country_code
            # Normalize the name to pycountry's canonical spelling.
            df.loc[(df["geo_type"] == geo_type) & (df["region"] == region), "region"] = country_name
        else:
            # Bug fix: a stray bare `country_code` expression here raised
            # UnboundLocalError before this message could ever print.
            print(f"unable to parse country: {country}")
    else:
        try:
            pycountry_object = pycountry.countries.get(name=country) or pycountry.countries.search_fuzzy(country)
        except LookupError:
            pycountry_object = None
        if pycountry_object:
            if isinstance(pycountry_object, list):
                pycountry_object = pycountry_object[0]
            country_code = pycountry_object.alpha_2
            df.loc[(df["geo_type"] == geo_type) & (df["country"] == country), "ISO3166-1"] = country_code
        else:
            print(f"unable to parse non-country: {country}")
    if country_code is None:
        # Country unresolved: nothing further can be annotated.  (The original
        # fell through and crashed on the unbound local `country_code`.)
        return
    # --- Cache this country's {subdivision name: ISO 3166-2 code} map. ----
    if country_code not in subdivisions:
        sub = pycountry.subdivisions.get(country_code=country_code)
        subdivisions[country_code] = {subdivision.name: subdivision.code for subdivision in sub}
    # --- Resolve the subdivision to an ISO 3166-2 code. --------------------
    # Strip decorations like "Canton of X" / "X Region" / "X County" before
    # the lookup; this is best-effort, so lookup misses are ignored.
    if geo_type in ["county", "city"]:
        # County/city rows carry the subdivision name in "sub-region".
        try:
            sub_region_striped = re.match(r"^^(?:Canton of )?(.*?)(?:\s(?:Region|County|Prefecture|Province|\(\w+\)).*)?$", sub_region).group(1)
            code = subdivisions[country_code][sub_region_striped]
            # Drop the country prefix, e.g. "US-CA" -> "CA".
            code = re.sub(r"^\w*?-?(\w+)$", r"\1", code)
            df.loc[
                (df["geo_type"] == geo_type) &
                (df["ISO3166-1"] == country_code) &
                (df["sub-region"] == sub_region),
                "ISO3166-2"] = code
        except (KeyError, AttributeError, TypeError):
            pass
    elif geo_type == "sub-region":
        # Sub-region rows carry the subdivision name in "region" instead.
        try:
            region_striped = re.match(r"^^(?:Canton of )?(.*?)(?:\s(?:Region|County|Prefecture|Province|\(\w+\)).*)?$", region).group(1)
            code = subdivisions[country_code][region_striped]
            code = re.sub(r"^\w*?-?(\w+)$", r"\1", code)
            df.loc[
                (df["geo_type"] == geo_type) &
                (df["ISO3166-1"] == country_code) &
                (df["region"] == region),
                "ISO3166-2"] = code
        except (KeyError, AttributeError, TypeError):
            pass
# Initialise the ISO code columns; resolve_region() fills them in place.
df["ISO3166-1"] = ""
df["ISO3166-2"] = ""
# Resolve each distinct (geo_type, region, sub-region, country) combination once.
places = df[["geo_type", "region", "sub-region", "country"]].fillna("").groupby(["geo_type", "region", "sub-region", "country"])
for row in places:
    # row is a (key, group) pair from groupby; row[0] is the key tuple.
    resolve_region(*row[0])
# Columns named YYYY-MM-DD hold the daily values; everything else is an id column.
cols = list(filter(lambda col: not re.search(r"\d{4}-\d{2}-\d{2}", col), list(df.columns)))
vals = list(filter(lambda col: re.search(r"\d{4}-\d{2}-\d{2}", col), list(df.columns)))
# unpivot df
df = pd.melt(df, value_vars=vals, id_vars=cols)
# Empty string means "no subdivision code resolved" -> NaN in the output.
df.loc[df["ISO3166-2"] == "", "ISO3166-2"] = np.nan
df.sample(25)
column_map = {
    "region": "COUNTRY/REGION",
    "sub-region": "PROVINCE/STATE",
    "variable": "DATE",
    "value": "DIFFERENCE",
    "transportation_type": "TRANSPORTATION_TYPE"
}
df = df.rename(columns=column_map)
df["Last_Updated_Date"] = datetime.utcnow()
# Flag the rows belonging to the most recent reported date.
df['Last_Reported_Flag'] = df["DATE"].max() == df["DATE"]
df.to_csv(output_folder + "APPLE_MOBILITY.csv", index=False, columns=["COUNTRY/REGION",
                                                                      "PROVINCE/STATE",
                                                                      "DATE",
                                                                      "TRANSPORTATION_TYPE",
                                                                      "DIFFERENCE",
                                                                      "ISO3166-1",
                                                                      "ISO3166-2",
                                                                      "Last_Updated_Date",
                                                                      "Last_Reported_Flag"
                                                                      ])
| notebooks/APPLE_MOBILITY.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Estudo de Bioinformática
# ---
#
# ### Módulo 1
# Seja o trecho conhecido do código genético para *Vibrio cholerae* como se segue: (http://bioinformaticsalgorithms.com/data/realdatasets/Replication/v_cholerae_oric.txt)
# ATCAATGATCAACGTAAGCTTCTAAGCATGATCAAGGTGCTCACACAGTTTATCCACAACCTGAGTGGATGACATCAAGATAGGTCGTTGTATCTCCTTCCTCTCGTACTCTCATGACCACGGAAAGATGATCAAGAGAGGATGATTTCTTGGCCATATCGCAATGAATACTTGTGACTTGTGCTTCCAATTGACATCTTCAGCGCCATATTGCGCTGGCCAAGGTGACGGAGCGGGATTACGAAAGCATGATCATGGCTGTTGTTCTGTTTATCTTGTTTTGACTGAGACTTGTTAGGATAGACGGTTTTTCATCACTGACTAGCCAAAGCCTTACTCTGCCTGACATCGACCGTAAATTGATAATGAATTTACATGCTTCCGCGACGATTTACCTCTTGATCATCGATCCGATTGAAGATCTTCAATTGTTAATTCTCTTGCCTCGACTCATAGCCATGATGAGCTCTTGATCATGTTTCCTTAACCCTCTATTTTTTACGGAAGAATGATCAAGCTGCTGCTCTTGATCATCGTTTC
vibriaColerae = 'ATCAATGATCAACGTAAGCTTCTAAGCATGATCAAGGTGCTCACACAGTTTATCCACAACCTGAGTGGATGACATCAAGATAGGTCGTTGTATCTCCTTCCTCTCGTACTCTCATGACCACGGAAAGATGATCAAGAGAGGATGATTTCTTGGCCATATCGCAATGAATACTTGTGACTTGTGCTTCCAATTGACATCTTCAGCGCCATATTGCGCTGGCCAAGGTGACGGAGCGGGATTACGAAAGCATGATCATGGCTGTTGTTCTGTTTATCTTGTTTTGACTGAGACTTGTTAGGATAGACGGTTTTTCATCACTGACTAGCCAAAGCCTTACTCTGCCTGACATCGACCGTAAATTGATAATGAATTTACATGCTTCCGCGACGATTTACCTCTTGATCATCGATCCGATTGAAGATCTTCAATTGTTAATTCTCTTGCCTCGACTCATAGCCATGATGAGCTCTTGATCATGTTTCCTTAACCCTCTATTTTTTACGGAAGAATGATCAAGCTGCTGCTCTTGATCATCGTTTC'
def PatternCount(Text, Pattern):
    """Count occurrences of Pattern in Text, overlapping matches included."""
    k = len(Pattern)
    window_starts = range(len(Text) - k + 1)
    return sum(1 for i in window_starts if Text[i:i + k] == Pattern)
PatternCount(vibriaColerae, 'ATCA')
# Note que conseguimos contar 11 ocorrências da sequência 'ATCA' (a primeira ocorre no início da sequência fornecida).
#
# Quaisquer que sejam as sequências fornecidas o algoritmo irá percorrer a "palavra" atrás de ocorrências do "padrão" fornecido.
# Abaixo está o procedimento para a definição de um dicionário, ___frequency map___.
#
# Seja as bases nucleotídicas 'A', 'T', 'C', 'G':
bases = ['A', 'T', 'C', 'G']
# O problema "k-mer" define a ocorrência de todas as substrings em uma string.
#
# Para definir nosso problema precisamos saber o tamanho de nossa substring 'k'.
#
# Assim, para um k = 2, nossa "frequency map" se dá de acordo com 4² possíveis substrings:
#
# 'AA', 'AT', 'AC', 'AG',
#
# 'TA', 'TT', 'TC', 'TG',
#
# 'CA', 'CT', 'CC', 'CG',
#
# 'GA', 'GT', 'GC', 'GG'
#
# Pela definição de permutação, para um arranjo de n-tuplas (onde a repetição é permitida), teremos que em um conjunto S de k elementos, o número de n-tuplas sobre S será de:
#
# $$ k^n $$
#
# Uma "tupla" é uma lista ordenada que não permite mudanças uma vez definida, isto é, se definimos nossa _frequency map_ com um determinado número de combinações possíveis de nucleotídeos, procuraremos apenas pelas combinações contendo aquela lista definida.
def frequencyMapGenerator(freq):
    """Return every DNA k-mer (string over A/T/C/G) of length ``freq``.

    The list has exactly 4**freq entries, ordered by the base order
    A, T, C, G (the same order the original nested loops produced).
    Returns None when freq < 1, since no k-mer has non-positive length.
    """
    # Fixes over the original: freq < 0 used to return the 16 dinucleotides
    # (the build loop simply never ran), and several locals were unused.
    if freq < 1:
        return None
    bases = ['A', 'T', 'C', 'G']
    kmers = bases
    # Extend every (k-1)-mer by each base, freq-1 times.
    for _ in range(freq - 1):
        kmers = [prefix + base for prefix in kmers for base in bases]
    return kmers
# #### Teste da função
mapSet3 = frequencyMapGenerator(3)
print(mapSet3)
# Uma outra forma de fazer a permutação seria utilizando alguma biblioteca pronta, como se segue:
import itertools
print(list(itertools.product(bases, repeat=3)))
# #### Prosseguindo com a contagem de ocorrências
#
# Agora já é possível contar nossas ocorrências, mas ao invés de passar apenas um padrão, eu quero mapear todas as ocorrências de três pares de base:
# Count each trinucleotide's (overlapping) occurrences in the sample genome.
for s in mapSet3:
    counter = PatternCount(vibriaColerae, s)
    print('Pattern ', s, ' has ', counter, 'matchs.')
# Stray scratch assignment left over from experimentation; unused below.
a = 4
# # Teste
| coursera_bioinformatics/Where in the Genome Does Replication Begin? (Part 1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # The Piecewise-Parabolic Method
#
# ## Author: <NAME>
#
# **Notebook Status:** <font color='green'><b>Validated</b></font>
#
# **Validation Notes:** This module is self-validated against [its module](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) and is also validated against the corresponding algorithm in the old `GiRaFFE` code in [this tutorial](Tutorial-Start_to_Finish-GiRaFFE_NRPy-PPM.ipynb).
#
# # This module presents the functionality of [GiRaFFE_NRPy_PPM.py](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) .
# This notebook documents the function from the original `GiRaFFE` that implements the reconstruction algorithm used by the piecewise-parabolic method (PPM) of [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf).
#
# The differential equations that `GiRaFFE` evolves have two different terms that contribute to the time evolution of some quantity: the flux term and the source term. The PPM method is what the original `GiRaFFE` uses to handle the flux term; hopefully, using this instead of finite-differencing will fix some of the problems we've been having with `GiRaFFE_NRPy`.
#
# This algorithm is not quite as accessible as the much simpler finite-difference methods; as such, [this notebook](https://mybinder.org/v2/gh/python-hydro/how_to_write_a_hydro_code/master) is recommended as an introduction. It covers a simpler reconstruction scheme, and proved useful in preparing the documentation for this more complicated scheme.
#
# The algorithm for finite-volume methods in general is as follows:
#
# 1. **The Reconstruction Step (This notebook)**
# 1. **Within each cell, fit to a function that conserves the volume in that cell using information from the neighboring cells**
# * **For PPM, we will naturally use parabolas**
# 1. **Use that fit to define the state at the left and right interface of each cell**
# 1. **Apply a slope limiter to mitigate Gibbs phenomenon**
# 1. Solving the Riemann Problem
# 1. Use the left and right reconstructed states to calculate the unique state at boundary
# 1. Use the unique state to estimate the derivative in the cell
# 1. Repeat the above for each conservative gridfunction in each direction
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 0. [Step 0](#prelim): Preliminaries
# 1. [Step 1](#reconstruction): The reconstruction function
# 1. [Step 1.a](#define): Some definitions and declarations
# 1. [Step 1.b](#func): The function definition
# 1. [Step 1.c](#face): Interpolate the face values
# 1. [Step 1.d](#monotonize): Monotonize the values within each cell
# 1. [Step 1.e](#shift): Shift indices
# 1. [Step 2](#slope_limit): The slope limiter
# 1. [Step 3](#monotonize_def): The monotonization algorithm
# 1. [Step 4](#loops): Dependencies
# 1. [Step 5](#code_validation): Code Validation against `GiRaFFE_NRPy_PPM.py`
# 1. [Step 6](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='prelim'></a>
#
# # Step 0: Preliminaries \[Back to [top](#toc)\]
# $$\label{prelim}$$
#
# This first block of code just sets up a subdirectory within `GiRaFFE_standalone_Ccodes/` to which we will write the C code.
# +
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
# Make the parent directory importable so NRPy+ modules resolve.
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Output directory for the generated C code; created if absent.
Ccodesdir = "GiRaFFE_standalone_Ccodes/PPM"
cmd.mkdir(os.path.join(Ccodesdir))
# -
# When we convert the code to work with NRPy+, we will be able to make a simplification: since `GiRaFFE_NRPy` does not use staggered grids, we will be able to skip reconstructing the staggered quantities
#
# The structure `gf_and_gz_struct` is a C++ structure used to keep track of ghostzone information between routines. It contains a pointer and two arrays. It is specified by the following code:
#
# ```c
# // Keeping track of ghostzones between routines is a nightmare, so
# // we instead attach ghostzone info to each gridfunction and set
# // the ghostzone information correctly within each routine.
# struct gf_and_gz_struct {
# REAL *gf;
# int gz_lo[4],gz_hi[4];
# };
# ```
#
# In NRPy+, we will have to interact with our arrays on a lower level than normal in order to get this pointer. NRPy+ stores gridfunctions all in one array, but values for a specific quantity (e.g., `ValenciavU0`) are still stored contiguously. That means that the pointer we are after is simply the one that points to the first point of that quantity, i.e. `i0=0`, `i1=0`, and `i2=0`. Recall the typical macro we use for memory access:
# ```c
# #define IDX4S(g,i,j,k) \
# ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#
# ```
# It thus follows that the pointer that needs to be stored in this structure is `Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2*g`, where `g` is, in this example, `VALENCIAVU0GF`.
# <a id='reconstruction'></a>
#
# # Step 1: The reconstruction function \[Back to [top](#toc)\]
# $$\label{reconstruction}$$
#
# <a id='define'></a>
#
# ## Step 1.a: Some definitions and declarations \[Back to [top](#toc)\]
# $$\label{define}$$
#
# This file contains the functions necessary for reconstruction. It is based on Colella & Woodward PPM in the case where pressure and density $P = \rho = 0$.
#
# We start by defining the values of `MINUS2`...`PLUS2` as $\{0, \ldots ,4\}$ for the sake of convenience later on; we also define `MAXNUMINDICES` as 5 so we can easily loop over the above. We include `loop_defines_reconstruction_NRPy.h` for some macros that will allow us to conveniently write common loops that we will use and give the function prototypes for our slope limiter, `slope_limit_NRPy()`, and our monotization algorithm, `monotonize_NRPy()`.
# +
# %%writefile $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
/*****************************************
* PPM Reconstruction Interface.
* <NAME> (2013)
*
* This version of PPM implements the standard
* Colella & Woodward PPM, but in the GRFFE
* limit, where P=rho=0. Thus, e.g., ftilde=0.
*****************************************/
#define MINUS2 0
#define MINUS1 1
#define PLUS0 2
#define PLUS1 3
#define PLUS2 4
#define MAXNUMINDICES 5
// ^^^^^^^^^^^^^ Be _sure_ to define MAXNUMINDICES appropriately!
#define MIN(a,b) ( ((a) < (b)) ? (a) : (b) )
#define MAX(a,b) ( ((a) > (b)) ? (a) : (b) )
#define SQR(x) ((x) * (x))
// FIXME: Should make this zero-offset for NRPy+ standards. Probably a wrapper function for compatibility with a minimum of other changes?
const int kronecker_delta[4][3] = { { 0,0,0 },
{ 1,0,0 },
{ 0,1,0 },
{ 0,0,1 } };
// You'll find the #define's for LOOP_DEFINE and SET_INDEX_ARRAYS_NRPY inside:
#include "loop_defines_reconstruction_NRPy.h"
static inline REAL slope_limit_NRPy(const REAL dU,const REAL dUp1);
static inline void monotonize_NRPy(const REAL U,REAL Ur,REAL Ul);
# -
# <a id='func'></a>
#
# ## Step 1.b: The function definition \[Back to [top](#toc)\]
# $$\label{func}$$
#
# Here, we start the function definition for the main function for our reconstruction, `reconstruct_set_of_prims_PPM_GRFFE_NRPy()`. Among its parameters are the arrays that define the grid (that will need to be replaced with NRPy+ equivalents), a flux direction, the integer array specifying which primitives to reconstruct (as well as the number of primitives to reconstruct), the input structure `in_prims`, the output structures `out_prims_r` and `out_prims_l`, and a temporary array (this will be used to help switch variable names).
#
# We then check the number of ghostzones and error out if there are too few - this method requires three. Note the `for` loop here; it continues through the next two cells as well, looping over each primitive we will reconstruct in the chosen direction.
# +
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
static void reconstruct_set_of_prims_PPM_GRFFE_NRPy(const paramstruct *params,REAL *auxevol_gfs,const int flux_dirn,
const int num_prims_to_reconstruct,const int *which_prims_to_reconstruct,
const gf_and_gz_struct *in_prims,gf_and_gz_struct *out_prims_r,
gf_and_gz_struct *out_prims_l,REAL *temporary) {
#include "set_Cparameters.h"
const int Nxx_plus_2NGHOSTS[3] = {Nxx_plus_2NGHOSTS0,Nxx_plus_2NGHOSTS1,Nxx_plus_2NGHOSTS2};
REAL U[NUM_RECONSTRUCT_GFS][MAXNUMINDICES],dU[NUM_RECONSTRUCT_GFS][MAXNUMINDICES],slope_lim_dU[NUM_RECONSTRUCT_GFS][MAXNUMINDICES],
Ur[NUM_RECONSTRUCT_GFS][MAXNUMINDICES],Ul[NUM_RECONSTRUCT_GFS][MAXNUMINDICES];
int ijkgz_lo_hi[4][2];
for(int ww=0;ww<num_prims_to_reconstruct;ww++) {
const int whichvar=which_prims_to_reconstruct[ww];
if(in_prims[whichvar].gz_lo[flux_dirn]!=0 || in_prims[whichvar].gz_hi[flux_dirn]!=0) {
printf("TOO MANY GZ'S! WHICHVAR=%d: %d %d %d : %d %d %d DIRECTION %d",whichvar,
in_prims[whichvar].gz_lo[1],in_prims[whichvar].gz_lo[2],in_prims[whichvar].gz_lo[3],
in_prims[whichvar].gz_hi[1],in_prims[whichvar].gz_hi[2],in_prims[whichvar].gz_hi[3],flux_dirn);
exit(0);
}
# -
# <a id='face'></a>
#
# ## Step 1.c: Interpolate the face values \[Back to [top](#toc)\]
# $$\label{face}$$
#
# In Loop 1, we will interpolate the face values at the left and right interfaces, `Ur` and `Ul`, respectively. This is done on a point-by-point basis as defined by the `LOOP_DEFINE`.
#
# After reading in the relevant values from memory, we calculate the simple `dU`:
# \begin{align}
# dU_{-1} &= U_{-1} - U_{-2} \\
# dU_{+0} &= U_{+0} - U_{-1} \\
# dU_{+1} &= U_{+1} - U_{+0} \\
# dU_{+2} &= U_{+2} - U_{+1}. \\
# \end{align}
# From that, we compute the slope-limited `slope_lim_dU`, or $\nabla U$ (see [below](#slope_limit)). Then, we compute the face values using eq. A1 from [arxiv:astro-ph/050342](http://arxiv.org/pdf/astro-ph/0503420.pdf), adapted from 1.9 in [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf):
# \begin{align}
# U_r &= \frac{1}{2} \left( U_{+1} + U_{+0} \right) + \frac{1}{6} \left( \nabla U_{+0} - \nabla U_{+1} \right) \\
# U_l &= \frac{1}{2} \left( U_{+0} + U_{-1} \right) + \frac{1}{6} \left( \nabla U_{-1} - \nabla U_{+0} \right). \\
# \end{align}
# (Note, however, that we use the standard coefficient $1/6$ instead of $1/8$.) Finally, we write the values to memory in the output structures.
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
// *** LOOP 1: Interpolate to Ur and Ul, which are face values ***
// You will find that Ur depends on U at MINUS1,PLUS0, PLUS1,PLUS2, and
// Ul depends on U at MINUS2,MINUS1,PLUS0,PLUS1.
// However, we define the below loop from MINUS2 to PLUS2. Why not split
// this up and get additional points? Maybe we should. In GRMHD, the
// reason is that later on, Ur and Ul depend on ftilde, which is
// defined from MINUS2 to PLUS2, so we would lose those points anyway.
// But in GRFFE, ftilde is set to zero, so there may be a potential
// for boosting performance here.
LOOP_DEFINE(2,2, Nxx_plus_2NGHOSTS,flux_dirn, ijkgz_lo_hi,in_prims[whichvar].gz_lo,in_prims[whichvar].gz_hi) {
SET_INDEX_ARRAYS_NRPY(-2,2,flux_dirn);
/* *** LOOP 1a: READ INPUT *** */
// Read in a primitive at all gridpoints between m = MINUS2 & PLUS2, where m's direction is given by flux_dirn. Store to U.
for(int ii=MINUS2;ii<=PLUS2;ii++) U[whichvar][ii] = in_prims[whichvar].gf[index_arr[flux_dirn][ii]];
/* *** LOOP 1b: DO COMPUTATION *** */
/* First, compute simple dU = U(i) - U(i-1), where direction of i
* is given by flux_dirn, and U is a primitive variable:
* {vx,vy,vz,Bx,By,Bz}. */
// Note that for Ur and Ul at i, we must compute dU(i-1),dU(i),dU(i+1),
// and dU(i+2)
dU[whichvar][MINUS1] = U[whichvar][MINUS1]- U[whichvar][MINUS2];
dU[whichvar][PLUS0] = U[whichvar][PLUS0] - U[whichvar][MINUS1];
dU[whichvar][PLUS1] = U[whichvar][PLUS1] - U[whichvar][PLUS0];
dU[whichvar][PLUS2] = U[whichvar][PLUS2] - U[whichvar][PLUS1];
// Then, compute slope-limited dU, using MC slope limiter:
slope_lim_dU[whichvar][MINUS1]=slope_limit_NRPy(dU[whichvar][MINUS1],dU[whichvar][PLUS0]);
slope_lim_dU[whichvar][PLUS0] =slope_limit_NRPy(dU[whichvar][PLUS0], dU[whichvar][PLUS1]);
slope_lim_dU[whichvar][PLUS1] =slope_limit_NRPy(dU[whichvar][PLUS1], dU[whichvar][PLUS2]);
// Finally, compute face values Ur and Ul based on the PPM prescription
// (Eq. A1 in http://arxiv.org/pdf/astro-ph/0503420.pdf, but using standard 1/6=(1.0/6.0) coefficient)
// Ur[PLUS0] represents U(i+1/2)
// We applied a simplification to the following line: Ur=U+0.5*(U(i+1)-U) + ... = 0.5*(U(i+1)+U) + ...
Ur[whichvar][PLUS0] = 0.5*(U[whichvar][PLUS1] + U[whichvar][PLUS0] ) + (1.0/6.0)*(slope_lim_dU[whichvar][PLUS0] - slope_lim_dU[whichvar][PLUS1]);
// Ul[PLUS0] represents U(i-1/2)
// We applied a simplification to the following line: Ul=U(i-1)+0.5*(U-U(i-1)) + ... = 0.5*(U+U(i-1)) + ...
Ul[whichvar][PLUS0] = 0.5*(U[whichvar][PLUS0] + U[whichvar][MINUS1]) + (1.0/6.0)*(slope_lim_dU[whichvar][MINUS1] - slope_lim_dU[whichvar][PLUS0]);
/* *** LOOP 1c: WRITE OUTPUT *** */
// Store right face values to {vxr,vyr,vzr,Bxr,Byr,Bzr},
// and left face values to {vxl,vyl,vzl,Bxl,Byl,Bzl}
out_prims_r[whichvar].gf[index_arr[flux_dirn][PLUS0]] = Ur[whichvar][PLUS0];
out_prims_l[whichvar].gf[index_arr[flux_dirn][PLUS0]] = Ul[whichvar][PLUS0];
}
# <a id='monotonize'></a>
#
# ## Step 1.d: Monotonize the values within each cell \[Back to [top](#toc)\]
# $$\label{monotonize}$$
#
# We skip Loop 2 in GRFFE; then, we flatten the data in Loop 3 (but since we flatten based on `ftilde_gf`, which is 0 in GRFFE, we again don't really do anything). Also in Loop 3, we call the `monotonize_NRPy()` function on the face values. This function adjusts the face values to ensure that the data is monotonic within each cell to avoid the Gibbs phenomenon.
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
// *** LOOP 2 (REMOVED): STEEPEN RHOB. RHOB DOES NOT EXIST IN GRFFE EQUATIONS ***
}
// *** LOOP 3: FLATTEN BASED ON FTILDE AND MONOTONIZE ***
for(int ww=0;ww<num_prims_to_reconstruct;ww++) {
const int whichvar=which_prims_to_reconstruct[ww];
// ftilde() depends on P(MINUS2,MINUS1,PLUS1,PLUS2), THUS IS SET TO ZERO IN GRFFE
LOOP_DEFINE(2,2, Nxx_plus_2NGHOSTS,flux_dirn, ijkgz_lo_hi,in_prims[whichvar].gz_lo,in_prims[whichvar].gz_hi) {
SET_INDEX_ARRAYS_NRPY(0,0,flux_dirn);
U[whichvar][PLUS0] = in_prims[whichvar].gf[index_arr[flux_dirn][PLUS0]];
Ur[whichvar][PLUS0] = out_prims_r[whichvar].gf[index_arr[flux_dirn][PLUS0]];
Ul[whichvar][PLUS0] = out_prims_l[whichvar].gf[index_arr[flux_dirn][PLUS0]];
// ftilde_gf was computed in the function compute_ftilde_gf(), called before this routine
//REAL ftilde = ftilde_gf[index_arr[flux_dirn][PLUS0]];
// ...and then flatten (local operation)
Ur[whichvar][PLUS0] = Ur[whichvar][PLUS0];
Ul[whichvar][PLUS0] = Ul[whichvar][PLUS0];
// Then monotonize
monotonize_NRPy(U[whichvar][PLUS0],Ur[whichvar][PLUS0],Ul[whichvar][PLUS0]);
out_prims_r[whichvar].gf[index_arr[flux_dirn][PLUS0]] = Ur[whichvar][PLUS0];
out_prims_l[whichvar].gf[index_arr[flux_dirn][PLUS0]] = Ul[whichvar][PLUS0];
}
// Note: ftilde=0 in GRFFE. Ur depends on ftilde, which depends on points of U between MINUS2 and PLUS2
out_prims_r[whichvar].gz_lo[flux_dirn]+=2;
out_prims_r[whichvar].gz_hi[flux_dirn]+=2;
// Note: ftilde=0 in GRFFE. Ul depends on ftilde, which depends on points of U between MINUS2 and PLUS2
out_prims_l[whichvar].gz_lo[flux_dirn]+=2;
out_prims_l[whichvar].gz_hi[flux_dirn]+=2;
}
# <a id='shift'></a>
#
# ## Step 1.e: Shift indices \[Back to [top](#toc)\]
# $$\label{shift}$$
#
# In Loop 4, we will shift the indices of `Ur` and `Ul`. So far, we have been concerned with the behavior of the data within a single cell. In that context, it makes sense to call the value of data at the left end of the cell `Ul` and the data at the right end of the cell `Ur`. However, going forward, we will be concerned about the behavior of the data at the interface between cells. In this context, it makes sense to call the value of data on the left of the interface (which is at the right end of the cell!) `Ul` and the data on the right of the interface `Ur`. So, using the array `temporary`, we switch the two names while shifting `Ur` appropriately.
# +
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
// *** LOOP 4: SHIFT Ur AND Ul ***
/* Currently face values are set so that
* a) Ur(i) represents U(i+1/2), and
* b) Ul(i) represents U(i-1/2)
* Here, we shift so that the indices are consistent:
* a) U(i-1/2+epsilon) = oldUl(i) = newUr(i)
* b) U(i-1/2-epsilon) = oldUr(i-1) = newUl(i)
* Note that this step is not strictly necessary if you keep
* track of indices when computing the flux. */
for(int ww=0;ww<num_prims_to_reconstruct;ww++) {
const int whichvar=which_prims_to_reconstruct[ww];
LOOP_DEFINE(3,2, Nxx_plus_2NGHOSTS,flux_dirn, ijkgz_lo_hi,in_prims[whichvar].gz_lo,in_prims[whichvar].gz_hi) {
SET_INDEX_ARRAYS_NRPY(-1,0,flux_dirn);
temporary[index_arr[flux_dirn][PLUS0]] = out_prims_r[whichvar].gf[index_arr[flux_dirn][MINUS1]];
}
LOOP_DEFINE(3,2, Nxx_plus_2NGHOSTS,flux_dirn, ijkgz_lo_hi,in_prims[whichvar].gz_lo,in_prims[whichvar].gz_hi) {
SET_INDEX_ARRAYS_NRPY(0,0,flux_dirn);
// Then shift so that Ur represents the gridpoint at i-1/2+epsilon,
// and Ul represents the gridpoint at i-1/2-epsilon.
// Ur(i-1/2) = Ul(i-1/2) = U(i-1/2+epsilon)
// Ul(i-1/2) = Ur(i+1/2 - 1) = U(i-1/2-epsilon)
out_prims_r[whichvar].gf[index_arr[flux_dirn][PLUS0]] = out_prims_l[whichvar].gf[index_arr[flux_dirn][PLUS0]];
out_prims_l[whichvar].gf[index_arr[flux_dirn][PLUS0]] = temporary[index_arr[flux_dirn][PLUS0]];
}
// Ul was just shifted, so we lost another ghostzone.
out_prims_l[whichvar].gz_lo[flux_dirn]+=1;
out_prims_l[whichvar].gz_hi[flux_dirn]+=0;
// As for Ur, we didn't need to get rid of another ghostzone,
// but we did ... seems wasteful!
out_prims_r[whichvar].gz_lo[flux_dirn]+=1;
out_prims_r[whichvar].gz_hi[flux_dirn]+=0;
}
}
# -
# <a id='slope_limit'></a>
#
# # Step 2: The slope limiter \[Back to [top](#toc)\]
# $$\label{slope_limit}$$
#
# The first function here implements the Monotonized Central (MC) reconstruction slope limiter:
# $$ MC(a,b) = \left \{ \begin{array}{ll}
# 0 & {\rm if} ab \leq 0 \\
# {\rm sign}(a) \min(2|a|,2|b|, |a+b|/2) & {\rm otherwise.}
# \end{array} \right.
# $$
#
# This is adapted from eq. 1.8 of [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf).
#
# +
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
// Set SLOPE_LIMITER_COEFF = 2.0 for MC, 1 for minmod
#define SLOPE_LIMITER_COEFF 2.0
//Eq. 60 in JOURNAL OF COMPUTATIONAL PHYSICS 123, 1-14 (1996)
// [note the factor of 2 missing in the |a_{j+1} - a_{j}| term].
// Recall that dU = U_{i} - U_{i-1}.
// Monotonized-central (MC) slope limiter, cf. Colella & Woodward (1984) eq. 1.8:
// returns 0 when the one-sided slopes dU and dUp1 disagree in sign (local
// extremum), otherwise the centered slope clamped to SLOPE_LIMITER_COEFF
// times either one-sided slope.  Recall dU = U_{i} - U_{i-1}.
static inline REAL slope_limit_NRPy(const REAL dU,const REAL dUp1) {
  if(dU*dUp1 > 0.0) {
    //delta_m_U=0.5 * [ (u_(i+1)-u_i) + (u_i-u_(i-1)) ] = (u_(i+1) - u_(i-1))/2 <-- first derivative, second-order; this should happen most of the time (smooth flows)
    const REAL delta_m_U = 0.5*(dU + dUp1);
    // EXPLANATION OF BELOW LINE OF CODE.
    // In short, sign_delta_a_j = sign(delta_m_U) = (0.0 < delta_m_U) - (delta_m_U < 0.0).
    // If delta_m_U>0, then (0.0 < delta_m_U)==1, and (delta_m_U < 0.0)==0, so sign_delta_a_j=+1
    // If delta_m_U<0, then (0.0 < delta_m_U)==0, and (delta_m_U < 0.0)==1, so sign_delta_a_j=-1
    // If delta_m_U==0,then (0.0 < delta_m_U)==0, and (delta_m_U < 0.0)==0, so sign_delta_a_j=0
    const int sign_delta_m_U = (0.0 < delta_m_U) - (delta_m_U < 0.0);
    //Decide whether to use 2nd order derivative or first-order derivative, limiting slope.
    return sign_delta_m_U*MIN(fabs(delta_m_U),MIN(SLOPE_LIMITER_COEFF*fabs(dUp1),SLOPE_LIMITER_COEFF*fabs(dU)));
  }
  return 0.0;
}
# -
# <a id='monotonize_def'></a>
#
# # Step 3: The monotonization algorithm \[Back to [top](#toc)\]
# $$\label{monotonize_def}$$
#
# The next function monotonizes the slopes using the algorithm from [Colella and Woodward (1984)](https://crd.lbl.gov/assets/pubs_presos/AMCS/ANAG/A141984.pdf), eq. 1.10. We want the slope to be monotonic in a cell in order to reduce the impact of the Gibbs phenomenon. So, we consider three values in the cell: the cell average, `U`; on the left interface of the cell, `Ul`; and on the right interface of the cell, `Ur`. The goal of the algorithm is to ensure monotonicity; so, it first checks to see if the cell contains a local extremum. If it does, we make the interpolation function a constant. We must then also consider the case where `U` is "close" to `Ur` or `Ul`, and an interpolating polynomial between them would not be monotonic over the cell. So, the basic algorithm is as follows:
#
# * `dU = Ur - Ul`
# * `mU = 0.5*(Ur+Ul)`.
# * If the cell has an extremum:
# * `Ur = U`
# * `Ul = U`
# * If `U` is too close to `Ul`
# * Move `Ul` farther away
# * If `U` is too close to `Ur`
# * Move `Ur` farther away
#
# More rigorous definitions of "Too Close" and "Farther Away" are derived from parabolas with vertices on the interfaces, as can be seen in the code below:
# +
# %%writefile -a $Ccodesdir/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c
// Enforce monotonicity of the parabolic profile within a cell
// (Colella & Woodward 1984, eq. 1.10): given the cell average U and the
// reconstructed face values Ur (right) / Ul (left),
//  - collapse to a constant (Ur = Ul = U) when the cell holds a local extremum;
//  - otherwise, when U lies too close to one face, pull the opposite face in
//    so the interpolating parabola stays monotone across the cell.
// NOTE(review): Ur and Ul are passed BY VALUE, so the assignments below never
// reach the caller in Loop 3 -- as written this routine has no observable
// effect.  It likely should take REAL *Ur, REAL *Ul (with matching changes at
// the call site); confirm against the original GiRaFFE/GRMHD source.
static inline void monotonize_NRPy(const REAL U,REAL Ur,REAL Ul) {
  const REAL dU = Ur - Ul;
  const REAL mU = 0.5*(Ur+Ul);
  // Local extremum: U does not lie between Ul and Ur -> flatten to first order.
  if ( (Ur-U)*(U-Ul) <= 0.0) {
    Ur = U;
    Ul = U;
    return;
  }
  // U too close to Ur: reposition Ul so the parabola's vertex sits at the left face.
  if ( dU*(U-mU) > (1.0/6.0)*SQR(dU)) {
    Ul = 3.0*U - 2.0*Ur;
    return;
  }
  // U too close to Ul: reposition Ur so the vertex sits at the right face.
  if ( dU*(U-mU) < -(1.0/6.0)*SQR(dU)) {
    Ur = 3.0*U - 2.0*Ul;
    return;
  }
}
# -
# <a id='loops'></a>
#
# # Step 4: Dependencies \[Back to [top](#toc)\]
# $$\label{loops}$$
#
# The above functions rely on several macros that were not defined there. We include the file that defines them below.
# +
# %%writefile $Ccodesdir/loop_defines_reconstruction_NRPy.h
#ifndef loop_defines_reconstruction_NRPy_H_
#define loop_defines_reconstruction_NRPy_H_
#define LOOP_DEFINE(gz_shift_lo,gz_shift_hi, ext,flux_dirn, ijkgz_lo_hi,gz_lo,gz_hi) \
for(int rr=1;rr<=3;rr++) { \
ijkgz_lo_hi[rr][0]= gz_lo[rr]; \
ijkgz_lo_hi[rr][1]=ext[rr-1]-gz_hi[rr]; \
} \
ijkgz_lo_hi[flux_dirn][0] += gz_shift_lo; \
ijkgz_lo_hi[flux_dirn][1] -= gz_shift_hi; \
/* The following line is valid C99 */ \
_Pragma("omp parallel for private(U,dU,slope_lim_dU,Ur,Ul)") \
for(int k=ijkgz_lo_hi[3][0];k<ijkgz_lo_hi[3][1];k++) \
for(int j=ijkgz_lo_hi[2][0];j<ijkgz_lo_hi[2][1];j++) \
for(int i=ijkgz_lo_hi[1][0];i<ijkgz_lo_hi[1][1];i++)
// This define only sets indices.
// FIXME: benchmark with and without the if() statement.
// FIXME: try without index_arr being defined in all directions.
#define SET_INDEX_ARRAYS_NRPY(IMIN,IMAX,flux_dirn) \
const int max_shift=(MAXNUMINDICES/2); \
/* DEBUGGING ONLY: if(IMIN<-max_shift || IMAX>max_shift) CCTK_VError(VERR_DEF_PARAMS,"FIX MAXNUMINDICES!"); */ \
int index_arr[4][MAXNUMINDICES]; \
for(int idx=IMIN;idx<=IMAX;idx++) { \
index_arr[flux_dirn][idx+max_shift]= \
IDX3S( \
i+idx*kronecker_delta[flux_dirn][0], \
j+idx*kronecker_delta[flux_dirn][1], \
k+idx*kronecker_delta[flux_dirn][2]); \
}
#define SET_INDEX_ARRAYS_NRPY_3DBLOCK(IJKLOHI) \
const int max_shift=(MAXNUMINDICES/2); \
int index_arr_3DB[MAXNUMINDICES][MAXNUMINDICES][MAXNUMINDICES]; \
for(int idx_k=IJKLOHI[4];idx_k<=IJKLOHI[5];idx_k++) for(int idx_j=IJKLOHI[2];idx_j<=IJKLOHI[3];idx_j++) for(int idx_i=IJKLOHI[0];idx_i<=IJKLOHI[1];idx_i++) { \
index_arr_3DB[idx_k+max_shift][idx_j+max_shift][idx_i+max_shift]=CCTK_GFINDEX3D(cctkGH,i+idx_i,j+idx_j,k+idx_k); \
}
#endif /* loop_defines_reconstruction_NRPy_H_ */
# -
# <a id='code_validation'></a>
#
# # Step 5: Code Validation against `GiRaFFE_NRPy_PPM.py` \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# To validate the code in this tutorial we check for agreement between the files
#
# 1. that were written in this tutorial and
# 1. those that are generated by [`GiRaFFE_NRPy_PPM.py`](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py)
#
# +
# Define the directory that we wish to validate against:
valdir = "GiRaFFE_NRPy/GiRaFFE_Ccode_library/PPM/"
import GiRaFFE_NRPy.GiRaFFE_NRPy_PPM as PPM
PPM.GiRaFFE_NRPy_PPM(valdir)

import difflib
import os   # explicit import; this cell previously relied on an earlier cell
import sys
print("Printing difference between original C code and this code...")
# Open the files to compare
files = ["reconstruct_set_of_prims_PPM_GRFFE_NRPy.c",\
         "loop_defines_reconstruction_NRPy.h"]

for file in files:
    print("Checking file " + file)
    with open(os.path.join(valdir,file)) as file1, open(os.path.join(Ccodesdir,file)) as file2:
        # Read the lines of each file
        file1_lines = file1.readlines()
        file2_lines = file2.readlines()
        num_diffs = 0
        # Fix: the labels used os.path.join(valdir+file), concatenating with '+'
        # and bypassing join's separator handling; pass the parts separately.
        for line in difflib.unified_diff(file1_lines, file2_lines, fromfile=os.path.join(valdir, file), tofile=os.path.join(Ccodesdir, file)):
            sys.stdout.writelines(line)
            num_diffs = num_diffs + 1
        if num_diffs == 0:
            print("No difference. TEST PASSED!")
        else:
            print("ERROR: Disagreement found with .py file. See differences above.")
# -
# <a id='example'></a>
#
# # Step 6: Example code in order to use this algorithm \[Back to [top](#toc)\]
# $$\label{example}$$
#
# Recall that the gridfunction and ghostzone structures (here rewritten in proper C) require a pointer `gf`
# ```c
# // Keeping track of ghostzones between routines is a nightmare, so
# // we instead attach ghostzone info to each gridfunction and set
# // the ghostzone information correctly within each routine.
# typedef struct __gf_and_gz_struct__ {
# REAL *gf;
# int gz_lo[4],gz_hi[4];
# } gf_and_gz_struct;
# ```
# that can be found in NRPy+ as
# ```c
# #define IDX4S(g,i,j,k) \
# ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#
# ```
# So, the prerequisites that go into calling the PPM function will be as follows.
#
#
# ```c
# gf_and_gz_struct in_prims[NUM_RECONSTRUCT_GFS], out_prims_r[NUM_RECONSTRUCT_GFS], out_prims_l[NUM_RECONSTRUCT_GFS];
# int which_prims_to_reconstruct[NUM_RECONSTRUCT_GFS],num_prims_to_reconstruct;
#
# int ww=0;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU0GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU0GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU0GF;
# ww++;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU1GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU1GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU1GF;
# ww++;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAVU2GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU2GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU2GF;
# ww++;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU0GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU0GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU0GF;
# ww++;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU1GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU1GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU1GF;
# ww++;
# in_prims[ww].gf = auxevol_gfs + Nxxp2NG012*BU2GF;
# out_prims_r[ww].gf = auxevol_gfs + Nxxp2NG012*B_RU2GF;
# out_prims_l[ww].gf = auxevol_gfs + Nxxp2NG012*B_LU2GF;
# ww++;
#
# // Prims are defined AT ALL GRIDPOINTS, so we set the # of ghostzones to zero:
# for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { in_prims[i].gz_lo[j]=0; in_prims[i].gz_hi[j]=0; }
# // Left/right variables are not yet defined, yet we set the # of gz's to zero by default:
# for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_r[i].gz_lo[j]=0; out_prims_r[i].gz_hi[j]=0; }
# for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_l[i].gz_lo[j]=0; out_prims_l[i].gz_hi[j]=0; }
#
# ww=0;
# which_prims_to_reconstruct[ww]=VALENCIAVU0; ww++;
# which_prims_to_reconstruct[ww]=VALENCIAVU1; ww++;
# which_prims_to_reconstruct[ww]=VALENCIAVU2; ww++;
# which_prims_to_reconstruct[ww]=BU0; ww++;
# which_prims_to_reconstruct[ww]=BU1; ww++;
# which_prims_to_reconstruct[ww]=BU2; ww++;
# num_prims_to_reconstruct=ww;
# // This function is housed in the file: "reconstruct_set_of_prims_PPM_GRFFE_NRPy.c"
# reconstruct_set_of_prims_PPM_GRFFE_NRPy(params, auxevol_gfs, flux_dirn, num_prims_to_reconstruct, which_prims_to_reconstruct, in_prims, out_prims_r, out_prims_l, temporary);
#
# ```
#
# Declaring `temporary` should look something like this, but I haven't found where it's actually done in the old code:
# ```c
# REAL temporary[Nxxp2NG012];
# ```
#
# This supposes that the following constants have been set:
# ```c
# const int VX=0,VY=1,VZ=2,BX=3,BY=4,BZ=5;
# const int NUM_RECONSTRUCT_GFS = 6;
# const int Nxxp2NG012 = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
# ```
# <a id='latex_pdf_output'></a>
#
# # Step 7: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-GiRaFFE_NRPy-PPM.pdf](Tutorial-GiRaFFE_NRPy-PPM.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-GiRaFFE_NRPy-PPM.ipynb
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-PPM.tex
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-PPM.tex
# !pdflatex -interaction=batchmode Tutorial-GiRaFFE_NRPy-PPM.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| in_progress/Tutorial-GiRaFFE_NRPy-PPM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import math as m
import numpy as np
with open("date.txt", "r") as fin: # citire din fisier
rand = [int(x) for x in fin.readline().split()]
dimensiune_populatie = rand[0]
precizie = rand[1]
etape = rand[2]
a_lim_inf = rand[3]
b_lim_sup = rand[4]
rand = [float(x) for x in fin.readline().split()]
prob_crossover = rand[0]
prob_mutatie = rand[1]
def my_func(x):
    """Fitness function being maximised: f(x) = -x^2 + x + 2."""
    return 2 + x - x * x
def lungime_crom(a=None, b=None, prec=None):
    """Return the chromosome length in bits needed to discretize [a, b]
    with ``prec`` decimal digits of precision.

    All arguments default to the module-level problem parameters
    (``a_lim_inf``, ``b_lim_sup``, ``precizie``), so the original
    ``lungime_crom()`` call keeps working unchanged.
    """
    if a is None:
        a = a_lim_inf
    if b is None:
        b = b_lim_sup
    if prec is None:
        prec = precizie
    var = (b - a) * (10 ** prec)  # number of discrete points in the interval
    minim = m.log(var, 2)
    return int(minim) + 1  # smallest bit count covering all points
def transform_binar_interval(x, lung_crm, a=None, b=None):
    """Decode the binary string ``x`` (``lung_crm`` bits) into a value in [a, b].

    The integer value of ``x`` is mapped by a linear translation from
    [0, 2**lung_crm - 1] onto the search domain D = [a, b]. The bounds default
    to the module-level ``a_lim_inf``/``b_lim_sup``, keeping old calls working.
    """
    if a is None:
        a = a_lim_inf
    if b is None:
        b = b_lim_sup
    var = int(x, 2)
    # linear translation of the coded value into D = [a, b]
    interval_nr = (var / (2 ** lung_crm - 1)) * (b - a) + a
    return interval_nr
pop_baza_binar = []
pop_baza_decimal = []
def generare_random(lungime_crm):
    """Create the initial population: random bit chromosomes plus their
    decimal codings in the search interval."""
    # draw one random chromosome (bit vector) per individual
    for _ in range(dimensiune_populatie):
        pop_baza_binar.append(list(np.random.randint(2, size=lungime_crm)))
    # decode every chromosome into its value in [a_lim_inf, b_lim_sup]
    for crom in pop_baza_binar:
        bits = "".join(str(b) for b in crom)
        pop_baza_decimal.append(transform_binar_interval(bits, lungime_crm))
f =[]
def val_my_func():
    """Fill the global fitness list ``f`` for the current population and
    return the total performance (sum of all fitness values)."""
    total = 0  # renamed from ``sum``, which shadowed the built-in
    for elem in pop_baza_decimal:
        valoare_fct = my_func(elem)
        f.append(valoare_fct)
        total = total + valoare_fct
    return total
prob = []
def asociere_prob(FF):
    """Assign each individual X_i a selection probability P_i proportional
    to its performance (the fitness function is f itself in this problem)."""
    prob.extend(f[i] / FF for i in range(dimensiune_populatie))
selectie_interv = []
def aflare_selectie_interv():
    """Build the cumulative (roulette-wheel) selection intervals, starting
    the list with an explicit 0 boundary."""
    selectie_interv.append(prob[0])
    idx = 1
    while idx < dimensiune_populatie:
        selectie_interv.append(selectie_interv[idx - 1] + prob[idx])
        idx += 1
    selectie_interv.insert(0, 0)
def cautare_binara(x, intervals=None):
    """Binary-search ``intervals`` (cumulative probabilities) for ``x``.

    Returns the index of the interval ``x`` falls into, i.e. the chromosome
    associated with this point of the roulette wheel. ``intervals`` defaults
    to the module-level ``selectie_interv``, so existing one-argument calls
    behave exactly as before; passing it explicitly makes the function pure.
    """
    if intervals is None:
        intervals = selectie_interv
    low = 0
    high = len(intervals) - 1
    while low <= high:
        mid = (high + low) // 2
        if intervals[mid] == x:
            return mid
        elif intervals[mid] < x:
            low = mid + 1
        else:
            high = mid - 1
    # x is strictly between two boundaries: ``low`` is the upper one
    return low
crm_selectati = []
def selectia():
    """Proportional selection via the roulette-wheel method: draw
    ``dimensiune_populatie`` uniform variates, map each onto a chromosome
    through the cumulative intervals, and rebuild the population arrays."""
    global f
    global pop_baza_binar
    global pop_baza_decimal
    copie = []    # next generation, binary form
    copie1 = []   # next generation, decimal codings
    copie2 = []   # next generation, fitness values
    for _ in range(dimensiune_populatie):
        u_random = np.random.rand()
        # draw a uniform variate on [0, 1)
        # map the draw to a chromosome (a configuration of the search space)
        crm = cautare_binara(u_random)
        # index of the cumulative interval the draw falls into
        crm_selectati.append(crm) # chromosome crm is selected
        # chromosomes chosen for the next stage
        with open("evolutie.txt", "a") as fout:
            fout.write("u = " + str(u_random) + " selectam cromozomul " + str(crm) + "\n")
        # keep the per-chromosome arrays consistent with the selection
        # NOTE(review): the log reports chromosome ``crm`` but the arrays are
        # indexed with ``crm - 1`` (selectie_interv has a leading 0 inserted)
        # -- confirm the intended off-by-one handling
        copie.append(pop_baza_binar[crm - 1])
        copie1.append(pop_baza_decimal[crm - 1])
        copie2.append(f[crm - 1])
    pop_baza_binar = copie.copy()
    pop_baza_decimal = copie1.copy()
    f = copie2.copy()
crm_incrucisare = []
def incrucisare():
    """Mark chromosomes for one-point crossover: each individual joins the
    crossover pool ``crm_incrucisare`` with probability ``prob_crossover``;
    every decision is logged to evolutie.txt.

    Bug fix: the original wrote the "participa" line through a file handle
    that had already been closed by its ``with`` block, so the first selected
    chromosome raised ``ValueError: I/O operation on closed file``. All
    writes now share a single open handle.
    """
    with open("evolutie.txt", "a") as fout:
        fout.write("\nProbabilitatea de incrucisare " + str(prob_crossover) + "\n\n")
        for i in range(dimensiune_populatie):
            u_random = np.random.rand()  # uniform variate on [0, 1)
            fout.write( str(i + 1) + ": " + str(pop_baza_binar[i]) + " u = " + str(u_random) + "\n")
            if u_random <= prob_crossover:
                # mark chromosome X'_i: it will take part in crossover
                crm_incrucisare.append(i)
                fout.write(" < " + str(prob_crossover) + " participa\n")
def actualizare_val(h, i):
    """Refresh the decimal codings and fitness values of the crossover pair
    stored at positions i and i+1 of ``crm_incrucisare``."""
    idx_a = crm_incrucisare[i]
    idx_b = crm_incrucisare[i + 1]
    pop_baza_decimal[idx_a] = transform_binar_interval("".join(str(b) for b in pop_baza_binar[idx_a]), h)
    pop_baza_decimal[idx_b] = transform_binar_interval("".join(str(b) for b in pop_baza_binar[idx_b]), h)
    f[idx_a] = my_func(pop_baza_decimal[idx_a])
    f[idx_b] = my_func(pop_baza_decimal[idx_b])
def actualizare_val_mutatie(h, i):
    """Refresh chromosome ``i``'s decimal coding and fitness after a mutation."""
    bits = "".join(map(str, pop_baza_binar[i]))
    pop_baza_decimal[i] = transform_binar_interval(bits, h)
    f[i] = my_func(pop_baza_decimal[i])
def recombinare(h):
    """One-point crossover over the marked chromosomes: pair them up, pick a
    random cut point per pair, swap tails, and log each recombination."""
    # with an odd number of marked chromosomes, drop the last one
    if len(crm_incrucisare) % 2:
        crm_incrucisare.pop()
    lung = len(crm_incrucisare) # chromosomes taking part in recombination
    # form the chromosome pairs
    for i in range(0, lung, 2):
        if lung == 0: # NOTE(review): dead check -- range(0, 0, 2) never iterates
            break
        pct_rupere = np.random.randint(h) # the cut point is drawn at random
        with open("evolutie.txt", "a") as fout:
            fout.write( "\nRecombinare dintre cromozomul "
            + str(crm_incrucisare[i] + 1) + " cu cromozomul " + str(crm_incrucisare[i + 1] + 1) + ":\n"
            + str(pop_baza_binar[crm_incrucisare[i]]) + " " + str(pop_baza_binar[crm_incrucisare[i + 1]])
            + " pct de rupere " + str(pct_rupere) + "\n\n")
        # children swap tails at the cut point
        copil1 = pop_baza_binar[crm_incrucisare[i] ] [0:pct_rupere] + pop_baza_binar[ crm_incrucisare[ i + 1] ] [ pct_rupere:]
        copil2 = pop_baza_binar[crm_incrucisare[i + 1] ] [0:pct_rupere] + pop_baza_binar[ crm_incrucisare[ i] ] [ pct_rupere:]
        # the resulting children replace their parents in the population
        pop_baza_binar[ crm_incrucisare[i] ] = copil1
        pop_baza_binar[ crm_incrucisare[i + 1] ] = copil2
        actualizare_val(h, i)
        # keep the decimal/fitness arrays in sync with the new chromosomes
        with open("evolutie.txt", "a") as fout:
            fout.write("Rezultat " + str(pop_baza_binar[ crm_incrucisare[i] ]) + " " + str(pop_baza_binar[ crm_incrucisare[i + 1] ]) + "\n")
def mutatie(h):
    """Rare mutation step: each chromosome is selected with probability
    ``prob_mutatie``; a selected chromosome gets one randomly chosen gene
    flipped (0 <-> 1), and the log records which chromosomes changed.

    Consistency fix: the per-individual draw was stored in a local named
    ``prob``, shadowing the module-level probability list; it is now called
    ``u_random`` like in the other stages.
    """
    crm_mutatie = []  # chromosomes selected for mutation
    for i in range(dimensiune_populatie):
        u_random = np.random.rand()  # uniform variate on [0, 1)
        if u_random <= prob_mutatie:
            crm_mutatie.append(i)
    for elem in crm_mutatie:
        with open("evolutie.txt", "a") as fout:
            fout.write(str(elem + 1) + "\n")
        gena = np.random.randint(h)  # pick a random gene position p
        # flip gene p of chromosome X"_i to its complement: 0 <-> 1
        pop_baza_binar[elem][gena] = 1 - pop_baza_binar[elem][gena]
        actualizare_val_mutatie(h, elem)
        # keep the decimal/fitness values in sync
# helpers for computing the analytic maximum of the function, for verification
# Fix: the alias was ``Coeficienti = [int, int, int]`` -- a list *instance*,
# not a type; ``list`` documents the intent (coefficients [a, b, c]).
Coeficienti = list

def functia_de_aplicat(coeficienti: Coeficienti, x):
    """Evaluate the quadratic a*x**2 + b*x + c at ``x``,
    where ``coeficienti`` is the sequence [a, b, c]."""
    a, b, c = coeficienti
    return a * x ** 2 + b * x + c
def maximul_functiei_calculat(coeficienti: Coeficienti, a_lim_inf, b_lim_sup):
    """Return the x in [a_lim_inf, b_lim_sup] where the quadratic attains its
    maximum: the vertex when it lies strictly inside the interval, otherwise
    the endpoint with the larger function value.

    Robustness fix: a linear function (a == 0) no longer raises
    ZeroDivisionError -- it falls through to the endpoint comparison.
    """
    a = coeficienti[0]
    b = coeficienti[1]
    if a != 0:
        x_v = -b / (2 * a)  # x-coordinate of the parabola's vertex
        if a_lim_inf < x_v < b_lim_sup:
            return x_v
    # vertex outside the interval (or degenerate): compare the endpoints
    if functia_de_aplicat(coeficienti, a_lim_inf) > functia_de_aplicat(coeficienti, b_lim_sup):
        return a_lim_inf
    return b_lim_sup
if __name__ == "__main__":
    # wipe the log file from any previous run
    file = open("evolutie.txt","a")
    file.truncate(0)
    file.close()
    lung_crm = lungime_crom()
    val_maxim_func = []     # best fitness observed in each generation
    val_medie_perform=[]    # mean-performance value for each generation
    for _ in range(etape):
        generare_random(lung_crm)
        s = val_my_func()
        with open("evolutie.txt", "a") as fout:
            fout.write("\nPopulatia initiala:\n")
            for i in range(dimensiune_populatie):
                fout.write( str(i + 1) + ": " + str(pop_baza_binar[i]) + " x = " + str(pop_baza_decimal[i]) + " f = " + str( f[i]) + "\n")
        asociere_prob(s)
        val_maxim_func.append( max(f) ) # record this generation's maximum
        # NOTE(review): the "mean" divides by the chromosome length, not by
        # dimensiune_populatie -- looks like a bug; confirm the intent
        val_medie_perform.append( s/lung_crm )
        with open("evolutie.txt", "a") as fout:
            fout.write("\n\n Probabilitati selectie:\n")
            for i in range(dimensiune_populatie):
                fout.write( "cromozom " + str(i + 1) + " probabilitate " + str(prob[i]) + "\n")
        aflare_selectie_interv()
        with open("evolutie.txt", "a") as fout:
            fout.write("\n\n Intervale probabilitati selectie:\n")
            for i in range(dimensiune_populatie + 1):
                fout.write(str(selectie_interv[i]) + "\n")
        selectia()
        with open("evolutie.txt", "a") as fout:
            fout.write("\n\n Dupa selectie:\n")
            for i in range(dimensiune_populatie):
                fout.write( str(i + 1) + ": " + str(pop_baza_binar[i]) + " x = " + str(pop_baza_decimal[i]) + " f = " + str( f[i]) + "\n")
        incrucisare()
        recombinare(lung_crm)
        with open("evolutie.txt", "a") as fout:
            fout.write("\n\n Dupa recombinare:\n")
            for i in range(dimensiune_populatie):
                fout.write( str(i + 1) + ": " + str(pop_baza_binar[i]) + " x = " + str(pop_baza_decimal[i]) + " f = " + str( f[i]) + "\n")
        with open("evolutie.txt", "a") as fout:
            fout.write("\nProbabilitate de mutatie pentru fiecare gena " + str( prob_mutatie) + "\n\nAu fost modificati cromozomii: \n")
        mutatie(lung_crm)
        with open("evolutie.txt", "a") as fout:
            fout.write("\n\nDupa mutatie:\n")
            for i in range(dimensiune_populatie):
                fout.write( str(i + 1) + ": " + str(pop_baza_binar[i]) + " x = " + str(pop_baza_decimal[i]) + " f = " + str( f[i]) + "\n")
        # reset all generation-scoped state before the next iteration
        pop_baza_decimal.clear()
        pop_baza_binar.clear()
        f.clear()
        prob.clear()
        selectie_interv.clear()
        crm_selectati.clear()
        crm_incrucisare.clear()
    print("\nEvolutia maximului: \n")
    # print(*val_maxim_func, sep='\n')
    print(val_maxim_func)
    # analytic maximum of f(x) = -x^2 + x + 2 on [a_lim_inf, b_lim_sup], for verification
    x_maxim = maximul_functiei_calculat( [-1, 1, 2], a_lim_inf, b_lim_sup)
    print("\nX maxim: " + str(x_maxim) )
    x_funct = my_func(x_maxim)
    print("f (X maxim): " + str(x_funct) )
    print("\nEvolutia mediei performantei\n")
    # print(*val_medie_perform, sep='\n')
    print(val_medie_perform)
    print()
    with open("evolutie.txt", "a") as fout:
        fout.write("\nEvolutia maximului: \n")
        fout.write('\n' + str(val_maxim_func) + '\n')
        fout.write('\n' + "f (X maxim): " + str(x_funct) + '\n')
        fout.write("\nEvolutia mediei performantei\n")
        fout.write('\n' + str(val_medie_perform) + '\n')
    fout.close()  # redundant: the with-block above already closed the file
    # Plotting
    plt.title('Evolutia maximului')
    plt.xlabel('Generatia')
    plt.ylabel('Valori')
    x = np.array(range(etape))
    plt.plot(x, val_maxim_func, 'bo', marker='o', markersize = 8, linestyle='--', linewidth = 2)
    plt.plot(x_maxim, x_funct, 'r*', markersize = 12)
    plt.text(x_maxim, x_funct,'Max_Funct')
    plt.grid()
    plt.show()
    plt.title('Evolutia mediei performantei')
    plt.xlabel('Generatia')
    plt.ylabel('Valori')
    plt.plot(x, val_medie_perform, color='purple', marker='o', markersize = 8, linewidth = 2)
    plt.grid()
    plt.show()
# -
| MY_Genetic_Alg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# name: python37664bit40ccb972f06c4506ace5644e4690cca6
# ---
import pandas as pd
import glob
import re
import datetime
files=glob.glob("../../COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/*.csv")
files.sort()
files
# +
def filedate(f):
    """Extract the report date from a path ending in 'MM-DD-YYYY.csv'.

    Returns a ``datetime.datetime`` on success, or None when the file name
    does not match the daily-report pattern.
    """
    # Fix: the '.' before 'csv' was unescaped and matched *any* character
    # (e.g. '03-23-2020xcsv'); it is now a literal dot.
    matchObj = re.match( r'^.*\/(\d{2})-(\d{2})-(\d{4})\.csv$', f, re.M)
    result = None
    if matchObj:
        result = datetime.datetime(int(matchObj.group(3)), int(matchObj.group(1)), int(matchObj.group(2)))
    return result
[ filedate(f) for f in files ]
# +
#dfs = [pd.read_csv(f, parse_dates=['Last Update'], infer_datetime_format=True) for f in files]
def read_csv(d, f):
    """Read one daily-report CSV, stamp it with its date, and normalise columns.

    Args:
        d: datetime parsed from the file name; inserted as a leading 'Date' column.
        f: path (or buffer) of the CSV to read.

    Returns:
        DataFrame with the newer JHU column names mapped onto the legacy ones.
    """
    df = pd.read_csv(f)
    df.insert(0, 'Date', d)
    # Bug fix: the rename key was 'Last_Update:' (stray colon), so the column
    # was never renamed; the actual column in the newer reports is 'Last_Update'.
    df = df.rename(columns={'Province_State': "Province/State", 'Country_Region': 'Country/Region', 'Lat': 'Latitude', 'Long_': 'Longitude', 'Last_Update': 'Last Update'})
    return df
# load every daily report, stamping each frame with the date from its file name
dfs = [read_csv(filedate(f), f) for f in files]
for df in dfs:
    print (df.dtypes)
# +
# merge all days into one frame; reset the index so it is unique again
df = pd.concat(dfs,ignore_index=True)
df.dtypes
# -
# sort chronologically, then by location, and persist the merged raw data
s=df.sort_values(by=['Date', 'Country/Region', 'Province/State', 'Admin2'])
s.to_csv("../data/daily_raw.csv", index=False)
s.describe()
# all US rows across every date
us_last=s[(s['Country/Region']=='US')]
us_last.describe()
# US snapshot for 2020-03-23 (the last merged day when this was written)
us_last=s[(s['Country/Region']=='US')&(s.Date==datetime.datetime(2020,3,23))]
us_last.describe()
# national confirmed-case total for that day
us_last.Confirmed.sum()
us_last.Admin2
| pyscript/mergeDailyReports.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# # Explore UK Crime Data with Pandas and GeoPandas
#
#
# ## Table of Contents
#
# 1. [Introduction to GeoPandas](#geopandas)<br>
# 2. [Getting ready](#ready)<br>
# 3. [London boroughs](#boroughs)<br>
# 3.1. [Load data](#load1)<br>
# 3.2. [Explore data](#explore1)<br>
# 4. [Crime data](#crime)<br>
# 4.1. [Load data](#load2)<br>
# 4.2. [Explore data](#explore2)<br>
# 5. [OSM data](#osm)<br>
# 5.1. [Load data](#load3)<br>
# 5.2. [Explore data](#explore3)<br>
# +
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, LineString, Polygon
import matplotlib.pyplot as plt
from datetime import datetime
# %matplotlib inline
# -
# <a id="geopandas"></a>
# ## 1. Introduction to GeoPandas
#
# > If have not used Pandas before, please read through this [10 minute tutorial](http://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html) or check out this [workshop](https://github.com/IBMDeveloperUK/pandas-workshop/blob/master/README.md).
#
# A GeoDataSeries or GeoDataFrame is very similar to a Pandas DataFrame, but has an additional column with the geometry. You can load a file, or create your own:
# +
# build a small demo table of UK cities with population, area and coordinates
df = pd.DataFrame({'city': ['London','Manchester','Birmingham','Leeds','Glasgow'],
                   'population': [9787426, 2553379, 2440986, 1777934, 1209143],
                   'area': [1737.9, 630.3, 598.9, 487.8, 368.5 ],
                   'latitude': [51.50853, 53.48095, 52.48142, 53.79648,55.86515],
                   'longitude': [-0.12574, -2.23743, -1.89983, -1.54785,-4.25763]})
# create shapely Points from (longitude, latitude) pairs -- note x=lon, y=lat
df['geometry'] = list(zip(df.longitude, df.latitude))
df['geometry'] = df['geometry'].apply(Point)
cities = gpd.GeoDataFrame(df, geometry='geometry')
cities.head()
# -
# Creating a basic map is similar to creating a plot from a Pandas DataFrame:
cities.plot(column='population');
# As `cities` is a DataFrame you can apply data manipulations, for instance:
cities['population'].mean()
# ### Points vs Lines vs Polygons
#
# We need some more data! Points by squeezing out the geometry for each city:
# squeeze() turns each single-row GeoSeries selection into a bare shapely Point
lon_point = cities.loc[cities['city'] == 'London', 'geometry'].squeeze()
man_point = cities.loc[cities['city'] == 'Manchester', 'geometry'].squeeze()
birm_point = cities.loc[cities['city'] == 'Birmingham', 'geometry'].squeeze()
leeds_point = cities.loc[cities['city'] == 'Leeds', 'geometry'].squeeze()
# Lines between 2 cities by creating a LineString between 2 points:
lon_man_line = gpd.GeoSeries(LineString([lon_point, man_point]))
man_birm_line = gpd.GeoSeries(LineString([man_point, birm_point]))
birm_lon_line = gpd.GeoSeries(LineString([birm_point,lon_point]))
leeds_man_line = gpd.GeoSeries(LineString([leeds_point, man_point]))
birm_leeds_line = gpd.GeoSeries(LineString([birm_point,leeds_point]))
# A polygon between 3 cities by creating a Polygon between 3 points:
# NOTE(review): the next expression repeats the London point (a degenerate
# two-vertex ring) and its result is discarded -- it only renders in the notebook
Polygon([[lon_point.x,lon_point.y],[man_point.x,man_point.y],[lon_point.x,lon_point.y]])
lon_man_birm_polygon = gpd.GeoSeries(Polygon([[lon_point.x,lon_point.y],[man_point.x,man_point.y],[birm_point.x,birm_point.y],[lon_point.x,lon_point.y]]))
leeds_man_birm_polygon = gpd.GeoSeries(Polygon([[leeds_point.x,leeds_point.y],[man_point.x,man_point.y],[birm_point.x,birm_point.y]]))
# And plot all of them together:
# +
fig, (poly1,poly2) = plt.subplots(ncols=2, sharex=True, sharey=True)
lon_man_birm_polygon.plot(ax=poly1, color='lightblue', edgecolor='black',alpha=0.5);
lon_man_line.plot(ax=poly1,color='violet',alpha=0.5);
man_birm_line.plot(ax=poly1,color='blue',alpha=0.5);
birm_lon_line.plot(ax=poly1,color='green',alpha=0.5);
leeds_man_birm_polygon.plot(ax=poly2, color='yellow', edgecolor='black',alpha=0.5);
leeds_man_line.plot(ax=poly2,color='red',alpha=0.5);
man_birm_line.plot(ax=poly2,color='blue',alpha=0.5);
birm_leeds_line.plot(ax=poly2,color='green',alpha=0.5);
# -
# ### Overlay
#
# With overlay you can combine geometries, for instance union, difference, symmetrical difference and intersection are some of the operations that can be performed.
#
# Let's combine the 2 polygons:
# +
poly1 = gpd.GeoDataFrame({'geometry': lon_man_birm_polygon})
poly2 = gpd.GeoDataFrame({'geometry': leeds_man_birm_polygon})
gpd.overlay( poly1, poly2, how='union').plot(color='red',alpha=0.5);
# -
# ### Buffer
cities1 = cities[0:1].copy()
cities1.head()
base = cities1.buffer(3).plot(color='blue',alpha=0.5);
cities1.buffer(2).plot(ax=base,color='green',alpha=0.5);
cities1.buffer(1).plot(ax=base,color='yellow',alpha=0.5);
cities1.plot(ax=base,color='red',alpha=0.5);
# ### Spatial relationships
#
# There are several functions to check geospatial relationships: `equals`, `contains`, `crosses`, `disjoint`,`intersects`,`overlaps`,`touches`,`within` and `covers`. These all use `shapely`: read more [here](https://shapely.readthedocs.io/en/stable/manual.html#predicates-and-relationships) and some more background [here](https://en.wikipedia.org/wiki/Spatial_relation).
#
# A few examples:
cities.head()
cities1.head()
cities1.contains(lon_point)
cities1[cities1.contains(lon_point)]
cities[cities.contains(man_point)]
# The inverse of `contains`:
cities[cities.within(cities1)]
cities[cities.disjoint(lon_point)]
# <a id="ready"></a>
# ## 2. Getting ready
#
# ### 2.1. Add data to Cloud Object Store (COS)
# The data for this workshop needs to be added to your project. Go to the GitHub repo and download the files in the [data folder](https://github.com/IBMDeveloperUK/python-geopandas-workshop/tree/master/data) to your machine.
#
# Add the files in the data menu on the right of the notebook (click the 1010 button at the top right if you do not see this) into COS:
#
# - boundaries.zip
# - 2018-1-metropolitan-street.zip
# - 2018-2-metropolitan-street.zip
# - 2018-metropolitan-stop-and-search.zip
# - london_inner_pois.zip
#
# ### 2.2. Project Access token
#
# As the data files are not simple csv files, we need a little trick to load the data. The first thing you need is a project access token to programmatically access COS.
#
# Click the 3 dots at the top of the notebook to insert the project token that you created earlier. This will create a new cell in the notebook that you will need to run first before continuing with the rest of the notebook. If you are sharing this notebook you should remove this cell, else anyone can use your Cloud Object Storage from this project.
#
# > If you cannot find the new cell it is probably at the top of this notebook. Scroll up, run the cell and continue with section 2.3
# ### 2.3. Helper function to load data into notebook
#
# The second thing you need to load data into the notebook is the below help function. Data will be copied to the local project space and loaded from there. The below helper function will do this for you.
# define the helper function
def download_file_to_local(project_filename, local_file_destination=None, project=None):
    """Copy a file from the project's Cloud Object Storage to the local disk.

    Uses project-lib to read the file contents as bytes and writes them to
    the local working directory (or to ``local_file_destination`` if given),
    so that libraries that only read local paths can open the file.

    Args:
        project_filename (str): the filename to be passed to ``project.get_file``.
        local_file_destination (str, optional): the filename for the local file
            if different; defaults to ``project_filename``.
        project: a valid project-lib ``Project`` object (required).

    Returns:
        int: 0 if everything worked.

    Raises:
        ValueError: if no ``project`` object is supplied.
    """
    if project is None:
        raise ValueError("A valid project-lib `project` object is required")
    # get the file contents as raw bytes from COS
    print("Attempting to get file {}".format(project_filename))
    _bytes = project.get_file(project_filename).read()
    # check for new file name, download the file
    print("Downloading...")
    if local_file_destination is None:
        local_file_destination = project_filename
    with open(local_file_destination, 'wb') as f:
        f.write(bytearray(_bytes))
    print("Completed writing to {}".format(local_file_destination))
    return 0
# <a id="boroughs"></a>
# ## 3. London boroughs
#
# <a id="load1"></a>
# ### 3.1. Load data
#
# Geospatial data comes in many formats, but with GeoPandas you can read most files with just one command. For example this geojson file with the London boroughs:
# load data from a url
# read the GeoJSON straight from the web into a GeoDataFrame
boroughs = gpd.read_file("https://skgrange.github.io/www/data/london_boroughs.json")
boroughs.head()
# <a id="explore1"></a>
# ### 3.2. Explore data
#
# To plot a basic map add `.plot()` to a geoDataFrame.
# default map: every geometry drawn in a single colour
boroughs.plot();
# colour each borough by a column value
boroughs.plot(column='code');
boroughs.plot(column='area_hectares');
# ### Dissolve
#
# The boroughs are made up of many districts that you might want to combine. For this example this can be done by adding a new column and then use `.dissolve()`:
# constant column so that every row ends up in the same dissolve group
boroughs['all'] = 1
# merge all geometries into one; numeric columns are aggregated with sum
allboroughs = boroughs.dissolve(by='all',aggfunc='sum')
allboroughs.head()
allboroughs.plot();
# To change the size of the map and remove the box around the map, run the below:
# Tuple unpacking is the conventional way to receive the (figure, axes)
# pair from plt.subplots; a list on the left-hand side works but is unidiomatic.
fig, ax = plt.subplots(1, figsize=(10, 6))
allboroughs.plot(ax=ax);
ax.axis('off');
# ### Join
#
# Let's join this with some more data:
# borough-level statistics as a plain (non-geo) DataFrame; the file is not
# UTF-8, hence the explicit encoding
df = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/python-pandas-workshop/master/london-borough-profiles.csv',encoding = 'unicode_escape')
df.head()
# The columns to join the two tables on are `code` and `Code`. To use the join method, first the index of both tables has to be set to this column.
#
# The below adds the columns from `df` to `boroughs`:
#
# index-aligned join: each borough row gains the matching profile columns
boroughs = boroughs.set_index('code').join(df.set_index('Code'))
boroughs.head()
# +
# aggregate the boroughs into Inner/Outer London; numeric columns are averaged
boroughs2 = boroughs.dissolve(by='Inner/_Outer_London', aggfunc='mean')
# conventional tuple unpacking for the (figure, axes) pair
fig, ax = plt.subplots(1, figsize=(10, 6))
boroughs2.plot(column='id', cmap='Paired', linewidth=0.5, edgecolor='black', legend=False, ax=ax);
ax.axis('off');
# -
# Below is a map of the average gender pay gap for each borough.
#
# * add a new column `paygap`
# * define the size of the plot
# * plot the background
# * add the paygap data and a title
# +
# pay gap as a percentage of male gross annual pay (2016)
male_pay = boroughs['Gross_Annual_Pay_-_Male_(2016)']
female_pay = boroughs['Gross_Annual_Pay_-_Female_(2016)']
boroughs['paygap'] = (male_pay - female_pay) / male_pay * 100

fig, ax = plt.subplots(1, figsize=(12, 8))
# background layer: all boroughs in grey, so boroughs without data stay visible
boroughs.plot(ax=ax, color="lightgrey", edgecolor='black', linewidth=0.5)
# data layer: only boroughs with a computed pay gap
boroughs.dropna().plot(column='paygap', cmap='Reds', edgecolor='black', linewidth=0.5,
                       legend=True, ax=ax);
ax.axis('off');
ax.set_title('Gender pay gap in London (2016)');
# -
# <a id="crime"></a>
# ## 4. Crime data
#
# The crime data is pre-processed in this [notebook](https://github.com/IBMDeveloperUK/geopandas-workshop/blob/master/notebooks/prepare-uk-crime-data.ipynb) so it is easier to read here. We will only look at data from 2018.
#
# Data is downloaded from https://data.police.uk/ ([License](https://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/))
#
# <a id="load2"></a>
# ### 4.1. Load data
#
# This dataset cannot be loaded into a geoDataFrame directly. Instead the data is loaded into a DataFrame and then converted:
# copy the crime archives from Cloud Object Storage into the local directory
download_file_to_local('2018-1-metropolitan-street.zip', project=project)
download_file_to_local('2018-2-metropolitan-street.zip', project=project)
street = pd.read_csv("./2018-1-metropolitan-street.zip")
street2 = pd.read_csv("./2018-2-metropolitan-street.zip")
# DataFrame.append is deprecated (removed in pandas 2.0); pd.concat is the
# supported equivalent and produces the same result here.
street = pd.concat([street, street2])
download_file_to_local('2018-metropolitan-stop-and-search.zip', project=project)
stop_search = pd.read_csv("./2018-metropolitan-stop-and-search.zip")
# Clean up of the local directory:
# ! rm *.zip
street.head()
stop_search.head()
# #### Convert to geoDataFrames
# build (longitude, latitude) tuples for every record
street['coordinates'] = list(zip(street.Longitude, street.Latitude))
# turn each tuple into a shapely Point
street['coordinates'] = street['coordinates'].apply(Point)
# promote the DataFrame to a GeoDataFrame, using the points as geometry
street = gpd.GeoDataFrame(street, geometry='coordinates')
street.head()
# same conversion for the stop-and-search records
stop_search['coordinates'] = list(zip(stop_search.Longitude, stop_search.Latitude))
stop_search['coordinates'] = stop_search['coordinates'].apply(Point)
stop_search = gpd.GeoDataFrame(stop_search, geometry='coordinates')
stop_search.head()
# <a id="explore2"></a>
# ### 4.2. Explore data
#
# <div class="alert alert-success">
# <b>EXERCISE</b> <br/>
# Explore the data with Pandas. There are no right or wrong answers, the questions below give you some suggestions at what to look at. <br/>
# <ul>
# <li>How much data is there? Is this changing over time? Can you plot this? </li>
# <li>Are there missing values? Should these rows be deleted? </li>
# <li>Which columns of the datasets contain useful information? What kind of categories are there and are they all meaningful?</li>
# <li>Which crimes occur most often? And near which location?</li>
# <li>Is there anything you want to explore further or are curious about? Is there any data that you will need for this?</li>
# <li>Notice anything odd about the latitude and longitudes? Read here how the data is anonymised: https://data.police.uk/about/.</li>
# </ul>
#
# Uncomment and run the cells starting with '# %load' to see some of the things that we came up with. Run each cell twice, once to load the code and then again to run the code.
# </div>
# your data exploration (add as many cells as you need by clicking the `+` at the top of the notebook)
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer1.py
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer2.py
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer3.py
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer4.py
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer5.py
# -
# Some things we noticed:
# * The number of stop and searches seems to go up. That is something you could investigate further. Is any of the categories increasing?
# * Another interesting question is how the object of search and the outcome are related. Are there types of searches where nothing is found more frequently?
# * In the original files there are also columns of gender, age range and ethnicity. If you want to explore this further you can change the code and re-process the data from this [notebook](https://github.com/IBMDeveloperUK/geopandas-workshop/blob/master/notebooks/prepare-uk-crime-data.ipynb) and use the full dataset.
# * And how could you combine the two datasets?
#
# ### Spatial join
#
# > The below solution was found [here](https://gis.stackexchange.com/questions/306674/geopandas-spatial-join-and-count) after googling for 'geopandas count points in polygon'
#
# The coordinate system (`crs`) needs to be the same for both GeoDataFrames.
# both layers must be in the same coordinate reference system before joining
print(boroughs.crs)
print(stop_search.crs)
# Add a borough to each point with a spatial join. This will add the `geometry` and other columns from `boroughs2` to the points in `stop_search`.
# stop_search has no crs set; assume it matches the boroughs layer
stop_search.crs = boroughs.crs
dfsjoin = gpd.sjoin(boroughs,stop_search)
dfsjoin.head()
# Then aggregate this table by creating a [pivot table](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html) where for each borough the number of types each of the categories in `Object of search` are counted. Then drop the pivot level and remove the index, so you can merge this new table back into the `boroughs2` DataFrame.
# one row per borough id, one column per search category, values = counts
dfpivot = pd.pivot_table(dfsjoin,index='id',columns='Object of search',aggfunc={'Object of search':'count'})
# flatten the MultiIndex columns and restore `id` as a plain column
dfpivot.columns = dfpivot.columns.droplevel()
dfpivot = dfpivot.reset_index()
dfpivot.head()
# left merge keeps boroughs with no recorded searches (counts become NaN)
boroughs3 = boroughs.merge(dfpivot, how='left',on='id')
boroughs3.head()
# Let's make some maps!
# +
# side-by-side choropleths of two search categories
fig, axs = plt.subplots(1, 2, figsize=(20,5))
# plot() returns the axes it drew on; the return value is not needed here
boroughs3.plot(column='Controlled drugs',ax=axs[0],cmap='Blues',legend=True);
axs[0].set_title('Controlled drugs', fontdict={'fontsize': '12', 'fontweight' : '5'});
boroughs3.plot(column='Stolen goods',ax=axs[1], cmap='Reds',legend=True);
axs[1].set_title('Stolen goods', fontdict={'fontsize': '12', 'fontweight' : '5'});
# -
# <div class="alert alert-success">
# <b>EXERCISE</b> <br/>
# Explore the data with GeoPandas. Again there are no right or wrong answers, the questions below give you some suggestions at what to look at. <br/>
# <ul>
# <li>Improve the above maps. How many arrests are there in each borough? Use the above method but first select only the arrests using the column 'Outcome'. Can you plot this? </li>
# <li>Are there changes over time? Is there a difference between months? Use `street` and look at Westminster or another borough where the crime rate seems higher. </li>
# </ul>
# </div>
# your data exploration (add as many cells as you need)
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer6.py
# +
# # %load https://raw.githubusercontent.com/IBMDeveloperUK/python-geopandas-workshop/master/answers/answer7.py
# -
# <a id="osm"></a>
# ## 5. OSM data
#
# The Open Street Map data is also pre-processed in a separate data-preparation notebook (see the workshop repository) so it is easier to read into this notebook.
#
# Data is downloaded from http://download.geofabrik.de/europe/great-britain.html and more details decription of the data is [here](http://download.geofabrik.de/osm-data-in-gis-formats-free.pdf).
#
# <a id="load3"></a>
# ### 5.1. Load data
# copy the POI archive from COS and read it directly from the zip
download_file_to_local('london_inner_pois.zip', project=project)
pois = gpd.read_file("zip://./london_inner_pois.zip")
pois.head()
# <a id="explore3"></a>
# ### 5.2. Explore data
pois.size
# `fclass` holds the POI category (pub, restaurant, ...)
pois['fclass'].unique()
# Count and plot the number of pubs by borough:
# +
# keep only the pubs, attach a borough to each one via a spatial join,
# then count pubs per borough id with a pivot table
pubs = pois[pois['fclass']=='pub']
pubs2 = gpd.sjoin(boroughs,pubs)
pubs3 = pd.pivot_table(pubs2,index='id',columns='fclass',aggfunc={'fclass':'count'})
pubs3.columns = pubs3.columns.droplevel()
pubs3 = pubs3.reset_index()
# merge the counts back onto the borough polygons and map them
boroughs5 = boroughs.merge(pubs3, left_on='id',right_on='id')
boroughs5.plot(column='pub',cmap='Blues',legend=True);
# -
# -
# <div class="alert alert-success">
# <b>EXERCISE</b> <br/>
# Explore the data further. Again there are no right or wrong answers, the questions below give you some suggestions at what to look at. <br/>
# <ul>
# <li> Is there a category of POIs that relates to the number of crimes? You might have to aggregate the data on a different more detailed level for this one. </li>
# <li> Can you find if there is a category of POIs that related to the number of crimes? </li>
# <li> Count the number of crimes around a certain POI. Choose a point and use the buffer function from the top of the notebook. But note that the crimes are anonymised, so the exact location is not given, only an approximation. </li>
#
# </ul>
# </div>
# answers
# Hopefully you got an idea of the possibilities with geospatial data now. There is a lot more to explore with this data. Let us know if you find anything interesting! We are on Twitter as @MargrietGr and @yaminigrao
#
#
# ### Author
# <NAME> is a Data & AI Developer Advocate for IBM. She develops and presents talks and workshops about data science and AI. She is active in the local developer communities through attending, presenting and organising meetups. She has a background in climate science where she explored large observational datasets of carbon uptake by forests during her PhD, and global scale weather and climate models as a postdoctoral fellow.
#
# Copyright © 2019 IBM. This notebook and its source code are released under the terms of the MIT License.
| notebooks/geopandas-workshop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!-- dom:TITLE: Demo - Lid driven cavity -->
# # Demo - Lid driven cavity
# <!-- dom:AUTHOR: <NAME> Email:<EMAIL> at Department of Mathematics, University of Oslo. -->
# <!-- Author: -->
# **<NAME>** (email: `<EMAIL>`), Department of Mathematics, University of Oslo.
#
# Date: **Jul 6, 2020**
#
# Copyright 2020, <NAME>. Released under CC Attribution 4.0 license
#
# **Summary.** The lid driven cavity is a classical benchmark for Navier Stokes solvers.
# This is a demonstration of how the Python module [shenfun](https://github.com/spectralDNS/shenfun) can be used to solve the lid
# driven cavity problem with full spectral accuracy using a mixed (coupled) basis
# in a 2D tensor product domain. The demo also shows how to use mixed
# tensor product spaces for vector valued equations. Note that the regular
# lid driven cavity, where the top wall has constant velocity and the
# remaining three walls are stationary, has a singularity at the two
# upper corners, where the velocity is discontinuous.
# Due to their global nature, spectral methods
# are usually not very good at handling problems with discontinuities, and
# for this reason we will also look at a regularized lid driven cavity,
# where the top lid moves according to $(1-x)^2(1+x)^2$, thus removing
# the corner discontinuities.
#
#
#
#
#
#
#
#
# <!-- dom:FIGURE: [https://raw.githack.com/spectralDNS/spectralutilities/master/figures/DrivenCavity.png] Velocity vectors for $Re=100$. <div id="fig:drivencavity"></div> -->
# <!-- begin figure -->
# <div id="fig:drivencavity"></div>
#
# <p>Velocity vectors for $Re=100$.</p>
# <img src="https://raw.githack.com/spectralDNS/spectralutilities/master/figures/DrivenCavity.png" >
#
# <!-- end figure -->
#
#
#
#
# ## Navier Stokes equations
# <div id="demo:navierstokes"></div>
#
# The nonlinear steady Navier Stokes equations are given in strong form as
# $$
# \begin{align*}
# \nu \nabla^2 \boldsymbol{u} - \nabla p &= \nabla \cdot \boldsymbol{u} \boldsymbol{u} \quad \text{in } \Omega , \\
# \nabla \cdot \boldsymbol{u} &= 0 \quad \text{in } \Omega \\
# \int_{\Omega} p dx &= 0 \\
# \boldsymbol{u}(x, y=1) = (1, 0) \, &\text{ or }\, \boldsymbol{u}(x, y=1) = ((1-x)^2(1+x)^2, 0) \\
# \boldsymbol{u}(x, y=-1) &= (0, 0) \\
# \boldsymbol{u}(x=\pm 1, y) &= (0, 0)
# \end{align*}
# $$
# where $\boldsymbol{u}, p$ and $\nu$ are, respectively, the
# fluid velocity vector, pressure and kinematic viscosity. The domain
# $\Omega = [-1, 1]^2$ and the nonlinear term $\boldsymbol{u} \boldsymbol{u}$ is the
# outer product of vector $\boldsymbol{u}$ with itself. Note that the final
# $\int_{\Omega} p dx = 0$ is there because there is no Dirichlet boundary
# condition on the pressure and the system of equations would otherwise be
# ill conditioned.
#
# We want to solve these steady nonlinear Navier Stokes equations with the Galerkin
# method, using the [shenfun](https://github.com/spectralDNS/shenfun) Python
# package. The first thing we need to do then is to import all of shenfun's
# functionality
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from shenfun import *
# -
# Note that MPI for Python ([mpi4py](https://bitbucket.org/mpi4py/mpi4py))
# is a requirement for shenfun, but the current solver cannot be used with more
# than one processor.
#
# ## Bases and tensor product spaces
# <div id="sec:bases"></div>
#
# With the Galerkin method we need basis functions for both velocity and
# pressure, as well as for the
# nonlinear right hand side. A Dirichlet basis will be used for velocity,
# whereas there is no boundary restriction on the pressure basis. For both
# two-dimensional bases we will use one basis function for the $x$-direction,
# $\mathcal{X}_k(x)$, and one for the $y$-direction, $\mathcal{Y}_l(y)$. And
# then we create two-dimensional basis functions like
# <!-- Equation labels as ordinary links -->
# <div id="eq:nstestfunction"></div>
#
# $$
# \begin{equation}
# v_{kl}(x, y) = \mathcal{X}_k(x) \mathcal{Y}_l(y), \label{eq:nstestfunction} \tag{1}
# \end{equation}
# $$
# and solutions (trial functions) as
# <!-- Equation labels as ordinary links -->
# <div id="eq:nstrialfunction"></div>
#
# $$
# \begin{equation}
# u(x, y) = \sum_{k}\sum_{l} \hat{u}_{kl} v_{kl}(x, y). \label{eq:nstrialfunction} \tag{2}
# \end{equation}
# $$
# For the homogeneous Dirichlet boundary condition the basis functions
# $\mathcal{X}_k(x)$ and $\mathcal{Y}_l(y)$ are chosen as composite
# Legendre polynomials (we could also use Chebyshev):
# <!-- Equation labels as ordinary links -->
# <div id="eq:D0"></div>
#
# $$
# \begin{equation}
# \mathcal{X}_k(x) = L_k(x) - L_{k+2}(x), \quad \forall \, k \in \boldsymbol{k}^{N_0-2}, \label{eq:D0} \tag{3}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="eq:D1"></div>
#
# $$
# \begin{equation}
# \mathcal{Y}_l(y) = L_l(y) - L_{l+2}(y), \quad \forall \, l \in \boldsymbol{l}^{N_1-2}, \label{eq:D1} \tag{4}
# \end{equation}
# $$
# where $\boldsymbol{k}^{N_0-2} = (0, 1, \ldots, N_0-3)$, $\boldsymbol{l}^{N_1-2} = (0, 1, \ldots, N_1-3)$
# and $N = (N_0, N_1)$ is the number
# of quadrature points in each direction. Note that $N_0$ and $N_1$ do not need
# to be the same. The basis function ([3](#eq:D0)) satisfies
# the homogeneous Dirichlet boundary conditions at $x=\pm 1$ and ([4](#eq:D1)) the same
# at $y=\pm 1$. As such, the basis function $v_{kl}(x, y)$ satisfies the homogeneous Dirichlet boundary
# condition for the entire domain.
#
# With shenfun we create these homogeneous spaces, $D_0^{N_0}(x)=\text{span}\{L_k-L_{k+2}\}_{k=0}^{N_0-2}$ and
# $D_0^{N_1}(y)=\text{span}\{L_l-L_{l+2}\}_{l=0}^{N_1-2}$ as
# number of quadrature points in each direction (N_0, N_1)
N = (51, 51)
family = 'Legendre' # or use 'Chebyshev'
quad = 'LG' # for Chebyshev use 'GC' or 'GL'
# homogeneous Dirichlet spaces (bc=(0,0) fixes zero at both ends),
# D0X for the x-direction and D0Y for the y-direction
D0X = FunctionSpace(N[0], family, quad=quad, bc=(0, 0))
D0Y = FunctionSpace(N[1], family, quad=quad, bc=(0, 0))
# The spaces are here the same, but we will use `D0X` in the $x$-direction and
# `D0Y` in the $y$-direction. But before we use these bases in
# tensor product spaces, they remain identical as long as $N_0 = N_1$.
#
# Special attention is required by the moving lid. To get a solution
# with nonzero boundary condition at $y=1$ we need to add one more basis function
# that satisfies that solution. In general, a nonzero boundary condition
# can be added on both sides of the domain using the following basis
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# \mathcal{Y}_l(y) = L_l(y) - L_{l+2}(y), \quad \forall \, l \in \boldsymbol{l}^{N_1-2}.
# \label{_auto1} \tag{5}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# \mathcal{Y}_{N_1-2}(y) = (L_0+L_1)/2 \quad \left(=(1+y)/2\right),
# \label{_auto2} \tag{6}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# \mathcal{Y}_{N_1-1}(y) = (L_0-L_1)/2 \quad \left(=(1-y)/2\right).
# \label{_auto3} \tag{7}
# \end{equation}
# $$
# And then the unknown component $N_1-2$ decides the value at $y=1$, whereas
# the unknown at $N_1-1$ decides the value at $y=-1$. Here we only need to
# add the $N_1-2$ component, but for generality this is implemented in shenfun
# using both additional basis functions. We create the space
# $D_1^{N_1}(y)=\text{span}\{\mathcal{Y}_l(y)\}_{l=0}^{N_1-1}$ as
# inhomogeneous Dirichlet space for the lid: value 1 at y=1, 0 at y=-1
D1Y = FunctionSpace(N[1], family, quad=quad, bc=(1, 0))
# where `bc=(1, 0)` fixes the values for $y=1$ and $y=-1$, respectively.
# For a regularized lid driven cavity the velocity of the top lid is
# $(1-x)^2(1+x)^2$ and not unity. To implement this boundary condition
# instead, we can make use of [sympy](https://www.sympy.org) and
# quite straight forward do
import sympy
# symbolic coordinate, used to express a spatially varying lid velocity
x = sympy.symbols('x')
# regularized lid: uncomment to replace the constant lid velocity with
# (1-x)^2(1+x)^2, which removes the corner discontinuities
#D1Y = FunctionSpace(N[1], family, quad=quad, bc=((1-x)**2*(1+x)**2, 0))
# Uncomment the last line to run the regularized boundary conditions.
# Otherwise, there is no difference at all between the regular and the
# regularized lid driven cavity implementations.
#
# The pressure basis that comes with no restrictions for the boundary is a
# little trickier. The reason for this has to do with
# inf-sup stability. The obvious choice of basis functions are the
# regular Legendre polynomials $L_k(x)$ in $x$ and $L_l(y)$ in the
# $y$-directions. The problem is that for the natural choice of
# $(k, l) \in \boldsymbol{k}^{N_0} \times \boldsymbol{l}^{N_1}$
# there are nullspaces and the problem is not well-defined. It turns out
# that the proper choice for the pressure basis is simply the regular
# Legendre basis functions, but for
# $(k, l) \in \boldsymbol{k}^{N_0-2} \times \boldsymbol{l}^{N_1-2}$.
# The bases $P^{N_0}(x)=\text{span}\{L_k(x)\}_{k=0}^{N_0-3}$ and
# $P^{N_1}(y)=\text{span}\{L_l(y)\}_{l=0}^{N_1-3}$ are created as
# pressure bases: plain Legendre polynomials with no boundary restriction
PX = FunctionSpace(N[0], family, quad=quad)
PY = FunctionSpace(N[1], family, quad=quad)
# zero out the two highest modes in each direction — the pressure space must
# be two degrees smaller than the velocity space for a well-defined problem
PX.slice = lambda: slice(0, N[0]-2)
PY.slice = lambda: slice(0, N[1]-2)
# Note that we still use these spaces with the same $N_0 \cdot N_1$
# quadrature points in real space, but the two highest frequencies have
# been set to zero.
#
# We have now created all relevant function spaces for the problem at hand.
# It remains to combine these spaces into tensor product spaces, and to
# combine tensor product spaces into mixed (coupled) tensor product
# spaces. From the Dirichlet bases we create two different tensor
# product spaces, whereas one is enough for the pressure
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# V_{1}^{\boldsymbol{N}}(\boldsymbol{x}) = D_0^{N_0}(x) \otimes D_1^{N_1}(y),
# \label{_auto4} \tag{8}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# V_{0}^{\boldsymbol{N}}(\boldsymbol{x}) = D_0^{N_0}(x) \otimes D_0^{N_1}(y),
# \label{_auto5} \tag{9}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# P^{\boldsymbol{N}}(\boldsymbol{x}) = P^{N_0}(x) \otimes P^{N_1}(y).
# \label{_auto6} \tag{10}
# \end{equation}
# $$
# With shenfun the tensor product spaces are created as
# 2D tensor product spaces: V1 carries the inhomogeneous lid bc in y,
# V0 is fully homogeneous, P is the (restricted) pressure space
V1 = TensorProductSpace(comm, (D0X, D1Y))
V0 = TensorProductSpace(comm, (D0X, D0Y))
P = TensorProductSpace(comm, (PX, PY))
# These tensor product spaces are all scalar valued.
# The velocity is a vector, and a vector requires a mixed vector basis like
# $W_1^{\boldsymbol{N}} = V_1^{\boldsymbol{N}} \times V_0^{\boldsymbol{N}}$. The vector basis is created
# in shenfun as
# vector-valued velocity spaces: W1 puts the lid bc on the x-component,
# W0 is homogeneous throughout (used for the test functions)
W1 = VectorSpace([V1, V0])
W0 = VectorSpace([V0, V0])
# Note that the second vector basis, $W_0^{\boldsymbol{N}} = V_0^{\boldsymbol{N}} \times V_0^{\boldsymbol{N}}$, uses
# homogeneous boundary conditions throughout.
#
# ## Mixed variational form
# <div id="sec:mixedform"></div>
#
# We now formulate a variational problem using the
# Galerkin method: Find
# $\boldsymbol{u} \in W_1^{\boldsymbol{N}}$ and $p \in P^{\boldsymbol{N}}$ such that
# <!-- Equation labels as ordinary links -->
# <div id="eq:nsvarform"></div>
#
# $$
# \begin{equation}
# \int_{\Omega} (\nu \nabla^2 \boldsymbol{u} - \nabla p ) \cdot \boldsymbol{v} \, dxdy = \int_{\Omega} (\nabla \cdot \boldsymbol{u}\boldsymbol{u}) \cdot \boldsymbol{v}\, dxdy \quad\forall \boldsymbol{v} \, \in \, W_0^{\boldsymbol{N}}, \label{eq:nsvarform} \tag{11}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# \int_{\Omega} \nabla \cdot \boldsymbol{u} \, q \, dxdy = 0 \quad\forall q \, \in \, P^{\boldsymbol{N}}.
# \label{_auto7} \tag{12}
# \end{equation}
# $$
# Note that we are using test functions $\boldsymbol{v}$ with homogeneous
# boundary conditions.
#
# The first obvious issue with Eq ([11](#eq:nsvarform)) is the nonlinearity.
# In other words we will
# need to linearize and iterate to be able to solve these equations with
# the Galerkin method. To this end we will introduce the solution on
# iteration $k \in [0, 1, \ldots]$ as $\boldsymbol{u}^k$ and compute the nonlinearity
# using only known solutions
# $\int_{\Omega} (\nabla \cdot \boldsymbol{u}^k\boldsymbol{u}^k) \cdot \boldsymbol{v}\, dxdy$.
# Using further integration by parts we end up with the equations to solve
# for iteration number $k+1$ (using $\boldsymbol{u} = \boldsymbol{u}^{k+1}$ and $p=p^{k+1}$
# for simplicity)
# <!-- Equation labels as ordinary links -->
# <div id="eq:nsvarform2"></div>
#
# $$
# \begin{equation}
# -\int_{\Omega} \nu \nabla \boldsymbol{u} \, \colon \nabla \boldsymbol{v} \, dxdy + \int_{\Omega} p \nabla \cdot \boldsymbol{v} \, dxdy = \int_{\Omega} (\nabla \cdot \boldsymbol{u}^k\boldsymbol{u}^k) \cdot \boldsymbol{v}\, dxdy \quad\forall \boldsymbol{v} \, \in \, W_0^{\boldsymbol{N}}, \label{eq:nsvarform2} \tag{13}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# \int_{\Omega} \nabla \cdot \boldsymbol{u} \, q \, dxdy = 0 \quad\forall q \, \in \, P^{\boldsymbol{N}}.
# \label{_auto8} \tag{14}
# \end{equation}
# $$
# Note that the nonlinear term may also be integrated by parts and
# evaluated as $\int_{\Omega}-\boldsymbol{u}^k\boldsymbol{u}^k \, \colon \nabla \boldsymbol{v} \, dxdy$. All
# boundary integrals disappear since we are using test functions with
# homogeneous boundary conditions.
#
# Since we are to solve for $\boldsymbol{u}$ and $p$ at the same time, we formulate a
# mixed (coupled) problem: find $(\boldsymbol{u}, p) \in W_1^{\boldsymbol{N}} \times P^{\boldsymbol{N}}$
# such that
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# a((\boldsymbol{u}, p), (\boldsymbol{v}, q)) = L((\boldsymbol{v}, q)) \quad \forall (\boldsymbol{v}, q) \in W_0^{\boldsymbol{N}} \times P^{\boldsymbol{N}},
# \label{_auto9} \tag{15}
# \end{equation}
# $$
# where bilinear ($a$) and linear ($L$) forms are given as
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# a((\boldsymbol{u}, p), (\boldsymbol{v}, q)) = -\int_{\Omega} \nu \nabla \boldsymbol{u} \, \colon \nabla \boldsymbol{v} \, dxdy + \int_{\Omega} p \nabla \cdot \boldsymbol{v} \, dxdy + \int_{\Omega} \nabla \cdot \boldsymbol{u} \, q \, dxdy,
# \label{_auto10} \tag{16}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto11"></div>
#
# $$
# \begin{equation}
# L((\boldsymbol{v}, q); \boldsymbol{u}^{k}) = \int_{\Omega} (\nabla \cdot \boldsymbol{u}^{k}\boldsymbol{u}^{k}) \cdot \boldsymbol{v}\, dxdy.
# \label{_auto11} \tag{17}
# \end{equation}
# $$
# Note that the bilinear form will assemble to a block matrix, whereas the right hand side
# linear form will assemble to a block vector. The bilinear form does not change
# with the solution and as such it does not need to be reassembled inside
# an iteration loop.
#
# The algorithm used to solve the equations are:
#
# * Set $k = 0$
#
# * Guess $\boldsymbol{u}^0 = (0, 0)$
#
# * while not converged:
#
# * assemble $L((\boldsymbol{v}, q); \boldsymbol{u}^{k})$
#
# * solve $a((\boldsymbol{u}, p), (\boldsymbol{v}, q)) = L((\boldsymbol{v}, q); \boldsymbol{u}^{k})$ for $\boldsymbol{u}^{k+1}, p^{k+1}$
#
# * compute error = $\int_{\Omega} (\boldsymbol{u}^{k+1}-\boldsymbol{u}^{k})^2 \, dxdy$
#
# * if error $<$ some tolerance then converged = True
#
# * $k$ += $1$
#
#
# ## Implementation of solver
#
# We will now implement the coupled variational problem described in previous
# sections. First of all, since we want to solve for the velocity and pressure
# in a coupled solver, we have to
# create a mixed tensor product space $VQ = W_1^{\boldsymbol{N}} \times P^{\boldsymbol{N}}$ that
# couples velocity and pressure
# mixed space W1 x P, so velocity and pressure are solved for together
VQ = CompositeSpace([W1, P]) # Coupling velocity and pressure
# We can now create test- and trialfunctions for the coupled space $VQ$,
# and then split them up into components afterwards:
up = TrialFunction(VQ)
vq = TestFunction(VQ)
# split the mixed trial/test functions into velocity and pressure parts
u, p = up
v, q = vq
# **Notice.**
#
# The test function `v` is using homogeneous Dirichlet boundary conditions even
# though it is derived from `VQ`, which contains `W1`. It is currently not (and will
# probably never be) possible to use test functions with inhomogeneous
# boundary conditions.
#
#
#
# With the basisfunctions in place we may assemble the different blocks of the
# final coefficient matrix. For this we also need to specify the kinematic
# viscosity, which is given here in terms of the Reynolds number:
# kinematic viscosity expressed through the Reynolds number
Re = 100.
nu = 2./Re
# blocks of the coupled coefficient matrix (lists of TPMatrix):
A = inner(grad(v), -nu*grad(u))  # viscous block
G = inner(div(v), p)             # pressure-gradient block
D = inner(q, div(u))             # divergence (continuity) block
# **Notice.**
#
# The inner products may also be assembled with one single line, as
# AA = inner(grad(v), -nu*grad(u)) + inner(div(v), p) + inner(q, div(u))
#
# But note that this requires addition, not subtraction, of inner products,
# and it is not possible to move the negation to `-inner(grad(v), nu*grad(u))`.
# This is because the `inner` function returns a list of
# tensor product matrices of type `TPMatrix`, and you cannot
# negate a list.
#
#
#
# The assembled subsystems `A, G` and `D` are lists containing the different blocks of
# the complete, coupled, coefficient matrix. `A` actually contains 4
# tensor product matrices of type `TPMatrix`. The first two
# matrices are for vector component zero of the test function `v[0]` and
# trial function `u[0]`, the
# matrices 2 and 3 are for components 1. The first two matrices are as such for
# A[0:2] = inner(grad(v[0]), -nu*grad(u[0]))
#
# Breaking it down the inner product is mathematically
# <!-- Equation labels as ordinary links -->
# <div id="eq:partialeq1"></div>
#
# $$
# \begin{equation}
# \label{eq:partialeq1} \tag{18}
# \int_{\Omega}-\nu \left(\frac{\partial \boldsymbol{v}[0]}{\partial x}, \frac{\partial \boldsymbol{v}[0]}{\partial y}\right) \cdot \left(\frac{\partial \boldsymbol{u}[0]}{\partial x}, \frac{\partial \boldsymbol{u}[0]}{\partial y}\right) dx dy .
# \end{equation}
# $$
# We can now insert for test function $\boldsymbol{v}[0]$
# <!-- Equation labels as ordinary links -->
# <div id="_auto12"></div>
#
# $$
# \begin{equation}
# \boldsymbol{v}[0]_{kl} = \mathcal{X}_k \mathcal{Y}_l, \quad (k, l) \in \boldsymbol{k}^{N_0-2} \times \boldsymbol{l}^{N_1-2}
# \label{_auto12} \tag{19}
# \end{equation}
# $$
# and trialfunction
# <!-- Equation labels as ordinary links -->
# <div id="_auto13"></div>
#
# $$
# \begin{equation}
# \boldsymbol{u}[0]_{mn} = \sum_{m=0}^{N_0-3} \sum_{n=0}^{N_1-1} \hat{\boldsymbol{u}}[0]_{mn} \mathcal{X}_m \mathcal{Y}_n,
# \label{_auto13} \tag{20}
# \end{equation}
# $$
# where $\hat{\boldsymbol{u}}$ are the unknown degrees of freedom for the velocity vector.
# Notice that the sum over the second
# index runs all the way to $N_1-1$, whereas the other indices runs to either
# $N_0-3$ or $N_1-3$. This is because of the additional basis functions required
# for the inhomogeneous boundary condition.
#
# Inserting for these basis functions into ([18](#eq:partialeq1)), we obtain after a few trivial
# manipulations
# <!-- Equation labels as ordinary links -->
# <div id="_auto14"></div>
#
# $$
# \begin{equation}
# -\sum_{m=0}^{N_0-3} \sum_{n=0}^{N_1-1} \nu \Big( \underbrace{\int_{-1}^{1} \frac{\partial \mathcal{X}_k(x)}{\partial x} \frac{\partial \mathcal{X}_m}{\partial x} dx \int_{-1}^{1} \mathcal{Y}_l \mathcal{Y}_n dy}_{A[0]} + \underbrace{\int_{-1}^{1} \mathcal{X}_k(x) \mathcal{X}_m(x) dx \int_{-1}^{1} \frac{\partial \mathcal{Y}_l}{\partial y} \frac{\partial \mathcal{Y}_n}{\partial y} dy}_{A[1]} \Big) \hat{\boldsymbol{u}}[0]_{mn}.
# \label{_auto14} \tag{21}
# \end{equation}
# $$
# We see that each tensor product matrix (both A[0] and A[1]) is composed as
# outer products of two smaller matrices, one for each dimension.
# The first tensor product matrix, A[0], is
# <!-- Equation labels as ordinary links -->
# <div id="_auto15"></div>
#
# $$
# \begin{equation}
# \underbrace{\int_{-1}^{1} \frac{\partial \mathcal{X}_k(x)}{\partial x} \frac{\partial \mathcal{X}_m}{\partial x} dx}_{c_{km}} \underbrace{\int_{-1}^{1} \mathcal{Y}_l \mathcal{Y}_n dy}_{f_{ln}}
# \label{_auto15} \tag{22}
# \end{equation}
# $$
# where $C\in \mathbb{R}^{N_0-2 \times N_1-2}$ and $F \in \mathbb{R}^{N_0-2 \times N_1}$.
# Note that due to the inhomogeneous boundary conditions this last matrix $F$
# is actually not square. However, remember that all contributions from the two highest
# degrees of freedom ($\hat{\boldsymbol{u}}[0]_{m,N_1-2}$ and $\hat{\boldsymbol{u}}[0]_{m,N_1-1}$) are already
# known and they can, as such, be moved directly over to the right hand side of the
# linear algebra system that is to be solved. More precisely, we can split the
# tensor product matrix into two contributions and obtain
# $$
# \sum_{m=0}^{N_0-3}\sum_{n=0}^{N_1-1} c_{km}f_{ln} \hat{\boldsymbol{u}}[0]_{m, n} = \sum_{m=0}^{N_0-3}\sum_{n=0}^{N_1-3}c_{km}f_{ln}\hat{\boldsymbol{u}}[0]_{m, n} + \sum_{m=0}^{N_0-3}\sum_{n=N_1-2}^{N_1-1}c_{km}f_{ln}\hat{\boldsymbol{u}}[0]_{m, n}, \quad \forall (k, l) \in \boldsymbol{k}^{N_0-2} \times \boldsymbol{l}^{N_1-2},
# $$
# where the first term on the right hand side is square and the second term is known and
# can be moved to the right hand side of the linear algebra equation system.
#
# All the parts of the matrices that are to be moved to the right hand side
# can be extracted from A, G and D as follows
# Extract the parts of A, G and D that act on the known boundary degrees of
# freedom, so the submatrices that remain in A, G and D are square.
# NOTE(review): extract_bc_matrices, A, G and D are defined earlier in the
# notebook (not visible in this chunk).
bc_mats = extract_bc_matrices([A, G, D])
# These matrices are applied to the solution below (see `BlockMatrix BM`).
# Furthermore, this leaves us with square submatrices (A, G, D), which make up a
# symmetric block matrix
# <!-- Equation labels as ordinary links -->
# <div id="eq:nsbmatrix"></div>
#
# $$
# M =
# \begin{bmatrix}
# A[0]+A[1] & 0 & G[0] \\ \label{eq:nsbmatrix} \tag{23}
# 0 & A[2]+A[3] & G[1] \\
# D[0] & D[1] & 0
# \end{bmatrix}
# $$
# This matrix, and the matrix responsible for the boundary degrees of freedom,
# can be assembled from the pieces we already have as
# Symmetric block matrix M of eq. (23) for the coupled velocity-pressure system.
M = BlockMatrix(A+G+D)
# Block matrix holding the boundary-dof contributions that were moved to the rhs.
BM = BlockMatrix(bc_mats)
# We now have all the matrices we need in order to solve the Navier Stokes equations.
# However, we also need some work arrays for iterations and we need to
# assemble the constant boundary contribution to the right hand side
# +
# Create Function to hold solution. Use set_boundary_dofs to fix the degrees
# of freedom in uh_hat that determines the boundary conditions.
# Solution Function on the mixed space VQ; set_boundary_dofs fixes the dofs
# encoding the inhomogeneous (lid) boundary condition.
uh_hat = Function(VQ).set_boundary_dofs()
ui_hat = uh_hat[0]  # velocity part of the mixed solution
# New solution (iterative)
uh_new = Function(VQ).set_boundary_dofs()
ui_new = uh_new[0]
# Compute the constant contribution to rhs due to nonhomogeneous boundary conditions
bh_hat0 = Function(VQ)
bh_hat0 = BM.matvec(-uh_hat, bh_hat0) # Negative because moved to right hand side
bi_hat0 = bh_hat0[0]
# -
# Note that `bh_hat0` now contains the part of the right hand side that is
# due to the non-symmetric part of assembled matrices. The appended
# `set_boundary_dofs()` ensures the known boundary values of
# the solution are fixed for `ui_hat` and `ui_new`.
#
# The nonlinear right hand side also requires some additional attention.
# Nonlinear terms are usually computed in physical space before transforming
# to spectral. For this we need to evaluate the velocity vector on the
# quadrature mesh. We also need a rank 2 Array to hold the outer
# product $\boldsymbol{u}\boldsymbol{u}$. The required arrays and spaces are
# created as
# +
# Right hand side Function on the mixed space.
bh_hat = Function(VQ)
# Create arrays to hold velocity vector solution
ui = Array(W1)  # velocity evaluated on the quadrature mesh (physical space)
# Create work arrays for nonlinear part
QT = CompositeSpace([W1, W0]) # for uiuj
uiuj = Array(QT)         # rank-2 outer product u (x) u in physical space
uiuj_hat = Function(QT)  # its forward (spectral) transform
# -
# The right hand side $L((\boldsymbol{v}, q);\boldsymbol{u}^{k});$ is computed in its
# own function `compute_rhs` as
def compute_rhs(ui_hat, bh_hat):
    """Assemble the right hand side L((v, q); u^k) for the current velocity iterate.

    Transforms the velocity to physical space, forms the nonlinear term u (x) u,
    transforms it back, assembles the weak form, and adds the constant boundary
    contribution bh_hat0. Returns bh_hat filled in place.
    """
    global ui, uiuj, uiuj_hat, V1, bh_hat0
    bh_hat.fill(0)
    # Velocity on the quadrature mesh (physical space).
    ui = W1.backward(ui_hat, ui)
    # Nonlinear term: outer product of the velocity with itself.
    uiuj = outer(ui, ui, uiuj)
    uiuj_hat = uiuj.forward(uiuj_hat)
    bi_hat = bh_hat[0]
    # Equivalent weak form kept for reference:
    #bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)
    bi_hat = inner(grad(v), -uiuj_hat, output_array=bi_hat)
    # Constant contribution from the inhomogeneous boundary condition.
    bh_hat += bh_hat0
    return bh_hat
# Here `outer` is a shenfun function that computes the
# outer product of two vectors and returns the product in a rank two
# array (here `uiuj`). With `uiuj` forward transformed to `uiuj_hat`
# we can assemble the linear form either as `inner(v, div(uiuj_hat)` or
# `inner(grad(v), -uiuj_hat)`. Also notice that the constant contribution
# from the inhomogeneous boundary condition, `bh_hat0`,
# is added to the right hand side vector.
#
# Now all that remains is to guess an initial solution and solve
# iteratively until convergence. For initial solution we simply set the
# velocity and pressure to zero and solve the Stokes equations:
# Initial guess: solve the linear Stokes problem with only the boundary rhs.
from scipy.sparse.linalg import splu
uh_hat, Ai = M.solve(bh_hat0, u=uh_hat, constraints=((2, 0, 0),), return_system=True) # Constraint for component 2 of mixed space
# Pre-factor the assembled sparse matrix for reuse in every iteration below.
Alu = splu(Ai)
uh_new[:] = uh_hat
# Note that the `BlockMatrix` given by `M` has a solve method that sets up
# a sparse coefficient matrix `Ai` of size $\mathbb{R}^{3(N_0-2)(N_1-2) \times 3(N_0-2)(N_1-2)}$,
# and then solves using [scipy.sparse.linalg.spsolve](http://scipy.github.io/devdocs/generated/scipy.sparse.linalg.spsolve.html#scipy.sparse.linalg.spsolve).
# The matrix `Ai` is then pre-factored for reuse with [splu](http://scipy.github.io/devdocs/generated/scipy.sparse.linalg.splu.html#scipy.sparse.linalg.splu).
# Also note that the `constraints=((2, 0, 0),)` keyword argument
# ensures that the pressure integrates to zero, i.e., $\int_{\Omega} pdxdy=0$.
# Here the number 2 tells us that block component 2 in the mixed space
# (the pressure) should be integrated, dof 0 should be fixed, and it
# should be fixed to 0.
#
# With an initial solution from the Stokes equations we are ready to start iterating.
# However, for convergence it is necessary to add some underrelaxation $\alpha$,
# and update the solution each time step as
# $$
# \begin{align*}
# \hat{\boldsymbol{u}}^{k+1} &= \alpha \hat{\boldsymbol{u}}^* + (1-\alpha)\hat{\boldsymbol{u}}^{k},\\
# \hat{p}^{k+1} &= \alpha \hat{p}^* + (1-\alpha)\hat{p}^{k},
# \end{align*}
# $$
# where $\hat{\boldsymbol{u}}^*$ and $\hat{p}^*$ are the newly computed velocity
# and pressure returned from `M.solve`. Without underrelaxation the solution
# will quickly blow up. The iteration loop goes as follows
# +
converged = False
count = 0
alfa = 0.5  # underrelaxation factor; without it the iteration diverges
while not converged:
    count += 1
    bh_hat = compute_rhs(ui_hat, bh_hat)
    uh_new = M.solve(bh_hat, u=uh_new, constraints=((2, 0, 0),), Alu=Alu) # Constraint for component 2 of mixed space
    # Residual between previous and newly computed velocity coefficients.
    error = np.linalg.norm(ui_hat-ui_new)
    # Underrelaxed update: u^{k+1} = alfa*u* + (1-alfa)*u^k.
    uh_hat[:] = alfa*uh_new + (1-alfa)*uh_hat
    converged = abs(error) < 1e-10 or count >= 10000
    print('Iteration %d Error %2.4e' %(count, error))
# Transform the converged solution to physical space and plot the vector field.
up = uh_hat.backward()
u, p = up
X = V0.local_mesh(True)
plt.figure()
plt.quiver(X[0], X[1], u[0], u[1])
# -
# The last three lines plots the velocity vectors that are shown
# in [Figure](#fig:drivencavity). The solution is apparently nice
# and smooth, but hidden underneath are Gibbs oscillations from the
# corner discontinuities. This is painfully obvious when switching from
# Legendre to Chebyshev polynomials. With Chebyshev the same plot looks
# like [Figure](#fig:drivencavitycheb). However, choosing instead the
# regularized lid, the solutions will be nice and smooth, both for
# Legendre and Chebyshev polynomials.
#
# <!-- dom:FIGURE: [https://raw.githack.com/spectralDNS/spectralutilities/master/figures/DrivenCavityCheb.png] Velocity vectors for Re=100 using Chebyshev. <div id="fig:drivencavitycheb"></div> -->
# <!-- begin figure -->
# <div id="fig:drivencavitycheb"></div>
#
# <p>Velocity vectors for Re=100 using Chebyshev.</p>
# <img src="https://raw.githack.com/spectralDNS/spectralutilities/master/figures/DrivenCavityCheb.png" >
#
# <!-- end figure -->
#
#
#
# ## Complete solver
# <div id="sec:nscomplete"></div>
#
# A complete solver can be found in demo [NavierStokesDrivenCavity.py](https://github.com/spectralDNS/shenfun/blob/master/demo/NavierStokesDrivenCavity.py).
| binder/drivencavity.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Computer Arithmetics
versioninfo()
# ## Units of computer storage
#
# * `bit` = `binary` + `digit` (coined by statistician [John Tukey](https://en.wikipedia.org/wiki/Bit#History)).
# * `byte` = 8 bits.
# * KB = kilobyte = $10^3$ bytes.
# * MB = megabytes = $10^6$ bytes.
# * GB = gigabytes = $10^9$ bytes. Typical RAM size.
# * TB = terabytes = $10^{12}$ bytes. Typical hard drive size. Size of NYSE each trading session.
# * PB = petabytes = $10^{15}$ bytes.
# * EB = exabytes = $10^{18}$ bytes. Size of all healthcare data in 2011 is ~150 EB.
# * ZB = zetabytes = $10^{21}$ bytes.
#
# Julia function `Base.summarysize` shows the amount of memory (in bytes) used by an object.
x = rand(100, 100)
Base.summarysize(x)
# `varinfo()` function prints all variables in workspace and their sizes.
varinfo() # similar to Matlab whos()
# ## Storage of Characters
#
# * Plain text files are stored in the form of characters: `.jl`, `.r`, `.c`, `.cpp`, `.ipynb`, `.html`, `.tex`, ...
# * ASCII (American Code for Information Interchange): 7 bits, only $2^7=128$ characters.
# integers 0, 1, ..., 127 and corresponding ascii character
[0:127 Char.(0:127)]
# * Extended ASCII: 8 bits, $2^8=256$ characters.
# integers 128, 129, ..., 255 and corresponding extended ascii character
# show(STDOUT, "text/plain", [128:255 Char.(128:255)])
[128:255 Char.(128:255)]
# * Unicode: UTF-8, UTF-16 and UTF-32 support many more characters including foreign characters; last 7 digits conform to ASCII.
#
# * [UTF-8](https://en.wikipedia.org/wiki/UTF-8) is the current dominant character encoding on internet.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/c/c4/Utf8webgrowth.svg" width="500" align="center"/>
#
# * Julia supports the full range of UTF-8 characters. You can type many Unicode math symbols by typing the backslashed LaTeX symbol name followed by tab.
# \beta-<tab>
β = 0.0
# \beta-<tab>-\hat-<tab>
β̂ = 0.0
# * For a table of unicode symbols that can be entered via tab completion of LaTeX-like abbreviations: <https://docs.julialang.org/en/v1.1/manual/unicode-input/#Unicode-Input-1>
# ## Integers: fixed-point number system
#
# * Fixed-point number system is a computer model for integers $\mathbb{Z}$.
#
# * The number of bits and method of representing negative numbers vary from system to system.
# - The `integer` type in R has $M=32$ or 64 bits, determined by machine word size.
# - Matlab has `(u)int8`, `(u)int16`, `(u)int32`, `(u)int64`.
#
# * Julia has even more integer types. Using Tom Breloff's `Plots.jl` and `GraphRecipes.jl` packages, we can [visualize the type tree](http://www.breloff.com/Graphs/) under `Integer`
# - Storage for a `Signed` or `Unsigned` integer can be $M = 8, 16, 32, 64$ or 128 bits.
# - GraphRecipes.jl package has a convenience function for plotting the type hiearchy.
# +
using GraphRecipes, Plots
#pyplot(size=(800, 600))
gr(size=(600, 400))
theme(:default)
plot(Integer, method=:tree, fontsize=4)
# -
# ### Signed integers
#
# * First bit indicates sign.
# - `0` for nonnegative numbers
# - `1` for negative numbers
#
# * **Two's complement representation** for negative numbers.
# - Sign bit is set to 1
# - remaining bits are set to opposite values
# - 1 is added to the result
# - Two's complement representation of a negative integer `x` is same as the unsigned integer `2^64 + x`.
# Inspect two's complement bit patterns: a negative x has the same bits as the
# unsigned value 2^64 + x, and multiplying by 2 shifts the bits left by one.
@show typeof(18)
@show bitstring(18)
@show bitstring(-18)
@show bitstring(UInt64(Int128(2)^64 - 18)) == bitstring(-18)
@show bitstring(2 * 18) # shift bits of 18
@show bitstring(2 * -18); # shift bits of -18
# * Two's complement representation respects modular arithmetic nicely.
# Addition of any two signed integers are just bitwise addition, possibly modulo $2^M$
#
# <img src="http://users.dickinson.edu/~braught/courses/cs251f02/classes/images/twosCompWheel.png" width="400" align="center"/>
# * **Range** of representable integers by $M$-bit **signed integer** is $[-2^{M-1},2^{M-1}-1]$.
# - Julia functions `typemin(T)` and `typemax(T)` give the lowest and highest representable number of a type `T` respectively
typemin(Int64), typemax(Int64)
# Representable range [-2^(M-1), 2^(M-1)-1] for each signed integer width.
for T in [Int8, Int16, Int32, Int64, Int128]
    println(T, '\t', typemin(T), '\t', typemax(T))
end
# ### Unsigned integers
#
# * For unsigned integers, the range is $[0,2^M-1]$.
# Representable range [0, 2^M - 1] for each unsigned integer width.
for t in [UInt8, UInt16, UInt32, UInt64, UInt128]
    println(t, '\t', typemin(t), '\t', typemax(t))
end
# ### `BigInt`
#
# Julia `BigInt` type is arbitrary precision.
@show typemax(Int128)
@show typemax(Int128) + 1 # modular arithmetic!
@show BigInt(typemax(Int128)) + 1;
# ### Overflow and underflow for integer arithmetic
#
# R reports `NA` for integer overflow and underflow.
# **Julia outputs the result according to modular arithmetic.**
@show typemax(Int32)
@show typemax(Int32) + Int32(1); # modular arithmetics!
# +
using RCall
R"""
.Machine$integer.max
"""
# -
R"""
M <- 32
big <- 2^(M-1) - 1
as.integer(big)
"""
R"""
as.integer(big+1)
"""
# ## Real numbers: floating-number system
#
# Floating-point number system is a computer model for real numbers.
#
# * Most computer systems adopt the [IEEE 754 standard](https://en.wikipedia.org/wiki/IEEE_floating_point), established in 1985, for floating-point arithmetics.
# For the history, see an [interview with William Kahan](http://www.cs.berkeley.edu/~wkahan/ieee754status/754story.html).
#
# * In the scientific notation, a real number is represented as
# $$\pm d_0.d_1d_2 \cdots d_p \times b^e.$$
# In computer, the _base_ is $b=2$ and the digits $d_i$ are 0 or 1.
#
# * **Normalized** vs **denormalized** numbers. For example, decimal number 18 is
# $$ +1.0010 \times 2^4 \quad (\text{normalized})$$
# or, equivalently,
# $$ +0.1001 \times 2^5 \quad (\text{denormalized}).$$
#
# * In the floating-number system, computer stores
# - sign bit
# - the _fraction_ (or _mantissa_, or _significand_) of the **normalized** representation
# - the actual exponent _plus_ a bias
# +
using GraphRecipes, Plots
#pyplot(size=(800, 600))
gr(size=(600, 400))
theme(:default)
plot(AbstractFloat, method=:tree, fontsize=4)
# -
# ### Double precision (Float64)
#
# <img src="./double-precision-numbers.png" width="500" align="center"/>
#
# - Double precision (64 bits = 8 bytes) numbers are the dominant data type in scientific computing.
#
# - In Julia, `Float64` is the type for double precision numbers.
#
# - First bit is sign bit.
#
# - $p=52$ significant bits.
#
# - 11 exponent bits: $e_{\max}=1023$, $e_{\min}=-1022$, **bias**=1023.
#
# - $e_{\text{min}}-1$ and $e_{\text{max}}+1$ are reserved for special numbers.
#
# - range of **magnitude**: $10^{\pm 308}$ in decimal because $\log_{10} (2^{1023}) \approx 308$.
#
# - **precision** to the $- \log_{10}(2^{-52}) \approx 15$ decimal point.
println("Double precision:")
@show bitstring(Float64(18)) # 18 in double precision
@show bitstring(Float64(-18)); # -18 in double precision
# ### Single precision (Float32)
#
# <img src="./single-precision-numbers.png" width="500" align="center"/>
#
# - In Julia, `Float32` is the type for single precision numbers.
#
# - First bit is sign bit.
#
# - $p=23$ significant bits.
#
# - 8 exponent bits: $e_{\max}=127$, $e_{\min}=-126$, **bias**=127.
#
# - $e_{\text{min}}-1$ and $e_{\text{max}}+1$ are reserved for special numbers.
#
# - range of **magnitude**: $10^{\pm 38}$ in decimal because $\log_{10} (2^{127}) \approx 38$.
#
# - **precision**: $- \log_{10}(2^{-23}) \approx 7$ decimal point.
println("Single precision:")
@show bitstring(Float32(18.0)) # 18 in single precision
@show bitstring(Float32(-18.0)); # -18 in single precision
# ### Half precision (Float16)
#
# <img src="./half-precision-numbers.png" width="200" align="center"/>
#
# - In Julia, `Float16` is the type for half precision numbers.
#
# - First bit is sign bit.
#
# - $p=10$ significant bits.
#
# - 5 exponent bits: $e_{\max}=15$, $e_{\min}=-14$, bias=15.
#
# - $e_{\text{min}}-1$ and $e_{\text{max}}+1$ are reserved for special numbers.
#
# - range of **magnitude**: $10^{\pm 4}$ in decimal because $\log_{10} (2^{15}) \approx 4$.
#
# - **precision**: $\log_{10}(2^{10}) \approx 3$ decimal point.
println("Half precision:")
@show bitstring(Float16(18)) # 18 in half precision
@show bitstring(Float16(-18)); # -18 in half precision
# ### Special floating-point numbers.
#
# - Exponent $e_{\max}+1$ plus a zero mantissa means $\pm \infty$.
@show bitstring(Inf) # Inf in double precision
@show bitstring(-Inf); # -Inf in double precision
# - Exponent $e_{\max}+1$ plus a nonzero mantissa means `NaN`. `NaN` could be produced from `0 / 0`, `0 * Inf`, ...
#
# - In general `NaN ≠ NaN` bitwise. Test whether a number is `NaN` by `isnan` function.
@show bitstring(0 / 0) # NaN
@show bitstring(0 * Inf); # NaN
# - Exponent $e_{\min}-1$ with a zero mantissa represents the real number 0.
@show bitstring(0.0); # 0 in double precision
# - Exponent $e_{\min}-1$ with a nonzero mantissa are for numbers less than $b^{e_{\min}}$.
# Numbers are _denormalized_ in the range $(0,b^{e_{\min}})$ -- **graceful underflow**.
@show nextfloat(0.0) # next representable number
@show bitstring(nextfloat(0.0)); # denormalized
# ### Rounding
#
# * Rounding is necessary whenever a number has more than $p$ significand bits. Most computer systems use the default IEEE 754 _round to nearest_ mode (also called _ties to even_ mode). Julia offers several [rounding modes](https://docs.julialang.org/en/v1/base/math/#Base.Rounding.RoundingMode), the default being [`RoundNearest`](https://docs.julialang.org/en/v1/base/math/#Base.Rounding.RoundNearest). For example, the number 0.1 in decimal system cannot be represented accurately as a floating point number:
# $$ 0.1 = 1.10011001... \times 2^{-4} $$
# half precision Float16, ...110(011...) rounds down to 110
@show bitstring(Float16(0.1))
# single precision Float32, ...100(110...) rounds up to 101
@show bitstring(0.1f0)
# double precision Float64, ...001(100..) rounds up to 010
@show bitstring(0.1);
# For a number with mantissa ending with ...001(100..., all 0 digits after), it's a tie and will be rounded to ...010 to make the mantissa even.
# ### Summary
#
# - Single precision: range $\pm 10^{\pm 38}$ with precision up to 7 decimal digits.
#
# - Double precision: range $\pm 10^{\pm 308}$ with precision up to 16 decimal digits.
#
# - The floating-point numbers do not occur uniformly over the real number line
# <img src="http://www.volkerschatz.com/science/pics/fltscale-wh.png" width="700" align="center"/>
# Each magnitude has same number of representible numbers
#
# - **Machine epsilons** are the spacings of numbers around 1:
# $$\epsilon_{\min}=b^{-p}, \quad \epsilon_{\max} = b^{1-p}.$$
# <img src="./machine_epsilons.png" width="500" align="center"/>
@show eps(Float32) # machine epsilon for a floating point type
@show eps(Float64) # same as eps()
# eps(x) is the spacing after x
@show eps(100.0)
@show eps(0.0) # graceful underflow: spacing right after 0 is the smallest subnormal
# nextfloat(x) and prevfloat(x) give the neighbors of x
@show x = 1.25f0
@show prevfloat(x), x, nextfloat(x)
@show bitstring(prevfloat(x)), bitstring(x), bitstring(nextfloat(x));
# * In R, the variable `.Machine` contains numerical characteristics of the machine.
R"""
.Machine
"""
# * Julia provides `Float16` (half precision), `Float32` (single precision), `Float64` (double precision), and `BigFloat` (arbitrary precision).
# ### Overflow and underflow of floating-point number
#
# * For double precision, the range is $\pm 10^{\pm 308}$. In most situations, underflow (magnitude of result is less than $10^{-308}$) is preferred over overflow (magnitude of result is larger than $10^{308}$). Overflow produces $\pm \infty$. Underflow yields zeros or denormalized numbers.
#
# * E.g., the logit link function is
# $$p = \frac{\exp (x^T \beta)}{1 + \exp (x^T \beta)} = \frac{1}{1+\exp(- x^T \beta)}.$$
# The former expression can easily lead to `Inf / Inf = NaN`, while the latter expression leads to graceful underflow.
#
# * `floatmin` and `floatmax` functions gives largest and smallest _finite_ number represented by a type.
# Finite range (floatmin/floatmax), infinities (typemin/typemax) and machine
# epsilon for each floating point width.
for T in [Float16, Float32, Float64]
    println(T, '\t', floatmin(T), '\t', floatmax(T), '\t', typemin(T),
        '\t', typemax(T), '\t', eps(T))
end
# ### Arbitrary precision
#
# * `BigFloat` in Julia offers arbitrary precision.
@show precision(BigFloat)
@show floatmin(BigFloat)
@show floatmax(BigFloat);
@show BigFloat(π); # default precision for BigFloat is 256 bits
# set precision to 1024 bits
# Temporarily raise BigFloat precision to 1024 bits within the do-block only.
setprecision(BigFloat, 1024) do
    @show BigFloat(π)
end;
# ## Catastrophic cancellation
#
# * **Scenario 1**: Addition or subtraction of two numbers of widely different magnitudes: $a+b$ or $a-b$ where $a \gg b$ or $a \ll b$. We loose the precision in the number of smaller magnitude. Consider
# $$\begin{eqnarray*}
# a &=& x.xxx ... \times 2^{30} \\
# b &=& y.yyy... \times 2^{-30}
# \end{eqnarray*}$$
# What happens when computer calculates $a+b$? We get $a+b=a$!
@show a = 2.0^30
@show b = 2.0^-30
@show a + b == a
# * **Scenario 2**: Subtraction of two nearly equal numbers eliminates significant digits. $a-b$ where $a \approx b$. Consider
# $$\begin{eqnarray*}
# a &=& x.xxxxxxxxxx1ssss \\
# b &=& x.xxxxxxxxxx0tttt
# \end{eqnarray*}$$
# The result is $1.vvvvu...u$ where $u$ are unassigned digits.
a = 1.2345678f0 # rounding
@show bitstring(a) # rounding
b = 1.2345677f0
@show bitstring(b)
@show a - b # correct result should be 1e-7
# * Implications for numerical computation
# - Rule 1: add small numbers together before adding larger ones
# - Rule 2: add numbers of like magnitude together (paring). When all numbers are of same sign and similar magnitude, add in pairs so each stage the summands are of similar magnitude
#     - Rule 3: avoid subtraction of two numbers that are nearly equal
# ### Algebraic laws
#
# Floating-point numbers may violate many algebraic laws we are familiar with, such associative and distributive laws. See Homework 1 problems.
# ## Further reading
#
# * Textbook treatment, e.g., Chapter II.2 of [Computational Statistics](http://ucla.worldcat.org/title/computational-statistics/oclc/437345409&referer=brief_results) by <NAME> (2010).
#
# * [What every computer scientist should know about floating-point arithmetic](http://hua-zhou.github.io/teaching/biostatm280-2017spring/readings/Goldberg91FloatingPoint.pdf) by David Goldberg (1991).
| slides/06-arith/arith.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Strings
# Three equivalent ways to write the same string literal.
string1 = "I learn Python and i love it"
string2 = """I learn Python and i love it"""
string3 = 'I learn Python and i love it'
# Triple quotes let us embed double quotes (note the space before the closing quotes).
quoteInString = """And i said: "awesome" """
print(quoteInString)
# Single-quoted strings also allow embedded double quotes.
quoteInString = 'And i said: "awesome"'
print(quoteInString)
# NOTE(review): this line is a pitfall demo - it appears to raise a SyntaxError,
# since the first three closing quotes end the string and a stray quote remains;
# confirm whether an error (rather than truncated output) is the intended lesson.
quoteInString = """And i said: "awesome"""" #the first three quotation marks are seen as the end of the string
print(quoteInString)
# Backslash-escaping works inside ordinary double-quoted strings.
quoteInString = "And i said: \"awesome\""
print(quoteInString)
print("""useful:
you can use triple quote
for multilines strings""")
# A length-one literal is still just str (Python has no separate char type).
type('a')
# ## Variables in String
#
# We can use the `f` prefix (f-strings) to interpolate variables and expressions into strings.
#String in String
name = "Charles"
# Without the f prefix the placeholder is printed literally...
print("Hey, I am {name}.")
# ...with the f prefix the variable is interpolated into the string.
print(f"Hey, I am {name}.")
#Numbers in String; Int and Float
my_int = 5
my_float = 4.884
# Without the f prefix the braces are printed literally (same pitfall as above).
print("My favorite int is {int} and my favorite float is {float} (kidding i love them all)")
# Bug fix: the demo never showed the working version - the original string had no
# f prefix and referenced {int}/{float} instead of the actual variables.
favorites = f"My favorite int is {my_int} and my favorite float is {my_float} (kidding i love them all)"
print(favorites)
| Learn Python/08. Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# modcell: Mammals
class Mammal:
    """Base mammal; greets with a configurable response.

    The `response` parameter defaults to the original greeting, so existing
    callers (`Mammal()`) behave exactly as before, while subclasses can now
    delegate to this constructor instead of re-assigning the attribute.
    """

    def __init__(self, response='Hello.'):
        # Greeting text printed by hello().
        self.response = response

    def hello(self):
        """Print this mammal's greeting."""
        print(self.response)

class Dog(Mammal):
    """A dog with a food-focused greeting."""

    def __init__(self):
        super().__init__('Henlo, gimme some food.')

class Cat(Mammal):
    """A cat with a morning greeting."""

    def __init__(self):
        super().__init__('Meowning.')
# -
# Instantiate each mammal and print its greeting.
mammal = Mammal()
mammal.hello()
doggo = Dog()
doggo.hello()
kitten = Cat()
kitten.hello()
# +
# modcell: Birds
class Bird:
    """Base bird; calls out with a configurable response.

    The `response` parameter defaults to the original greeting, keeping
    `Bird()` backward compatible while letting subclasses delegate to this
    constructor instead of re-assigning the attribute.
    """

    def __init__(self, response='Hello.'):
        # Call text printed by hello().
        self.response = response

    def hello(self):
        """Print this bird's call."""
        print(self.response)

class Pigeon(Bird):
    """A pigeon that coos."""

    def __init__(self):
        super().__init__('Coo coo.')

class Crow(Bird):
    """A crow that caws."""

    def __init__(self):
        super().__init__('Caw!')
# -
# Print each bird's call.
Pigeon().hello()
Crow().hello()
# +
# modcell: Reptiles
class Reptile:
    """Base reptile; responds with a configurable greeting.

    The `response` parameter defaults to the original greeting, keeping
    `Reptile()` backward compatible while letting subclasses delegate to this
    constructor instead of re-assigning the attribute.
    """

    def __init__(self, response='Hello.'):
        # Text printed by hello().
        self.response = response

    def hello(self):
        """Print this reptile's response."""
        print(self.response)

class Snake(Reptile):
    """A snake that hisses."""

    def __init__(self):
        super().__init__('Hiss.')

class Crocodile(Reptile):
    """A crocodile of few words."""

    def __init__(self):
        super().__init__('...')
# -
# Print each reptile's response.
Snake().hello()
Crocodile().hello()
| module_notebook/animal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Problem Statement
# Wind energy is highly dependent on environmental factors such as wind speed. It is critical for energy traders to successfully predict wind energy production in order to maximize profits. By applying Deep Learning to financial risk, we aim to build a wind energy forecast model with less loss. The goal is to implement a model that optimizes profits for wind farms, minimizing excesses or shortfalls of energy production.
#
# #### Methodology
#
# On this project I have been given the power production model on csv. Using this model we are training the neural network. The input is one dimensional where wind speed is given on first column and output is on 2nd column how much power generated. On this method we need train the model and find out which is the less loss or profitable using the training data.
# +
# Neural networks.
import tensorflow.keras as kr
# Numerical arrays
import numpy as np
# Data frames.
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
# -
# Read the wind-turbine power production csv (columns: speed, power) from GitHub.
csv_url = 'https://raw.githubusercontent.com/babubaskaran/MLANDS-PROJECT-2020/main/powerproduction.csv'
df = pd.read_csv(csv_url)
# +
# Plot style.
plt.style.use("ggplot")
# Plot size.
plt.rcParams['figure.figsize'] = [14, 8]
# -
def f(x):
    """Synthetic linear target y = 4.7*x + 1 used to build train/test labels.

    Replaces the named-lambda assignment (PEP 8 E731) with an equivalent
    `def`; callers still invoke it as f(x), including on pandas Series.
    """
    return 4.7 * x + 1
# Visualize the dataset of Power Production of windmill
plt.plot(df['speed'], df['power'], '.r')
plt.title('Power Production Data Set', fontsize=18)
plt.xlabel('Speed', fontsize=16)
plt.ylabel('Power', fontsize=16)
plt.grid()
plt.show()
# Create a training data frame with x and y values for Power Production
# The x values are the first column, wind speed
# y_i is f(x_i)
# NOTE(review): the target is the synthetic line f(x), not the measured
# df['power'] column - confirm this is intended before calling it a forecast.
train = pd.DataFrame()
train['x'] = (df['speed'])
train['y'] = f(train['x'])
train
# Create a test data frame with the same x values and synthetic targets.
# NOTE(review): identical to `train`, so this does not measure generalization.
test = pd.DataFrame()
test['x'] = (df['speed'])
test['y'] = f(test['x'])
test
# Create a neural network with one neuron (effectively linear regression y = wx + b).
model = kr.models.Sequential()
model.add(kr.layers.Dense(1, input_shape=(1,), activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
#model.add(kr.layers.Dense(2, input_shape=(1,), activation="relu", name="layer1"))
model.compile('adam', loss='mean_squared_error')
# Train the neural network on our training data.
model.fit(train['x'], train['y'], epochs=500)
# Let's plot our predictions of the x values we trained on
plt.plot(test['x'], test['y'], label='actual')
plt.plot(test['x'], model.predict(test['x']), label='prediction')
plt.legend();
# Evaluate the neural network on the test data (same data as training here).
model.evaluate(test['x'], test['y'])
#
# +
# Let's set f to a polynomial instead: y = 2x^2 + 3x + 4.
f = lambda x: 2.0 * x**2 + 3.0 * x + 4.0
#train['x'] = (df['speed'])
#train['y'] = f(train['x'])
poly = pd.DataFrame()
poly['x'] = df['speed']
#poly['x'] = np.linespace(-10.0, 10.0, 1000)
# NOTE(review): the commented line above misspells np.linspace.
poly['y'] = poly['x'].apply(f)
# -
# Have a look at the polynomial target over the wind speeds.
plt.plot(poly['x'], poly['y']);
# +
# Re-build our model (same single linear neuron as before).
model = kr.models.Sequential()
model.add(kr.layers.Dense(1, input_shape=(1,), activation='linear', kernel_initializer="ones", bias_initializer="zeros"))
model.compile('adam', loss='mean_squared_error')
# -
# Fit the data.
# NOTE(review): a single linear neuron cannot represent the quadratic target,
# so the remaining loss here illustrates underfitting by design.
model.fit(poly['x'], poly['y'], epochs=500)
# +
# Plot the predictions (on the training set itself).
plt.plot(poly['x'], poly['y'], label='actual')
plt.plot(poly['x'], model.predict(poly['x']), label='prediction')
plt.legend();
# -
# Change the activation function to a sigmoid (still a single neuron).
model = kr.models.Sequential()
model.add(kr.layers.Dense(1, input_shape=(1,), activation='sigmoid', kernel_initializer="ones", bias_initializer="zeros"))
model.compile('adam', loss='mean_squared_error')
# Without training, let's have a look at the output.
sigdata = pd.DataFrame()
sigdata['x'] = df['speed']
#sigdata['x'] = np.linespace(-10.0, 10.0, 1000)
# NOTE(review): the commented line above misspells np.linspace.
sigdata['y'] = model.predict(sigdata['x'])
# Let's see what the untrained sigmoid response curve looks like.
plt.plot(sigdata['x'], sigdata['y']);
#
# +
# Same polynomial.
f = lambda x: 2.0 * x**2 + 3.0 * x + 4.0
poly = pd.DataFrame()
poly['x'] = df['speed']
poly['y'] = poly['x'].apply(f)
# -
# Train a different model: 50 sigmoid units feeding one linear output unit.
model = kr.models.Sequential()
model.add(kr.layers.Dense(50, input_shape=(1,), activation='sigmoid', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform"))
model.add(kr.layers.Dense(1, activation='linear', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform"))
# NOTE(review): `lr=` is deprecated in newer Keras in favour of `learning_rate=` — verify the installed version.
model.compile(kr.optimizers.Adam(lr=0.001), loss='mean_squared_error')
# Fit the data.
model.fit(poly['x'], poly['y'], epochs=50, batch_size=10)
# Compare actual polynomial values with the network's predictions.
plt.plot(poly['x'], poly['y'], label='actual')
plt.plot(poly['x'], model.predict(poly['x']), label='prediction')
plt.legend();
# The aim is to train a model whose outputs are close to the given data set. Using the function f = lambda x: 4.7 * x + 1, the trained model produced values close to those of the data set, and the test data matched the training model. With the polynomial model the loss was high, so we trained a different model and arrived at predictions close to the actual values, as shown above. As a result, our neural-network model closely matches the actual data with a low loss.
#
# While researching this project, I found that a lot of deep-learning work has already been done in the wind energy business. Training the data set further with different, more detailed models would produce more accurate results, which could make wind energy trading a more profitable business.
#
# Through extensive research on model training, I learned that many kinds of data analysis using deep learning have been done worldwide, which made it more interesting to learn about training different models — for example, coronavirus cases by age and country. That was the second model I researched; since it involves a lot of data, I was not able to assemble the data set easily, and due to time constraints I could not prepare the data set and train the model.
# #### References
#
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
#
# https://towardsdatascience.com/wind-energy-trade-with-deep-learning-time-series-forecasting-580bd41f163
#
# https://www.datacamp.com/community/tutorials/making-http-requests-in-python
#
# https://heartbeat.fritz.ai/linear-regression-using-keras-and-python-7cee2819a60c
#
# https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/
#
# https://keras.io/getting_started/intro_to_keras_for_engineers/
#
# https://datascience.stackexchange.com/questions/51249/training-keras-model-with-multiple-csv-files
#
# https://towardsdatascience.com/thousands-of-csv-files-keras-and-tensorflow-96182f7fabac
#
# https://www.youtube.com/watch?v=aKLcvGdOP6g
#
# https://www.kaggle.com/samfiddis/simple-keras-model
#
#
| powerproduction_prj.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Computer Vision
# :label:`chap_cv`
#
# Many applications in the area of computer vision are closely related to our daily lives, now and in the future, whether medical diagnostics, driverless vehicles, camera monitoring, or smart filters. In recent years, deep learning technology has greatly enhanced computer vision systems' performance. It can be said that the most advanced computer vision applications are nearly inseparable from deep learning.
#
# We have introduced deep learning models commonly used in the area of computer vision in the chapter "Convolutional Neural Networks" and have practiced simple image classification tasks. In this chapter, we will introduce image augmentation and fine tuning methods and apply them to image classification. Then, we will explore various methods of object detection. After that, we will learn how to use fully convolutional networks to perform semantic segmentation on images. Then, we explain how to use style transfer technology to generate images that look like the cover of this book. Finally, we will perform practice exercises on two important computer vision datasets to review the content of this chapter and the previous chapters.
#
# :begin_tab:toc
# - [image-augmentation](image-augmentation.ipynb)
# - [fine-tuning](fine-tuning.ipynb)
# - [bounding-box](bounding-box.ipynb)
# - [anchor](anchor.ipynb)
# - [multiscale-object-detection](multiscale-object-detection.ipynb)
# - [object-detection-dataset](object-detection-dataset.ipynb)
# - [ssd](ssd.ipynb)
# - [rcnn](rcnn.ipynb)
# - [semantic-segmentation-and-dataset](semantic-segmentation-and-dataset.ipynb)
# - [transposed-conv](transposed-conv.ipynb)
# - [fcn](fcn.ipynb)
# - [neural-style](neural-style.ipynb)
# - [kaggle-cifar10](kaggle-cifar10.ipynb)
# - [kaggle-dog](kaggle-dog.ipynb)
# :end_tab:
#
| d2l-en/mxnet/chapter_computer-vision/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="https://habrastorage.org/web/677/8e1/337/6778e1337c3d4b159d7e99df94227cb2.jpg"/>
# ## Специализация "Машинное обучение и анализ данных"
# <center>Автор материала: программист-исследователь Mail.Ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий
# # <center> Capstone проект №1. Идентификация пользователей по посещенным веб-страницам
# <img src='http://i.istockimg.com/file_thumbview_approve/21546327/5/stock-illustration-21546327-identification-de-l-utilisateur.jpg'>
#
# # <center>Неделя 4. Сравнение алгоритмов классификации
#
# Теперь мы наконец подойдем к обучению моделей классификации, сравним на кросс-валидации несколько алгоритмов, разберемся, какие параметры длины сессии (*session_length* и *window_size*) лучше использовать. Также для выбранного алгоритма построим кривые валидации (как качество классификации зависит от одного из гиперпараметров алгоритма) и кривые обучения (как качество классификации зависит от объема выборки).
#
# **План 4 недели:**
# - Часть 1. Сравнение нескольких алгоритмов на сессиях из 10 сайтов
# - Часть 2. Выбор параметров – длины сессии и ширины окна
# - Часть 3. Идентификация конкретного пользователя и кривые обучения
#
#
#
# **В этой части проекта Вам могут быть полезны видеозаписи следующих лекций курса "Обучение на размеченных данных":**
# - [Линейная классификация](https://www.coursera.org/learn/supervised-learning/lecture/jqLcO/linieinaia-klassifikatsiia)
# - [Сравнение алгоритмов и выбор гиперпараметров](https://www.coursera.org/learn/supervised-learning/lecture/aF79U/sravnieniie-alghoritmov-i-vybor-ghipierparamietrov)
# - [Кросс-валидация. Sklearn.cross_validation](https://www.coursera.org/learn/supervised-learning/lecture/XbHEk/kross-validatsiia-sklearn-cross-validation)
# - [Линейные модели. Sklearn.linear_model. Классификация](https://www.coursera.org/learn/supervised-learning/lecture/EBg9t/linieinyie-modieli-sklearn-linear-model-klassifikatsiia)
# - и многие другие
#
# <font color='red'>**Задание:**</font> заполните код в этой тетрадке и выберите ответы в [веб-форме](https://docs.google.com/forms/d/12VB7kmzDoSVzSpQNaJp0tR-2t8K8PynQopP3dypf7i4).
# pip install watermark
# %load_ext watermark
# %watermark -v -m -p numpy,scipy,pandas,matplotlib,statsmodels,sklearn -g
from __future__ import division, print_function
# suppress all Anaconda warnings
import warnings
warnings.filterwarnings('ignore')
from time import time
import itertools
import os
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
from matplotlib import pyplot as plt
import pickle
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score
# Change this to your own path to the data
PATH_TO_DATA = 'capstone_user_identification'
# ## Часть 1. Сравнение нескольких алгоритмов на сессиях из 10 сайтов
# **Загрузим сериализованные ранее объекты *X_sparse_10users* и *y_10users*, соответствующие обучающей выборке для 10 пользователей.**
with open(os.path.join(PATH_TO_DATA,
                       'X_sparse_10users.pkl'), 'rb') as X_sparse_10users_pkl:
    X_sparse_10users = pickle.load(X_sparse_10users_pkl)
with open(os.path.join(PATH_TO_DATA,
                       'y_10users.pkl'), 'rb') as y_10users_pkl:
    y_10users = pickle.load(y_10users_pkl)
# There are over 14 thousand sessions and almost 5 thousand unique visited sites.
X_sparse_10users.shape
# Split into two parts: cross-validate on one, evaluate the fitted model on the other.
X_train, X_valid, y_train, y_valid = train_test_split(X_sparse_10users, y_10users,
                                                      test_size=0.3,
                                                      random_state=17, stratify=y_10users)
# Fixed CV scheme: 3-fold, stratified, shuffled; random_state=17 for reproducibility.
skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=17)
# **Вспомогательная функция для отрисовки кривых валидации после запуска GridSearchCV (или RandomizedCV).**
def plot_validation_curves(param_values, grid_cv_results_):
    """Plot mean train/test CV scores vs. parameter values with ±1 std bands."""
    means = {}
    stds = {}
    lines = {}
    # First pass: draw both mean-score curves (train green, test red).
    for name, color in (('train', 'green'), ('test', 'red')):
        means[name] = grid_cv_results_['mean_%s_score' % name]
        stds[name] = grid_cv_results_['std_%s_score' % name]
        lines[name] = plt.plot(param_values, means[name], '-', label=name, color=color)
    # Second pass: shade the ±1 standard-deviation band around each curve.
    for name in ('train', 'test'):
        mu, sigma = means[name], stds[name]
        plt.fill_between(param_values, mu - sigma, mu + sigma, edgecolor='none',
                         facecolor=lines[name][0].get_color(), alpha=0.2)
    plt.legend()
# **1. Обучите `KNeighborsClassifier` со 100 ближайшими соседями (остальные параметры оставьте по умолчанию, только `n_jobs`=-1 для распараллеливания) и посмотрите на долю правильных ответов на 3-кратной кросс-валидации (ради воспроизводимости используйте для этого объект `StratifiedKFold` `skf`) по выборке `(X_train, y_train)` и отдельно на выборке `(X_valid, y_valid)`.**
from sklearn.neighbors import KNeighborsClassifier

# 100 nearest neighbours as specified above, n_jobs=-1 to parallelise the search.
# (The original line held a placeholder string that was a syntax error.)
knn = KNeighborsClassifier(n_neighbors=100, n_jobs=-1)
# **<font color='red'>Вопрос 1. </font> Посчитайте доли правильных ответов для KNeighborsClassifier на кросс-валидации и отложенной выборке. Округлите каждое до 3 знаков после запятой и введите через пробел.**
''' ВАШ КОД ЗДЕСЬ '''
# **2. Обучите случайный лес (`RandomForestClassifier`) из 100 деревьев (для воспроизводимости `random_state`=17). Посмотрите на OOB-оценку (для этого надо сразу установить `oob_score`=True) и на долю правильных ответов на выборке `(X_valid, y_valid)`. Для распараллеливания задайте `n_jobs`=-1.**
from sklearn.ensemble import RandomForestClassifier

# 100 trees, OOB estimate enabled (oob_score=True) so we get a built-in validation score;
# random_state=17 for reproducibility. (The original line was a syntax-error placeholder.)
forest = RandomForestClassifier(n_estimators=100, oob_score=True, random_state=17, n_jobs=-1)
# **<font color='red'>Вопрос 2. </font> Посчитайте доли правильных ответов для `RandomForestClassifier` при Out-of-Bag оценке и на отложенной выборке. Округлите каждое до 3 знаков после запятой и введите через пробел.**
# TODO: replace the placeholder with the OOB score and holdout accuracy, space-separated.
# NOTE(review): write_answer_to_file is not defined in this notebook — presumably
# provided by the course materials; verify before running.
write_answer_to_file(''' ВАШ КОД ЗДЕСЬ ''',
                     'answer4_2.txt')
# !cat answer4_2.txt
# **3. Обучите логистическую регрессию (`LogisticRegression`) с параметром `C` по умолчанию и `random_state`=17 (для воспроизводимости). Посмотрите на долю правильных ответов на кросс-валидации (используйте объект `skf`, созданный ранее) и на выборке `(X_valid, y_valid)`. Для распараллеливания задайте `n_jobs=-1`.**
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV

# Default C, fixed random_state=17 for reproducibility, all cores.
# (The original line held a placeholder string that was a syntax error.)
logit = LogisticRegression(random_state=17, n_jobs=-1)
# **Почитайте документацию к [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html). Логистическая регрессия хорошо изучена, и для нее существуют алгоритмы быстрого подбора параметра регуляризации `C` (быстрее, чем с `GridSearchCV`).**
#
# **С помощью `LogisticRegressionCV` подберите параметр `C` для `LogisticRegression` сначала в широком диапазоне: 10 значений от 1e-4 до 1e2, используйте `logspace` из `NumPy`. Укажите у `LogisticRegressionCV` параметры `multi_class`='multinomial' и `random_state`=17. Для кросс-валидации используйте объект `skf`, созданный ранее. Для распараллеливания задайте `n_jobs=-1`.**
#
# **Нарисуйте кривые валидации по параметру `C`.**
# +
# %%time
logit_c_values1 = np.logspace(-4, 2, 10)
# Wide search over C as specified above: multinomial objective, the shared
# StratifiedKFold object, fixed seed, all cores.
logit_grid_searcher1 = LogisticRegressionCV(Cs=logit_c_values1, multi_class='multinomial',
                                            cv=skf, random_state=17, n_jobs=-1)
logit_grid_searcher1.fit(X_train, y_train)
# -
# Mean CV accuracy for each of the 10 values of `C`.
logit_mean_cv_scores1 = ''' ВАШ КОД ЗДЕСЬ '''
# Print the best CV accuracy and the corresponding value of `C`.
''' ВАШ КОД ЗДЕСЬ '''
# Plot CV accuracy as a function of `C`.
plt.plot(logit_c_values1, logit_mean_cv_scores1);
# **Теперь то же самое, только значения параметра `C` перебирайте в диапазоне `np.linspace`(0.1, 7, 20). Опять нарисуйте кривые валидации, определите максимальное значение доли правильных ответов на кросс-валидации.**
# +
# %%time
logit_c_values2 = np.linspace(0.1, 7, 20)
# Narrower search around the promising region found above; same CV setup.
logit_grid_searcher2 = LogisticRegressionCV(Cs=logit_c_values2, multi_class='multinomial',
                                            cv=skf, random_state=17, n_jobs=-1)
logit_grid_searcher2.fit(X_train, y_train)
# -
# Mean CV accuracy for each of the values of `C`.
''' ВАШ КОД ЗДЕСЬ '''
# Print the best CV accuracy and the corresponding value of `C`.
''' ВАШ КОД ЗДЕСЬ '''
# Plot CV accuracy as a function of `C`.
# NOTE(review): `logit_mean_cv_scores2` must be assigned by the placeholder above before this runs.
plt.plot(logit_c_values2, logit_mean_cv_scores2);
# Holdout accuracy for logistic regression with the best `C` found on CV.
# (The original line held a placeholder string that was a syntax error.)
logit_cv_acc = accuracy_score(y_valid, logit_grid_searcher2.predict(X_valid))
# **<font color='red'>Вопрос 3. </font>Посчитайте доли правильных ответов для `logit_grid_searcher2` на кросс-валидации для лучшего значения параметра `C` и на отложенной выборке. Округлите каждое до 3 знаков после запятой и выведите через пробел.**
''' ВАШ КОД ЗДЕСЬ '''
# **4. Обучите линейный SVM (`LinearSVC`) с параметром `C`=1 и `random_state`=17 (для воспроизводимости). Посмотрите на долю правильных ответов на кросс-валидации (используйте объект `skf`, созданный ранее) и на выборке `(X_valid, y_valid)`.**
from sklearn.svm import LinearSVC

# Linear SVM with C=1 and fixed seed, as specified above.
# (The original line held a placeholder string that was a syntax error.)
svm = LinearSVC(C=1, random_state=17)
# **С помощью `GridSearchCV` подберите параметр `C` для SVM сначала в широком диапазоне: 10 значений от 1e-4 до 1e4, используйте `linspace` из NumPy. Нарисуйте кривые валидации.**
# +
# %%time
svm_params1 = {'C': np.linspace(1e-4, 1e4, 10)}
# return_train_score=True is required because plot_validation_curves below
# reads 'mean_train_score'/'std_train_score' from cv_results_.
svm_grid_searcher1 = GridSearchCV(svm, svm_params1, cv=skf, n_jobs=-1, return_train_score=True)
svm_grid_searcher1.fit(X_train, y_train)
# -
# Print the best CV accuracy and the corresponding value of `C`.
''' ВАШ КОД ЗДЕСЬ '''
# Draw the validation curves (CV accuracy vs. `C`).
plot_validation_curves(svm_params1['C'], svm_grid_searcher1.cv_results_)
# **Но мы помним, что с параметром регуляризации по умолчанию (С=1) на кросс-валидации доля правильных ответов выше. Это тот случай (не редкий), когда можно ошибиться и перебирать параметры не в том диапазоне (причина в том, что мы взяли равномерную сетку на большом интервале и упустили действительно хороший интервал значений `C`). Здесь намного осмысленней подбирать `C` в районе 1, к тому же, так модель быстрее обучается, чем при больших `C`. **
#
# **С помощью `GridSearchCV` подберите параметр `C` для SVM в диапазоне (1e-3, 1), 30 значений, используйте `linspace` из NumPy. Нарисуйте кривые валидации.**
# +
# %%time
svm_params2 = {'C': np.linspace(1e-3, 1, 30)}
# Finer grid around C=1; return_train_score=True for plot_validation_curves.
svm_grid_searcher2 = GridSearchCV(svm, svm_params2, cv=skf, n_jobs=-1, return_train_score=True)
svm_grid_searcher2.fit(X_train, y_train)
# -
# Print the best CV accuracy and the corresponding value of `C`.
''' ВАШ КОД ЗДЕСЬ '''
# Draw the validation curves (CV accuracy vs. `C`).
plot_validation_curves(svm_params2['C'], svm_grid_searcher2.cv_results_)
# Holdout accuracy of LinearSVC with the best `C` found on CV.
# (The original assignment held a placeholder string that was a syntax error.)
svm_cv_acc = accuracy_score(y_valid, svm_grid_searcher2.best_estimator_.predict(X_valid))
# **<font color='red'>Вопрос 4. </font> Посчитайте доли правильных ответов для `svm_grid_searcher2` на кросс-валидации для лучшего значения параметра `C` и на отложенной выборке. Округлите каждое до 3 знаков после запятой и выведите через пробел.**
''' ВАШ КОД ЗДЕСЬ '''
# ## Часть 2. Выбор параметров – длины сессии и ширины окна
# **Возьмем `LinearSVC`, показавший лучшее качество на кросс-валидации в 1 части, и проверим его работу еще на 8 выборках для 10 пользователей (с разными сочетаниями параметров *session_length* и *window_size*). Поскольку тут уже вычислений побольше, мы не будем каждый раз заново подбирать параметр регуляризации `C`.**
#
# **Определите функцию `model_assessment`, ее документация описана ниже. Обратите внимание на все детали. Например, на то, что разбиение выборки с `train_test_split` должно быть стратифицированным. Не теряйте нигде `random_state`.**
def model_assessment(estimator, path_to_X_pickle, path_to_y_pickle, cv, random_state=17, test_size=0.3):
    '''
    Estimates CV-accuracy for (1 - test_size) share of (X_sparse, y)
    loaded from path_to_X_pickle and path_to_y_pickle and holdout accuracy for (test_size) share of (X_sparse, y).
    The split is made with stratified train_test_split with params random_state and test_size.

    :param estimator – Scikit-learn estimator (classifier or regressor)
    :param path_to_X_pickle – path to pickled sparse X (instances and their features)
    :param path_to_y_pickle – path to pickled y (responses)
    :param cv – cross-validation as in cross_val_score (use StratifiedKFold here)
    :param random_state – for train_test_split
    :param test_size – for train_test_split

    :returns mean CV-accuracy for (X_train, y_train) and accuracy for (X_valid, y_valid) where (X_train, y_train)
    and (X_valid, y_valid) are (1 - test_size) and (testsize) shares of (X_sparse, y).
    '''
    # Load the pickled feature matrix and response vector.
    with open(path_to_X_pickle, 'rb') as X_pkl:
        X_sparse = pickle.load(X_pkl)
    with open(path_to_y_pickle, 'rb') as y_pkl:
        y = pickle.load(y_pkl)
    # Stratified split so class proportions are preserved in both parts.
    X_train, X_valid, y_train, y_valid = train_test_split(
        X_sparse, y, test_size=test_size, random_state=random_state, stratify=y)
    # Mean accuracy over the CV folds on the training part.
    cv_mean_score = cross_val_score(estimator, X_train, y_train, cv=cv).mean()
    # Refit on the whole training part and score on the holdout.
    estimator.fit(X_train, y_train)
    holdout_score = accuracy_score(y_valid, estimator.predict(X_valid))
    return cv_mean_score, holdout_score
# **Make sure the function works (10 users, session length 10, window 10).**
model_assessment(svm_grid_searcher2.best_estimator_,
                 os.path.join(PATH_TO_DATA, 'X_sparse_10users.pkl'),
                 os.path.join(PATH_TO_DATA, 'y_10users.pkl'), skf, random_state=17, test_size=0.3)
# **Примените функцию *model_assessment* для лучшего алгоритма из предыдущей части (а именно, `svm_grid_searcher2.best_estimator_`) и 9 выборок вида с разными сочетаниями параметров *session_length* и *window_size* для 10 пользователей. Выведите в цикле параметры *session_length* и *window_size*, а также результат вывода функции *model_assessment*.
# Удобно сделать так, чтоб *model_assessment* возвращала 3-им элементом время, за которое она выполнилась. На моем ноуте этот участок кода выполнился за 20 секунд. Но со 150 пользователями каждая итерация занимает уже несколько минут.**
# Здесь для удобства стоит создать копии ранее созданных pickle-файлов X_sparse_10users.pkl, X_sparse_150users.pkl, y_10users.pkl и y_150users.pkl, добавив к их названиям s10_w10, что означает длину сессии 10 и ширину окна 10.
# !cp $PATH_TO_DATA/X_sparse_10users.pkl $PATH_TO_DATA/X_sparse_10users_s10_w10.pkl
# !cp $PATH_TO_DATA/X_sparse_150users.pkl $PATH_TO_DATA/X_sparse_150users_s10_w10.pkl
# !cp $PATH_TO_DATA/y_10users.pkl $PATH_TO_DATA/y_10users_s10_w10.pkl
# !cp $PATH_TO_DATA/y_150users.pkl $PATH_TO_DATA/y_150users_s10_w10.pkl
# +
# %%time
estimator = svm_grid_searcher2.best_estimator_
# Evaluate every valid (session_length, window_size) combination for 10 users;
# the window must never exceed the session length.
for window_size, session_length in itertools.product([10, 7, 5], [15, 10, 7, 5]):
    if window_size <= session_length:
        path_to_X_pkl = ''' ВАШ КОД ЗДЕСЬ '''
        path_to_y_pkl = ''' ВАШ КОД ЗДЕСЬ '''
        print ''' ВАШ КОД ЗДЕСЬ '''
# -
# **<font color='red'>Вопрос 5. </font> Посчитайте доли правильных ответов для `LinearSVC` с настроенным параметром `C` и выборки `X_sparse_10users_s15_w5`. Укажите доли правильных ответов на кросс-валидации и на отложенной выборке. Округлите каждое до 3 знаков после запятой и выведите через пробел.**
''' ВАШ КОД ЗДЕСЬ '''
# **Прокомментируйте полученные результаты. Сравните для 150 пользователей доли правильных ответов на кросс-валидации и оставленной выборке для сочетаний параметров (*session_length, window_size*): (5,5), (7,7) и (10,10). На среднем ноуте это может занять до часа – запаситесь терпением, это Data Science :) **
#
# **Сделайте вывод о том, как качество классификации зависит от длины сессии и ширины окна.**
# +
# %%time
estimator = svm_grid_searcher2.best_estimator_
# Compare (5,5), (7,7) and (10,10) on the 150-user data — this can take a long time.
for window_size, session_length in [(5,5), (7,7), (10,10)]:
    path_to_X_pkl = ''' ВАШ КОД ЗДЕСЬ '''
    path_to_y_pkl = ''' ВАШ КОД ЗДЕСЬ '''
    print ''' ВАШ КОД ЗДЕСЬ '''
# -
# **<font color='red'>Вопрос 6. </font> Посчитайте доли правильных ответов для `LinearSVC` с настроенным параметром `C` и выборки `X_sparse_150users`. Укажите доли правильных ответов на кросс-валидации и на отложенной выборке. Округлите каждое до 3 знаков после запятой и выведите через пробел.**
''' ВАШ КОД ЗДЕСЬ '''
# ## Часть 3. Идентификация конкретного пользователя и кривые обучения
# **Поскольку может разочаровать, что многоклассовая доля правильных ответов на выборке из 150 пользовалей невелика, порадуемся тому, что конкретного пользователя можно идентифицировать достаточно хорошо. **
# **Загрузим сериализованные ранее объекты *X_sparse_150users* и *y_150users*, соответствующие обучающей выборке для 150 пользователей с параметрами (*session_length, window_size*) = (10,10). Так же точно разобьем их на 70% и 30%.**
with open(os.path.join(PATH_TO_DATA, 'X_sparse_150users.pkl'), 'rb') as X_sparse_150users_pkl:
    X_sparse_150users = pickle.load(X_sparse_150users_pkl)
with open(os.path.join(PATH_TO_DATA, 'y_150users.pkl'), 'rb') as y_150users_pkl:
    y_150users = pickle.load(y_150users_pkl)
# Same 70/30 stratified split as for the 10-user data.
X_train_150, X_valid_150, y_train_150, y_valid_150 = train_test_split(X_sparse_150users,
                                                                      y_150users, test_size=0.3,
                                                                      random_state=17, stratify=y_150users)
# **Обучите `LogisticRegressionCV` для одного значения параметра `C` (лучшего на кросс-валидации в 1 части, используйте точное значение, не на глаз). Теперь будем решать 150 задач "Один-против-Всех", поэтому укажите аргумент `multi_class`='ovr'. Как всегда, где возможно, указывайте `n_jobs=-1` и `random_state`=17.**
# %%time
# TODO: LogisticRegressionCV with the single exact best C from Part 1,
# multi_class='ovr' (150 one-vs-rest problems), cv=skf, n_jobs=-1, random_state=17.
logit_cv_150users = LogisticRegressionCV ''' ВАШ КОД ЗДЕСЬ '''
logit_cv_150users.fit(X_train_150, y_train_150)
# **Посмотрите на средние доли правильных ответов на кросс-валидации в задаче идентификации каждого пользователя по отдельности.**
# Mean one-vs-rest CV accuracy for each individual user.
cv_scores_by_user = {}
for user_id in logit_cv_150users.scores_:
    print('User {}, CV score: {}'.format ''' ВАШ КОД ЗДЕСЬ '''
# **Результаты кажутся впечатляющими, но возможно, мы забываем про дисбаланс классов, и высокую долю правильных ответов можно получить константным прогнозом. Посчитайте для каждого пользователя разницу между долей правильных ответов на кросс-валидации (только что посчитанную с помощью `LogisticRegressionCV`) и долей меток в *y_train_150*, отличных от ID
# этого пользователя (именно такую долю правильных ответов можно получить, если классификатор всегда "говорит", что это не пользователь с номером $i$ в задаче классификации $i$-vs-All).**
# +
# Per-class counts in the training labels, used for the constant-prediction baseline.
class_distr = np.bincount(y_train_150.astype('int'))
# For each user, compare CV accuracy against the "always not this user" baseline.
for user_id in np.unique(y_train_150):
    ''' ВАШ КОД ЗДЕСЬ '''
# -
# NOTE(review): `acc_diff_vs_constant` must be built by the placeholder loop above.
num_better_than_default = (np.array(list(acc_diff_vs_constant.values())) > 0).sum()
# **<font color='red'>Вопрос 7. </font> Посчитайте долю пользователей, для которых логистическая регрессия на кросс-валидации дает прогноз лучше константного. Округлите до 3 знаков после запятой.**
''' ВАШ КОД ЗДЕСЬ '''
# **Дальше будем строить кривые обучения для конкретного пользователя, допустим, для 128-го. Составьте новый бинарный вектор на основе *y_150users*, его значения будут 1 или 0 в зависимости от того, равен ли ID-шник пользователя 128.**
# Binary target for the "user128-vs-All" task: 1 for user 128, 0 for everyone else.
# (The original line held a placeholder string instead of real code.)
y_binary_128 = (y_150users == 128).astype('int')
# +
from sklearn.model_selection import learning_curve
def plot_learning_curve(val_train, val_test, train_sizes,
                        xlabel='Training Set Size', ylabel='score'):
    """Draw train/validation learning curves with ±1 std bands over CV folds."""
    def _curve_with_band(sizes, scores, **plot_kwargs):
        # Aggregate over CV folds (axis 1): mean line plus a shaded std band.
        mean_scores = scores.mean(1)
        std_scores = scores.std(1)
        drawn = plt.plot(sizes, mean_scores, '-', **plot_kwargs)
        plt.fill_between(sizes, mean_scores - std_scores, mean_scores + std_scores,
                         edgecolor='none', facecolor=drawn[0].get_color(), alpha=0.2)
    _curve_with_band(train_sizes, val_train, label='train')
    _curve_with_band(train_sizes, val_test, label='valid')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(loc='lower right')
# -
# **Посчитайте доли правильных ответов на кросс-валидации в задаче классификации "user128-vs-All" в зависимости от размера выборки. Не помешает посмотреть встроенную документацию для *learning_curve*.**
# %%time
train_sizes = np.linspace(0.25, 1, 20)
estimator = svm_grid_searcher2.best_estimator_
# TODO: call learning_curve(estimator, X_sparse_150users, y_binary_128,
# train_sizes=train_sizes, cv=skf, n_jobs=-1) — see sklearn's learning_curve docs.
n_train, val_train, val_test = learning_curve ''' ВАШ КОД ЗДЕСЬ '''
plot_learning_curve(val_train, val_test, n_train,
                    xlabel='train_size', ylabel='accuracy')
# **Сделайте выводы о том, помогут ли алгоритму новые размеченные данные при той же постановке задачи.**
# На следующей неделе мы вспомним про линейные модели, обучаемые стохастическим градиентным спуском, и порадуемся тому, насколько быстрее они работают. Также сделаем первые (или не первые) посылки в [соревновании](https://inclass.kaggle.com/c/catch-me-if-you-can-intruder-detection-through-webpage-session-tracking2) Kaggle Inclass.
| jupyter/project_alice/week4_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hachoir
import requests
from random import randint
from time import sleep
from bs4 import BeautifulSoup
import pandas as pd
# Maintenant nous avons un résumé au dessus de la fonction
def get_url_aspirateur_tunisianet():
    """Scrape tunisianet.com.tn and return the detail-page URLs of every vacuum cleaner.

    Pages 2-4 of the category listing are fetched first, then the first
    (unpaginated) page, matching the original crawl order.

    :returns: list of product detail-page URLs (may contain duplicates)
    :raises Exception: if any listing page does not return HTTP 200
    """
    base_url = "https://www.tunisianet.com.tn/558-aspirateur-tunisie-vapeur"
    listing_urls = [f"{base_url}?page={page}" for page in range(2, 5)] + [base_url]
    url_aspirateur_detailes = []
    for url in listing_urls:
        response = requests.get(url)
        if response.status_code != 200:
            # Bug fix: the original formatted this message with an undefined name
            # (items_url), so a failed request raised NameError instead.
            raise Exception('Failed to load page {}'.format(url))
        doc = BeautifulSoup(response.text, "html.parser")
        for item in doc.find_all("a", {'class': "thumbnail product-thumbnail first-img"}):
            url_aspirateur_detailes.append(item['href'])
    return url_aspirateur_detailes
url_aspirateur = get_url_aspirateur_tunisianet()  # detail-page URLs of every listed vacuum
len(set(url_aspirateur))  # count of distinct URLs (listings may repeat across pages)
def get_aspirateur(items_url):
    """Return the extra image URLs of one product detail page.

    The first thumbnail is skipped, and nothing is returned for pages
    with a single thumbnail (same behaviour as the original).

    :param items_url: URL of a product detail page on tunisianet.com.tn
    :raises Exception: if the page does not return HTTP 200
    """
    images_aspirateurs = []
    # Download the page
    response = requests.get(items_url)
    # Check that the request succeeded
    if response.status_code != 200:
        raise Exception('Failed to load page {}'.format(items_url))
    # Parse the response with BeautifulSoup
    doc = BeautifulSoup(response.text, 'html.parser')
    # Hoisted out of the loop: the original re-ran find_all on every iteration.
    thumbs = doc.find_all('a', {'class': 'thumb-container'})
    for i, img in enumerate(thumbs):
        if i >= 1 and len(thumbs) > 1:
            images_aspirateurs.append(img['data-image'])
    return images_aspirateurs
# Flatten the per-product image lists into one list of image URLs.
images_aspirateurs = [image
                      for url in url_aspirateur
                      for image in get_aspirateur(url)]
len(images_aspirateurs)  # total number of images collected
# +
import random
import urllib.request
import os

def download_aspirateurs(urls, doc):
    """Download every image URL into images/<doc>/1.jpg, 2.jpg, ...

    Failures on individual files are reported and skipped so that one
    broken link does not abort the whole download.

    :param urls: iterable of image URLs
    :param doc: subdirectory name under images/
    """
    target_dir = os.path.join('images', doc)
    # exist_ok=True: the original crashed with FileExistsError on re-runs.
    os.makedirs(target_dir, exist_ok=True)
    for i, url in enumerate(urls):
        fullname = os.path.join(target_dir, str(i + 1) + ".jpg")
        try:
            urllib.request.urlretrieve(url, fullname)
        except Exception as exc:
            # Was a bare `except: pass`, which silently hid every failure.
            print('Failed to download {}: {}'.format(url, exc))
# -
download_aspirateurs(images_aspirateurs, 'aspirateur')  # save every scraped image under images/aspirateur/
| aspirateur.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import pandas_profiling
from scipy import stats
from scipy import stats
import scipy
# Titanic passenger data (survival flag, class, age, fare, ...).
df = pd.read_csv(
    "https://raw.githubusercontent.com/datasciencedojo/datasets/master/titanic.csv"
)
df
#Barchart: counts of survivors (1) vs non-survivors (0)
df['Survived'].value_counts().head(10).plot.bar()
#Histogram of fares below 100 (extreme fares excluded for readability)
df[df['Fare'] < 100]['Fare'].plot.hist()
import matplotlib
import seaborn as sns  # fix: `sns` was used below but never imported

# Age distribution per survival outcome.
sns.boxplot(x='Survived', y='Age', data=df)
print(df.describe())
print(df.info())
# +
# Pie chart of the first five passengers' ages.
# import the pyplot library
import numpy as np
import matplotlib.pyplot as plotter
# +
# Bug fix: the labels were passed as a set ({...}), whose iteration order is
# arbitrary, so the labels could attach to the wrong wedges; a list keeps them ordered.
plotter.pie(df['Age'].head(5), labels=["A", "B", "C", "D", "E"],
            autopct='% 1.1f %%', shadow=True)
plotter.show()
# +
# Scatter plot of passenger age vs. ticket fare.
# import pyplot and numpy modules
import matplotlib.pyplot as plot
import numpy as np
# Draw the scatter plot
plot.scatter(df.Age, df.Fare)
# Bug fix: the title described an unrelated data set (student GMAT scores)
# while the axes actually plot Titanic Age vs. Fare.
plot.title('Titanic: passenger age vs ticket fare')
plot.xlabel('Age')
plot.ylabel('Fare')
plot.show()
# -
# Import statistics module
import statistics
#Harmonic Mean of the first five passenger ages
print("Harmonic Mean is % s " % (statistics.harmonic_mean(df['Age'].head(5))))
#Arithmetic Mean of the first five passenger ages
print("Arithmetic Mean is % s " % (statistics.mean(df['Age'].head(5))))
# +
#Geometric Mean of the first five values of column 9 (Fare)
from scipy import stats
#axis=0 argument calculates the column wise geometric mean of the dataframe so the result will be
scipy.stats.gmean(df.iloc[:,9:10].head(5),axis=0)
# -
#IQR (interquartile range) of the fares
# NOTE(review): `interpolation=` is deprecated in newer SciPy in favour of `method=` — verify version.
scipy.stats.iqr(df['Fare'], axis=0, interpolation='linear')
| DescriptiveStatistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import xgboost
import numpy as np
import shap
import time
# +
from iml.common import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data, convert_to_instance_with_index
from iml.explanations import AdditiveExplanation
from iml.links import convert_to_link, IdentityLink
from iml.datatypes import convert_to_data, DenseData
import logging
from iml.explanations import AdditiveExplanation
log = logging.getLogger('shap')
from shap import KernelExplainer
class IMEExplainer(KernelExplainer):
    """ This is an implementation of the IME explanation method (aka. Shapley sampling values)

    IME was proposed in "An Efficient Explanation of Individual Classifications using Game Theory",
    Strumbelj and Kononenko, JMLR 2010.
    """

    def __init__(self, model, data, **kwargs):
        # silence warning about large datasets
        level = log.level
        log.setLevel(logging.ERROR)
        super(IMEExplainer, self).__init__(model, data, **kwargs)
        log.setLevel(level)

    def explain(self, incoming_instance, **kwargs):
        """Estimate the Shapley value of every feature for one instance by sampling."""
        # convert incoming input to a standardized iml object
        instance = convert_to_instance(incoming_instance)
        match_instance_to_data(instance, self.data)

        # pick a reasonable number of samples if the user didn't specify how many they wanted
        self.nsamples = kwargs.get("nsamples", 0)
        if self.nsamples == 0:
            self.nsamples = 1000 * self.P

        # divide up the samples among the features (each feature gets an even count)
        self.nsamples_each = np.ones(self.P, dtype=np.int64) * 2 * (self.nsamples // (self.P * 2))
        for i in range((self.nsamples % (self.P * 2)) // 2):
            self.nsamples_each[i] += 2

        model_out = self.model.f(instance.x)

        # explain every feature
        phi = np.zeros(self.P)
        # Bug fix: the buffer width previously read a global `X` that happened to
        # exist in the calling notebook; use the explainer's own background data.
        self.X_masked = np.zeros((self.nsamples_each.max(), self.data.data.shape[1]))
        for i in range(self.P):
            phi[i] = self.ime(i, self.model.f, instance.x, self.data.data, nsamples=self.nsamples_each[i])
        phi = np.array(phi)

        return AdditiveExplanation(self.link.f(1), self.link.f(1), phi, np.zeros(len(phi)), instance, self.link,
                                   self.model, self.data)

    def ime(self, j, f, x, X, nsamples=10):
        """Sampling estimate of the Shapley value of feature j for instance x.

        Builds paired samples: in the first half feature j keeps its value from x,
        in the mirrored second half it (and the features after it in the random
        permutation) is replaced from a random background row.
        """
        assert nsamples % 2 == 0, "nsamples must be divisible by 2!"
        X_masked = self.X_masked[:nsamples, :]
        inds = np.arange(X.shape[1])

        for i in range(0, nsamples // 2):
            np.random.shuffle(inds)
            pos = np.where(inds == j)[0][0]
            rind = np.random.randint(X.shape[0])
            X_masked[i, :] = x
            X_masked[i, inds[pos + 1:]] = X[rind, inds[pos + 1:]]
            X_masked[-(i + 1), :] = x
            X_masked[-(i + 1), inds[pos:]] = X[rind, inds[pos:]]

        evals = f(X_masked)
        # mean difference between "feature j kept" and "feature j replaced" pairs
        # (the unused evals_on/evals_off locals from the original were dropped)
        return np.mean(evals[:nsamples // 2] - evals[nsamples // 2:])
# +
# Benchmark cell: for an increasing feature count M, time how long
# TreeExplainer, KernelExplainer and IMEExplainer each need to reach a
# comparable estimation accuracy (std dev across 50 repeats below 1/20 of
# the Tree SHAP value spread). Results are averaged over nreps runs.
from tqdm import tqdm

tree_shap_times = []
kernel_shap_times = []
ime_times = []
nreps = 10
N = 1000
X_full = np.random.randn(N, 20)
y = np.random.randn(N)
for M in range(4,8):
    ts = []
    tree_shap_time = 0
    kernel_shap_time = 0
    ime_time = 0
    for k in tqdm(range(nreps)):
        # print()
        #+ ((X > 0).sum(1) % 2)
        X = X_full[:,:M]
        model = xgboost.train({"eta": 1}, xgboost.DMatrix(X, y), 1000)
        def f(x):
            return model.predict(xgboost.DMatrix(x))
        start = time.time()
        shap_values = shap.TreeExplainer(model).shap_values(X)
        tree_shap_time += time.time() - start
        # print("Tree SHAP:", tree_shap_time, "seconds")
        shap_stddev = shap_values.std(0)[:-1].mean()
        # print("mean std dev of SHAP values over samples:", shap_stddev)
        # grow nsamples by 50% until the KernelExplainer estimate is stable enough
        e = shap.KernelExplainer(f, X.mean(0).reshape(1,M))
        nsamples = 200
        # print(shap_stddev/20)
        for j in range(2000):
            #print(nsamples)
            start = time.time()
            std_dev = np.vstack([e.shap_values(X[:1,:], silent=True, nsamples=nsamples) for i in range(50)]).std(0)[:-1].mean()
            iter_time = (time.time() - start)/50
            #print(std_dev)
            if std_dev < shap_stddev/20:
                # print("KernelExplainer", nsamples)
                # print("KernelExplainer", std_dev)
                # print("KernelExplainer", iter_time, "seconds")
                # iter_time is per single explanation; x1000 projects the cost
                # of explaining 1000 samples
                kernel_shap_time += iter_time * 1000
                break
            nsamples += int(nsamples * 0.5)
        # same search for the IME explainer
        e = IMEExplainer(f, X.mean(0).reshape(1,M))
        nsamples = 200
        for j in range(2000):
            # print()
            # print(nsamples)
            start = time.time()
            std_dev = np.vstack([e.shap_values(X[:1,:], silent=True, nsamples=nsamples) for i in range(50)]).std(0)[:-1].mean()
            # print("time", (time.time() - start)/50)
            # print(std_dev)
            iter_time = (time.time() - start)/50
            if std_dev < shap_stddev/20:
                # print("IMEExplainer", nsamples)
                # print("IMEExplainer", std_dev)
                # print("IMEExplainer", iter_time, "seconds")
                ime_time += iter_time * 1000
                break
            nsamples += int(nsamples * 0.5)
    tree_shap_times.append(tree_shap_time / nreps)
    kernel_shap_times.append(kernel_shap_time / nreps)
    ime_times.append(ime_time / nreps)
    print("TreeExplainer", tree_shap_times[-1])
    print("KernelExplainer", kernel_shap_times[-1])
    print("IMEExplainer", ime_times[-1])
# -
# Ad-hoc sanity checks on the last trained model / explainers.
model.predict(xgboost.DMatrix(X)).mean()
shap.TreeExplainer(model).shap_values(X)
e = shap.KernelExplainer(f, X.mean(0).reshape(1,M))
np.vstack([e.shap_values(X[:1,:], silent=True, nsamples=100) for i in range(50)]).std(0)[:-1].mean()
# Tune KernelExplainer's nsamples (growing by 20% per step) until the
# estimate's std dev across 50 repeated explanations drops below 1/20 of
# the Tree SHAP value spread.
e = shap.KernelExplainer(f, X.mean(0).reshape(1,M))
nsamples = 200
print(shap_stddev/20)
for j in range(2000):
    print(nsamples)
    start = time.time()
    std_dev = np.vstack([e.shap_values(X[:1,:], silent=True, nsamples=nsamples) for i in range(50)]).std(0)[:-1].mean()
    # bugfix: the opening parenthesis around the elapsed time was missing
    # ("time.time() - start)/50"), which made this cell a SyntaxError
    iter_time = (time.time() - start)/50
    print(std_dev)
    if std_dev < shap_stddev/20:
        print(nsamples)
        break
    nsamples += int(nsamples * 0.2)
# Same nsamples search for the IME explainer, with verbose printing.
e = IMEExplainer(f, X.mean(0).reshape(1,M))
nsamples = 200
print(shap_stddev/20)
for j in range(2000):
    print()
    print(nsamples)
    start = time.time()
    std_dev = np.vstack([e.shap_values(X[:1,:], silent=True, nsamples=nsamples) for i in range(50)]).std(0)[:-1].mean()
    print("time", (time.time() - start)/50)
    print(std_dev)
    if std_dev < shap_stddev/20:
        print(nsamples)
        break
    nsamples += int(nsamples * 0.2)
# scratch arithmetic left from interactive exploration
0.56939 * 1000
# spread of single-feature IME estimates over repeated runs
np.std([IMEExplainer(f, X.mean(0).reshape(1,M)).shap_values(X[:1,:], silent=True, nsamples=1000)[0,0] for i in range(10)])
[shap.KernelExplainer(f, X.mean(0).reshape(1,M)).shap_values(X[:1,:], silent=True, nsamples=1000)[0,0] for i in range(100)]
# +
# Time a full-dataset explanation with each explainer.
def f(x):
    return model.predict(xgboost.DMatrix(x))
start = time.time()
shap_values2 = shap.KernelExplainer(f, X.mean(0).reshape(1,M)).shap_values(X)
print(time.time() - start)
# -
start = time.time()
IMEExplainer(f, X.mean(0).reshape(1,M)).shap_values(X)
print(time.time() - start)
| notebooks/tree_explainer/tree_shap_paper/Performance comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Universal approximation properties
# + tags=["hide-input"]
from IPython.display import IFrame
IFrame(src= "https://cdnapisec.kaltura.com/p/2356971/sp/235697100/embedIframeJs/uiconf_id/41416911/partner_id/2356971?iframeembed=true&playerId=kaltura_player&entry_id=1_yk2026jt&flashvars[streamerType]=auto&flashvars[localizationCode]=en&flashvars[leadWithHTML5]=true&flashvars[sideBarContainer.plugin]=true&flashvars[sideBarContainer.position]=left&flashvars[sideBarContainer.clickToClose]=true&flashvars[chapters.plugin]=true&flashvars[chapters.layout]=vertical&flashvars[chapters.thumbnailRotator]=false&flashvars[streamSelector.plugin]=true&flashvars[EmbedPlayer.SpinnerTarget]=videoHolder&flashvars[dualScreen.plugin]=true&flashvars[hotspots.plugin]=1&flashvars[Kaltura.addCrossoriginToIframe]=true&&wid=1_to15zpm3",width='800', height='500')
# -
# ## Download the lecture notes here: [Notes](https://sites.psu.edu/math452/files/2022/01/C05_-Universal-approximation-properties.pdf)
# ## Approximation Properties of Neural Network Function Class
#
#
# ### Qualitative convergence results
#
# ```{prf:theorem} Universal Approximation Property of Shallow Neural Networks
# :label: thm0305_1
# Let $\sigma$ be a Riemann integrable function and
# $\sigma \in L_{l o c}^{\infty}(\mathbb{R}) .$ Then $\Sigma_{d}(\sigma)$
# is dense in $C(\Omega)$ for any compact $\Omega \subset \mathbb{R}^{n}$
# if and only if $\sigma$ is not a polynomial!
# Namely, if $\sigma$ is not a polynomial, then, for any
# $f \in C(\bar{\Omega})$, there exists a sequence
# $\phi_{n} \in \mathrm{DNN}_{1}$ such that
#
# $$
# \max _{x \in \bar{\Omega}}\left|\phi_{n}(x)-f(x)\right| \rightarrow 0, \quad n \rightarrow \infty
# $$
# ```
#
# ```{prf:proof} Let us first prove the theorem in a special case that
# $\sigma \in C^{\infty}(\mathbb{R}) .$ Since
# $\sigma \in C^{\infty}(\mathbb{R})$, it follows that for every
# $\omega, b$,
#
# $$
# \frac{\partial}{\partial \omega_{j}} \sigma(\omega \cdot x+b)=\lim _{n \rightarrow \infty} \frac{\sigma\left(\left(\omega+h e_{j}\right) \cdot x+b\right)-\sigma(\omega \cdot x+b)}{h} \in \bar{\Sigma}_{d}(\sigma)
# $$
#
# for all $j=1, \ldots, d$.
#
# By the same argument, for
# $\alpha=\left(\alpha_{1}, \ldots, \alpha_{d}\right)$
#
# $$
# D_{\omega}^{\alpha} \sigma(\omega \cdot x+b) \in \bar{\Sigma}_{d}(\sigma)
# $$
#
# for all $k \in \mathbb{N}, j=1, \ldots, d, \omega \in \mathbb{R}^{d}$
# and $b \in \mathbb{R}$.
#
# Now
#
# $$
# D_{\omega}^{\alpha} \sigma(\omega \cdot x+b)=x^{\alpha} \sigma^{(k)}(\omega \cdot x+b)
# $$
#
# where $k=|\alpha|$ and
# $x^{\alpha}=x_{1}^{\alpha_{1}} \cdots x_{d}^{\alpha_{d}}$. Since
# $\sigma$ is not a polynomial there exists a $\theta_{k} \in \mathbb{R}$
# such that $\sigma^{(k)}\left(\theta_{k}\right) \neq 0$. Taking
# $\omega=0$ and $b=\theta_{k}$, we thus see that
# $x_{j}^{k} \in \bar{\Sigma}_{d}(\sigma) .$ Thus, all polynomials of the
# form $x_{1}^{k_{1}} \cdots x_{d}^{k_{d}}$ are in
# $\bar{\Sigma}_{d}(\sigma)$. This implies that $\bar{\Sigma}_{d}(\sigma)$
# contains all polynomials. By Weierstrass’s Theorem, it follows that $\bar{\Sigma}_{d}(\sigma)$
# contains $C(K)$ for each compact $K \subset \mathbb{R}^{n} .$ That is
# $\Sigma_{d}(\sigma)$ is dense in $C\left(\mathbb{R}^{d}\right) .$
#
# Now we consider the case that $\sigma$ is only Riemann integrable.
# Consider the mollifier $\eta$
#
# $$
# \eta(x)=\frac{1}{\sqrt{\pi}} e^{-x^{2}}
# $$
#
# Set $\eta_{\epsilon}=\frac{1}{\epsilon} \eta\left(\frac{x}{\epsilon}\right) .$
# Then consider $\sigma_{\eta_{\epsilon}}$
#
# $$
# \sigma_{\eta_{\epsilon}}(x):=\sigma * \eta_{\epsilon}(x)=\int_{\mathbb{R}} \sigma(x-y) \eta_{\epsilon}(y) d y
# $$ (eq1_4)
#
# for a given activation function $\sigma$
# It can be seen that
# $\sigma_{\eta_{\epsilon}} \in C^{\infty}(\mathbb{R}) .$ We first notice
# that
# $\bar{\Sigma}_{1}\left(\sigma_{\eta_{\epsilon}}\right) \subset \bar{\Sigma}_{1}(\sigma)$,
# which can be done easily by checking the Riemann sum of
# $\sigma_{\eta_{\epsilon}}(x)=\int_{\mathbb{R}} \sigma(x-y) \eta_{\epsilon}(y) d y$
# is in $\bar{\Sigma}_{1}(\sigma)$.
#
# Following the argument in the beginning of the proof proposition, we
# want to show that
# $\bar{\Sigma}_{1}\left(\sigma_{\eta_{\epsilon}}\right)$
# contains all polynomials. For this purpose, it suffices to show that
# there exists $\theta_{k}$ and $\sigma_{\eta_{\epsilon}}$ such that
# $\sigma_{\eta_{\epsilon}}^{(k)}\left(\theta_{k}\right) \neq 0$ for each
# $\mathrm{k}$. If not, then there must be $k_{0}$ such that
# $\sigma_{\eta_{\epsilon}}^{\left(k_{0}\right)}(\theta)=0$ for all
# $\theta \in \mathbb{R}$ and all $\epsilon>0$. Thus
# $\sigma_{\eta_{\epsilon}}$ ’s are all polynomials with degree at most
# $k_{0}-1 .$ In particular, it is known that
# $\eta_{\epsilon} \in C_{0}^{\infty}(\mathbb{R})$ and
# $\sigma * \eta_{\epsilon}$ uniformly converges to $\sigma$ on compact
# sets in $\mathbb{R}$ and $\sigma * \eta_{\epsilon}$ ’s are all
# polynomials of degree at most $k_{0}-1 .$ Polynomials of a fixed degree
# form a closed linear subspace, therefore $\sigma$ is also a polynomial
# of degree at most $k_{0}-1$, which leads to contradiction.
# ```
#
# ### Properties of polynomials using Fourier transform
#
#
# We make use of the theory of tempered distributions and
# we begin by collecting some results of independent interest, which will
# also be important later. We begin by noting that an activation function
# $\sigma$ which satisfies a polynomial growth condition
# $|\sigma(x)| \leq C(1+|x|)^{n}$ for some constants $C$ and $n$ is a
# tempered distribution. As a result, we make this assumption on our
# activation functions in the following theorems. We briefly note that
# this condition is sufficient, but not necessary (for instance an
# integrable function need not satisfy a pointwise polynomial growth
# bound) for $\sigma$ to be represent a tempered distribution.
#
# We begin by studying the convolution of $\sigma$ with a Gaussian
# mollifier. Let $\eta$ be a Gaussian mollifier
#
# $$
# \eta(x)=\frac{1}{\sqrt{\pi}} e^{-x^{2}}
# $$
#
# Set $\eta_{\epsilon}=\frac{1}{\epsilon} \eta\left(\frac{x}{\epsilon}\right) .$
# Then consider $\sigma_{\epsilon}$
#
# $$
# \sigma_{\epsilon}(x):=\sigma * \eta_{\epsilon}(x)=\int_{\mathbb{R}} \sigma(x-y) \eta_{\epsilon}(y) d y
# $$
#
# for a given activation function $\sigma$.
#
# It is clear that $\sigma_{\epsilon} \in C^{\infty}(\mathbb{R}) .$
# Moreover, by considering the Fourier transform (as a tempered
# distribution) we see that
#
# $$
# \hat{\sigma}_{\epsilon}=\hat{\sigma} \hat{\eta}_{\epsilon}=\hat{\sigma} \eta_{\epsilon^{-1}}
# $$
#
# We begin by stating a lemma which characterizes the set of polynomials
# in terms of their Fourier transform.
#
# ```{prf:lemma}
# Given a tempered distribution $\sigma$, the following
# statements are equivalent:
#
# 1. $\sigma$ is a polynomial
#
# 2. $\sigma_{\epsilon}$ given by {eq}`eq1_4` is a polynomial for any
# $\epsilon>0$.
#
# 3. $\operatorname{supp}(\hat{\sigma}) \subset\{0\}$.
# ```
# ```{prf:proof}
# We begin by proving that (3) and (1) are equivalent. This follows
# from a characterization of distributions supported at a single point. In particular, a
# distribution supported at 0 must be a finite linear combination of Dirac
# masses and their derivatives. In particular, if $\hat{\sigma}$ is
# supported at 0 , then
#
# $$
# \hat{\sigma}=\sum_{i=1}^{n} a_{i} \delta^{(i)}
# $$
#
# Taking the inverse Fourier transform and noting that the inverse Fourier transform of
# $\delta^{(i)}$ is $c_{i} x^{i}$, we see that $\sigma$ is a polynomial.
# This shows that (3) implies (1), for the converse we simply take the
# Fourier transform of a polynomial and note that it is a finite linear
# combination of Dirac masses and their derivatives.
#
# Finally, we prove the equivalence of (2) and (3). For this it suffices
# to show that $\hat{\sigma}$ is supported at 0 iff
# $\hat{\sigma}_{\epsilon}$ is supported at $0 .$ This follows from
# equation $1.5$ and the fact that $\eta_{\epsilon^{-1}}$ is nowhere
# vanishing.
# ```
#
#
#
| _build/jupyter_execute/Module3/m3_05/m3_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: testenv
# language: python
# name: testenv
# ---
import os
# Hide all GPUs from TensorFlow so the notebook runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
# +
import pretty_midi
import midi
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dense, Input, Lambda, Concatenate, LSTM
from keras.optimizers import Adam
from keras import backend as K
import copy
import tensorflow as tf
#import tensorflow_probability as tfp # for tf version 2.0.0, tfp version 0.8 is needed
import numpy as np
import matplotlib.pyplot as plt
import csv
from sys import stdout
import random
import librosa.display
import pypianoroll
# My code
from loading import *
from models import *
from data import *
from midi_to_statematrix import *
# %matplotlib inline
# -
print("TensorFlow version: {}".format(tf.__version__))
print("GPU is available: {}".format(tf.test.is_gpu_available()))
# # Load data
# Path to the MAESTRO v2 metadata CSV (relative to the notebook directory).
file = 'maestro-v2.0.0/maestro-v2.0.0.csv'
# +
# Call data class
data = DataObject(file, what_type = 'train', train_tms = 100, test_tms = 100, fs = 20, window_size = 15)
# Create a batch class which we will iterate over
train_batch = Batch(data, batch_size = 16, songs_per_batch = 4)
# -
# Take the first batch and build model-ready features from it.
curr_batch = train_batch.data
curr_batch.featurize(use_biaxial = True)
# # Define model
def my_binary_loss_seq(y_true, y_pred):
    """Binary cross-entropy over sequences, flattened to rows of 78 note values."""
    # Collapse all leading dimensions so each 78-wide note vector is one row.
    flat_true = tf.reshape(y_true, [-1, 78])
    flat_pred = tf.reshape(y_pred, [-1, 78])
    bce_fn = tf.keras.losses.BinaryCrossentropy()
    return bce_fn(flat_true, flat_pred)
model = biaxial_pn_encoder_concat_deeplstm(curr_batch, 32)
model.compile(loss = tf.keras.losses.BinaryCrossentropy(), optimizer = Adam(learning_rate=0.0005))
# NOTE(review): the conv2d model below overwrites the LSTM model above —
# only the last-built model is trained in this notebook run.
model = biaxial_target_conv2d_model(curr_batch)
model.compile(loss = tf.keras.losses.BinaryCrossentropy(), optimizer = Adam(learning_rate=0.0005))
curr_batch.target_train.shape
model.summary()
# +
def inputize(curr_batch):
    """Build [context, teacher-forced target] model inputs from a batch.

    The target is rolled one timestep to the right, and the first step is
    seeded with the last frame of the context.
    """
    shifted = tf.roll(curr_batch.target, shift=1, axis=1).numpy()
    shifted[:, 0, :] = curr_batch.context[:, -1, -1, :]
    return [curr_batch.context, shifted]
def generate(train_batch):
    """Endless batch generator so model.fit_generator can be used."""
    def as_f32(arr):
        # all model inputs/targets are fed as float32 tensors
        return tf.convert_to_tensor(arr, dtype=tf.float32)

    while True:
        batch = next(train_batch)
        batch.featurize(use_biaxial=False)
        model_inputs = [as_f32(batch.context), as_f32(batch.target_train)]
        yield (model_inputs, as_f32(batch.target_pred))
# -
# Smoke-test a single prediction before training.
out = model.predict([tf.convert_to_tensor(curr_batch.context, dtype = tf.float32),
                     tf.convert_to_tensor(curr_batch.target_train, dtype = tf.float32)], steps = 1)
DataObject.drop_articulation(curr_batch.target_pred)
out
# Train from the generator and save the weights under two names.
history = model.fit_generator(
    generate(train_batch),
    steps_per_epoch=1024,
    epochs=5)
model.save_weights('model_biaxial_oneseq_nofeat.h5')
model.save_weights('model_rolled_target.h5')
history.history['loss']
# Inspect a few predictions from the trained model.
out = model.predict([tf.convert_to_tensor(curr_batch.context, dtype = tf.float32),
                     tf.convert_to_tensor(curr_batch.target_train, dtype = tf.float32)], steps = 1)
curr_batch.target_pred[57,2,:]
out[57,5,:]
out[:,:,:].max()
curr_batch.target_train.shape
def get_decoder_simple(model):
    """Rebuild the decoder LSTM stack of `model` as a standalone Keras Model.

    The new model's input matches the embedding produced by `model`'s
    "lambda_2" layer (variable timesteps). Any layer in the new model that
    shares a name with a layer of `model` gets that layer's trained weights.
    """
    input_shape = model.get_layer("lambda_2").output.shape
    input_embedding = Input(batch_shape =
                            (input_shape[0],   # batch_size
                             None,             # timesteps (variable length)
                             input_shape[2]),  # note_size
                            name="Input_layer_embedding")
    decoder, _, _ = LSTM(units = 512,
                         return_sequences = True,
                         return_state = True,
                         activation = 'tanh',
                         name = 'Decoder_lstm_1')(input_embedding)
    decoder = LSTM(units = 88,
                   activation = 'sigmoid',
                   name = 'Decoder_lstm_2')(decoder)
    new_model = Model(input_embedding, decoder)

    # Copy trained weights layer-by-layer, matched by layer name.
    # Bugfix: model.get_weights() returns one flat list of weight arrays for
    # the whole model, so indexing it by layer position handed set_weights a
    # single (wrong) array; copy from the source layer's own get_weights().
    names = {layer.name: idx for idx, layer in enumerate(model.layers)}
    for layer in new_model.layers:
        if layer.name in names:
            layer.set_weights(model.layers[names[layer.name]].get_weights())
    return new_model
# +
# Scratch cell: extracting the decoder is currently disabled.
#decoder = get_decoder_simple(model)
#decoder.summary()
#weights_list = model.get_weights()
# -
# Call the model directly (eager) on the current batch.
out = model([curr_batch.context, curr_batch.target_train])
# # Create music! (inefficient version) + features
def load_model(file, curr_batch, modelname, *modelparams):
    """Build a model via the `modelname` factory, then load weights from `file`."""
    instance = modelname(curr_batch, *modelparams)
    instance.load_weights(file)
    return instance
#my_model_name = "biaxial_window_feature_15_window.h5"
my_model_name = 'biaxial_pn_encoder_concat_deeplstm.h5'
import pickle
# Persist/restore a fixed batch so the same example can be re-plotted later.
pickle.dump(curr_test_batch, file = open('good_batch_for_graph_66.p', 'wb'))
favorite_color = pickle.load( open( "good_batch_for_graph_66.p", "rb" ) )
# +
file = 'maestro-v2.0.0/maestro-v2.0.0.csv'
# Get a batch we want to predict
data_test = DataObject(file, what_type = 'train', train_tms = 40, test_tms = 20, fs = 20, window_size = 9)
# Create a batch class which we will iterate over
test_batch = Batch(data_test, batch_size = 64, songs_per_batch = 4)
curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch.target_split = 0
curr_test_batch.window_size = 1
curr_test_batch.featurize(use_biaxial = True, out_seq = False)
# -
# Restore the saved batch and predict with the loaded model.
curr_test_batch = pickle.load(open( "good_batch_for_graph_66.p", "rb" ))
#model = load_model(my_model_name, curr_test_batch, biaxial_target_model, 20)
model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
prediction = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
                            tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
                           steps = 1)
# +
#my_model_name = "biaxial_window_feature_15_window.h5"
my_model_name = 'biaxial_pn_encoder_concat_deeplstm_cont.h5'
#curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch = copy.deepcopy(favorite_color)
#curr_test_batch.target = np.zeros((64, 20, 78, 2))
curr_test_batch.target_split = 0
curr_test_batch.window_size = data_test.test_tms
curr_test_batch.featurize(use_biaxial = True, out_seq = False)
#model = load_model(my_model_name, curr_test_batch, biaxial_target_model, 20)
model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
prediction = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
                            tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
                           steps = 1)
# +
# Sequential prediction: grow the visible target window one step at a time
# and keep only the newest predicted frame of each run.
my_model_name = 'biaxial_pn_encoder_concat_deeplstm_cont.h5'
curr_test_batch = copy.deepcopy(test_batch.data)
seq_preds = np.zeros((64, 20, 78))
seq_preds[:,0,:] = DataObject.drop_articulation3d(curr_test_batch.target[:,0,:,:])
for win in range(1,20):
    print(win)
    curr_test_batch = copy.deepcopy(test_batch.data)
    curr_test_batch.target_split = 0
    #curr_test_batch.window_size = data_test.test_tms
    curr_test_batch.window_size = win
    curr_test_batch.featurize(use_biaxial = True, out_seq = False)
    #model = load_model(my_model_name, curr_test_batch, biaxial_target_model, 20)
    model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
    seq_preds[:,win,:] = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
                                        tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
                                       steps = 1)[:,-1,:]
# -
prediction.shape
def plot_pianoroll(
    ax,
    pianoroll,
    is_drum=False,
    beat_resolution=None,
    downbeats=None,
    preset="default",
    cmap="Blues",
    xtick="auto",
    ytick="octave",
    xticklabel=True,
    yticklabel="auto",
    tick_loc=None,
    tick_direction="in",
    label="both",
    grid="both",
    grid_linestyle=":",
    grid_linewidth=0.5,
    num_notes = 78,
    x_start = 1,
    alpha = 1,
):
    """
    Plot a pianoroll given as a numpy array.

    Parameters
    ----------
    ax : matplotlib.axes.Axes object
        Axes the pianoroll will be plotted on.
    pianoroll : np.ndarray
        Pianoroll to plot. Values should be in [0, 1] for float data and
        [0, 127] for integer data.
        - 2D array: shape=(num_time_step, num_pitch).
        - 3D array: shape=(num_time_step, num_pitch, num_channel), where
          channels can be RGB or RGBA.
    is_drum : bool
        Whether this is a percussion track. Defaults to False.
    beat_resolution : int
        Time steps per beat. Required and only effective when `xtick` is
        'beat'.
    downbeats : list
        Per-time-step flags for downbeats (first step of a bar).
    preset : {'default', 'plain', 'frame'}
        Theme preset: 'default' shows ticks/grid/labels, 'frame' hides ticks
        and grid, 'plain' hides both axes.
    cmap : `matplotlib.colors.Colormap`
        Colormap for :func:`matplotlib.pyplot.imshow`. Defaults to 'Blues'.
        Only effective when `pianoroll` is 2D.
    xtick : {'auto', 'beat', 'step', 'off'}
        What to use as x-axis ticks. 'auto' resolves to 'beat' when
        `beat_resolution` is given, otherwise 'step'.
    ytick : {'octave', 'pitch', 'off'}
        What to use as y-axis ticks. Defaults to 'octave'.
    xticklabel : bool
        Whether to label x-axis ticks. Only effective when `xtick` is not
        'off'.
    yticklabel : {'auto', 'name', 'number', 'off'}
        'name' uses octave/pitch names (key names when `is_drum`), 'number'
        uses pitch numbers, 'auto' picks by `ytick`.
    tick_loc : tuple or list
        Tick locations out of 'bottom', 'top', 'left', 'right'. Defaults to
        ('bottom', 'left').
    tick_direction : {'in', 'out', 'inout'}
        Tick direction. Defaults to 'in'.
    label : {'x', 'y', 'both', 'off'}
        Which axis labels to draw.
    grid : {'x', 'y', 'both', 'off'}
        Which grids to draw.
    grid_linestyle : str
        Passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'.
    grid_linewidth : float
        Passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'.
    num_notes : int
        Expected length of the pitch axis of `pianoroll`. Defaults to 78.
    x_start : int
        First beat number used for x-axis tick labels. Defaults to 1.
    alpha : float
        Opacity passed to :func:`matplotlib.pyplot.imshow`. Defaults to 1.
    """
    # --- argument validation ---
    if pianoroll.ndim not in (2, 3):
        raise ValueError("`pianoroll` must be a 2D or 3D numpy array")
    # bugfix: the message hard-coded "128" although `num_notes` is what is checked
    if pianoroll.shape[1] != num_notes:
        raise ValueError(
            "The length of the second axis of `pianoroll` must be {}.".format(num_notes)
        )
    if xtick not in ("auto", "beat", "step", "off"):
        # bugfix: the message listed 'none' although the accepted value is 'off'
        raise ValueError("`xtick` must be one of {'auto', 'beat', 'step', 'off'}.")
    if xtick == "beat" and beat_resolution is None:
        raise ValueError("`beat_resolution` must be specified when `xtick` is 'beat'.")
    if ytick not in ("octave", "pitch", "off"):
        raise ValueError("`ytick` must be one of {'octave', 'pitch', 'off'}.")
    if not isinstance(xticklabel, bool):
        raise TypeError("`xticklabel` must be bool.")
    if yticklabel not in ("auto", "name", "number", "off"):
        raise ValueError(
            "`yticklabel` must be one of {'auto', 'name', 'number', 'off'}."
        )
    if tick_direction not in ("in", "out", "inout"):
        raise ValueError("`tick_direction` must be one of {'in', 'out', 'inout'}.")
    if label not in ("x", "y", "both", "off"):
        raise ValueError("`label` must be one of {'x', 'y', 'both', 'off'}.")
    if grid not in ("x", "y", "both", "off"):
        raise ValueError("`grid` must be one of {'x', 'y', 'both', 'off'}.")

    # plotting: transpose so time runs along x and pitch along y
    if pianoroll.ndim > 2:
        to_plot = pianoroll.transpose(1, 0, 2)
    else:
        to_plot = pianoroll.T
    if np.issubdtype(pianoroll.dtype, np.bool_) or np.issubdtype(
        pianoroll.dtype, np.floating
    ):
        ax.imshow(
            to_plot,
            cmap=cmap,
            aspect="auto",
            vmin=0,
            vmax=1,
            origin="lower",
            interpolation="none",
            alpha = alpha,
        )
    elif np.issubdtype(pianoroll.dtype, np.integer):
        ax.imshow(
            to_plot,
            cmap=cmap,
            aspect="auto",
            vmin=0,
            vmax=127,
            origin="lower",
            interpolation="none",
            alpha = alpha,
        )
    else:
        raise TypeError("Unsupported data type for `pianoroll`.")

    # tick setting
    if tick_loc is None:
        tick_loc = ("bottom", "left")
    if xtick == "auto":
        xtick = "beat" if beat_resolution is not None else "step"
    if yticklabel == "auto":
        yticklabel = "name" if ytick == "octave" else "number"

    if preset == "plain":
        ax.axis("off")
    elif preset == "frame":
        ax.tick_params(
            direction=tick_direction,
            bottom=False,
            top=False,
            left=False,
            right=False,
            labelbottom=False,
            labeltop=False,
            labelleft=False,
            labelright=False,
        )
    else:
        ax.tick_params(
            direction=tick_direction,
            bottom=("bottom" in tick_loc),
            top=("top" in tick_loc),
            left=("left" in tick_loc),
            right=("right" in tick_loc),
            labelbottom=(xticklabel != "off"),
            labelleft=(yticklabel != "off"),
            labeltop=False,
            labelright=False,
        )

    # x-axis
    if xtick == "beat" and preset != "frame":
        num_beat = pianoroll.shape[0] // beat_resolution
        ax.set_xticks(beat_resolution * np.arange(num_beat) - 0.5)
        ax.set_xticklabels("")
        ax.set_xticks(beat_resolution * (np.arange(num_beat) + 0.5) - 0.5, minor=True)
        ax.set_xticklabels(np.arange(x_start, num_beat + 1), minor=True)
        ax.tick_params(axis="x", which="minor", width=0)

    # y-axis
    if ytick == "octave":
        ax.set_yticks(np.arange(0, num_notes, 12))
        if yticklabel == "name":
            ax.set_yticklabels(["C{}".format(i - 2) for i in range(11)])
    elif ytick == "pitch":
        # bugfix: this branch was guarded by `ytick == "step"`, a value the
        # validation above never allows, so per-pitch ticks were unreachable
        ax.set_yticks(np.arange(0, num_notes))
        if yticklabel == "name":
            if is_drum:
                ax.set_yticklabels(
                    [pretty_midi.note_number_to_drum_name(i) for i in range(num_notes)]
                )
            else:
                ax.set_yticklabels(
                    [pretty_midi.note_number_to_name(i) for i in range(num_notes)]
                )

    # axis labels
    if label in ("x", "both"):
        if xtick == "step" or not xticklabel:
            ax.set_xlabel("time (step)")
        else:
            ax.set_xlabel("time (beat)")
    if label in ("y", "both"):
        if is_drum:
            ax.set_ylabel("key name")
        else:
            ax.set_ylabel("pitch")

    # grid
    if grid != "off":
        ax.grid(
            axis=grid, color="k", linestyle=grid_linestyle, linewidth=grid_linewidth
        )

    # downbeat boarder
    if downbeats is not None and preset != "plain":
        for step in downbeats:
            ax.axvline(x=step, color="k", linewidth=1)
# Keep a pristine copy of the raw predictions, then normalize each timestep's
# note probabilities to sum to 1 and record the argmax note as a one-hot mask.
old_prediction = copy.deepcopy(prediction)
prediction = copy.deepcopy(old_prediction)
prediction[33,19,:]
max_indices = np.zeros(prediction.shape)
for batch in range(prediction.shape[0]):
    for timestep in range(prediction.shape[1]):
        max_indices[batch,timestep,prediction[batch,timestep,:].argmax()] = 1
        prediction[batch,timestep,:] /= np.sum(prediction[batch,timestep,:])
        #turn_off = prediction[batch,timestep,:].argsort()[:-20]
        #prediction[batch,timestep,turn_off] = 0
        pass
# +
def pad_with_zeros(pianoroll):
    """Zero-pad the pitch axis by 25 entries on each side (e.g. 78 -> 128)."""
    pad_widths = ((0, 0), (25, 25))
    return np.pad(pianoroll, pad_widths, 'constant', constant_values=(0, 0))
def combine_pianoroll(*pianorolls):
    """Stack pianorolls along the time axis (axis 0) into one array.

    Replaces the previous np.append loop, which re-allocated the growing
    array on every iteration, with a single concatenate call.
    """
    return np.concatenate(pianorolls, axis = 0)
def plot_batch_element(batch, which_element = 0, cmap_ctx = 'viridis', cmap_tar = 'Reds', num_subplots = 3, figsize = (12,8)):
    # Plot one batch element: both context segments with a zero gap where the
    # target goes, then overlay the true target in a second colormap.
    # Returns (fig, ax) so callers can add more subplots/ticks.
    fig = plt.figure(figsize = figsize)
    ax = fig.add_subplot(num_subplots*100 + 11)
    full_segment = combine_pianoroll(batch.context[which_element,0,:,:],
                                     np.zeros(DataObject.drop_articulation3d(batch.target[which_element,:,:]).shape),
                                     batch.context[which_element,1,:,:])
    just_target = np.zeros(full_segment.shape)
    # NOTE(review): the hard-coded 40:60 window assumes train_tms=40 and
    # test_tms=20 — confirm against the DataObject settings in use
    just_target[40:60, :] = DataObject.drop_articulation3d(batch.target[which_element,:,:])
    plot_pianoroll(ax, full_segment, cmap = cmap_ctx)
    plot_pianoroll(ax, just_target, cmap = cmap_tar, alpha = 1)
    # segment boundaries come from the module-level data_test object
    ax.axvline(data_test.train_tms)
    ax.axvline(data_test.train_tms+data_test.test_tms)
    return fig, ax
# -
# Per (batch, timestep): probability-weighted mean pitch and its variance,
# both rounded, used later as the "mean predicted key" overlay.
weighted_average = np.zeros((prediction.shape[0], prediction.shape[1]))
for timestep in range(0, prediction.shape[1]):
    for batch in range(0, prediction.shape[0]):
        weighted_average[batch, timestep] = np.round(np.average(np.arange(0,78,1), weights = prediction[batch, timestep, :]))
weighted_sd = np.zeros((prediction.shape[0], prediction.shape[1]))
for timestep in range(0, prediction.shape[1]):
    for batch in range(0, prediction.shape[0]):
        weighted_sd[batch, timestep] = np.round(np.average((np.arange(0,78,1) - weighted_average[batch, timestep])**2, weights = prediction[batch, timestep, :]))
# +
from matplotlib.colors import ListedColormap
# Build a Wistia-based colormap whose alpha ramps from 0 to 1.
# Choose colormap
cmap = plt.cm.Wistia
# Get the colormap colors
my_cmap = cmap(np.arange(cmap.N))
# Set alpha
my_cmap[:,-1] = np.linspace(0, 1, cmap.N)
# Create new colormap
my_cmap = ListedColormap(my_cmap)
# -
# Recover the argmax pitch index per (batch, timestep) from the one-hot mask.
which_indices = np.zeros((64, 20))
for batch in range(64):
    for timestep in range(20):
        which_indices[batch, timestep] = np.where(max_indices[batch, timestep, :] == 1)[0][0]
# +
# Top graph: one batch element with context, gap, and true target overlaid.
batchnum1 = 33
batchnum2 = 33
cmap = 'Blues'
meancol = 'red'
fig, ax = plot_batch_element(curr_test_batch, batchnum1, cmap_ctx = cmap, cmap_tar = my_cmap, num_subplots = 1,
                             figsize = (12,6))
ax.set_xticks(np.arange(0,2*data_test.train_tms + data_test.test_tms,data_test.test_tms/2))
ax.set_xticklabels(np.arange(0,2*data_test.train_tms + data_test.test_tms,data_test.test_tms/2))
ax.set_xlabel('Timestep')
ax.set_ylabel('Pitch')
fig.savefig('graphs/top_graph.png', dpi = 300, bbox_inches = 'tight')
# +
# Middle/bottom graphs: true target vs. predicted key probabilities, both
# overlaid with the weighted-mean pitch and a +/- 1 std band.
batchnum1 = 33
batchnum2 = 33
cmap = 'Blues'
meancol = 'red'
fig = plt.figure(figsize = (12,12))
ax2 = fig.add_subplot(212)
ax3 = fig.add_subplot(211)
plot_pianoroll(ax3,
               128*DataObject.drop_articulation3d(curr_test_batch.target[batchnum2,:,:,:]),
               cmap=my_cmap, alpha = 1) ######### TRUE
#plot_pianoroll(ax3,
#               0.05*128*seq_preds[batchnum1,:,:],
#               cmap=cmap, alpha = 0.5) ######### SEQ_PREDS
plot_pianoroll(ax2,
               0.1*128*prediction[batchnum2,:,:],
               cmap=cmap) ######### PREDICTION
ax2.plot(weighted_average[batchnum2,:], ':',
         linewidth = 3, color = meancol, alpha = 0.6) ######### AVERAGE PREDICTION
ax2.fill_between(np.arange(0, 20, 1),
                 weighted_average[batchnum2,:]-np.sqrt(weighted_sd)[batchnum2,:],
                 weighted_average[batchnum2,:]+np.sqrt(weighted_sd)[batchnum2,:],
                 alpha = 0.1, color = meancol)
ax3.plot(weighted_average[batchnum2,:], ':',
         linewidth = 3, color = meancol, alpha = 0.8, label = 'Mean predicted key')######### AVERAGE PREDICTION
ax3.fill_between(np.arange(0, 20, 1),
                 weighted_average[batchnum2,:]-np.sqrt(weighted_sd)[batchnum2,:],
                 weighted_average[batchnum2,:]+np.sqrt(weighted_sd)[batchnum2,:],
                 alpha = 0.15, color = meancol)
#ax3.plot(which_indices[batchnum2,:], color = 'purple')
ax2.set_xticks(np.arange(0,data_test.test_tms,1))
ax2.set_xticklabels(np.arange(data_test.train_tms+1,data_test.train_tms+data_test.test_tms+1,1))
ax3.set_xticks(np.arange(0,data_test.test_tms,1))
ax3.set_xticklabels(np.arange(data_test.train_tms+1,data_test.train_tms+data_test.test_tms+1,1))
ax3.set_xlabel('')
ax3.set_title('True target overlayed with mean predicted key', fontsize = 18)
ax3.legend()
ax2.set_title('Predicted probability of playing key', fontsize = 18)
ax2.set_xlabel('Timestep')
ax2.set_ylabel('Pitch')
ax3.set_ylabel('Pitch')
fig.savefig('graphs/mid_bott_graph.png', dpi = 300, bbox_inches = 'tight')
# +
# Standalone colorbar figure matching the probability colormap above.
import matplotlib.pyplot as plt
import matplotlib as mpl
fig, ax = plt.subplots(figsize=(1, 20))
fig.subplots_adjust(bottom=0.5)
cmap = mpl.cm.Blues
norm = mpl.colors.Normalize(vmin=0, vmax=0.6)
cb1 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                norm=norm,
                                orientation='vertical')
#cb1.set_label('Probability of playing key', fontsize=25)
font_size = 25 # Adjust as appropriate.
cb1.ax.tick_params(labelsize=font_size)
fig.show()
fig.savefig('graphs/colorbar_graph.png', dpi = 300, bbox_inches = 'tight')
# +
batchnum1 = 33
batchnum2 = 33
cmap = 'Blues'
meancol = 'red'
fig, ax = plot_batch_element(curr_test_batch, batchnum1, cmap_ctx = cmap, cmap_tar = my_cmap, num_subplots = 3,
figsize = (12,12))
#fig2 = plt.figure(figsize = (12,8))
#fig3 = plt.figure(figsize = (12,8))
ax2 = fig.add_subplot(313)
ax3 = fig.add_subplot(312)
#plot_pianoroll(ax2,
# 0.5*128*np.multiply(prediction[batchnum,:,:],1/np.tile(np.expand_dims(np.sum(prediction[batchnum,:,:], -1), -1), [1,78])),
# cmap='Reds')
plot_pianoroll(ax3,
128*DataObject.drop_articulation3d(curr_test_batch.target[batchnum2,:,:,:]),
cmap=my_cmap, alpha = 1) ######### TRUE
#plot_pianoroll(ax3,
# 0.05*128*seq_preds[batchnum1,:,:],
# cmap=cmap, alpha = 0.5) ######### SEQ_PREDS
plot_pianoroll(ax2,
0.1*128*prediction[batchnum2,:,:],
cmap=cmap) ######### PREDICTION
ax2.plot(weighted_average[batchnum2,:], ':',
linewidth = 3, color = meancol, alpha = 0.6) ######### AVERAGE PREDICTION
ax2.fill_between(np.arange(0, 20, 1),
weighted_average[batchnum2,:]-np.sqrt(weighted_sd)[batchnum2,:],
weighted_average[batchnum2,:]+np.sqrt(weighted_sd)[batchnum2,:],
alpha = 0.1, color = meancol)
ax3.plot(weighted_average[batchnum2,:], ':',
linewidth = 3, color = meancol, alpha = 0.8, label = 'Mean predicted key')######### AVERAGE PREDICTION
ax3.fill_between(np.arange(0, 20, 1),
weighted_average[batchnum2,:]-np.sqrt(weighted_sd)[batchnum2,:],
weighted_average[batchnum2,:]+np.sqrt(weighted_sd)[batchnum2,:],
alpha = 0.15, color = meancol)
#ax3.plot(which_indices[batchnum2,:], color = 'purple')
ax2.set_xticks(np.arange(0,data_test.test_tms,1))
ax2.set_xticklabels(np.arange(data_test.train_tms+1,data_test.train_tms+data_test.test_tms+1,1))
ax.set_xticks(np.arange(0,2*data_test.train_tms + data_test.test_tms,data_test.test_tms/2))
ax.set_xticklabels(np.arange(0,2*data_test.train_tms + data_test.test_tms,data_test.test_tms/2))
ax3.set_xticks(np.arange(0,data_test.test_tms,1))
ax3.set_xticklabels(np.arange(data_test.train_tms+1,data_test.train_tms+data_test.test_tms+1,1))
ax3.set_xlabel('')
ax.set_xlabel('')
# -
# ### Now we compare if we switch the order of left and right
# +
curr_test_batch_normal = pickle.load(open( "good_batch_for_graph_66.p", "rb" ))
curr_test_batch_normal.target = np.zeros(curr_test_batch_normal.target.shape)
curr_test_batch_switch = pickle.load(open( "good_batch_for_graph_66.p", "rb" ))
curr_test_batch_switch.context[:,[0,1],:,:] = curr_test_batch_switch.context[:,[1,0],:,:]
curr_test_batch_switch.target = np.zeros(curr_test_batch_switch.target.shape)
prediction_normal = model.predict([tf.convert_to_tensor(curr_test_batch_normal.context, dtype = tf.float32),
tf.convert_to_tensor(curr_test_batch_normal.target_train, dtype = tf.float32)],
steps = 1)
prediction_switch = model.predict([tf.convert_to_tensor(curr_test_batch_switch.context, dtype = tf.float32),
tf.convert_to_tensor(curr_test_batch_switch.target_train, dtype = tf.float32)],
steps = 1)
# +
file = 'maestro-v2.0.0/maestro-v2.0.0.csv'
# Get a batch we want to predict
data_test = DataObject(file, what_type = what_type,
train_tms = train_tms, test_tms = test_tms,
fs = 20, window_size = 15,
seed = seed)
# Create a batch class which we will iterate over
test_batch = Batch(data_test, batch_size = batch_size, songs_per_batch = songs_per_batch)
############################################# START GENERATING #############################################
#test_batch.data = pickle.load(open( "good_batch_for_graph_66.p", "rb" ))
#test_batch.data.context = np.transpose(test_batch.data.context, [1,0,2,3])
curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch.target_split = 0
curr_test_batch.window_size = 20
curr_test_batch.featurize(use_biaxial = True)
# +
batchnum = 40
cmap = 'Blues'
meancol = 'red'
fig, ax = plot_batch_element(curr_test_batch, batchnum, cmap_ctx = cmap, cmap_tar = my_cmap, num_subplots = 3,
figsize = (12,12))
#fig2 = plt.figure(figsize = (12,8))
#fig3 = plt.figure(figsize = (12,8))
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
#plot_pianoroll(ax2,
# 0.5*128*np.multiply(prediction[batchnum,:,:],1/np.tile(np.expand_dims(np.sum(prediction[batchnum,:,:], -1), -1), [1,78])),
# cmap='Reds')
plot_pianoroll(ax2,0.1*128*prediction_normal[batchnum,:,:],cmap=cmap) # NORMAL
plot_pianoroll(ax3,0.1*128*prediction_switch[batchnum,:,:],cmap=cmap) # NORMAL
# +
def plot_batch_element2(batch, fig, which_element = 0, cmap_ctx = 'viridis', cmap_tar = 'Reds', num_subplot = 2):
    """Draw one batch element onto row `num_subplot` of a 3-row figure.

    The two contexts are rendered with `cmap_ctx`; the target region
    (rows 40:60 of the combined roll) is overlaid with `cmap_tar`, and
    vertical lines mark the generated window (uses module-level `data_test`).
    Returns the (fig, ax) pair.
    """
    ax = fig.add_subplot(310 + num_subplot)
    left_ctx = batch.context[which_element, 0, :, :]
    right_ctx = batch.context[which_element, 1, :, :]
    target = batch.target[which_element, :, :]
    full_segment = combine_pianoroll(left_ctx, np.zeros(target.shape), right_ctx)
    # NOTE(review): hard-coded 40:60 window -- assumes train_tms=40/test_tms=20.
    overlay = np.zeros(full_segment.shape)
    overlay[40:60, :] = target
    plot_pianoroll(ax, full_segment, cmap = cmap_ctx)
    plot_pianoroll(ax, overlay, cmap = cmap_tar, alpha = 1)
    for boundary in (data_test.train_tms, data_test.train_tms + data_test.test_tms):
        ax.axvline(boundary)
    return fig, ax
# Choose colormap
cmap2 = plt.cm.Reds
# Get the colormap colors
my_cmap2 = cmap2(np.arange(cmap2.N))
# Set alpha
my_cmap2[:,-1] = np.linspace(0, 1, cmap2.N)
# Create new colormap
my_cmap2 = ListedColormap(my_cmap2)
# +
batchnum = 17
cmap = 'Blues'
meancol = 'red'
fig, ax = plot_batch_element(curr_test_batch, batchnum, cmap_ctx = cmap, cmap_tar = my_cmap, num_subplots = 3,
figsize = (12,12))
#fig2 = plt.figure(figsize = (12,8))
#fig3 = plt.figure(figsize = (12,8))
#ax2 = fig.add_subplot(312)
#ax3 = fig.add_subplot(313)
#plot_pianoroll(ax2,
# 0.5*128*np.multiply(prediction[batchnum,:,:],1/np.tile(np.expand_dims(np.sum(prediction[batchnum,:,:], -1), -1), [1,78])),
# cmap='Reds')
temp_test_batch = copy.deepcopy(curr_test_batch)
temp_test_batch.target = prediction_normal*4
fig, ax2 = plot_batch_element2(temp_test_batch, fig, batchnum, cmap_ctx = cmap, cmap_tar = my_cmap2, num_subplot = 2)
temp_test_batch = copy.deepcopy(curr_test_batch)
temp_test_batch.target = prediction_switch*4
temp_test_batch.context[:,[0,1],:,:] = temp_test_batch.context[:,[1,0],:,:]
fig, ax3 = plot_batch_element2(temp_test_batch, fig, batchnum, cmap_ctx = cmap, cmap_tar = my_cmap2, num_subplot = 3)
ax.set_title('True sample')
ax2.set_title('CNPs improvisation with correct ordering of contexts')
ax3.set_title('CNPs improvisation with switched ordering of contexts')
ax.set_xlabel('')
ax2.set_xlabel('')
#plot_pianoroll(ax2,0.1*128*prediction_normal[batchnum,:,:],cmap=cmap) # NORMAL
#plot_pianoroll(ax3,0.1*128*prediction_switch[batchnum,:,:],cmap=cmap) # NORMAL
#fig.savefig('graphs/switched_contexts_5.png', dpi = 300, bbox_inches = 'tight')
# +
import scipy.stats as st
def turn_probabilities_to_notes(prediction, how = 'random', normalize = True, threshold = 0.1, turn_on = 6):
    """Convert per-key probabilities into binary note on/off decisions.

    Parameters
    ----------
    prediction : np.ndarray, shape (batch, n_keys)
        Predicted probability of each key being played. MODIFIED IN PLACE:
        all but the `turn_on` most likely keys per batch element are zeroed
        (and optionally renormalised) before sampling.
    how : str
        'random'             -- Bernoulli-sample each key with its probability.
        'random_thresholded' -- boost probs >= `threshold` by 0.5 (capped at 1),
                                zero the rest, then Bernoulli-sample.
        'thresholded'        -- deterministic cut at `threshold`.
    normalize : bool
        If True, remap the surviving probabilities through a Gaussian CDF
        (standardised over the non-zero entries) and divide by 4.
    threshold : float
        Cut-off used by the thresholded modes.
    turn_on : int
        Maximum number of simultaneously sounding keys per batch element.

    Returns
    -------
    np.ndarray of 0/1 values with the same shape as `prediction`.

    Raises
    ------
    ValueError
        If `how` is not a recognised mode (previously this fell through to
        an unbound `notes` and raised NameError at the return).
    """
    for batch in range(prediction.shape[0]):
        # Keep only the `turn_on` highest-probability keys for this element.
        turn_off = prediction[batch,:].argsort()[:-turn_on]
        prediction[batch, :][turn_off] = 0
        if normalize:
            # Standardise the surviving probabilities and squash through a
            # normal CDF so they are comparable across batch elements.
            nonzero = prediction[batch, :] > 0
            prediction[batch, :] = st.norm.cdf(
                (prediction[batch, :] - np.mean(prediction[batch, :][nonzero])) /
                np.sqrt(np.var(prediction[batch, :][nonzero]))) / 4
            # The CDF maps the zeroed keys back to positive values; silence them again.
            prediction[batch, :][turn_off] = 0
    if how == 'random':
        notes = np.random.binomial(1, p=prediction)
    elif how == 'random_thresholded':
        prediction[prediction >= threshold] += 0.5
        prediction[prediction > 1] = 1
        prediction[prediction < threshold] = 0
        notes = np.random.binomial(1, p=prediction)
    elif how == 'thresholded':
        prediction[prediction >= threshold] = 1
        prediction[prediction < threshold] = 0
        notes = prediction
    else:
        raise ValueError("unknown sampling mode: {!r}".format(how))
    return notes
# -
# # ALL IN ONE EXPERIMENTAL
# +
##################### GENERATION PARAMETERS #####################
my_model_name = "biaxial_pn_encoder_concat_deeplstm_cont.h5"
foldername = 'experiment_switch_order3'
# data
what_type = 'test'
train_tms = 40
test_tms = 20
batch_size = 64
songs_per_batch = 16
seed = 1212
# turn probabilities to notes params
how = 'random'
normalize = False
remap_to_max = True
turn_on_notes = 8
divide_prob = 2
articulation_prob = 0.0018
remap_prob = 0.35
# Recurrence params
pick_pred_from_idx = 0
# +
import scipy.stats as st
import os
from os import path
import sys
import pickle
def load_model(file, curr_batch, modelname, *modelparams):
    """Instantiate `modelname(curr_batch, *modelparams)` and restore its
    weights from the checkpoint at `file`, returning the restored model."""
    restored = modelname(curr_batch, *modelparams)
    restored.load_weights(file)
    return restored
def turn_probabilities_to_notes(prediction,
                                turn_on,
                                how = 'random',
                                normalize = True,
                                threshold = 0.1,
                                divide_prob = 2,
                                remap_to_max = True):
    """Convert per-key probabilities for one timestep into 0/1 note decisions.

    Parameters
    ----------
    prediction : np.ndarray, shape (batch, n_keys)
        Key probabilities; MODIFIED IN PLACE (pruning/renormalisation).
    turn_on : sequence of numbers, length batch
        Per-element budget of keys allowed to sound; elements with a budget
        of <= 1 are silenced entirely.
    how, normalize, threshold, divide_prob :
        Same semantics as the earlier `turn_probabilities_to_notes` variant,
        with the CDF output divided by `divide_prob`.
    remap_to_max : bool
        If True, rescale each element so its strongest key has probability
        `remap_prob` (module-level generation parameter -- TODO confirm).

    Returns
    -------
    np.ndarray of 0/1 values with the same shape as `prediction`.
    """
    for batch in range(prediction.shape[0]):
        if turn_on[batch] <= 1:
            # No free voices left for this element: silence everything.
            prediction[batch, :] = 0
            continue
        turn_off = prediction[batch, :].argsort()[:-int(turn_on[batch])]
        prediction[batch, :][turn_off] = 0
        if normalize:
            # BUGFIX: the original indexed prediction[batch, timestep, :] here,
            # but `prediction` is 2-D and `timestep` is not defined in this
            # function (copy-paste from a 3-D variant) -> NameError.
            nonzero = prediction[batch, :] > 0
            prediction[batch, :] = st.norm.cdf(
                (prediction[batch, :] - np.mean(prediction[batch, :][nonzero])) /
                np.sqrt(np.var(prediction[batch, :][nonzero]))) / divide_prob
            prediction[batch, :][turn_off] = 0
        if remap_to_max:
            prediction[batch, :] /= prediction[batch, :].max()
            prediction[batch, :] *= remap_prob
    if how == 'random':
        notes = np.random.binomial(1, p=prediction)
    elif how == 'random_thresholded':
        prediction[prediction >= threshold] += 0.5
        prediction[prediction > 1] = 1
        prediction[prediction < threshold] = 0
        notes = np.random.binomial(1, p=prediction)
    elif how == 'thresholded':
        prediction[prediction >= threshold] = 1
        prediction[prediction < threshold] = 0
        notes = prediction
    else:
        raise ValueError("unknown sampling mode: {!r}".format(how))
    return notes
############################################# LOAD DATA ####################################################
file = 'maestro-v2.0.0/maestro-v2.0.0.csv'
# Get a batch we want to predict
data_test = DataObject(file, what_type = what_type,
train_tms = train_tms, test_tms = test_tms,
fs = 20, window_size = 15,
seed = seed)
# Create a batch class which we will iterate over
test_batch = Batch(data_test, batch_size = batch_size, songs_per_batch = songs_per_batch)
############################################# START GENERATING #############################################
#test_batch.data = pickle.load(open( "good_batch_for_graph_66.p", "rb" ))
#test_batch.data.context = np.transpose(test_batch.data.context, [1,0,2,3])
curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch.context[[0,1],:,:,:] = curr_test_batch.context[[1,0],:,:,:]
#curr_test_batch.target_split = 0
#curr_test_batch.window_size = 15
#curr_test_batch.featurize(use_biaxial = True)
#model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
final_output = np.zeros((test_batch.batch_size,
19+data_test.test_tms+19,
78))
# Populate from the front
final_output[:,0:19,:] = curr_test_batch.context[0,:,-19:,:]
final_output[:,20,:] = DataObject.drop_articulation3d(curr_test_batch.target[:,0,:,:])
# Populate from the back
final_output[:,-19:,:] = curr_test_batch.context[1,:,0:19,:]
#all_predictions = np.zeros((test_batch.batch_size, data_test.test_tms, 78))
#new_target = np.concatenate([curr_test_batch.context[:,0,-20:0,:],
# DataObject.drop_articulation(curr_test_batch.target),
# curr_test_batch.context[:,1,0:20,:]],
# axis = 1)
curr_test_batch.target[:,0:20,:,0] = final_output[:,0:20,:]
curr_test_batch.target[:,0:20,:,1] = np.zeros(final_output[:,0:20,:].shape)
curr_test_batch.target_split = 0
curr_test_batch.window_size = 20
curr_test_batch.featurize(use_biaxial = True)
model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
def take_prediction(t):
    """Negative slice offset selecting the last min(t, 20) predicted
    timesteps from the model output."""
    return -min(t, 20)
def take_actual(t):
    # Indices into `final_output` of the timesteps already generated at
    # step `t` of the recurrence (uses module-level `test_tms`).
    if t <= test_tms:
        return np.arange(19, 19+t, 1)
    else:
        # NOTE(review): with test_tms = 20 this range (t-test_tms+19, t-19)
        # is empty for every t > test_tms -- possibly meant to end at t+19?
        # The only call site below is commented out, so this branch is
        # currently never exercised; confirm before reusing.
        return np.arange(t-test_tms+19, t-19, 1)
for timestep in range(1,test_tms):
stdout.write('\rtimestep {}/{}'.format(timestep, test_tms))
stdout.flush()
prediction = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
steps = 1)[:,take_prediction(timestep):,:]
notes = np.zeros(prediction.shape)
turn_on = [turn_on_notes]*batch_size
for t in range(notes.shape[1]):
articulation = np.multiply(prediction[:,t,:], final_output[:,20+t,:])
articulation[articulation >= articulation_prob] = 1
articulation[articulation < articulation_prob] = 0
articulated_notes = np.sum(articulation, axis = -1)
play_notes = turn_probabilities_to_notes(prediction[:,t,:],
turn_on = turn_on - articulated_notes,
how = 'random',
normalize = normalize,
divide_prob = divide_prob,
remap_to_max = remap_to_max)
play_notes = play_notes + articulation
play_notes[play_notes >= 1] = 1
play_notes[play_notes < 1] = 0
final_output[:,21+t,:] = play_notes
#if timestep == test_tms:
# sys.exit()
#articulation = np.multiply(prediction, final_output[:,take_actual(timestep),:])
#articulation[articulation >= articulation_prob] = 1
#articulation[articulation < articulation_prob] = 0
#articulated_notes = np.sum(articulation, axis = -1)
#turn_on = turn_on_notes*np.ones((batch_size, timestep))
#prediction[np.where(articulation > 0)] = 0
#all_predictions[:,timestep,:] = copy.deepcopy(prediction)
#notes = turn_probabilities_to_notes(prediction,
# turn_on = turn_on - articulated_notes,
# how = 'random',
# normalize = normalize,
# divide_prob = divide_prob)
#notes = notes + articulation
#notes[notes >= 1] = 1
#notes[notes < 1] = 0
#final_output[:,21:(21+timestep),:] = notes
# Now reinitialize the model and everything
curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch.target[:,0:20,:,0] = final_output[:,timestep:(20+timestep)]
curr_test_batch.target_split = 0
curr_test_batch.window_size = 20
curr_test_batch.featurize(use_biaxial = True)
#model = load_model(my_model_name, curr_test_batch, biaxial_pn_encoder_concat_deeplstm)
#if curr_test_batch.target.shape[1] <= 15:
# curr_test_batch.target_split = 0
# curr_test_batch.window_size = timestep + 1
#else:
# curr_test_batch.target_split = curr_test_batch.target.shape[1] - 15
# curr_test_batch.window_size = curr_test_batch.target.shape[1]
######### WHY DO THIS?????? ######################################################
#model = load_model(my_model_name, curr_test_batch, biaxial_target_model_oneseq, 20)
#np.save('{}/all_predictions.npy'.format(foldername), all_predictions)
####################################################
#sys.exit('what?')
true_batch = copy.deepcopy(test_batch.data)
song_names = np.zeros(len(true_batch.link))
song_names = song_names.tolist()
i = 0
for i, link in enumerate(true_batch.link):
with open(data_test.file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
if row[4] == link:
name = str(row[0]) + '_' + str(row[1]) + '___' + str(i)
name = name.replace(" ", "-")
name = name.replace("/", "")
song_names[i] = name
break
##########################################################
if path.isdir(foldername):
os.system('rm -r {}'.format(foldername))
if not path.isdir(foldername):
os.mkdir(foldername)
with open('{}/setup.txt'.format(foldername), 'w+') as f:
f.write('what_type = {} \n \
train_tms = {} \n \
test_tms = {} \n \
batch_size = {} \n \
songs_per_batch ={} \n \
how = {} \n \
normalize = {} \n \
turn_on = {} \n \
divide_prob = {} \n \
articulation_prob = {}'.format(what_type,
str(train_tms),
str(test_tms),
str(batch_size),
str(songs_per_batch),
how,
str(normalize),
str(turn_on[0]),
str(divide_prob),
str(articulation_prob)))
##########################################################
true_batch = copy.deepcopy(test_batch.data)
true_batch.target = DataObject.drop_articulation(true_batch.target)
# Combine context
true_sample = np.append(np.squeeze(curr_test_batch.context[:,0,:,:]), true_batch.target, axis = 1)
true_sample = np.append(true_sample, np.squeeze(curr_test_batch.context[:,1,:,:]), axis = 1)
true_sample = np.append(np.expand_dims(true_sample, axis = 3),
np.expand_dims(true_sample, axis = 3), axis = 3)
predicted_sample = np.append(np.squeeze(curr_test_batch.context[:,0,:,:]), final_output[:,20:(20+test_tms),:], axis = 1)
predicted_sample = np.append(predicted_sample, np.squeeze(curr_test_batch.context[:,1,:,:]), axis = 1)
predicted_sample = np.append(np.expand_dims(predicted_sample, axis = 3),
np.expand_dims(predicted_sample, axis = 3), axis = 3)
# Save final midi
#save_indices = np.random.randint(low = 0, high = test_batch.batch_size, size = 20)
save_indices = np.arange(0,test_batch.batch_size)
for idx, i in enumerate(save_indices):
print("saving {}".format(idx))
#noteStateMatrixToMidi(true_sample[i,:,:], name = 'best_model_full_generation/NO_{}_TRUE_{}'.format(i,song_names[i]))
#noteStateMatrixToMidi(predicted_sample[i,:,:], name = 'best_model_full_generation/NO_{}_PRED_{}'.format(i,song_names[i]))
noteStateMatrixToMidi(true_sample[i,:,:], name = '{}/NO_{}_TRUE_{}'.format(foldername,i,song_names[i]))
noteStateMatrixToMidi(predicted_sample[i,:,:], name = '{}/NO_{}_PRED_{}'.format(foldername,i,song_names[i]))
# -
#prediction_switch = final_output[:,20:(20+test_tms),:]
prediction_switch = np.concatenate([np.zeros((64,1,78)), prediction], axis = 1)
# ### Now play music in a loop looking back at everything
# +
####### GOOD INIT START #######
curr_test_batch = copy.deepcopy(test_batch.data)
first_timestep = DataObject.drop_articulation(curr_test_batch.target[:,0:15,:])
curr_test_batch.target_split = 0
curr_test_batch.window_size = 15
curr_test_batch.featurize(use_biaxial = True)
model = load_model(my_model_name, curr_test_batch, biaxial_target_model_oneseq, 20)
final_output = np.zeros((test_batch.batch_size, data_test.test_tms, curr_test_batch.target_train.shape[-2]))
final_output[:,0:15,:] = first_timestep
all_predictions = np.zeros((test_batch.batch_size, data_test.test_tms, curr_test_batch.target_train.shape[-2]))
for timestep in range(15,100):
stdout.write('\rtimestep {}/100'.format(timestep))
stdout.flush()
prediction = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
steps = 1)
all_predictions[:,timestep,:] = copy.deepcopy(prediction)
notes = turn_probabilities_to_notes(prediction, how = 'random', turn_on = 8, normalize = True)
#notes = turn_probabilities_to_notes(prediction, how = 'random', threshold = 0.2, turn_on=20, normalize = False)
final_output[:,timestep,:] = notes
# Now reinitialize the model and everything
curr_test_batch = copy.deepcopy(test_batch.data)
curr_test_batch.target = copy.deepcopy(np.append(np.expand_dims(final_output, axis = 3),
np.expand_dims(final_output, axis = 3), axis = 3))
curr_test_batch.target_split = 0
curr_test_batch.window_size = timestep + 1
#if curr_test_batch.target.shape[1] <= 15:
# curr_test_batch.target_split = 0
# curr_test_batch.window_size = timestep + 1
#else:
# curr_test_batch.target_split = curr_test_batch.target.shape[1] - 15
# curr_test_batch.window_size = curr_test_batch.target.shape[1]
curr_test_batch.featurize(use_biaxial = True)
model = load_model(my_model_name, curr_test_batch, biaxial_target_model_oneseq, 20)
np.save('best_model_full_generation_fully_random/all_predictions.npy', all_predictions)
# +
true_batch = copy.deepcopy(test_batch.data)
song_names = np.zeros(len(true_batch.link))
song_names = song_names.tolist()
i = 0
for i, link in enumerate(true_batch.link):
with open(data_test.file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
if row[4] == link:
name = str(row[0]) + '_' + str(row[1]) + '___' + str(i)
name = name.replace(" ", "-")
name = name.replace("/", "")
song_names[i] = name
break
# +
import os
from os import path
foldername = 'best_model_full_generation_train_normalized'
if not path.isdir(foldername):
os.mkdir(foldername)
# +
true_batch = copy.deepcopy(test_batch.data)
true_batch.target = DataObject.drop_articulation(true_batch.target)
# Combine context
true_sample = np.append(np.squeeze(curr_test_batch.context[:,0,:,:]), true_batch.target, axis = 1)
true_sample = np.append(true_sample, np.squeeze(curr_test_batch.context[:,1,:,:]), axis = 1)
true_sample = np.append(np.expand_dims(true_sample, axis = 3),
np.expand_dims(true_sample, axis = 3), axis = 3)
predicted_sample = np.append(np.squeeze(curr_test_batch.context[:,0,:,:]), final_output, axis = 1)
predicted_sample = np.append(predicted_sample, np.squeeze(curr_test_batch.context[:,1,:,:]), axis = 1)
predicted_sample = np.append(np.expand_dims(predicted_sample, axis = 3),
np.expand_dims(predicted_sample, axis = 3), axis = 3)
# Save final midi
#save_indices = np.random.randint(low = 0, high = test_batch.batch_size, size = 20)
save_indices = np.arange(0,test_batch.batch_size)
for idx, i in enumerate(save_indices):
print("saving {}".format(idx))
#noteStateMatrixToMidi(true_sample[i,:,:], name = 'best_model_full_generation/NO_{}_TRUE_{}'.format(i,song_names[i]))
#noteStateMatrixToMidi(predicted_sample[i,:,:], name = 'best_model_full_generation/NO_{}_PRED_{}'.format(i,song_names[i]))
noteStateMatrixToMidi(true_sample[i,:,:], name = '{}/NO_{}_TRUE_{}'.format(foldername,i,song_names[i]))
noteStateMatrixToMidi(predicted_sample[i,:,:], name = '{}/NO_{}_PRED_{}'.format(foldername,i,song_names[i]))
# -
# ### Now play music in a loop looking back only a 15 window
# +
curr_test_batch = copy.copy(test_batch.data)
first_timestep = DataObject.drop_articulation3d(curr_test_batch.target[:,0,:])
curr_test_batch.target_split = 0
curr_test_batch.window_size = 1
curr_test_batch.featurize(use_biaxial = True)
model = load_model(my_model_name, curr_test_batch, biaxial_target_model_oneseq, 20)
final_output = np.zeros((test_batch.batch_size, data_test.test_tms, curr_test_batch.target_train.shape[-2]))
final_output[:,0,:] = first_timestep
for timestep in range(1,99):
print('timestep {}/100'.format(timestep))
prediction = model.predict([tf.convert_to_tensor(curr_test_batch.context, dtype = tf.float32),
tf.convert_to_tensor(curr_test_batch.target_train, dtype = tf.float32)],
steps = 1)
notes = turn_probabilities_to_notes(prediction, how = 'random')
final_output[:,timestep,:] = notes
# Now reinitialize the model and everything
curr_test_batch = copy.copy(test_batch.data)
curr_test_batch.target = np.append(np.zeros((np.expand_dims(final_output, axis = 3).shape)),
np.expand_dims(final_output, axis = 3), axis = 3)
if curr_test_batch.target.shape[1] <= 15:
curr_test_batch.target_split = 0
curr_test_batch.window_size = timestep + 1
else:
curr_test_batch.target_split = curr_test_batch.target.shape[1] - 15
curr_test_batch.window_size = curr_test_batch.target.shape[1]
curr_test_batch.featurize(use_biaxial = True)
model = load_model(my_model_name, curr_test_batch, biaxial_target_model_oneseq, 20)
# -
# # Experiment space
# ## How to change target so that it gives the first 50 non silent timesteps
change_target = curr_batch.target.numpy()
igor = np.nonzero(curr_batch.target)
first_index = np.nonzero(np.r_[1, np.diff(igor[0])[:-1]])[0]
first_note_index = igor[1][first_index]
for batch, idx in enumerate(first_note_index):
change_target[batch,(idx+50):,:] = 0
# # Generate midi and see how it looks
# +
def piano_roll_to_pretty_midi(piano_roll, fs=100, program=0):
    """Convert a (time, pitch) piano-roll array into a pretty_midi.PrettyMIDI.

    The roll is padded by 20 pitch columns on each side (presumably to map the
    model's 78-key range back onto the 128 MIDI pitches -- TODO confirm), then
    transposed to (pitch, time). Non-zero cell values are treated as
    velocities; `fs` is the number of roll frames per second, `program` the
    General MIDI instrument number.
    """
    piano_roll = np.pad(piano_roll, [(0, 0), (20,20)], 'constant', constant_values=0)
    piano_roll = np.transpose(piano_roll)
    notes, frames = piano_roll.shape
    pm = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(program=program)
    # pad 1 column of zeros so we can acknowledge initial and ending events
    piano_roll = np.pad(piano_roll, [(0, 0), (1, 1)], 'constant')
    # use changes in velocities to find note on / note off events
    velocity_changes = np.nonzero(np.diff(piano_roll).T)
    # keep track of velocities and note-on times per pitch
    prev_velocities = np.zeros(notes, dtype=int)
    note_on_time = np.zeros(notes)
    for time, note in zip(*velocity_changes):
        # use time + 1 because of padding above
        velocity = piano_roll[note, time + 1]
        time = time / fs
        if velocity > 0:
            # Rising edge: remember when the note started (only if it was off).
            if prev_velocities[note] == 0:
                note_on_time[note] = time
                prev_velocities[note] = velocity
        else:
            # Falling edge: emit the completed note and mark the pitch off.
            pm_note = pretty_midi.Note(
                velocity=prev_velocities[note],
                pitch=note,
                start=note_on_time[note],
                end=time)
            instrument.notes.append(pm_note)
            prev_velocities[note] = 0
    pm.instruments.append(instrument)
    return pm
def extract_piano_roll(predicted_pr, threshold):
    """Binarise a predicted piano roll IN PLACE: entries >= `threshold`
    become 1, all others 0. Returns the (mutated) input array."""
    playing = predicted_pr >= threshold
    predicted_pr[playing] = 1
    predicted_pr[~playing] = 0
    return predicted_pr
# -
my_midi = piano_roll_to_pretty_midi(curr_batch.target[0,:,:], fs = 50)
my_midi.write('example_target.mid')
predicted_target = extract_piano_roll(output[0,:,:], threshold = 0.2)
predicted_midi = piano_roll_to_pretty_midi(predicted_target, fs = 20)
predicted_midi.write('example_predicted.mid')
idx = 10
print(predicted_target[idx,:])
print(curr_batch.target[0,idx,:])
curr_batch.link[0]
midi_data = pretty_midi.PrettyMIDI('maestro-v2.0.0/'+'2006/MIDI-Unprocessed_01_R1_2006_01-09_ORIG_MID--AUDIO_01_R1_2006_01_Track01_wav.midi')
midi_data.estimate_tempo()
target_shape = curr_batch.target.shape
print(target_shape)
igor = tf.zeros((128, 10))
new_igor=tf.tile(tf.expand_dims(igor, 1), [1,target_shape[1],1])
tf.concat([curr_batch.target, new_igor], axis = 2)
curr_batch.target[0,0,:]
output[0,0,:]
igor = tf.convert_to_tensor([[1,1,1,1], [2,2,2,2], [3,3,3,3]])
igor.shape
tf.reshape(igor, [4,3])
# # Encoder
lstm = LSTM(100)
output = lstm(curr_batch.context)
output.shape
res_output = K.mean(tf.reshape(output, [128, 59, 100]), axis = -2)
res_output.shape
tile_output = tf.tile(tf.expand_dims(res_output, 1), [1,150,1])
tile_output.shape
K.mean(res_output, axis = -2).shape
# +
input_context_ = Input((None, 2), name="Input_layer_contxt_xy") # [num_pts, 2]
input_target_x = Input((None, 1), name="Input_layer_target_x") # [num_pts, 1]
encoder = input_context_xy
# -
curr_batch.context.shape
| music_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Image
from demo_utils.demo0 import Demo0
from demo_utils.demo5 import Demo5
from demo_utils.temporal_data import *
import importlib
import warnings
warnings.filterwarnings('ignore')
# Anteriormente había llegado a la conclusión de que realizar PCA sobre un dataset
# daba mejores resultados a los modelos
Image('demo_previa_pca_dt.png')
# La imagen previa utiliza el dataset 'digits' (directamente como lo ofrece la biblioteca sklearn)
# Da a entender que es mejor usar PCA respecto a no usarlo, y que lo mejor es hacer primero el sampling y después el PCA
# Pues ahora no obtengo los mismos resultados
Demo0().non_interactive(**data_d0)
# Notar que en la primera imagen se está mirando el score, mientras que en la segunda el error
# Para empezar, el DT solo ahora es mejor que el DT con PCA. Eso es distinto a lo que muestra la primera imagen.
# Después, no parece haber una diferencia real entre usar PCA y no usarlo cuando se hace sampling. Nystroem parece que se beneficia un poco en algunos casos, pero la diferencia no es mucha.
# La única diferencia que se me ocurre en las dos ejecuciones es que en la primera utilizo el
# dataset de juguete que ofrecen en scikit-learn, y hago la normalización que usaban ellos (dividir entre 16 y restar la media).
#
# En la segunda, en cambio, utilizo un subconjunto propio del dataset digits que está en UCI, y uso la estandarización (restar la media y dividir entre la varianza)
# Me queda pendiente enfrentar estas diferencias para ver si son las que están causando estas discordancias
# Quería volver a comprobar cómo es mejor hacer primero el sampling y después el PCA, y he hecho una demo probando las dos ordenaciones. Los resultados son estos:
Demo5().non_interactive(**data_d1_1)
Demo5().non_interactive(**data_d1_2)
# Se puede observar cómo haciendo RBF no hay ninguna diferencia real entre cambiar el orden del sampling y PCA, y en Nystroem hay una pequeña diferencia. Parece que es mejor hacer primero el sampling y después el PCA
# ### Usando digits
Demo0().non_interactive(**data_d2_1)
# Se puede observar que lo que va mejor es Logit RBF con un Grey Box, con resultados muy parecidos a usar un PCA. El resto de combinaciones son bastante similares.
# De todos modos, lo mejor sigue siendo usar un Logit clásico sin más
# ¿Es esto lo que estábamos esperando?
Demo0().non_interactive(**data_d2_2)
# ### Usando Segment
Demo0().non_interactive(**data_d2_3)
Demo0().non_interactive(**data_d2_4)
| code/notebooks/python/Demo 5-12-2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# Import libraries
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn import metrics
# -
# Load data
# original data https://people.sc.fsu.edu/~jburkardt/datasets/regression/x16.txt
dataset = pd.read_csv('data/petrol_consumption.csv')
# Explore data
dataset.head()
dataset.describe()
# Preprocessing
X = dataset.drop('Petrol_Consumption', axis=1)
y = dataset['Petrol_Consumption']
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Train the model
regressor = DecisionTreeRegressor()
regressor.fit(X_train, y_train)
# Predict
# +
y_pred = regressor.predict(X_test)
# compare predicted and actual values
df=pd.DataFrame({'Actual':y_test, 'Predicted':y_pred})
df
# -
# Evaluate the model
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
| decision_tree_regressor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## DO NOT RUN THE BLOCK BELOW
# +
import pandas as pd
from sklearn.utils import shuffle
df=pd.read_csv("002_duplicate.csv")
#print(df['text'].iloc[5])
#print(df['text'].iloc[77])
df=shuffle(df)
print(df)
df.to_csv(path_or_buf="shuffled_tweets.csv")
# +
df=pd.read_csv("shuffled_tweets.csv")
print(df)
# -
# ## DO NOT RUN THE BLOCK BELOW
df=pd.read_csv("All_HITS.csv")
#print(df)
shuffled_df=shuffle(df)
print(shuffled_df)
shuffled_df.to_csv("shuffled_all_HITS.csv")
# +
batch=pd.read_csv("Batch_results.csv")
print(batch)
# +
import numpy as np
average_time=batch['WorkTimeInSeconds'].mean()
print(average_time)
print(batch['Input.Annotation'].isnull().values)
"""for i in range(114):
print(batch.iloc[i,30])"""
print(batch.iloc[2,30]==batch.iloc[2,31])
# -
real_batch=pd.read_csv("Real_Batch.csv")
print(real_batch.shape)
worker_id=real_batch['WorkerId'].unique()
print(worker_id)
print(len(worker_id))
#extract the tweet and final annotation for model training and testing
cols=['tweet','Final_Annotation']
training_df=real_batch[cols]
df=training_df.dropna()
print(df)
# +
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words counts over the labelled tweets.
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(df['tweet'])
X_train_counts.shape
# -
# Displayed for inspection only; the reshaped result is not stored.
X_train_counts.reshape(-1,1)
X_train_counts.shape
from sklearn.feature_extraction.text import TfidfTransformer
# Re-weight the raw counts with TF-IDF.
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape
# +
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn.model_selection import KFold
from sklearn import svm, metrics
# BUG FIX: passing random_state together with shuffle=False raises a
# ValueError in recent scikit-learn (the seed has no effect without
# shuffling), so the unused RandomState was dropped.
kf = KFold(n_splits=10, shuffle=False)
scores=[]
# 10-fold cross-validation of a multinomial naive Bayes classifier.
for train_index, test_index in kf.split(X_train_tfidf):
    clf = MultinomialNB()
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = X_train_tfidf[train_index], X_train_tfidf[test_index]
    # BUG FIX: KFold yields *positional* indices, but df kept its original
    # index labels after dropna(), so label-based indexing could raise a
    # KeyError or pick wrong rows.  .iloc indexes by position, matching the
    # rows of X_train_tfidf.
    y_train = df['Final_Annotation'].iloc[train_index]
    y_test = df['Final_Annotation'].iloc[test_index]
    clf.fit(X_train, y_train)
    annotation_pred = clf.predict(X_test)
    # MultinomialNB.coef_ was removed in scikit-learn 1.x; for a multiclass
    # model it was an alias of feature_log_prob_.
    print('Coefficients : \n', clf.feature_log_prob_)
    # Fold accuracy, computed once and reused for printing and averaging.
    fold_score = clf.score(X_test,y_test)
    print('Score is: %.2f' % fold_score)
    scores.append(fold_score)
    print()
    print("Classification report for classifier %s\n%s\n" % (clf, metrics.classification_report(y_test, annotation_pred)))
    # Confusion matrix over the three vaccine-stance labels.
    print("Confusion Matrix:\n%s" % metrics.confusion_matrix(y_test, annotation_pred,labels=['anti-vaccine','pro-vaccine','neither']))
print("The scores for 10-folds cross validation are: ")
print(scores)
print("The average score for 10-folds cross validation is: "+ str(np.mean(scores)))
# -
| Assignments/Applied Machine Learning for Analytics/HW4/HW4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="PVUW3yIlrHu5"
# # Векторное представления текстов
# + [markdown] id="aN-JAK7XrHu7"
# ## Библиотеки
# + id="1l60J3BhJjHl"
# !pip install --quiet dvc[gdrive] fasttext
# + id="BC3Te-tYrHu8"
from copy import deepcopy
import fasttext
import fasttext.util
import matplotlib.pyplot as plt
from matplotlib.image import imread
from mpl_toolkits import mplot3d
from matplotlib import gridspec
from PIL import Image
import io
import os
from urllib.request import urlopen
from skimage.segmentation import mark_boundaries
from nltk.tokenize import RegexpTokenizer
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import requests
from scipy.stats import norm
import torch
import dvc.api
from sklearn.metrics import classification_report
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
# + id="P0RVGWO-rHu8"
import warnings
warnings.filterwarnings("ignore")
# + colab={"base_uri": "https://localhost:8080/", "height": 37} id="6d3vU83SrHu8" outputId="ffe1668b-7fe9-44db-dcc9-2241b5ea2efb"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
device
# + [markdown] id="fMLxenl58nYd"
# ## Код для обучения
# + id="ZItD3XrL85ur"
def train_on_batch(model, x_batch, y_batch, optimizer, loss_function):
    """Perform one optimization step on a single mini-batch.

    Puts the model in training mode, moves the batch to the model's device,
    computes the loss, back-propagates and applies the optimizer update.
    Returns the batch loss as a Python float (detached, on CPU).
    """
    model.train()
    optimizer.zero_grad()

    predictions = model(x_batch.to(model.device))
    batch_loss = loss_function(predictions, y_batch.to(model.device))

    batch_loss.backward()
    optimizer.step()

    return batch_loss.cpu().item()
# + id="MVNMl8DfmRhU"
def train_epoch(train_generator, model, loss_function, optimizer, callback = None):
    """Train ``model`` for one full pass over ``train_generator``.

    Calls ``train_on_batch`` for every (x, y) pair and, when supplied,
    invokes ``callback(model, batch_loss)`` under ``torch.no_grad()`` after
    each step.  Returns the sample-weighted mean loss over the epoch.
    """
    running_loss = 0.0
    seen = 0
    for x_batch, y_batch in train_generator:
        loss_value = train_on_batch(model, x_batch, y_batch, optimizer, loss_function)

        if callback is not None:
            with torch.no_grad():
                callback(model, loss_value)

        # Weight each batch loss by its batch size so the epoch mean is
        # a true per-sample average even with a ragged final batch.
        running_loss += loss_value * len(x_batch)
        seen += len(x_batch)

    return running_loss / seen
# + id="QCXwUfl51k3z"
def trainer(count_of_epoch,
            batch_size,
            dataset,
            model,
            loss_function,
            optimizer,
            lr = 0.001,
            callback = None):
    """Full training driver: run ``count_of_epoch`` epochs over ``dataset``.

    ``optimizer`` is an optimizer *class* (e.g. ``torch.optim.Adam``); it is
    instantiated here with the model parameters and ``lr``.  Progress and
    the latest epoch loss are reported through tqdm progress bars.
    """
    opt = optimizer(model.parameters(), lr=lr)

    # Number of mini-batches per epoch (last partial batch included).
    n_batches = len(dataset) // batch_size + (len(dataset) % batch_size > 0)

    epoch_bar = tqdm(range(count_of_epoch), desc='epoch')
    epoch_bar.set_postfix({'train epoch loss': np.nan})
    for _ in epoch_bar:
        # A fresh DataLoader per epoch re-shuffles the samples.
        loader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=True, pin_memory=True)
        batch_bar = tqdm(loader, leave=False, total=n_batches)

        mean_loss = train_epoch(train_generator=batch_bar,
                                model=model,
                                loss_function=loss_function,
                                optimizer=opt,
                                callback=callback)

        epoch_bar.set_postfix({'train epoch loss': mean_loss})
# + [markdown] id="JsGEvSBSrVwg"
# ## Что это и зачем нужно?
# + [markdown] id="wwCVJ5IDH17i"
# ## Пример классификации твитов
# + [markdown] id="RnP0JTS8Qsya"
# ### Загрузим выборку
# Рекомендую всем ознакомиться с dvc (если возникла проблема аутентификации, перезагрузите ядро Jupyter)
# + id="DkoqT588HtUZ"
with dvc.api.open(
'sem17/data/dataset.csv',
repo='https://github.com/andriygav/MachineLearningSeminars',
) as f:
dataset = pd.read_csv(f)
# + [markdown] id="l6OXiLVZQ0zV"
# ### Посмотрим на данные
# + id="gISrWPvOYI_r"
dataset = dataset[dataset[['tag', 'message']].notnull().all(1)]
# + id="_VcSnHNFQmwY"
dataset = dataset.sample(125000, random_state=42)
train_mask = np.random.rand(len(dataset), ) < 0.8
dataset_train = dataset[train_mask]
dataset_test = dataset[~train_mask]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="zhf27RAPjqdu" outputId="e75d6ee8-389a-42e6-d3fc-e5e08bcd1143"
dataset_train.sample(5, random_state=42)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="EGbakt6ujsud" outputId="3f9f358e-9f70-422e-9302-d1db3be57603"
dataset_train.describe()
# + [markdown] id="6BUGt80XQ-Zi"
# ### Построим модель RNN (как 2 семинара назад)
#
# + id="Shbvw80CRnDW"
class RNNclassifier(torch.nn.Module):
    """Sentence classifier: token ids -> Embedding -> stacked LSTM -> Linear.

    The classification features are the final hidden and cell states of
    every LSTM layer (and direction), concatenated per example.
    """

    @property
    def device(self):
        """Device on which the model parameters currently live."""
        return next(self.parameters()).device

    def __init__(self, vocab_dim, output_dim, emb_dim = 10, hidden_dim = 10,
                 num_layers = 3, bidirectional = False, p=0.7):
        """vocab_dim/output_dim are required; the rest are architecture knobs.

        ``p`` is the dropout probability applied between stacked LSTM layers.
        """
        super(RNNclassifier, self).__init__()
        num_directions = int(bidirectional + 1)
        self.embedding = torch.nn.Embedding(vocab_dim, emb_dim)
        self.encoder = torch.nn.LSTM(emb_dim, hidden_dim, num_layers,
                                     bidirectional=bidirectional,
                                     batch_first=True, dropout=p)
        # Both h and c of every layer/direction feed the head, hence factor 2.
        self.linear = torch.nn.Linear(
            2 * num_layers * num_directions * hidden_dim, output_dim)

    def forward(self, input):
        """Map a (batch, seq_len) tensor of token ids to (batch, output_dim) logits."""
        embedded = self.embedding(input)
        _, (hidden, cell) = self.encoder(embedded)
        # (2*layers*dirs, batch, hidden) -> (batch, 2*layers*dirs, hidden)
        states = torch.cat([hidden, cell], dim=0).transpose(0, 1)
        features = states.reshape(input.size(0), -1)
        return self.linear(features)
# + id="H4MQQD5nd36H"
class Tokenizer(object):
    """Turns raw sentences into a tensor of vocabulary ids.

    Each sentence is wrapped in '[CLS]' ... '[SEP]'; shorter sentences get
    '[PAD]' tokens appended after the trailing '[SEP]' and longer ones are
    truncated to ``max_length`` tokens.  Unknown words map to '[UNK]'.
    """

    def __init__(self, word_to_ind, tokenizer):
        # word -> integer id mapping; must contain the four special tokens.
        self.word_to_ind = word_to_ind
        # Any object exposing tokenize_sents(list[str]) -> list[list[str]].
        self.tokenizer = tokenizer

    def __call__(self, sentences, max_length = 10, pad_to_max_length = False):
        token_lists = self.tokenizer.tokenize_sents(sentences)
        if not pad_to_max_length:
            # Shrink max_length down to the longest sentence in this batch.
            max_length = min(max_length, max(map(len, token_lists)))
        rows = []
        for sent in token_lists:
            if len(sent) < max_length:
                row = ['[CLS]'] + sent + ['[SEP]'] + ['[PAD]'] * (max_length - len(sent))
            else:
                row = ['[CLS]'] + sent[:max_length] + ['[SEP]']
            rows.append(row)
        unk = self.word_to_ind['[UNK]']
        ids = [[self.word_to_ind.get(word, unk) for word in row] for row in rows]
        return torch.tensor(ids)
# + [markdown] id="7r6ogb_2XB0q"
# ### Разбиение на слова --- токенайзер
# + id="wUCtTjuDWDrY"
# Build the vocabulary (word -> id) from the raw training texts.
# BUG FIX: '[CLS]' and '[SEP]' were mapped to 3 and 4 while the dict held
# only four entries, so the first real word also received id len(dict) == 4
# and collided with '[SEP]'.  Ids are now contiguous, so no collision.
word_to_ind = {'[PAD]': 0, '[UNK]': 1, '[CLS]': 2, '[SEP]': 3}
for sent in tqdm(dataset_train.values[:, 1]):
    for word in RegexpTokenizer('[a-zA-Z]+|[^\w\s]|\d+').tokenize(sent):
        if word not in word_to_ind:
            # len(...) is the next free id (idiomatic form of .__len__()).
            word_to_ind[word] = len(word_to_ind)
# + colab={"base_uri": "https://localhost:8080/"} id="q4GIei9IYgq9" outputId="82320c72-8f89-45ab-a9c8-c1a67bede40e"
len(word_to_ind)
# + colab={"base_uri": "https://localhost:8080/"} id="HI5u_PMWYuE9" outputId="734b1285-8c7c-4bc0-f4f9-f1c158316b40"
len(set(dataset_train.values[:, 0]))
# + id="nV3XAeJVgS9j"
tokenizer = Tokenizer(word_to_ind, RegexpTokenizer('[a-zA-Z]+|[^\w\s]|\d+'))
# + id="J5GmjlwmisRX"
train_data_sent = tokenizer(dataset_train.values[:, 1])
test_data_sent = tokenizer(dataset_test.values[:, 1])
# + id="KxGKpNVWiwSn"
dataset_train_pt = torch.utils.data.TensorDataset(
train_data_sent, torch.tensor(dataset_train.values[:, 0].tolist()).long())
dataset_test_pt = torch.utils.data.TensorDataset(
test_data_sent, torch.tensor(dataset_test.values[:, 0].tolist()).long())
# + [markdown] id="sxWpZdZ-1fZU"
# ### Инициализация модели
# + id="BiigRiMkUWfY"
# Hyper-parameters for the id-based RNN classifier.
config = dict()
# Embedding-table size = vocabulary size built above.
config['vocab_dim'] = len(word_to_ind)
# One output logit per distinct label (column 0 of the dataset).
config['output_dim'] = len(set(dataset.values[:, 0]))
config['emb_dim'] = 100
config['hidden_dim'] = 10
config['num_layers'] = 3
config['bidirectional'] = False
# Dropout probability between stacked LSTM layers.
config['p'] = 0.7
model = RNNclassifier(**config)
# Move to GPU when available; '_' suppresses the notebook echo of the model.
_ = model.to(device)
# + [markdown] id="cL6_9APz1ijh"
# ### Качество до обучения
# + colab={"base_uri": "https://localhost:8080/"} id="q4RXQet33NIv" outputId="93ef2c10-44b0-4052-cdfd-7ec53c2e7233"
# Evaluate the classifier on the held-out test split.
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
                                              batch_size=64,
                                              pin_memory=True)
pred = []
real = []
# Inference mode: disables dropout in the LSTM stack.
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
    x_batch = x_batch.to(device)
    # No gradients are needed for evaluation.
    with torch.no_grad():
        output = model(x_batch)
    # Predicted class = arg-max over the output logits.
    pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
    real.extend(y_batch.cpu().numpy().tolist())
# Per-class precision / recall / F1 on the test set.
print(classification_report(real, pred))
# + [markdown] id="dceqH2my1woS"
# ### Обучение модели
# + id="wkY08WPv1zEF"
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam
# + id="OepV4mCu2Dxj"
trainer(count_of_epoch=5,
batch_size=64,
dataset=dataset_train_pt,
model=model,
loss_function=loss_function,
optimizer = optimizer,
lr=0.001,
callback=None)
# + [markdown] id="HHHeP5ZD9AcP"
# ### Качество после обучения
# + colab={"base_uri": "https://localhost:8080/"} id="7sJceEhE43Bf" outputId="f1924f87-2035-4e8c-a9ad-bb9f3a75b004"
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
batch_size=64,
pin_memory=True)
pred = []
real = []
test_loss = 0
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
x_batch = x_batch.to(device)
with torch.no_grad():
output = model(x_batch)
pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
real.extend(y_batch.cpu().numpy().tolist())
print(classification_report(real, pred))
# + [markdown] id="6QpNwKHcMx92"
# ## Word2Vec (на основе vec формата fasttext)
# + [markdown] id="ngzxcSiQ6Xb5"
# Используя опыт предыдущего семинара хочется "дообучать" нейросеть вместо того, чтобы обучать с нуля.
#
# Предлагается к примеру использовать предобученный слой nn.Embedings.
# + [markdown] id="6r3Z8879YLwR"
# ### Скачивание модели
# + id="DffXHIKINWcL"
# !dvc get https://github.com/andriygav/MachineLearningSeminars sem17/data/cc.en.10.bin
# + [markdown] id="e71iykPGVpAA"
# ### Загрузка fasttext модели
# + id="FVqMkJuVM8Bz"
ft = fasttext.load_model('cc.en.10.bin', )
# + [markdown] id="Mh2aqP_tWbVL"
# ### Генерация VEC формата
# + id="yY81-I4t_5zH"
# Build a vocabulary and a matching embedding matrix from the fasttext model.
word_to_ind = dict()
matrix_fasttext = []
# BUG FIX: ids are now assigned with len(word_to_ind) instead of the
# enumerate index.  on_unicode_error='replace' can yield duplicate word
# strings; previously a skipped duplicate desynchronised the dict ids from
# the row positions in matrix_fasttext, so words looked up wrong vectors.
for w in tqdm(ft.get_words(on_unicode_error='replace')):
    if w not in word_to_ind:
        word_to_ind[w] = len(word_to_ind)
        matrix_fasttext.append(ft.get_word_vector(w))
# Special tokens get zero vectors appended at the end of the matrix.
for w in ['[PAD]', '[UNK]', '[CLS]', '[SEP]']:
    word_to_ind[w] = len(word_to_ind)
    matrix_fasttext.append(np.zeros_like(matrix_fasttext[-1]))
# + [markdown] id="6RDgh3UHX2AJ"
# ### Получение векторизованных данных
# + id="FQOF1Of3Az-q"
tokenizer = Tokenizer(word_to_ind, RegexpTokenizer('[a-zA-Z]+|[^\w\s]|\d+'))
# + id="mhNmyRyyBL3Q"
train_data_sent = tokenizer(dataset_train.values[:, 1])
test_data_sent = tokenizer(dataset_test.values[:, 1])
# + id="OZ1EjVHJBSbK"
dataset_train_pt = torch.utils.data.TensorDataset(
train_data_sent, torch.tensor(dataset_train.values[:, 0].tolist()).long())
dataset_test_pt = torch.utils.data.TensorDataset(
test_data_sent, torch.tensor(dataset_test.values[:, 0].tolist()).long())
# + [markdown] id="v4-Qn5N8X6J1"
# ### Инициализация моделей
# + id="prhhkqMJBUxB"
config = dict()
config['vocab_dim'] = len(word_to_ind)
config['output_dim'] = len(set(dataset.values[:, 0]))
config['emb_dim'] = 10
config['hidden_dim'] = 10
config['num_layers'] = 3
config['bidirectional'] = False
config['p'] = 0.7
model = RNNclassifier(**config)
_ = model.to(device)
# + [markdown] id="5IKVa-2rX9Yy"
# ### Использование VEC формата фастекста в модели
# + colab={"base_uri": "https://localhost:8080/"} id="3YJWF11MODdM" outputId="c8116d51-0606-4f1a-d9c1-a0c820804ff1"
# Initialise the embedding table with the pre-trained fasttext vectors...
model.embedding.weight.data.copy_(torch.tensor(matrix_fasttext))
# ...and freeze it so that only the LSTM and the linear head are trained.
for param in model.embedding.parameters():
    param.requires_grad = False
model.to(device)
# + [markdown] id="6W09s_jzYDOt"
# ### Качество до обучения
# + colab={"base_uri": "https://localhost:8080/"} id="V2m5OZw0PCQW" outputId="392e9ac0-3a1a-4143-f302-e4c2652c546c"
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
batch_size=64,
pin_memory=True)
pred = []
real = []
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
x_batch = x_batch.to(device)
with torch.no_grad():
output = model(x_batch)
pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
real.extend(y_batch.cpu().numpy().tolist())
print(classification_report(real, pred))
# + [markdown] id="jy75sy87YG5T"
# ### Обучение модели
# + id="FOAEzf28PGPi"
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam
# + id="fQU2RL1iPKHJ"
trainer(count_of_epoch=5,
batch_size=64,
dataset=dataset_train_pt,
model=model,
loss_function=loss_function,
optimizer = optimizer,
lr=0.001,
callback=None)
# + [markdown] id="auihxGCSYInj"
# ### Качество после обучения
# + colab={"base_uri": "https://localhost:8080/"} id="lAgfECGPPL69" outputId="76a80298-05dc-42bf-f780-420788b97fc8"
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
batch_size=64,
pin_memory=True)
pred = []
real = []
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
x_batch = x_batch.to(device)
with torch.no_grad():
output = model(x_batch)
pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
real.extend(y_batch.cpu().numpy().tolist())
print(classification_report(real, pred))
# + [markdown] id="Iwy9w8hvM8kk"
# ## Полноценный fasttext
# + [markdown] id="cDh0lzmLYQ9h"
# ### Задание модели
# + id="MR_yn9r8S-Oa"
class RNNclassifierFastText(torch.nn.Module):
    """Sentence classifier over pre-computed word vectors (e.g. fasttext).

    There is no embedding layer: the input is already a
    (batch, seq_len, emb_dim) float tensor.  The classification features are
    the final hidden and cell states of every LSTM layer (and direction),
    concatenated per example.
    """

    @property
    def device(self):
        """Device on which the model parameters currently live."""
        return next(self.parameters()).device

    def __init__(self, output_dim, emb_dim = 10, hidden_dim = 10,
                 num_layers = 3, bidirectional = False, p=0.7):
        """``p`` is the dropout probability between stacked LSTM layers."""
        super(RNNclassifierFastText, self).__init__()
        num_directions = int(bidirectional + 1)
        self.encoder = torch.nn.LSTM(emb_dim, hidden_dim, num_layers,
                                     bidirectional=bidirectional,
                                     batch_first=True, dropout=p)
        # Both h and c of every layer/direction feed the head, hence factor 2.
        self.linear = torch.nn.Linear(
            2 * num_layers * num_directions * hidden_dim, output_dim)

    def forward(self, input):
        """Map (batch, seq_len, emb_dim) vectors to (batch, output_dim) logits."""
        _, (hidden, cell) = self.encoder(input)
        states = torch.cat([hidden, cell], dim=0).transpose(0, 1)
        features = states.reshape(input.size(0), -1)
        return self.linear(features)
# + id="t_2EiCVbToOE"
class TokenizerFastText(object):
    """Turns raw sentences into a float tensor of fasttext word vectors.

    Sentences are wrapped in '[CLS]' ... '[SEP]', padded with '[PAD]'
    (appended after the trailing '[SEP]') or truncated to ``max_length``
    tokens, and every resulting token is replaced by its word vector.
    """

    def __init__(self, ft, tokenizer):
        # fasttext model exposing get_word_vector(word) -> vector.
        self.ft = ft
        # Any object exposing tokenize_sents(list[str]) -> list[list[str]].
        self.tokenizer = tokenizer

    def __call__(self, sentences, max_length = 10, pad_to_max_length = False):
        token_lists = self.tokenizer.tokenize_sents(sentences)
        if not pad_to_max_length:
            # Shrink max_length down to the longest sentence in this batch.
            max_length = min(max_length, max(map(len, token_lists)))
        rows = []
        for sent in token_lists:
            if len(sent) < max_length:
                rows.append(['[CLS]'] + sent + ['[SEP]'] + ['[PAD]'] * (max_length - len(sent)))
            else:
                rows.append(['[CLS]'] + sent[:max_length] + ['[SEP]'])
        vectors = [[self.ft.get_word_vector(word) for word in row] for row in rows]
        return torch.tensor(vectors)
# + [markdown] id="elHpby4zYWQF"
# ### Векторизация всех текстов
# + id="CNe-ohgeT9_F"
tokenizer = TokenizerFastText(ft, RegexpTokenizer('[a-zA-Z]+|[^\w\s]|\d+'))
# + id="GTJUTniUUTYn"
train_data_sent = tokenizer(dataset_train.values[:, 1])
test_data_sent = tokenizer(dataset_test.values[:, 1])
# + id="Ceb4X9v4UXAx"
dataset_train_pt = torch.utils.data.TensorDataset(
train_data_sent, torch.tensor(dataset_train.values[:, 0].tolist()).long())
dataset_test_pt = torch.utils.data.TensorDataset(
test_data_sent, torch.tensor(dataset_test.values[:, 0].tolist()).long())
# + [markdown] id="4tQzNm6KYZXL"
# ### Инициализация модели
# + id="vzEDavh-Udz_"
config = dict()
config['output_dim'] = len(set(dataset.values[:, 0]))
config['emb_dim'] = 10
config['hidden_dim'] = 10
config['num_layers'] = 3
config['bidirectional'] = False
config['p'] = 0.7
model = RNNclassifierFastText(**config)
_ = model.to(device)
# + [markdown] id="oIRIM8aMYbrB"
# ### Качество до обучения
# + colab={"base_uri": "https://localhost:8080/"} id="D3cewLHEUqne" outputId="6acf20dd-71ee-4446-f9d0-9dd69f5c2589"
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
batch_size=64,
pin_memory=True)
pred = []
real = []
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
x_batch = x_batch.to(device)
with torch.no_grad():
output = model(x_batch)
pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
real.extend(y_batch.cpu().numpy().tolist())
print(classification_report(real, pred))
# + [markdown] id="Gy0Pc5TZYdkI"
# ### Обучение модели
# + id="GJpHIhMHUt-e"
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam
# + id="DsqlypyWUwq0"
trainer(count_of_epoch=5,
batch_size=64,
dataset=dataset_train_pt,
model=model,
loss_function=loss_function,
optimizer = optimizer,
lr=0.001,
callback=None)
# + [markdown] id="eE6KLjyuYf8Y"
# ### Качество после обучения
# + colab={"base_uri": "https://localhost:8080/"} id="Qb7lvyJfUzJ4" outputId="5d493a81-2449-47fe-f4af-04367ae69d26"
batch_generator = torch.utils.data.DataLoader(dataset=dataset_test_pt,
batch_size=64,
pin_memory=True)
pred = []
real = []
model.eval()
for it, (x_batch, y_batch) in enumerate(batch_generator):
x_batch = x_batch.to(device)
with torch.no_grad():
output = model(x_batch)
pred.extend(torch.argmax(output, dim=-1).cpu().numpy().tolist())
real.extend(y_batch.cpu().numpy().tolist())
print(classification_report(real, pred))
# + [markdown] id="vkvEU2xBSz0Y"
# ### Репрезентация слов (к сожалению плохой пример вышел из-за reduce)
# + colab={"base_uri": "https://localhost:8080/"} id="E7fdtZ1DRe_S" outputId="0e0d9af6-f835-44d7-e977-2a73d2b36d50"
ft.get_analogies("ios", "google", "android", )
# + colab={"base_uri": "https://localhost:8080/"} id="OEmiyBQaQtGQ" outputId="de8de6a9-bd88-48b2-db0c-42ae6b97a86e"
ft.get_nearest_neighbors('king')
# + [markdown] id="WYzoIcpuNA-z"
# ## Приемы unsupervise обучения эмбедингов. На основе BERT.
# + [markdown] id="PVk_WeFfYrLJ"
# Основное преимущество векторного представления в том, что оно обучается независимо от задачи.
#
# Для обучения представления используются вспомогательные задачи.
# -
# ### Предсказание токена на основе окрестности
# 
# ### Предсказание, что предложение следует за предыдущем
# 
# ### Другие задачи, которые можно дообучать на основе предобученых векторов
# #### Выбор варианта из списка альтернатив
# Примерный формат данных:
# * Premise: The man broke his toe. What was the CAUSE of this?
# * Alternative 1: He got a hole in his sock.
# * Alternative 2: He dropped a hammer on his foot.
# #### Recognizing Textual Entailment
# Примерный формат данных:
# * Premise: If you help the needy, God will reward you.
# * Hypothesis: Giving money to a poor man has good consequences.
# #### Word in Context
# Примерный формат данных:
# * Context 1: There's a lot of trash on the **bed** of the river.
# * Context 2: I keep a glass of water next to my **bed** when I sleep.
# #### Answer To Passage
# Примерный формат данных
# * Question: Is france the same timezone as the uk.
# * Hypothesis: At the Liberation of France in the summer of 1944, Metropolitan France kept GMT+2 as it was the time then used by the Allies (British Double Summer Time). In the winter of 1944--1945, Metropolitan France switched to GMT+1, same as in the United Kingdom, and switched again to GMT+2 in April 1945 like its British ally. In September 1945, Metropolitan France returned to GMT+1 (pre-war summer time), which the British had already done in July 1945. Metropolitan France was officially scheduled to return to GMT+0 on November 18, 1945 (the British returned to GMT+0 in on October 7, 1945), but the French government canceled the decision on November 5, 1945, and GMT+1 has since then remained the official time of Metropolitan France.
# #### Более подробно для русского и английского языка
# * [SuperGLUE](https://super.gluebenchmark.com)
# * [Russian SuperGLUE](https://russiansuperglue.com)
| sem17/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## ```NoteBook Focus```
# ---
# 1. Train numerous models to select best models for hypertuning.
# 2. Build a model on unbalanced data and compare it to a model with balanced data. I want to make sure the models are performing they way they should.
#
# a. Unbalanced classes model should misclassify the minority class frequently.
#
# b. Balanced classes model should have great score across the board.
# ## ```Imports```
# ---
# +
import pandas as pd
from classifiers_copy import classify
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# -
# ## ```Creating new dataframe with balanced classes```
# ---
drugs = pd.read_csv('../Claire/data/drugs_2020_simply_imputed.csv')
drugs.head()
# +
drugs['prisdum'].value_counts(normalize=True)
# 0 = no prison time
# 1 = prison time
# -
# checking for missing values Claire might've missed before modeling
drugs.isnull().sum()
# +
# Build a class-balanced dataframe for modelling by downsampling the
# majority class to the size of the minority class.

# separate all minority-class rows (0 = no prison time)
df_0 = drugs[drugs['prisdum']==0]
print(df_0.shape)

# Downsample the majority class.  Using len(df_0) instead of a hard-coded
# 754 keeps the classes balanced even if the input data changes, and
# random_state makes the sample reproducible, consistent with the
# train_test_split(random_state=42) used elsewhere in this notebook.
df_1 = drugs[drugs['prisdum']==1]
df_1_sample = df_1.sample(n=len(df_0), replace=False, random_state=42)
print(df_1_sample.shape)

# concat both df's
equal_class_df = pd.concat([df_0,df_1_sample], ignore_index=True)
print(equal_class_df.shape)
# -
# ## ```Modeling```
# ---
# +
# model with balanced classes

#set up X/y
X = equal_class_df.drop(columns='prisdum')
y = equal_class_df['prisdum']

# set train/test split
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=42, train_size=0.7, stratify=y)

# scale data
ss = StandardScaler()
X_train_ss = ss.fit_transform(X_train)
# BUG FIX: the scaler must only be fit on the training data.  Refitting on
# the test set scales train and test with different statistics (data
# leakage / inconsistent feature scales), so use transform() here.
X_test_ss = ss.transform(X_test)

# train multiple models
equal_class_scores = classify(X_train_ss,X_test_ss,y_train,y_test)
# -

equal_class_scores
# +
# import model
from sklearn.linear_model import LogisticRegression
# instantiate, train, evaluate
logreg = LogisticRegression()
logreg.fit(X_train_ss,y_train)
print(f"train acc: {logreg.score(X_train_ss,y_train)}")
print(f"test acc: {logreg.score(X_test_ss,y_test)}")
# create df to visualize coefs
logreg_scores = pd.DataFrame(columns=X.columns,data=logreg.coef_).T
# -
logreg_scores[logreg_scores[0]>0].sort_values(by=0,ascending=False).head(10)
# +
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
# set up confusion matrix
cm = confusion_matrix(y_test,logreg.predict(X_test_ss))
# plot matrix
plot_confusion_matrix(logreg, X_test_ss, y_test, cmap='BuGn', display_labels=['no prison time', 'prison time']);
# -
# **These results are great for a model that has balanced classes. We would never want to tell the person/offender the incorrect probability of getting a prison sentence.**
# +
# model with UN_balanced classes

#set up X/y
X = drugs.drop(columns='prisdum')
y = drugs['prisdum']

# set train/test split
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=42, train_size=0.7, stratify=y)

# scale data
ss = StandardScaler()
X_train_ss = ss.fit_transform(X_train)
# BUG FIX: fit the scaler on the training data only; refitting on the test
# set leaks test statistics and scales the two splits inconsistently.
X_test_ss = ss.transform(X_test)

# train multiple models
unbalanced_class_scores = classify(X_train_ss,X_test_ss,y_train,y_test)
# -

unbalanced_class_scores
# +
# import model
from sklearn.linear_model import LogisticRegression
# instantiate, train, evaluate
logreg = LogisticRegression()
logreg.fit(X_train_ss,y_train)
print(f"train acc: {logreg.score(X_train_ss,y_train)}")
print(f"test acc: {logreg.score(X_test_ss,y_test)}")
# create df to visualize coefs
logreg_scores = pd.DataFrame(columns=X.columns,data=logreg.coef_).T
# +
# set up confusion matrix
cm = confusion_matrix(y_test,logreg.predict(X_test_ss))
# plot matrix
plot_confusion_matrix(logreg, X_test_ss, y_test, cmap='BuGn', display_labels=['no prison time', 'prison time']);
# -
# **One of the other features is giving it away because the model should be misclassifying the minor class since the classes are very unbalanced. More EDA will be done to identify these features. The cm_plot basically states the model is predicting the minority class very well.**
# ## ```More Modeling```
# ---
# 1. After some EDA was done on the dataset we discovered that the feature 'suprdum' is 0.74 correlated to the target variable 'prisdum'. Modeling excluding this feature will be done to see how the model performs. We're expecting the models to be worse without this feature.
# +
# model with UN_balanced classes and removal of highly correlated features

#set up X/y
X = drugs.drop(columns=['prisdum','suprdum'])
y = drugs['prisdum']

# set train/test split
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=42, train_size=0.7, stratify=y)

# scale data
ss = StandardScaler()
X_train_ss = ss.fit_transform(X_train)
# BUG FIX: fit the scaler on the training data only; refitting on the test
# set leaks test statistics and scales the two splits inconsistently.
X_test_ss = ss.transform(X_test)

# train multiple models
unbalanced_class_scores_1 = classify(X_train_ss,X_test_ss,y_train,y_test)
# -

unbalanced_class_scores_1
# +
# import model
from sklearn.linear_model import LogisticRegression
# instantiate, train, evaluate
logreg = LogisticRegression()
logreg.fit(X_train_ss,y_train)
print(f"train acc: {logreg.score(X_train_ss,y_train)}")
print(f"test acc: {logreg.score(X_test_ss,y_test)}")
# create df to visualize coefs
logreg_scores = pd.DataFrame(columns=X.columns,data=logreg.coef_).T
# +
# set up confusion matrix
cm = confusion_matrix(y_test,logreg.predict(X_test_ss))
# plot matrix
plot_confusion_matrix(logreg, X_test_ss, y_test, cmap='BuGn', display_labels=['no prison time', 'prison time']);
# -
# **I've removed highly correlated features and the model continues to predict very well despite classes being unbalanced. Further EDA needs to be done to check what's going on. Again, we're expecting the minority class to be misclassified at a high rate.**
# ## ```Notebook Conclusion```
# ---
# 1. The models trained with balanced classes performed as expected. Great score across the board.
#
# 2. The models trained with UNbalanced classes also performed well, which is not what we expected. Further EDA needs to be done to find out why this model is classifying so well. Continue to the next modeling notebook -- **modeling_1**.
| code/03_Modeling_logreg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial on Python for scientific computing
#
# > <NAME>
#
# > <NAME>
#
# > Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/))
# > Federal University of ABC, Brazil
# This will be a very brief tutorial on Python.
# For a complete (and much better) tutorial about Python see [A Whirlwind Tour of Python](https://github.com/jakevdp/WhirlwindTourOfPython) and [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) for a specific tutorial about Python for scientific computing.
#
# To use Python for scientific computing we need the Python program itself with its main modules and specific packages for scientific computing. [See this notebook on how to install Python for scientific computing](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/PythonInstallation.ipynb).
# Once you get Python and the necessary packages for scientific computing ready to work, there are different ways to run Python, the main ones are:
#
# - open a terminal window in your computer and type `python` or `ipython` that the Python interpreter will start
# - run the `Jupyter notebook` and start working with Python in a browser
# - run `Spyder`, an interactive development environment (IDE)
# - run the `Jupyter qtconsole`, a more featured terminal
# - run Python online in a website such as [https://www.pythonanywhere.com/](https://www.pythonanywhere.com/) or [Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb)
# - run Python using any other Python editor or IDE
#
# We will use the Jupyter Notebook for this tutorial but you can run almost all the things we will see here using the other forms listed above.
# ## Python as a calculator
#
# Once in the Jupyter notebook, if you type a simple mathematical expression and press `Shift+Enter` it will give the result of the expression:
1 + 2 - 25
4/7
# Using the `print` function, let's explore the mathematical operations available in Python:
# + sl_translate="sl_none"
print('1+2 = ', 1+2, '\n', '4*5 = ', 4*5, '\n', '6/7 = ', 6/7, '\n', '8**2 = ', 8**2, sep='')
# -
# And if we want the square-root of a number:
sqrt(9)
# We get an error message saying that the `sqrt` function if not defined. This is because `sqrt` and other mathematical functions are available with the `math` module:
import math
math.sqrt(9)
from math import sqrt
sqrt(9)
# ## The import function
#
# We used the command '`import`' to be able to call certain functions. In Python functions are organized in modules and packages and they have to be imported in order to be used.
#
# A module is a file containing Python definitions (e.g., functions) and statements. Packages are a way of structuring Python’s module namespace by using “dotted module names”. For example, the module name A.B designates a submodule named B in a package named A. To be used, modules and packages have to be imported in Python with the import function.
#
# Namespace is a container for a set of identifiers (names), and allows the disambiguation of homonym identifiers residing in different namespaces. For example, with the command import math, we will have all the functions and statements defined in this module in the namespace '`math.`', for example, '`math.pi`' is the π constant and '`math.cos()`', the cosine function.
# By the way, to know which Python version you are running, we can use one of the following modules:
import sys
sys.version
# And if you are in an IPython session:
from IPython import sys_info
print(sys_info())
# The first option gives information about the Python version; the latter also includes the IPython version, operating system, etc.
# ## Object-oriented programming
#
# Python is designed as an object-oriented programming (OOP) language. OOP is a paradigm that represents concepts as "objects" that have data fields (attributes that describe the object) and associated procedures known as methods.
#
# This means that all elements in Python are objects and they have attributes which can be accessed with the dot (.) operator after the name of the object. We already experimented with that when we imported the module `sys`, it became an object, and we accessed one of its attributes: `sys.version`.
#
# OOP as a paradigm is much more than defining objects, attributes, and methods, but for now this is enough to get going with Python.
# ## Python and IPython help
#
# To get help about any Python command, use `help()`:
help(math.degrees)
# Or if you are in the IPython environment, simply add '?' to the function that a window will open at the bottom of your browser with the same help content:
# +
# math.degrees?
# -
# And if you add a second '?' to the statement you get access to the original script file of the function (an advantage of an open source language), unless that function is a built-in function that does not have a script file, which is the case of the standard modules in Python (but you can access the Python source code if you want; it just does not come with the standard program for installation).
#
# So, let's see this feature with another function:
# +
import scipy.fftpack
# scipy.fftpack.fft??
# -
# To know all the attributes of an object, for example all the functions available in `math`, we can use the function `dir`:
print(dir(math))
# ### Tab completion in IPython
#
# IPython has tab completion: start typing the name of the command (object) and press `tab` to see the names of objects available with these initials letters. When the name of the object is typed followed by a dot (`math.`), pressing `tab` will show all available attribites, scroll down to the desired attribute and press `Enter` to select it.
# ### The four most helpful commands in IPython
#
# These are the most helpful commands in IPython (from [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html)):
#
# - `?` : Introduction and overview of IPython’s features.
# - `%quickref` : Quick reference.
# - `help` : Python’s own help system.
# - `object?` : Details about ‘object’, use ‘object??’ for extra details.
# ### Comments
#
# Comments in Python start with the hash character, #, and extend to the end of the physical line:
# Import the math library to access more math stuff
import math
math.pi # this is the pi constant; a useless comment since this is obvious
# To insert comments spanning more than one line, use a multi-line string with a pair of matching triple-quotes: `"""` or `'''` (we will see the string data type later). A typical use of a multi-line comment is as documentation strings and are meant for anyone reading the code:
"""Documentation strings are typically written like that.
A docstring is a string literal that occurs as the first statement
in a module, function, class, or method definition.
"""
# A docstring like above is useless and its output as a standalone statement looks ugly in IPython Notebook, but you will see its real importance when reading and writing code.
#
# Commenting a programming code is an important step to make the code more readable, which Python cares a lot.
# There is a style guide for writting Python code ([PEP 8](https://www.python.org/dev/peps/pep-0008/)) with a session about [how to write comments](https://www.python.org/dev/peps/pep-0008/#comments).
# ### Magic functions
#
# IPython has a set of predefined ‘magic functions’ that you can call with a command line style syntax.
# There are two kinds of magics, line-oriented and cell-oriented.
# Line magics are prefixed with the % character and work much like OS command-line calls: they get as an argument the rest of the line, where arguments are passed without parentheses or quotes.
# Cell magics are prefixed with a double %%, and they are functions that get as an argument not only the rest of the line, but also the lines below it in a separate argument.
# ## Assignment and expressions
#
# The equal sign ('=') is used to assign a value to a variable. Afterwards, no result is displayed before the next interactive prompt:
x = 1
# Spaces between the statements are optional but it helps for readability.
#
# To see the value of the variable, call it again or use the print function:
x
print(x)
# Of course, the last assignment is that holds:
x = 2
x = 3
x
# In mathematics '=' is the symbol for identity, but in computer programming '=' is used for assignment, it means that the right part of the expresssion is assigned to its left part.
# For example, 'x=x+1' does not make sense in mathematics but it does in computer programming:
x = x + 1
print(x)
# A value can be assigned to several variables simultaneously:
x = y = 4
print(x)
print(y)
# Several values can be assigned to several variables at once:
x, y = 5, 6
print(x)
print(y)
# And with that, you can do (!):
x, y = y, x
print(x)
print(y)
# Variables must be “defined” (assigned a value) before they can be used, or an error will occur:
x = z
# ## Variables and types
#
# There are different types of built-in objects in Python (and remember that everything in Python is an object):
import types
print(dir(types))
# Let's see some of them now.
# ### Numbers: int, float, complex
#
# Numbers can be integers (int), floats, or complex numbers (with an imaginary part).
# Let's use the function `type` to show the type of number (and later for any other object):
type(6)
# A float is a non-integer number:
math.pi
type(math.pi)
# Python (IPython) is showing `math.pi` with only 15 decimal cases, but internally a float is represented with higher precision.
# Floating point numbers in Python are implemented using a double (eight bytes) word; the precison and internal representation of floating point numbers are machine specific and are available in:
sys.float_info
# Be aware that floating-point numbers can be tricky in computers:
0.1 + 0.2
0.1 + 0.2 - 0.3
# These results are not correct (and the problem is not due to Python). The error arises from the fact that floating-point numbers are represented in computer hardware as base 2 (binary) fractions and most decimal fractions cannot be represented exactly as binary fractions. As consequence, decimal floating-point numbers are only approximated by the binary floating-point numbers actually stored in the machine. [See here for more on this issue](http://docs.python.org/2/tutorial/floatingpoint.html).
# A complex number has real and imaginary parts:
1+2j
print(type(1+2j))
# Each part of a complex number is represented as a floating-point number. We can see them using the attributes `.real` and `.imag`:
print((1+2j).real)
print((1+2j).imag)
# ### Strings
#
# Strings can be enclosed in single quotes or double quotes:
s = 'string (str) is a built-in type in Python'
s
type(s)
# String enclosed with single and double quotes are equal, but it may be easier to use one instead of the other:
'string (str) is a Python's built-in type'
"string (str) is a Python's built-in type"
# But you could have done that using the Python escape character '\':
'string (str) is a Python\'s built-in type'
# Strings can be concatenated (glued together) with the + operator, and repeated with *:
s = 'P' + 'y' + 't' + 'h' + 'o' + 'n'
print(s)
print(s*5)
# Strings can be subscripted (indexed); like in C, the first character of a string has subscript (index) 0:
print('s[0] = ', s[0], ' (s[index], start at 0)')
print('s[5] = ', s[5])
print('s[-1] = ', s[-1], ' (last element)')
print('s[:] = ', s[:], ' (all elements)')
print('s[1:] = ', s[1:], ' (from this index (inclusive) till the last (inclusive))')
print('s[2:4] = ', s[2:4], ' (from first index (inclusive) till second index (exclusive))')
print('s[:2] = ', s[:2], ' (till this index, exclusive)')
print('s[:10] = ', s[:10], ' (Python handles the index if it is larger than the string length)')
print('s[-10:] = ', s[-10:])
print('s[0:5:2] = ', s[0:5:2], ' (s[ini:end:step])')
print('s[::2] = ', s[::2], ' (s[::step], initial and final indexes can be omitted)')
print('s[0:5:-1] = ', s[::-1], ' (s[::-step] reverses the string)')
print('s[:2] + s[2:] = ', s[:2] + s[2:], ' (because of Python indexing, this sounds natural)')
# ### len()
#
# Python has a built-in function to get the number of items of a sequence:
help(len)
s = 'Python'
len(s)
# The function len() helps to understand how the backward indexing works in Python.
# The index s[-i] should be understood as s[len(s) - i] rather than accessing directly the i-th element from back to front. This is why the last element of a string is s[-1]:
print('s = ', s)
print('len(s) = ', len(s))
print('len(s)-1 = ',len(s) - 1)
print('s[-1] = ', s[-1])
print('s[len(s) - 1] = ', s[len(s) - 1])
# Or, strings can be surrounded in a pair of matching triple-quotes: """ or '''. End of lines do not need to be escaped when using triple-quotes, but they will be included in the string. This is how we created a multi-line comment earlier:
"""Strings can be surrounded in a pair of matching triple-quotes: \""" or '''.
End of lines do not need to be escaped when using triple-quotes,
but they will be included in the string.
"""
# ### Lists
#
# Values can be grouped together using different types, one of them is list, which can be written as a list of comma-separated values between square brackets. List items need not all have the same type:
x = ['spam', 'eggs', 100, 1234]
x
# Lists can be indexed and the same indexing rules we saw for strings are applied:
x[0]
# The function len() works for lists:
len(x)
# ### Tuples
#
# A tuple consists of a number of values separated by commas, for instance:
t = ('spam', 'eggs', 100, 1234)
t
# The type tuple is why multiple assignments in a single line works; elements separated by commas (with or without surrounding parentheses) are a tuple and in an expression with an '=', the right-side tuple is attributed to the left-side tuple:
a, b = 1, 2
print('a = ', a, '\nb = ', b)
# Is the same as:
(a, b) = (1, 2)
print('a = ', a, '\nb = ', b)
# ### Sets
#
# Python also includes a data type for sets. A set is an unordered collection with no duplicate elements.
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
fruit = set(basket) # create a set without duplicates
fruit
# As set is an unordered collection, it can not be indexed as lists and tuples.
set(['orange', 'pear', 'apple', 'banana'])
'orange' in fruit # fast membership testing
# ### Dictionaries
#
# Dictionary is a collection of elements organized keys and values. Unlike lists and tuples, which are indexed by a range of numbers, dictionaries are indexed by their keys:
tel = {'jack': 4098, 'sape': 4139}
tel
tel['guido'] = 4127
tel
tel['jack']
del tel['sape']
tel['irv'] = 4127
tel
tel.keys()
'guido' in tel
# The dict() constructor builds dictionaries directly from sequences of key-value pairs:
tel = dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
tel
# ## Built-in Constants
#
# - **False** : false value of the bool type
# - **True** : true value of the bool type
# - **None** : sole value of types.NoneType. None is frequently used to represent the absence of a value.
# In computer science, the Boolean or logical data type is composed by two values, true and false, intended to represent the values of logic and Boolean algebra. In Python, 1 and 0 can also be used in most situations as equivalent to the Boolean values.
# ## Logical (Boolean) operators
# ### and, or, not
# - **and** : logical AND operator. If both the operands are true then condition becomes true. (a and b) is true.
# - **or** : logical OR Operator. If any of the two operands are non zero then condition becomes true. (a or b) is true.
# - **not** : logical NOT Operator. Reverses the logical state of its operand. If a condition is true then logical NOT operator will make false.
# ### Comparisons
#
# The following comparison operations are supported by objects in Python:
#
# - **==** : equal
# - **!=** : not equal
# - **<** : strictly less than
# - **<=** : less than or equal
# - **\>** : strictly greater than
# - **\>=** : greater than or equal
# - **is** : object identity
# - **is not** : negated object identity
True == False
not True == False
1 < 2 > 1
True != (False or True)
True != False or True
# ## Indentation and whitespace
#
# In Python, statement grouping is done by indentation (this is mandatory), which are done by inserting whitespaces, not tabs. Indentation is also recommended for alignment of function calling that span more than one line for better clarity.
# We will see examples of indentation in the next session.
# ## Control of flow
#
# ### `if`...`elif`...`else`
#
# Conditional statements (to perform something if another thing is True or False) can be implemented using the `if` statement:
# ```
# if expression:
# statement
# elif:
# statement
# else:
# statement
# ```
# `elif` (one or more) and `else` are optionals.
# The indentation is obligatory.
# For example:
if True:
pass
# Which does nothing useful.
#
# Let's use the `if`...`elif`...`else` statements to categorize the [body mass index](http://en.wikipedia.org/wiki/Body_mass_index) of a person:
# body mass index
weight = 100  # kg
height = 1.70  # m
bmi = weight / height**2
# +
# Walk the ascending upper bounds; the first bound that exceeds bmi names the
# category.  The for/else falls through to the open-ended top category.
thresholds = [
    (15, 'very severely underweight'),
    (16, 'severely underweight'),
    (18.5, 'underweight'),
    (25, 'normal'),
    (30, 'overweight'),
    (35, 'moderately obese'),
    (40, 'severely obese'),
]
for upper, label in thresholds:
    if bmi < upper:
        c = label
        break
else:
    c = 'very severely obese'

print('For a weight of {0:.1f} kg and a height of {1:.2f} m,\nthe body mass '
      'index (bmi) is {2:.1f} kg/m2,\nwhich is considered {3:s}.'
      .format(weight, height, bmi, c))
# -
# ### for
#
# The `for` statement iterates over a sequence to perform operations (a loop event).
# ```
# for iterating_var in sequence:
# statements
# ```
for i in [3, 2, 1, 'go!']:
print(i, end=', ')
for letter in 'Python':
print(letter),
# #### The `range()` function
#
# The built-in function range() is useful if we need to create a sequence of numbers, for example, to iterate over this list. It generates lists containing arithmetic progressions:
help(range)
range(10)
range(1, 10, 2)
for i in range(10):
n2 = i**2
print(n2),
# ### while
#
# The `while` statement is used for repeating sections of code in a loop until a condition is met (this different than the `for` statement which executes n times):
# ```
# while expression:
# statement
# ```
# Let's generate the Fibonacci series using a `while` loop:
# Fibonacci series: the sum of two elements defines the next
a, b = 0, 1
while b < 1000:
print(b, end=' ')
a, b = b, a+b
# ## Function definition
#
# A function in a programming language is a piece of code that performs a specific task. Functions are used to reduce duplication of code, making it easier to reuse, and to decompose complex problems into simpler parts. The use of functions contributes to the clarity of the code.
#
# A function is created with the `def` keyword and the statements in the block of the function must be indented:
def function():
    """Do nothing; a minimal valid function definition (implicitly returns None)."""
# As per construction, this function does nothing when called:
function()
# The general syntax of a function definition is:
# ```
# def function_name( parameters ):
# """Function docstring.
#
# The help for the function
#
# """
#
# function body
#
# return variables
# ```
# A more useful function:
def fibo(N):
    """Fibonacci series: the sum of two elements defines the next.

    Compute the series of Fibonacci numbers strictly below the input
    parameter N and return it as a list.
    """
    series = []
    prev, curr = 0, 1
    while curr < N:
        series.append(curr)
        prev, curr = curr, prev + curr
    return series
fibo(100)
if 3 > 2:
print('teste')
# Let's implement the body mass index calculation and categorization as a function:
def bmi(weight, height):
    """Body mass index (BMI) calculation and categorization.

    Computes BMI = weight / height**2 and prints it together with its
    category (ranges from 'very severely underweight' to 'very severely
    obese').  See http://en.wikipedia.org/wiki/Body_mass_index

    Parameters
    ----------
    weight : float
        Body weight in kg.
    height : float
        Body height in m.
    """
    bmi = weight / height**2
    # Ascending, mutually exclusive ranges: exactly one branch fires.
    if bmi < 15:
        c = 'very severely underweight'
    elif 15 <= bmi < 16:
        c = 'severely underweight'
    elif 16 <= bmi < 18.5:
        c = 'underweight'
    elif 18.5 <= bmi < 25:
        c = 'normal'
    elif 25 <= bmi < 30:
        c = 'overweight'
    elif 30 <= bmi < 35:
        c = 'moderately obese'
    elif 35 <= bmi < 40:
        c = 'severely obese'
    else:
        c = 'very severely obese'
    # Parenthesized adjacent string literals instead of backslash
    # continuations: the original's continuation lines embedded the
    # function-body indentation whitespace into the printed message.
    s = ('For a weight of {0:.1f} kg and a height of {1:.2f} m, '
         'the body mass index (bmi) is {2:.1f} kg/m2, '
         'which is considered {3:s}.').format(weight, height, bmi, c)
    print(s)
bmi(73, 1.70)
# ## Numeric data manipulation with Numpy
#
# Numpy is the fundamental package for scientific computing in Python and has a N-dimensional array package convenient to work with numerical data. With Numpy it's much easier and faster to work with numbers grouped as 1-D arrays (a vector), 2-D arrays (like a table or matrix), or higher dimensions. Let's create 1-D and 2-D arrays in Numpy:
# + run_control={"breakpoint": false}
import numpy as np
# + run_control={"breakpoint": false}
x1d = np.array([1, 2, 3, 4, 5, 6])
print(type(x1d))
x1d
# + run_control={"breakpoint": false}
x2d = np.array([[1, 2, 3], [4, 5, 6]])
x2d
# -
# len() and the Numpy functions size() and shape() give information aboout the number of elements and the structure of the Numpy array:
# + run_control={"breakpoint": false}
print('1-d array:')
print(x1d)
print('len(x1d) = ', len(x1d))
print('np.size(x1d) = ', np.size(x1d))
print('np.shape(x1d) = ', np.shape(x1d))
print('np.ndim(x1d) = ', np.ndim(x1d))
print('\n2-d array:')
print(x2d)
print('len(x2d) = ', len(x2d))
print('np.size(x2d) = ', np.size(x2d))
print('np.shape(x2d) = ', np.shape(x2d))
print('np.ndim(x2d) = ', np.ndim(x2d))
# -
# Create random data
# + run_control={"breakpoint": false}
x = np.random.randn(4,3)
x
# -
# Joining (stacking together) arrays
# + run_control={"breakpoint": false}
x = np.random.randint(0, 5, size=(2, 3))
print(x)
y = np.random.randint(5, 10, size=(2, 3))
print(y)
# + run_control={"breakpoint": false}
np.vstack((x,y))
# + run_control={"breakpoint": false}
np.hstack((x,y))
# -
# Create equally spaced data
# + run_control={"breakpoint": false}
np.arange(start = 1, stop = 10, step = 2)
# + run_control={"breakpoint": false}
np.linspace(start = 0, stop = 1, num = 11)
# -
# ### Interpolation
#
# Consider the following data:
# + run_control={"breakpoint": false}
y = [5, 4, 10, 8, 1, 10, 2, 7, 1, 3]
# -
# Suppose we want to create data in between the given data points (interpolation); for instance, let's try to double the resolution of the data by generating twice as many data:
# + run_control={"breakpoint": false}
t = np.linspace(0, len(y), len(y)) # time vector for the original data
tn = np.linspace(0, len(y), 2 * len(y)) # new time vector for the new time-normalized data
yn = np.interp(tn, t, y) # new time-normalized data
yn
# -
# The key is the Numpy `interp` function, from its help:
#
# interp(x, xp, fp, left=None, right=None)
# One-dimensional linear interpolation.
# Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points.
#
# A plot of the data will show what we have done:
# + run_control={"breakpoint": false}
# %matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(10,5))
plt.plot(t, y, 'bo-', lw=2, label='original data')
plt.plot(tn, yn, '.-', color=[1, 0, 0, .5], lw=2, label='interpolated')
plt.legend(loc='best', framealpha=.5)
plt.show()
# -
# For more about Numpy, see [http://www.numpy.org/](http://www.numpy.org/).
# ## Read and save files
#
# There are two kinds of computer files: text files and binary files:
# > Text file: computer file where the content is structured as a sequence of lines of electronic text. Text files can contain plain text (letters, numbers, and symbols) but they are not limited to such. The type of content in the text file is defined by the Unicode encoding (a computing industry standard for the consistent encoding, representation and handling of text expressed in most of the world's writing systems).
# >
# > Binary file: computer file where the content is encoded in binary form, a sequence of integers representing byte values.
#
# Let's see how to save and read numeric data stored in a text file:
#
# **Using plain Python**
# + run_control={"breakpoint": false}
f = open("newfile.txt", "w") # open file for writing
f.write("This is a test\n") # save to file
f.write("And here is another line\n") # save to file
f.close()
f = open('newfile.txt', 'r') # open file for reading
f = f.read() # read from file
print(f)
# + run_control={"breakpoint": false}
help(open)
# -
# **Using Numpy**
# + run_control={"breakpoint": false}
import numpy as np
data = np.random.randn(3,3)
np.savetxt('myfile.txt', data, fmt="%12.6G") # save to file
data = np.genfromtxt('myfile.txt', unpack=True) # read from file
data
# -
# ## Ploting with matplotlib
#
# Matplotlib is the most-widely used packge for plotting data in Python. Let's see some examples of it.
# + run_control={"breakpoint": false}
import matplotlib.pyplot as plt
# -
# Use the IPython magic `%matplotlib inline` to plot a figure inline in the notebook with the rest of the text:
# + run_control={"breakpoint": false}
# %matplotlib inline
# -
import numpy as np
# + run_control={"breakpoint": false}
t = np.linspace(0, 0.99, 100)
x = np.sin(2 * np.pi * 2 * t)
n = np.random.randn(100) / 5
plt.Figure(figsize=(12,8))
plt.plot(t, x, label='sine', linewidth=2)
plt.plot(t, x + n, label='noisy sine', linewidth=2)
plt.annotate(s='$sin(4 \pi t)$', xy=(.2, 1), fontsize=20, color=[0, 0, 1])
plt.legend(loc='best', framealpha=.5)
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Data plotting using matplotlib')
plt.show()
# -
# Use the IPython magic `%matplotlib qt` to plot a figure in a separate window (from where you will be able to change some of the figure proprerties):
# + run_control={"breakpoint": false}
# %matplotlib qt
# + run_control={"breakpoint": false}
mu, sigma = 10, 2
x = mu + sigma * np.random.randn(1000)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(x, 'ro')
ax1.set_title('Data')
ax1.grid()
n, bins, patches = ax2.hist(x, 25, normed=True, facecolor='r') # histogram
ax2.set_xlabel('Bins')
ax2.set_ylabel('Probability')
ax2.set_title('Histogram')
fig.suptitle('Another example using matplotlib', fontsize=18, y=1)
ax2.grid()
plt.tight_layout()
plt.show()
# -
# And a window with the following figure should appear:
# + run_control={"breakpoint": false}
from IPython.display import Image
Image(url="./../images/plot.png")
# -
# You can switch back and forth between inline and separate figure using the `%matplotlib` magic commands used above. There are plenty more examples with the source code in the [matplotlib gallery](http://matplotlib.org/gallery.html).
# + run_control={"breakpoint": false}
# get back the inline plot
# %matplotlib inline
# -
# ## Signal processing with Scipy
#
# The Scipy package has a lot of functions for signal processing, among them: Integration (scipy.integrate), Optimization (scipy.optimize), Interpolation (scipy.interpolate), Fourier Transforms (scipy.fftpack), Signal Processing (scipy.signal), Linear Algebra (scipy.linalg), and Statistics (scipy.stats). As an example, let's see how to use a low-pass Butterworth filter to attenuate high-frequency noise and how the differentiation process of a signal affects the signal-to-noise content. We will also calculate the Fourier transform of these data to look at their frequencies content.
# + run_control={"breakpoint": false}
from scipy.signal import butter, filtfilt
import scipy.fftpack
freq = 100.  # sampling frequency [Hz]
t = np.arange(0,1,.01);  # 1 s of data, dt = 0.01 s -> 100 samples
w = 2*np.pi*1 # 1 Hz
# Signal: a 1 Hz sine plus a smaller-amplitude 10 Hz component acting as noise.
y = np.sin(w*t)+0.1*np.sin(10*w*t)
# Butterworth filter
# 4th-order low-pass with 5 Hz cutoff; butter expects the cutoff normalized
# by the Nyquist frequency (freq/2).
b, a = butter(4, (5/(freq/2)), btype = 'low')
y2 = filtfilt(b, a, y)  # filtfilt applies the filter forward and backward (zero phase lag)
# 2nd derivative of the data
# np.diff(y, 2) is the 2nd finite difference; multiply by freq**2 (= 1/dt**2)
# to scale it to a derivative with respect to time.
ydd = np.diff(y,2)*freq*freq # raw data
y2dd = np.diff(y2,2)*freq*freq # filtered data
# frequency content
# FFT magnitudes divided by half the signal length to approximate amplitudes.
yfft = np.abs(scipy.fftpack.fft(y))/(y.size/2); # raw data
y2fft = np.abs(scipy.fftpack.fft(y2))/(y.size/2); # filtered data
freqs = scipy.fftpack.fftfreq(y.size, 1./freq)
yddfft = np.abs(scipy.fftpack.fft(ydd))/(ydd.size/2);
y2ddfft = np.abs(scipy.fftpack.fft(y2dd))/(ydd.size/2);
freqs2 = scipy.fftpack.fftfreq(ydd.size, 1./freq)
# -
# And the plots:
# + run_control={"breakpoint": false}
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2, 2, figsize=(12, 6))
ax1.set_title('Temporal domain', fontsize=14)
ax1.plot(t, y, 'r', linewidth=2, label = 'raw data')
ax1.plot(t, y2, 'b', linewidth=2, label = 'filtered @ 5 Hz')
ax1.set_ylabel('f')
ax1.legend(frameon=False, fontsize=12)
ax2.set_title('Frequency domain', fontsize=14)
ax2.plot(freqs[:int(yfft.size/4)], yfft[:int(yfft.size/4)],'r', lw=2,label='raw data')
ax2.plot(freqs[:int(yfft.size/4)],y2fft[:int(yfft.size/4)],'b--',lw=2,label='filtered @ 5 Hz')
ax2.set_ylabel('FFT(f)')
ax2.legend(frameon=False, fontsize=12)
ax3.plot(t[:-2], ydd, 'r', linewidth=2, label = 'raw')
ax3.plot(t[:-2], y2dd, 'b', linewidth=2, label = 'filtered @ 5 Hz')
ax3.set_xlabel('Time [s]'); ax3.set_ylabel("f ''")
ax4.plot(freqs[:int(yddfft.size/4)], yddfft[:int(yddfft.size/4)], 'r', lw=2, label = 'raw')
ax4.plot(freqs[:int(yddfft.size/4)],y2ddfft[:int(yddfft.size/4)],'b--',lw=2, label='filtered @ 5 Hz')
ax4.set_xlabel('Frequency [Hz]'); ax4.set_ylabel("FFT(f '')")
plt.show()
# -
# For more about Scipy, see [https://docs.scipy.org/doc/scipy/reference/tutorial/](https://docs.scipy.org/doc/scipy/reference/tutorial/).
# ## Symbolic mathematics with Sympy
#
# Sympy is a package to perform symbolic mathematics in Python. Let's see some of its features:
# + run_control={"breakpoint": false}
from IPython.display import display
import sympy as sym
from sympy.interactive import printing
printing.init_printing()
# -
# Define some symbols and the create a second-order polynomial function (a.k.a., parabola):
# + run_control={"breakpoint": false}
x, y = sym.symbols('x y')
y = x**2 - 2*x - 3
y
# -
# Plot the parabola at some given range:
# + run_control={"breakpoint": false}
from sympy.plotting import plot
# %matplotlib inline
plot(y, (x, -3, 5));
# -
# And the roots of the parabola are given by:
# + run_control={"breakpoint": false}
sym.solve(y, x)
# -
# We can also do symbolic differentiation and integration:
# + run_control={"breakpoint": false}
dy = sym.diff(y, x)
dy
# + run_control={"breakpoint": false}
sym.integrate(dy, x)
# -
# For example, let's use Sympy to represent three-dimensional rotations. Consider the problem of a coordinate system xyz rotated in relation to other coordinate system XYZ. The single rotations around each axis are illustrated by:
# + run_control={"breakpoint": false}
from IPython.display import Image
Image(url="./../images/rotations.png")
# -
# The single 3D rotation matrices around Z, Y, and X axes can be expressed in Sympy:
# + run_control={"breakpoint": false}
from IPython.core.display import Math
from sympy import symbols, cos, sin, Matrix, latex
a, b, g = symbols('alpha beta gamma')
RX = Matrix([[1, 0, 0], [0, cos(a), -sin(a)], [0, sin(a), cos(a)]])
display(Math(latex('\\mathbf{R_{X}}=') + latex(RX, mat_str = 'matrix')))
RY = Matrix([[cos(b), 0, sin(b)], [0, 1, 0], [-sin(b), 0, cos(b)]])
display(Math(latex('\\mathbf{R_{Y}}=') + latex(RY, mat_str = 'matrix')))
RZ = Matrix([[cos(g), -sin(g), 0], [sin(g), cos(g), 0], [0, 0, 1]])
display(Math(latex('\\mathbf{R_{Z}}=') + latex(RZ, mat_str = 'matrix')))
# -
# And using Sympy, a sequence of elementary rotations around X, Y, Z axes is given by:
# + run_control={"breakpoint": false}
RXYZ = RZ*RY*RX
display(Math(latex('\\mathbf{R_{XYZ}}=') + latex(RXYZ, mat_str = 'matrix')))
# -
# Suppose there is a rotation only around X ($\alpha$) by $\pi/2$; we can get the numerical value of the rotation matrix by substituing the angle values:
# + run_control={"breakpoint": false}
r = RXYZ.subs({a: np.pi/2, b: 0, g: 0})
r
# -
# And we can prettify this result:
# + run_control={"breakpoint": false}
display(Math(latex(r'\mathbf{R_{(\alpha=\pi/2)}}=') +
latex(r.n(chop=True, prec=3), mat_str = 'matrix')))
# -
# For more about Sympy, see [http://docs.sympy.org/latest/tutorial/](http://docs.sympy.org/latest/tutorial/).
# ## Data analysis with pandas
#
# > "[pandas](http://pandas.pydata.org/) is a Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, real world data analysis in Python."
#
# To work with labelled data, pandas has a type called DataFrame (basically, a matrix where columns and rows may have names and may be of different types) and it is also the main type of the software [R](http://www.r-project.org/). For example:
# + run_control={"breakpoint": false}
import pandas as pd
# + run_control={"breakpoint": false}
x = 5*['A'] + 5*['B']
x
# + run_control={"breakpoint": false}
df = pd.DataFrame(np.random.rand(10,2), columns=['Level 1', 'Level 2'] )
df['Group'] = pd.Series(['A']*5 + ['B']*5)
plot = df.boxplot(by='Group')
# + run_control={"breakpoint": false}
from pandas.plotting import scatter_matrix
df = pd.DataFrame(np.random.randn(100, 3), columns=['A', 'B', 'C'])
plot = scatter_matrix(df, alpha=0.5, figsize=(8, 6), diagonal='kde')
# -
# pandas is aware that the data is structured and gives you basic statistics considering that, nicely formatted:
# + run_control={"breakpoint": false}
df.describe()
# -
# For more on pandas, see this tutorial: [http://pandas.pydata.org/pandas-docs/stable/10min.html](http://pandas.pydata.org/pandas-docs/stable/10min.html).
# ## To learn more about Python
#
# There is a lot of good material in the internet about Python for scientific computing, here is a small list of interesting stuff:
#
# - [How To Think Like A Computer Scientist](http://www.openbookproject.net/thinkcs/python/english2e/) or [the interactive edition](http://interactivepython.org/courselib/static/thinkcspy/index.html) (book)
# - [Python Scientific Lecture Notes](http://scipy-lectures.github.io/) (lecture notes)
# - [A Whirlwind Tour of Python](https://github.com/jakevdp/WhirlwindTourOfPython) (tutorial/book)
# - [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) (tutorial/book)
# - [Lectures on scientific computing with Python](https://github.com/jrjohansson/scientific-python-lectures#lectures-on-scientific-computing-with-python) (lecture notes)
| notebooks/PythonTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from boardlaw.sql import *
# +
# One-off sync of a single training run's output folder from a Backblaze B2
# bucket down to local storage.
from boardlaw import backup
import sys
import time
run = '2021-03-26 15-30-17 harsh-wait'  # run ID: timestamp plus codename
dest = 'local_storage'  # NOTE(review): unused below — presumably the intended destination; confirm
bucket = 'boardlaw'
api = backup.api(bucket)  # B2 API handle built by the project's backup helper
# NOTE(review): 'b2' is never imported in this notebook — it looks like the
# b2sdk sync module (or a re-export from boardlaw.backup); running this as-is
# raises NameError.  Confirm the intended import.
syncer = b2.Synchronizer(4)  # 4 sync worker threads
with b2.SyncReport(sys.stdout, False) as reporter:
    syncer.sync_folders(
        source_folder=b2.parse_sync_folder(f'b2://boardlaw/output/pavlov/{run}', api),
        dest_folder=b2.parse_sync_folder(f'{run}', api),  # NOTE(review): syncs into ./{run}, not into `dest` — confirm
        now_millis=int(round(time.time() * 1000)),
        reporter=reporter)
# -
mk
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Projector
# $Def$
#
# If a square matrix $P \in \mathbb{R}^{m\times m}$ satisfies $P^2 = P$, then we call $P$ a **projector**. **Range** of $A$, the set of images, $i.e.$, *the space spanned by the columns of matrix $A$*. **Null** of $A$, all vectors whose images are zero, $i.e.$, all $\vec{x}$ satisfying $A\vec{x} = \vec{0}$.
#
# $Theorem$ **1**
#
# For ***any*** matrix $A\in \mathbb{R}^{m \times m}$, $\DeclareMathOperator*{\null}{null} \DeclareMathOperator*{\range}{range} \boxed{ \null\left(A\right) \subseteq \range\left(I-A\right) }$.
#
# $Proof$
#
# If $\vec{x} \in \null \left(A\right)$, then $A\vec{x} = \vec{0}$. Then $\vec{x} = \vec{x} - A\vec{x} = \left(I - A \right) \vec{x} \in \range\left(I-A\right)$
#
# $Theorem$ **2**
#
# Let $P \in \mathbb{R}^{m \times m}$. Then $\boxed{ P^2 = P}$ is *equivalent* to $\boxed{ \null(P) = \range(I - P)}$
#
# $Proof$
#
# $\Rightarrow)$ Since we already have $\null (P) \subseteq \range(I-P)$ from *Theorem 1*, now we prove that $\null(P) \supseteq \range(I-P)$. So for $\vec{x} \in \range(I-P)$, there exists a $\vec{y}$ such that $\vec{x} = (I-P)\vec{y}$, so that $P\vec{x} = P(I-P)\vec{y} = (P - P^2)\vec{y} = \vec{0}$, so $\vec{x} \in \null(P)$. Half Done!
#
# $\Leftarrow)$ Since now $\forall \vec{x} \in \mathbb{R}^{m}$, $(I-P)\vec{x} \in \range(I - P) = \null(P)$, so that $P\big( (I-P)\vec{x} \big) = (P - P^2)\vec{x} = \vec{0}$, that is to say that in terms of map, $P$ and $P^2$ are equivalent, $i.e.$, $P = P^2$. ALL DONE! $\square$
#
# In general, any projector $P \in \mathbb{R}^{ m \times m}$ maps $\mathbb{R}^{ m}$ onto its **range**. For any vector $\vec{v}$ in the range of $P$, $P\vec{v} = \vec{v}$. For any vector $\vec{v}$ *not* in the **range** of $P$, the difference $\vec{v} − P\vec{v}$ is in the **null** of $P$.
#
# $Def$
#
# **orthogonal projector** and **oblique projector**: Let $P \in \mathbb{R}^{ m \times m}$ be a projector. If $\range(I-P) \perp \range(P)$, $i.e.$, $\null(P) \perp \range(P)$, then it is an orthogonal projector, otherwise oblique projector.
#
# >**e.g.** $B = \left[ \begin{array}{ccc}
# 1 & 1\\
# 0 & 0
# \end{array}\right]$
# >
# >Now $B^2 = \left[ \begin{array}{ccc}
# 1 & 1\\
# 0 & 0
# \end{array}\right] = B$, so it is a projector. And its **range** is the space spanned by $\left[ \begin{array}{c}
# 1\\
# 0
# \end{array}\right]$, and its **null** is spanned by $\left[ \begin{array}{c}
# 1\\
# -1
# \end{array}\right]$.
# >
# >And obviously $\range(B) \not \perp \null(B)$
#
# How to determine whether it is oblique or orthogonal projector?
#
# $Theorem$ **3**
#
# Let $P \in \mathbb{R}^{ m \times m}$ be a projector. $P$ is an *orthogonal projector* $iff$ $\boxed{ P = P^{\mathrm{T}} }$.
#
# $Proof$
#
# $\Rightarrow)$ Denote the dimension of $\range(P)$ by $r$, so that the dimension of $\null(P)$ is $m-r$. Denote the basis of $\range(P)$ by $\vec{q}_{1}, \vec{q}_{2},\dots,\vec{q}_{r}$, and the basis of $\null(P)$ by $\vec{q}_{r+1}, \vec{q}_{r+2},\dots,\vec{q}_{m}$. Since $P$ is orthogonal projector, $\{ \vec{q}_{1}, \vec{q}_{2},\dots,\vec{q}_{r} \}$ are orthogonal to $\{ \vec{q}_{r+1}, \vec{q}_{r+2},\dots,\vec{q}_{m} \}$.
#
# Let $Q = \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_m
# \end{array}\right]$, so that $Q$ is an orthogonal matrix, $Q^{\mathrm{T}}Q = I$. Then we have
#
# $$\begin{align}
# Q^{\mathrm{T}}PQ =& \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_m
# \end{array}\right]^{\mathrm{T}} P \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_m
# \end{array}\right] \\
# =& \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_m
# \end{array}\right]^{\mathrm{T}} \left[ \begin{array}{cccc}
# P\vec{q}_1 & P\vec{q}_2 & \cdots & P\vec{q}_m
# \end{array}\right] \\
# =& \left[ \begin{array}{ccccccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_r & \vec{q}_{r+1} & \cdots & \vec{q}_m
# \end{array}\right]^{\mathrm{T}} \left[ \begin{array}{ccccccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_r & 0 & \cdots & 0
# \end{array}\right] \\
# =& I_r
# \end{align}$$
#
# Therefore
#
# $$\begin{align}
# P =& QI_rQ^{\mathrm{T}} \\
# =& Q(I_rI_r)Q^{\mathrm{T}} = (QI_r)(I_rQ^{\mathrm{T}}) \\
# =& \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_r
# \end{array}\right] \left[ \begin{array}{cccc}
# \vec{q}_1^{\mathrm{T}} & \vec{q}_2^{\mathrm{T}} & \cdots & \vec{q}_r^{\mathrm{T}}
# \end{array}\right]^{\mathrm{T}} \\
# =& Q_r Q_r^{\mathrm{T}}
# \end{align}$$
#
# OK, now $P$ is symmetric.
#
# $Conclusion$
#
# If $P$ is orthogonal projector, then $P$ can be written as $\sum \limits_{i = 1}^{r} \vec{q}_i \vec{q}_{i} ^{ \mathrm{T}}$, where $\vec{q}_1,\vec{q}_2,\dots,\vec{q}_r$ are set of orthonormal basis vectors of $\range(P)$. And on the other hand, if $Q_r = \left[ \begin{array}{cccc}
# \vec{q}_1 & \vec{q}_2 & \cdots & \vec{q}_r
# \end{array}\right]$ contains a set of orthonormal vectors in $\mathbb{R}^{m}$, then the orthogonal projector from $\mathbb{R}^{m}$ to the range of $Q_r$ is $P = Q_rQ_r^{\mathrm{T}} = \sum \limits_{i = 1}^{r} \vec{q}_i \vec{q}_{i} ^{ \mathrm{T}}$.
#
# In general for any given vector $\vec{v} \in \mathbb{R}^{m}$, not necessarily a normalized vector, the orthogonal projector to the direction $\vec{v}$ is
#
# $$P_{\vec{v}} = \bigg(\frac{\vec{v}} {\|\vec{v}\|}\bigg)\bigg( \frac{\vec{v}} {\|\vec{v}\|}\bigg)^{\mathrm{T}} = \boxed{ \frac{\vec{v}\vec{v}^{\mathrm{T}}} {\|\vec{v}\|^2}}$$
#
# After that, given any vector $\vec{x} \in \mathbb{R}^{m}$, we have $P_{\vec{v}}\vec{x}$ is the orthogonal projection of $\vec{x}$ onto the direction $\vec{v}$ and the difference $\vec{x} − P_{\vec{v}}\vec{x}$ is perpendicular to $\vec{v}$.
#
# And more generally, given a matrix $W \in \mathbb{R}^{m \times n}$, assuming that $m \geq n$ and the columns of $W$ are linearly independent. The orthogonal projector from $\mathbb{R}^{m}$ to the column space (the range) of $W$ can be determined as follows, denoted as $P$.
# 1. $\vec{v}$ be any vector in $\mathbb{R}^{m}$ and $\vec{y} = P\vec{v} \in \range(W)$ is the image of $\vec{v}$ under this orthogonal projector.
# 2. Since the projector is orthogonal, the difference $\vec{v} − \vec{y}$ is orthogonal to $\range(W)$, $i.e.$, we have $W^{\mathrm{T}}(\vec{v} − \vec{y}) = \vec{0}$.
# 3. We also have $\vec{y} = W\vec{x}$, for some $\vec{x} \in \mathbb{R}^{n}$, so that $W^{\mathrm{T}}(\vec{v} − W\vec{x}) = \vec{0}$, $i.e.$, $\vec{x} = \big( W^{\mathrm{T}}W \big)^{-1}W^{\mathrm{T}}\vec{v}$. Since $W$ is of full column rank as assumed ahead, $\big( W^{\mathrm{T}}W \big)^{-1}$ exists.
# 4. $\vec{y} = P\vec{v} = W\vec{x} = W\big( W^{\mathrm{T}}W \big)^{-1}W^{\mathrm{T}}\vec{v} \Rightarrow \boxed{ P = W\big( W^{\mathrm{T}}W \big)^{-1}W^{\mathrm{T}}}$.
#
# $\dagger$ ***NO*** orthogonal matrix, except the identity matrix, is an orthogonal projector.$\ddagger$
# # Householder reflector
# To do Gaussian elimination. For any given vector $\vec{x}$, we want to find an operator (matrix) $F$ such that,
#
# $$\vec{x} = \left[ \begin{array}{c}
# x_1 \\
# x_2 \\
# \vdots \\
# x_n
# \end{array}\right] \overset{F}{\longrightarrow } F\vec{x} = \left[ \begin{array}{c}
# \pm \left\| \vec{x}\right\|_2 \\
# 0 \\
# \vdots \\
# 0
# \end{array}\right] = \pm \left\| \vec{x}\right\|_2 \vec{e}_1$$
#
# All are zeros below the first entry. Then how to determine matrix $F$? Assume that $\vec{x}$ is reflected to $F\vec{x} = \|\vec{x}\|_2 \vec{e}_1$, the positive direction of $x\text{-axis}$.
# 1. Define $\vec{v} = \vec{x} - F\vec{x} = \vec{x} - \|\vec{x}\|_2 \vec{e}_1$
# 2. Following above, the orthogonal projector onto the direction of $\vec{v}$ is $P_{\vec{v}} = \newcommand{\ffrac}{\displaystyle \frac} \ffrac{\vec{v}\vec{v}^{\mathrm{T}}} {\|\vec{v}\|_2^2}$, so that we can see that $\vec{v} = 2P_{\vec{v}}\vec{x}$
# 3. $F\vec{x} = \vec{x} - \vec{v} = (I - 2P_{\vec{v}})\vec{x}$, $i.e.$, $\boxed{ F = I - 2P_{\vec{v}} = I - 2 \displaystyle \frac{\vec{v}\vec{v}^{\mathrm{T}}} {\|\vec{v}\|_2^2}}$
#
# $Proof$
#
# Brief proof of the second point.
#
# To prove $2P_{\vec{v}}\vec{x} = 2\cdot\ffrac{\vec{v}\vec{v}^{\mathrm{T}}\vec{x}} {\|\vec{v}\|_2^2}= \vec{v}$, it equals to prove that $2\vec{v}^{\mathrm{T}}\vec{x} = \|\vec{v}\|_2^2$.
#
# $$2\vec{v}^{\mathrm{T}}\vec{x} = 2(\vec{x}^{\mathrm{T}} - \|\vec{x}\|_2 \vec{e}_1^{\mathrm{T}})\vec{x} = 2\|\vec{x}\|_2^2 - 2\|\vec{x}\|_2 \cdot x_1 \\
# \|\vec{v}\|_2^2 = x_1^2 -2\|\vec{x}\|_{2}x_{1} + \|\vec{x}\|_2^2 + x_2^2 + \cdots + x_n^2 = 2\|\vec{x}\|_2^2 - 2\|\vec{x}\|_2 \cdot x_1$$
#
# ALL DONE! $\square$
#
# ***
# Now we have found what we want, which reflects the vector $\vec{x}$ to the direction of $\vec{e}_1$ by multiplying matrix (operator) $F$. Besides we have
#
# $$F^{\mathrm{T}}F = (I - 2P_{\vec{v}})^{\mathrm{T}}(I - 2P_{\vec{v}}) = I^2 - 4P_{\vec{v}} + 4P_{\vec{v}}^2 = I - 4P_{\vec{v}} + 4P_{\vec{v}} = I$$
#
# So actually this *Householder reflector* is an *orthogonal matrix*.
#
# ## Which reflector
# For a given $\vec{x}$ we have
#
# | $$F\vec{x} = +\|\vec{x}\|_2\, \vec{e}_1$$ | $$F\vec{x} = -\|\vec{x}\|_2\, \vec{e}_1$$ |
# |:---------------------------------------------------------------------:|:---------------------------------------------------------------------:|
# |  |  |
#
# As mentioned before, subtracting two numbers which are *close* is an **ill-conditioned** problem. So that we prefer that $\vec{x}$ and $F\vec{x}$ are in opposite direction, $i.e.$
#
# $$\DeclareMathOperator*{\sign}{sign}
# F\vec{x} = \left[\begin{array}{c}
# -\sign(x_1)\|\vec{x}\|_2 \\
# 0 \\
# 0 \\
# \vdots \\
# 0
# \end{array} \right]
# = -\sign(x_1) \|\vec{x}\|_2\vec{e}_1 $$
#
# And then $\vec{v} = \vec{x} - F\vec{x} = \vec{x} + \sign(x_1)\|\vec{x}\|_2\vec{e}_1$, while $F$ keeps the same expression.
# # QR factorization by Householder reflectors
# Now we can use the Householder reflectors to reduce a matrix $A$ to its upper triangular form. Let $A$ be an $m\times n$ size matrix, assuming that $m \geq n$ and $\DeclareMathOperator*{\rank}{rank} \rank(A) = n$, a column full rank matrix.
#
# We got our first operator $F_1$ that can reflect the first column of $A$ to its $\vec{e}_1$ direction.
# 1. Take $\vec{x} =
# \left[\begin{array}{c}
# a_{11} \\
# a_{21} \\
# \vdots \\
# a_{m1}
# \end{array} \right]
# $, and the projection $F_1\vec{x} = -\sign(x_1)\|\vec{x}\|_2 \vec{e}_1$,then $\vec{v}_1 = \vec{x} - F_1 \vec{x} = \vec{x} + \sign(x_1) \|\vec{x}\|_2 \vec{e}_1$.
# 2. We can get the Householder reflector:
# $$Q_1 = F_1 = I - 2 \frac{\vec{v}_1\vec{v}_1^{\mathrm{T}}} {\|\vec{v}_1\|_2^2}$$
# 3. Now we have
# $$Q_1A = \left[\begin{array}{cccc}
# a_{11}^{(1)} & a_{12} ^{(1)} & \cdots & a_{1n} ^{(1)} \\
# 0 & a_{22} ^{(1)} & \cdots & a_{2n} ^{(1)} \\
# 0 & a_{32} ^{(1)} & \cdots & a_{3n} ^{(1)} \\
# \vdots & \vdots & \vdots & \vdots \\
# 0 & a_{m2} ^{(1)} & \cdots & a_{mn} ^{(1)} \\
# \end{array} \right]$$
# So now we take $\vec{x} =
# \left[\begin{array}{c}
# a_{22} ^{(1)} \\
# a_{32} ^{(1)} \\
# \vdots \\
# a_{m2} ^{(1)}
# \end{array} \right]
# $, and the projection $F_2\vec{x} = -\sign(x_1)\|\vec{x}\|_2 \vec{e}_1$,then $\vec{v}_2 = \vec{x} - F_2 \vec{x} = \vec{x} + \sign(x_1) \|\vec{x}\|_2 \vec{e}_1$.
# 4. We can get the second Householder reflector:
# $$F_2 = I - 2 \frac{\vec{v}_2\vec{v}_2^{\mathrm{T}}} {\|\vec{v}_2\|_2^2}$$
# Notice that at this time $F_2$ is an $(m-1)\times (m-1)$ orthogonal matrix, so we define
# $$Q_2 = \left[\begin{array}{cc}
# 1 & \vec{0}^{\mathrm{T}} \\
# \vec{0} & F_2
# \end{array} \right]_{m \times m}$$
#
# So on and so forth, after $n$ times loop, we get the QR factorization as following:
#
# $$Q_{n}Q_{n-1}\cdots Q_{2}Q_{1}A = \left[\begin{array}{cccc}
# a_{11}^{(1)} & a_{12} ^{(1)} & \cdots & a_{1n} ^{(1)} \\
# 0 & a_{22} ^{(2)} & \cdots & a_{2n} ^{(2)} \\
# 0 & 0 & \ddots & \vdots \\
# \vdots & \vdots & \ddots & a_{nn} ^{(n)} \\
# 0 & 0 & \cdots & 0 \\
# \vdots & \vdots & & \vdots \\
# 0 & 0 & \cdots & 0
# \end{array} \right]_{m \times n} = R$$
#
# $R$ is upper triangular, and each $Q_i$ is an orthogonal matrix. So $Q_i^{\mathrm{T}} = Q_i^{-1}$. And seeing from how we create $Q_i$, we also have $Q_i = Q_i^{\mathrm{T}}$
#
# $$A = Q_1Q_2Q_3\cdots Q_nR := Q_{m \times m}R_{m \times n}$$
#
# $Algorithm$ **Householder Triangularization**
#
# ```
# for k = 1 to n
# x = A_{k:m,k}
# v_k = x + sign(x_1) \|x\|_2 e_1
# v_k = v_k / \|v_k\|_2
# A_{k:m,k:n} = A_{k:m,k:n} − 2v_k ( v_k^T A_{k:m,k:n} )
# end
# ```
#
# The amount of floating point operations of the Householder Triangularization is approximately:
#
# $$\begin{align}
# \sum_{k=1}^n \sum_{j=k}^{n} 4(m-k+1) =& \sum_{k=1}^n 4(m-k+1)(n-k+1) \\
# =& \sum_{k=1}^n 4\big((m+1)(n+1) + k^2 - k(m+n+2)\big) \\
# \approx & 2mn^2 - \frac{2} {3} n^3
# \end{align}$$
# ***
# After the above algorithm, the result $R$ is stored in the upper triangular part of $A$. As for $Q$, it's gone. But we can rebuild it using the saved vector $\vec{v}_k$.
#
# Why $Q$ is not needed? Because we use QR factorization to solve the equation $A\vec{x} = \vec{b}$. Then we will have $Q^{\mathrm{T}}QR\vec{x} = Q^{\mathrm{T}}\vec{b} = R\vec{x}$. So actually what we need is the product of matrix $Q$ (or its transpose) and a given vector $\vec{b}$, which can be achieved with the stored $\vec{v}_k$.
#
# $$\begin{align}
# Q =& Q_1Q_2Q_3\cdots Q_n \\
# =& \big( I_{m} - 2\vec{v}_1 \vec{v}_1^{\mathrm{T}} \big) \left( \begin{array}{cc}
# 1 & \vec{0}^{\mathrm{T}} \\
# \vec{0} & I_{m-1} - 2\vec{v}_2 \vec{v}_2^{\mathrm{T}}
# \end{array} \right) \cdots \left( \begin{array}{cc}
# I_{n-1} & \mathbf{0} \\
# \mathbf{0} & I_{m-n+1} - 2\vec{v}_n \vec{v}_n^{\mathrm{T}}
# \end{array} \right) \\[1em]
# Q^{\mathrm{T}} =& Q_n^{\mathrm{T}}Q_{n-1}^{\mathrm{T}}Q_{n-2}^{\mathrm{T}}\cdots Q_1^{\mathrm{T}} = Q_nQ_{n-1}Q_{n-2}\cdots Q_1 \\
# =& \left( \begin{array}{cc}
# I_{n-1} & \mathbf{0} \\
# \mathbf{0} & I_{m-n+1} - 2\vec{v}_n \vec{v}_n^{\mathrm{T}}
# \end{array} \right) \left( \begin{array}{cc}
# I_{n-2} & \mathbf{0} \\
# \mathbf{0} & I_{m-n+2} - 2\vec{v}_{n-1} \vec{v}_{n-1}^{\mathrm{T}}
# \end{array} \right) \cdots \left( \begin{array}{cc}
# 1 & \vec{0}^{\mathrm{T}} \\
# \vec{0} & I_{m-1} - 2\vec{v}_2 \vec{v}_2^{\mathrm{T}}
# \end{array} \right)
# \big( I_m - 2\vec{v}_1 \vec{v}_1^{\mathrm{T}} \big)
# \end{align}$$
#
# $Algorithm$ **Given $\vec{b}$, find $Q\vec{b}$**
#
# ```
# x = b
# for k = n : −1 : 1
# x_{k:m} = x_{k:m} − 2 v_k ( v_k^T x_{k:m} )
# end
# ```
#
# $Algorithm$ **Given $\vec{b}$, find $Q^{\mathrm{T}}\vec{b}$**
#
# ```
# x = b
# for k = 1 : n
# x_{k:m} = x_{k:m} − 2 v_k ( v_k^T x_{k:m} )
# end
# ```
#
# And if you still want the explicit $Q$, just using $Q\vec{e}_1, Q\vec{e}_2, \dots , Q\vec{e}_n$ to get that.
# # QR factorization by Gram-Schmidt orthogonalization
# Still we assume that $A$ be an $m\times n$ size matrix, and $m \geq n$ with $\rank(A) = n$, a column full rank matrix. Let $Q \in \mathbb{R}^{m \times m}$ and $R \in \mathbb{R}^{m \times n}$ be the result of QR factorization.
#
# $$A = \left[\begin{array}{cccc}
# \vec{a}_1^c & \vec{a}_2^c & \cdots & \vec{a}_n^c
# \end{array} \right] = \left[\begin{array}{cccc}
# \vec{q}_1^c & \vec{q}_2^c & \cdots & \vec{q}_m^c
# \end{array} \right]\left[\begin{array}{c}
# \begin{array}{cccc}
# r_{11} & r_{12} & \cdots & r_{1n} \\
# 0 & r_{22} & \cdots & r_{2n} \\
# \vdots & 0 & \ddots & \vdots \\
# 0 & 0 & \cdots & r_{nn}
# \end{array} \\
# \mathbf{0}
# \end{array} \right]=QR$$
#
# But actually we don't need a **full QR factorization**, here's the **reduced** one.
#
# $$A = \left[\begin{array}{cccc}
# \vec{a}_1^c & \vec{a}_2^c & \cdots & \vec{a}_n^c
# \end{array} \right] = \left[\begin{array}{cccc}
# \vec{q}_1^c & \vec{q}_2^c & \cdots & \vec{q}_n^c
# \end{array} \right]\left[\begin{array}{cccc}
# r_{11} & r_{12} & \cdots & r_{1n} \\
# 0 & r_{22} & \cdots & r_{2n} \\
# \vdots & \ddots & \ddots & \vdots \\
# 0 & \cdots & 0 & r_{nn}
# \end{array}\right]=\hat{Q}\hat{R}$$
#
# And more easily we can find that
#
# $\begin{align}
# \vec{a}_1^c =& r_{11} \vec{q}_1^c \\
# \vec{a}_2^c =& r_{12} \vec{q}_1^c + r_{22} \vec{q}_2^c \\
# \vdots & \\
# \vec{a}_j^c =& r_{1j} \vec{q}_1^c + r_{2j} \vec{q}_2^c + \cdots + r_{jj} \vec{q}_j^c\\
# \vdots & \\
# \vec{a}_n^c =& r_{1n} \vec{q}_1^c + r_{2n} \vec{q}_2^c + \cdots + r_{nn} \vec{q}_n^c\\
# \end{align}$
#
# $Conclusion$
#
# 1. $\vec{a}_j^c \in \big< \vec{q}_1^c, \vec{q}_2^c, \dots, \vec{q}_j^c \big>$, $j = 1,2,\dots, n$
# 2. $r_{jj}\neq 0$. Firstly that $r_{11}$ can't, otherwise $\vec{a}_1^c = \vec{0}$, however $A$ is a column full rank matrix. And for $j = 2,3, \dots, n$, still $r_{jj}\neq 0$, otherwise $\vec{a}_1^c, \vec{a}_2^c, \dots, \vec{a}_j^c$ can be expressed by $\big< \vec{q}_1^c, \vec{q}_2^c, \dots, \vec{q}_{j-1}^c \big>$, which implies that these $j$ columns are linear dependent, can't be! (And we can get to this by seeing how we get the $R$.)
# 3. $\left\{\begin{align}
# \vec{q}_1^c =& \ffrac{\vec{a}_1^c} {r_{11}} \\
# \vec{q}_2^c =& \ffrac{\vec{a}_2^c - r_{12}\vec{q}_1^{c}} {r_{22}} \\
# \vdots & \\
# \vec{q}_j^c =& \ffrac{\vec{a}_j^c - r_{1j}\vec{q}_1^{c} - r_{2j}\vec{q}_2^{c} -\cdots -r_{j-1,j}\vec{q}_{j-1}^{c}} {r_{jj}}\\
# \vdots & \\
# \vec{q}_n^c =& \ffrac{\vec{a}_n^c - r_{1n}\vec{q}_1^{c} - r_{2n}\vec{q}_2^{c} -\cdots -r_{n-1,n}\vec{q}_{n-1}^{c}} {r_{nn}}\\
# \end{align}\right.$
# 4. $\vec{q}_j^c \in \big< \vec{a}_1^c, \vec{a}_2^c, \dots, \vec{a}_j^c \big>$, $j = 1,2,\dots, n$
# 5. $\big< \vec{a}_1^c, \vec{a}_2^c, \dots, \vec{a}_j^c \big> = \big< \vec{q}_1^c, \vec{q}_2^c, \dots, \vec{q}_j^c \big>$
# 6. (From the process of getting $R$, we can see that each time $r_{jj} = \|\vec{x}\|$.) So that actually $\|\vec{q}_j^c\| = 1$, and $\vec{q}_1^c, \vec{q}_2^c, \dots, \vec{q}_j^c$ is actually set of orthonormal vectors.
# 7. So $r_{jj} = \|\vec{a}_j - r_{1j}\vec{q}_1 - r_{2j}\vec{q}_2 - \cdots - r_{j-1,j}\vec{q}_{j-1} \|_2$, and $\vec{q}_j^c = \ffrac{\vec{a}_j - r_{1j}\vec{q}_1 - r_{2j}\vec{q}_2 - \cdots - r_{j-1,j}\vec{q}_{j-1}} {r_{jj}}$
#
# For the algorithms below, calculate $r_{1,1}$ and $\vec{q}_1^{c}$ first.
#
# $Algorithm$ **QR factorization by classical Gram-Schmidt**
#
# ```
# for j = 1 to n
# \vec{v} = \vec{a}_j
# for i = 1 to j−1
# r_{ij} = \vec{q}_i^T \vec{a}_j
# \vec{v} = \vec{v} − r_{ij} \vec{q}_i
# end
# r_{jj} = \| \vec{v} \|_2
# \vec{q}_j = \vec{v}/r_{jj}
# end
# ```
#
# With flops $\sum\limits_{j=1}^{n}\sum\limits_{i=1}^{j-1} 4m \approx 2mn^2$
#
# $Algorithm$ **QR factorization by modified Gram-Schmidt**
#
# ```
# for j = 1 to n
# \vec{v} = \vec{a}_j
# for i = 1 to j−1
# r_{ij} = \vec{q}_i^T \vec{v}
# \vec{v} = \vec{v} − r_{ij} \vec{q}_i
# end
# r_{jj} = \| \vec{v} \|_2
# \vec{q}_j = \vec{v}/r_{jj}
# end
# ```
# # Backward stability of QR factorization
# $Theorem$
#
# Let $A = QR$ by Householder triangularization and for the computed factors, we have $\tilde{Q}\tilde{R} = A + \delta A$, with $\ffrac{\|\delta A\|} {\| A \|} = O(\epsilon_{machine})$. But it is not backward stable if using the classical or modified Gram-Schmidt algorithm.
#
# Take an example.
#
# $$A = \left[\begin{array}{cc}
# 0.7 & 0.7 + 10^{-15} \\
# 0.7 + 10^{-15} & 0.7 + 10^{-15}
# \end{array} \right]$$
#
# Condition number of $A$ is about $10^{15}$. Denote the result from Gram-Schmidt algorithm in MATLAB as $Q_G, R_G$, respectively; and similar for $Q_H, R_H$ using Householder triangularization. The result are
#
# $$Q_G Q_G^{\mathrm{T}} = \left[\begin{array}{cc}
# 0.890 & 0.012 \\
# 0.012 & 1.109
# \end{array} \right],Q_H Q_H^{\mathrm{T}} = \left[\begin{array}{cc}
# 1 & 0 \\
# 0 & 1
# \end{array} \right]$$
# # Solution of system of linear equations through QR factorization
# Given $A \in \mathbb{R}^{n \times n}$ and $\vec{b} \in \mathbb{R}^{n}$. As mentioned above, we have $R\vec{x} = Q^{\mathrm{T}}\vec{b}$ after the QR factorization, which can be solved by backward substitution or backslash function.
#
# Comparing using the LU factorization, it's more stable but more expensive.
#
# # Legendre polynomials 勒让德多项式
# Consider the space of polynomials of degree less or equal than $n − 1$ on the interval $x \in [ − 1, 1]$. The monomials $1, x, x^2 , ... , x^{n − 1}$ form a basis of this space; $p(x) = a_1 + a_2 x + \cdots + a_n x^{n − 1}$.
#
# $Def$ **inner product**
#
# $$\big(p(x),q(x)\big) = \int _{-1} ^{1} p(x)q(x) \,\mathrm{d}x$$
#
# Two polynomials are orthogonal if $\big(p(x),q(x)\big)=0$.
#
# Then after that the fact is $1, x, x^2 , ... , x^{n − 1}$ is not an orthogonal basis. So using QR factorization to find it.
#
# $$A = \left[\begin{array}{cccc}
# 1 & x & \cdots & x^{n-1}
# \end{array} \right] = \left[\begin{array}{cccc}
# P_0(x) & P_1(x) & \cdots & P_{n-1}(x)
# \end{array} \right] \left[\begin{array}{cccc}
# r_{11} & r_{12}& \cdots & r_{1n} \\
# 0 & r_{22} & \cdots & r_{2n} \\
# \vdots & \ddots & \ddots& \vdots \\
# 0 & \cdots & 0 & r_{nn}
# \end{array} \right]$$
#
# And the polynomials in $Q$ are the **Legendre polynomials**.
#
# $$\displaystyle P_{n}(x)={1 \over 2^{n}n!}{\mathrm{d}^{n} \over \mathrm{d}x^{n}}\left[(x^{2}-1)^{n}\right]$$
#
# Here each polynomial $P_i(x)$ is of degree $i$, and since $P_i(1) = 1$, so they form an orthogonal basis
| Computational/Intro to Numerical Computing/Note_Chap06_Householder Triangularization, Gram-Schmidt Orthogonalization, and QR Factorization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Representing data in memory
# A typical program outline calls for us to load data from disk and place into memory organized into data structures. The way we represent data in memory is critical to building programs. This is particularly true with data science programs because processing data is our focus.
#
# First, let's get something straight about data. Data elements have *values* and *types*, such as `32` and *integer* or `"hi"` and *string*. We build the data structures by combining and organizing these data elements, such as a list of integers.
#
# We also have a special element called a *pointer* or *reference* that refers to another element. It's like a phone number "points at" a phone but is not the phone itself. Using the pointer we can get to the phone. A list of pointers is like a phone book with references to humans but the phone book is not actually a list of humans. (We will see later that even when we do something simple like `x=3`, the variable `x` is secretly a pointer to an integer object with the value of 3.)
#
# Next, let's take a small detour into computer architecture to get a handle on what it means to load something into memory.
# ## Computer architecture detour
#
# A computer consists of three primary components: a disk to hold data, a memory (that is wiped upon power off), and a processor (CPU) to process that data. Here is a picture of an actual CPU and some memory chips:
#
# <img src="images/cpu-memory.png" width="400">
#
# Computer memory (RAM == random access memory) is much faster but usually much smaller than the disk and all memory is lost when the computer powers off. Think of memory as your working or scratch space and the disk as your permanent storage. Memory chips are kind of like human short-term memory that is prone to disappearing versus a piece of paper which is slower to read and write but *persistent*.
#
# The memory is broken up into discrete cells of a fixed size. The size of a cell is one *byte*, which consists of 8 *bits*, binary on/off digits. It is sufficient to hold a number between 0 and 255. Each cell is identified by an integer address, just like the numbers on mailboxes (see image below and to the right). Processors can ask for the data at a particular address and can store a piece of data at a specific memory location as well. For example, here is an abstract representation of byte-addressable computer memory:
#
# <table border="0">
# <tr>
# <td><img src="images/addresses.png" width="80">
# <td><img src="images/mailboxes.png" width="70">
# </table>
#
# In this case, the memory has value 100 at address 0. At address 1, the memory has value 0. Address 4 has the maximum value we can store in a single byte: 255.
#
# **Everything from actual numbers to music to videos is stored using one or more of these atomic storage units called bytes.**
#
# **Everything is stored as a number or sequence of numbers in a computer, even text.**
#
# Data lives either in memory, on the disk, or can be retrieved from a network. As part of producing a programming plan, you need to know where the data resides so you can incorporate loading that data into memory as part of the plan.
# ### Computer architecture metrics
#
# Here are the key units we use in computer architecture:
#
# * Kilo. $10^3 = 1,000$ or often $2^{10} = 1024$
# * Mega. $10^6 = 1,000,000$
# * Giga. $10^9 = 1,000,000,000$
# * Tera. $10^{12} = 1,000,000,000,000$
#
# You need to know these units because you need to know whether a data set fits in memory or whether it fits on the disk or even how long it will take to transfer across the network.
#
# For example, when I started out, my first microcomputer had 16k of RAM, but my desktop now has 32G of RAM. What is the ratio of memory size increase?
#
# CPUs execute instructions to the heartbeat of a clock, which is where we get the term clock rate. MHz (million hertz == cycles/second) and GHz (billion) are the typical units of clock ticks per second. My desktop has a 4GHz clock rate, which means that it can execute approximately 4 giga- or billion instructions per second. That's a lot.
#
# If your network is, say, 100Mbits/second, then you can transfer an 800Mbit (100M byte) file in 8 seconds.
#
# How big is the San Francisco phonebook (uncompressed)? How fast can you transfer that phonebook across an 8Mbit/second network?
#
# ## Programming language view of memory
# Programming languages present us with a higher level view of the memory in two ways: we can use names to refer to locations in memory and each memory cell can hold integer and real number values of arbitrary size (they do have a limit, but let's keep things simple for now). For example, here are two named values stored in memory:
#
# <img src="images/named-memory.png" width="90">
# Two named values held in memory: an integer count and a floating-point price.
units, price = 923, 8.02
# <img src="images/redbang.png" width="30" align="left">When referring to the kind of thing a value represents, we use the word **type**. The type of the "units" cell is integer and the type of "price" is real number (or floating-point number).
type(units)  # -> <class 'int'>
type(price)  # -> <class 'float'>
# Another very common value type is *string*, which is really a list of characters. We use strings to hold place names, book titles, and any other text-based values. We can think of strings as being a single value because the programming language hides the details. Strings can be arbitrarily long and the programming language stores the characters as a sequence of bytes in memory. Each character takes one or two bytes. In other words, we think of it as
#
# <img src="images/strings.png" width="110">
name = "Mary"
type(name)
# but it is really more like this:
#
# <img src="images/strings2.png" width="110">
#
# Using package [lolviz](https://github.com/parrt/lolviz) we can visualize even simple types like strings:
# lolviz is a third-party library that draws Python data structures.
from lolviz import *
strviz(name)  # draw the string as a single object
objviz(name) # render as list of char
# These basic data types
#
# * integer numbers
# * floating-point numbers
# * strings
#
# are our building blocks. If we arrange some of these blocks together, we can create more complex structures.
# ## Data structures
# ### List
# The most common *data structure* is the **list**, which is just a sequence of memory cells. Because we're all familiar with spreadsheets, let's visualize these data structures using a spreadsheet. Columns in a spreadsheet are really lists, such as the following lists/columns of integers, floating-point numbers, and strings:
#
# <table border="0">
# <tr>
# <td><img src="images/int-list.png" width="60">
# <td><img src="images/float-list.png" width="80">
# <td><img src="images/names-list.png" width="139">
# </tr>
# </table>
# A homogeneous list: every element is an integer (like one spreadsheet column).
Quantity = [6, 49, 27, 30, 19, 21, 12, 22, 21]
type(Quantity)  # -> <class 'list'>
len(Quantity)  # number of elements: 9
objviz(Quantity)  # lolviz: draw the list's cells and values
# We can think of the rows of a spreadsheet as lists also. For example, the header row of a spreadsheet is really a list of strings:
#
# <img src="images/header-row.png" width="750">
# A header row is itself just a list — here, a list of strings.
headers = ['Date', 'Quantity', 'Unit Price', 'Shipping']
type(headers)  # -> <class 'list'>
objviz(headers)  # lolviz: draw the list of strings
# All of these lists have one thing in common: the type of element is the same. They are *homogeneous*. But, we can also have lists with *heterogeneous* elements, which is typically what we see in spreadsheet rows:
#
# <img src="images/sample-row.png" width="800">
# A heterogeneous list (one spreadsheet row): string date, ints, float, name.
arow = ['10/13/10', 6, 38.94, 35, '<NAME>']
# or
from datetime import date
# Same row, but with a proper date object instead of a date string.
arow = [date(2010, 10, 13), 6, 38.94, 35, '<NAME>']
arow
type(arow)  # -> <class 'list'>
listviz(arow)  # lolviz: draw the heterogeneous list horizontally
# Heterogeneous lists are typically used to group bits of information about a particular entity. In machine learning, we call this a **feature vector**, an **instance**, or an **observation**. For example, an apples versus oranges classifier might have feature vectors containing weight (number), volume (number), and color (string). The important point here is that a list can also be used to as a way to aggregate features about a particular entity. The sequence of the elements is less important than the fact that they are contained (aggregated) within the same list.
# ### Tuple
#
# A tuple is an immutable list and is often used for returning multiple values from a function. It's also a simple way to group number of related elements such as:
# A tuple groups related values; unlike a list, it is immutable.
me = ('parrt',607)
me
# We index the elements just like we do with a list:
print(me[0])  # first element: 'parrt'
print(me[1])  # second element: 607
# But, there's no way to change the elements, as there is with a list. If we do:
#
# ```python
# me[0] = 'tombu'
# ```
#
# the result is an error:
#
# ```
# TypeError: 'tuple' object does not support item assignment
# ```
# Here's an example of pulling apart a tuple using the multiple assignment statement:
me = ('parrt',607)
# Multiple assignment unpacks the tuple's two elements into two names.
userid,office = me
print(userid)
print(office)
print(me[0], me[1])
# The tuple only has indices 0 and 1, so index 2 raises IndexError (deliberate demo).
me[2] # causes error
# Tuples are a great way to group related items without having to create a formal Python class definition.
# ### Set
# If we enforce a rule that all elements within a list are unique, then we get a **set**. Sets are unordered.
# Duplicates collapse: the set keeps a single 113. Sets are unordered.
ids = {100, 103, 121, 102, 113, 113, 113, 113}
ids
type(ids)  # -> <class 'set'>
objviz(ids)  # lolviz: draw the set
# We can do lots of fun set arithmetic:
{100,102}.union({109})  # -> {100, 102, 109}
{100,102}.intersection({100,119})  # -> {100}
# ### Tables (list of lists)
# Spreadsheets arrange rows one after the other, which programmers interpret as a *list of lists.* In the analytics or database world, we call this a **table**:
#
# <img src="images/rows.png" width="700">
#
# In this example, each row represents a sales transaction.
#
# The input to machine learning algorithms is often a table where each row aggregates the data associated with a specific instance or observation. These tables are called **dataframes** and will become your BFF.
# +
from pandas import DataFrame
# A dataframe: a table whose rows are observations and whose columns are named.
df = DataFrame(data=[[99,'parrt'],[101,'sri'],[42,'kayla']],
               columns=['ID','user'])
df
# -
df.values  # the underlying 2-D numpy array of the table
df.columns  # the column labels
df.user  # a single column, selected by attribute access
objviz(df.values)  # lolviz: draw the underlying array
# ### Matrix
# If the table elements are all numbers, we call it a **matrix**. Here's a matrix with 5 rows and 2 columns:
#
# <img src="images/matrix.png" width="110">
#
# Let me introduce you to another of your new BFF, `numpy`:
import numpy as np
A = np.array([[19,11],
[21,15],
[103,18],
[99,13],
[8,2]])
print(A)
# That is a matrix with shape 5 rows, 2 columns:
A.shape
# There are many ways to represent or layout things and memory. In this case, we can view the matrix as a list of lists using lolviz:
lolviz(A.tolist())
# Or as a matrix
objviz(A)
# We can do lots of matrix math with numpy:
objviz(A+A)
objviz(A*99)
objviz(A.T) #transpose
# Here's a system of equation: $A x = b$, $x = A^{-1} b$:
#
# \begin{equation*}
# \begin{bmatrix}
# 38 & 22\\
# 42 & 30
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\
# x_2
# \end{bmatrix} =
# \begin{bmatrix}
# 3 \\
# 5
# \end{bmatrix}
# \end{equation*}
# Using numpy, we can solve that using the inverse of $A$.
# Solve the linear system A x = b via the explicit inverse: x = A^-1 b.
from numpy.linalg import inv
A = np.array([[38, 22], [42, 30]])  # coefficient matrix
b = np.array([3, 5])                # right-hand side
x = inv(A).dot(b)  # NOTE(review): np.linalg.solve(A, b) is numerically preferred; inv() is kept here for teaching
objviz(x)  # objviz presumably comes from the lolviz package imported earlier in the notebook -- TODO confirm
# Here's some more stuff about the shape of various numpy $n$-dimensional arrays:
x = np.array([3, 5]) # vertical vector with 2 rows
y = np.array([[3, 5]]) # matrix with 1 row and 2 columns
z = np.array([[3],[5]]) # matrix with 2 rows, 1 column
print(x.shape)
print(y.shape)
print(z.shape)
# The tuple `(2,)` means a one-dimensional vector with 2 rows. We can't use notation `(2)` because that's just an expression that means 2 rather than a tuple. It's a quirk but necessary.
# ### Traversing lists
# The spreadsheet model is a good one for understanding data structures but it's important to keep in mind that computers process one element (number or string) at a time.
# As humans, we can look at the spreadsheet or data structure from above in its entirety, but programs must **walk** or **traverse** the elements of a data structure one after the other. It's kind of like sliding a magnifying glass over the elements of a list:
#
# <img src="images/int-list-item.png" width="230">
#
# This notion of traversal abstracts to any **sequence** (or **stream**) of elements, not just lists. For example, we will eventually traverse the lines of a text file or a sequence of filenames obtained from the operating system. Sequences are extremely powerful because it allows us to process data that is much bigger than the memory of our computer. We can process the data piecemeal whereas a list requires all elements to be in memory at once.
#
# Typically we iterate through the elements of a list with a `for`-each statement:
for q in Quantity:
print(q)
# Here, the type of the objects pointed to by `q` is `int`. We can also iterate through that list using an indexed loop:
for i in range(len(Quantity)):
print(Quantity[i])
# For lists and other structures that fit completely in memory, we often find a **reverse traversal** useful, that examines elements from last to first:
for q in reversed(Quantity):
print(q)
# ### Dictionary
# If we arrange two lists side-by-side and kind of glue them together, we get a **dictionary**. Dictionaries map one value to another, just like a dictionary in the real world maps a word to a definition. Here is a sample dictionary that maps a movie title to the year it was nominated for an Oscar award:
#
# <img src="images/dict.png" width="220">
{'a':1,'b':3,'hi':45}
movies = {'Amadeus':1984, 'Witness':1985}
print(movies)
objviz(movies)
print(movies.keys())
print(movies.values())
# Walking a dictionary is also easy but we have to decide whether we want to walk the keys or the values:
movies = {'Amadeus':1984, 'Witness':1985}
for m in movies: # walk keys
print(m)
for m in movies.values(): # walk values
print(m)
for (key,value) in movies.items():
print(f"{key} -> {value}")
movies['Amadeus']
# ```
# movies['foo']
# ```
#
# gets a KeyError:
#
# ```
# ---------------------------------------------------------------------------
# KeyError Traceback (most recent call last)
# <ipython-input-40-72c06b90f573> in <module>()
# ----> 1 movies['foo'] # gets a KeyError
#
# KeyError: 'foo'
# ```
# ## Summary
# Here are the commonly-used data types:
#
# * integer numbers like -2, 0, 99
# * real numbers (floating-point numbers) like -2.3, 99.1932
# * strings like "Mary", "Bob Smith"
#
# And here are the commonly-used data structures:
#
# * ordered list
# * set (just an unordered, unique list)
# * list of lists such as tables or matrices with rows and columns
# * tuples are immutable lists
# * dictionary such as mapping a student name to their student ID; we can think of this as a table where each row in the table associates the key with a value.
#
# Remember that all variable names are actually indirect references to a memory location. Everything is a pointer to the data in the implementation. That means we can have two variable names that refer to the same memory location and hence the variables are aliased. Changing one variable's elements appears to change the other variables elements.
#
# Now that we know what data looks like in memory, let's consider a [computation model](computation.ipynb).
| notes/data-in-memory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/roneljr/Linear-Algebra-58019/blob/main/Python_Exercise_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="POqSAJ8V3biI"
# #Vector Space Operations
# + [markdown] id="iSTx3nFqowAz"
# #Representing Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="AklQ7hssnbzZ" outputId="33c1910c-9419-40be-e5b9-cba05b1d610d"
import numpy as np
A = np.array([4,3])
B = np.array([2,-5])
print('Vector A is',A)
print('Vector B is',B)
# + [markdown] id="UKKhmLm3o2KR"
# ##Describing Vectors in NumPy
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="3_ptk5PLo1e9" outputId="42428773-7dff-4c54-9641-9da94e206f1d"
ball1 = np.array([1,2,3])
ball2 = np.array([0,1,-1])
pool = np.array([ball1,ball2])
pool.shape
pool.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="r_IOw2UKqdsl" outputId="d2fb76a5-ed77-4590-8bab-b4e211ca22db"
U = np.array([[1,2,3],[4,5,6]])
U
# + colab={"base_uri": "https://localhost:8080/"} id="KkKuGsfBrh9u" outputId="8ff18824-255c-4ca4-bbe6-eac7b607ddde"
U = np.array([[1,2,3],[4,5,6]])
U.shape
# + colab={"base_uri": "https://localhost:8080/"} id="0gKwxquGrqnG" outputId="d19804f9-ebc8-456c-fc39-be983e9d74a1"
U = np.array([[1,2,3],[4,5,6]])
U.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="whCdvDcCruL6" outputId="86f38450-48d6-4c09-b654-4e1da55ee40a"
U = np.array([[1,2,3],[4,5,6]])
U
U.size
# + [markdown] id="yUdFTAlqr6vZ"
# #Space
# + [markdown] id="n51njNIJr9-l"
# ##Vector spaces are mathematical objects that abstractly capture the geometry and algebra of linear equations.
# + [markdown] id="QF6uir9LtUEP"
# ##Addition of Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="iqhWLmTCsP9f" outputId="fe1bc251-9654-4e63-cbed-c325f7a4994f"
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
sum = addend1 + addend2
sum
# + colab={"base_uri": "https://localhost:8080/"} id="N1KGaAx2uBLd" outputId="44a0abbb-08ee-4195-acf7-f4a0bdb16251"
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
resultant = np.add(addend1,addend2)
resultant
# + [markdown] id="r4t7Yrg3u-Kx"
# ##Subtraction of Vector
# + colab={"base_uri": "https://localhost:8080/"} id="AzbQw84QvDRR" outputId="95eb6ce2-4c74-4599-d5bd-9bba05ba3fb6"
#You will subtract the components of addend2 from addend1 : addend1-addend2
difference = addend1-addend2
difference
# + colab={"base_uri": "https://localhost:8080/"} id="FLR7ZOK2vrgF" outputId="9b5bfb57-3e4b-4436-931a-7efdb5960493"
difference2 = np.subtract(addend1,addend2)
difference2
# + [markdown] id="_DViKdRswac3"
# ##Scaling
# + colab={"base_uri": "https://localhost:8080/"} id="9_8DXvPswcmn" outputId="753d53fb-a5fd-4f56-b68d-ee20e6cb81c6"
A = np.array([1,5,8,9])
S = 5*A
S
# + [markdown] id="lEEp0fD8yRJu"
# ##Cross Product
# + colab={"base_uri": "https://localhost:8080/"} id="wijK6cCfxmuc" outputId="112744e3-6f14-4ced-92d9-d999ab2a2b41"
# Cross product of two 2-D vectors: the result is the scalar z-component
# a1*b2 - a2*b1 of the equivalent 3-D cross product.
# NOTE(review): np.cross on 2-D inputs is deprecated in NumPy >= 2.0 -- confirm target version.
A = np.array([2, 3])
B = np.array([1, 7])
cross = np.cross(A, B)
print(cross)
# + colab={"base_uri": "https://localhost:8080/"} id="flWazz7JzCpU" outputId="000acd3f-d3da-4919-efc6-e3c1c6460913"
# Cross product of two 3-D vectors: yields a vector orthogonal to both A and B.
A = np.array([2, 3, 4])
B = np.array([1, 7, 1])
cross = np.cross(A, B)
print(cross)
# + colab={"base_uri": "https://localhost:8080/"} id="9uRc39pW0I_d" outputId="3c7514e8-2ef5-4c18-fd99-4e176229e179"
import numpy as np

# Dot product of two 2-D vectors: 2*1 + 3*7 = 23.
A = np.array([2, 3])
B = np.array([1, 7])
dot = A @ B  # for 1-D arrays the @ operator is equivalent to np.dot
print(dot)
| Python_Exercise_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
# #%metplotlib inline
# #%matplotlib notebook
# %matplotlib
matplotlib.style.use("ggplot")
file_path = "/Users/szabolcs/dev/git/DAT210x/Module3/Datasets/"
file_name = "students.data"
df = pd.read_csv(file_path + file_name)
print(df.columns)
df.head()
# +
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_xlabel("Final Grade")
ax.set_ylabel("First Grade")
ax.set_zlabel("Daily Alcohol")
ax.scatter(df.G1, df.G3, df.Dalc, c="r", marker=".")
plt.show()
# -
| Module3/.ipynb_checkpoints/solution_3D_scatter-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="H5kJBZfy5JEP"
import pandas as pd
import numpy as np
import os
# + id="MMDxjz-AQMbZ"
df = pd.read_csv( '/content/sample_data/rating.csv' , sep=';' )
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="OaF32RwnqQV9" outputId="2236b34a-ec91-4ce7-a043-64a62cec325e"
df
# + id="yYSitYqpF48f"
#df.to_csv("dataset_pilkada_ok.csv")
# + id="nzwHQbBzoUyG"
df['label'] = df['Star'].astype(np.int64)
#df = df.drop(["Unnamed: 0"],axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="YT18gdqCnzY_" outputId="fdad17d5-2d85-4d5b-9c1c-1bfb6610c72c"
df.dtypes
# + id="G_VbYBUFqvBN"
import re
# Remove retweet markers ("RT").
df.Review = df.Review.str.replace(r'RT', '', regex=True)
# Remove newlines.
df.Review = df.Review.str.replace(r'\n', '', regex=True)
# Remove links.
# FIX: pass regex=True explicitly -- pandas >= 2.0 defaults to regex=False,
# which would treat these patterns as literal text and remove nothing.
df.Review = df.Review.str.replace(r'https?:\/\/.*[\r\n]*', ' ', regex=True)
# Remove dollar/currency tokens (e.g. "$100").
df.Review = df.Review.str.replace(r'\$\w*', ' ', regex=True)
# + id="NqPoQF40792r"
#lower case semua text
df['Review'] = df['Review'].str.lower()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="TsOBt5I4syQr" outputId="5e10eff4-6d6d-49f6-a450-91db3377bc57"
df.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 84} id="G5lob3FUrb5b" outputId="a9d01e55-d337-46d3-bc74-aed7348d9041"
df.Review.iloc[1]
# + colab={"base_uri": "https://localhost:8080/"} id="HCIEw8QwlNya" outputId="0e445170-061d-4747-9681-3147650ff571"
a = df['Star'].unique()
print(sorted(a))
# + id="f4g6rd9z5ewt"
#rubah jadi array numpy, supaya bisa dilakukan modeling
from sklearn.model_selection import train_test_split
kalimat = df['Review'].values
y = df['label'].values
kalimat_latih, kalimat_test, y_latih, y_test = train_test_split(kalimat, y, test_size=0.2)
# + colab={"base_uri": "https://localhost:8080/"} id="eS7bn1nE5qet" outputId="ea7edec3-4ee8-4e92-97a3-e0778dbae995"
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
#tokeninzing menjadi perkata, dalma bentuk array
tokenizer = Tokenizer(num_words=250, oov_token='x')
tokenizer.fit_on_texts(kalimat_latih)
tokenizer.fit_on_texts(kalimat_test)
#kita pecah perkalimat
sekuens_latih = tokenizer.texts_to_sequences(kalimat_latih)
sekuens_test = tokenizer.texts_to_sequences(kalimat_test)
#kita bikin padding supaya setiap kalimat memiliki dimensi panjang yg sama
padded_latih = pad_sequences(sekuens_latih)
padded_test = pad_sequences(sekuens_test)
padded_latih.shape
voc_size = padded_latih.max()+1
print(voc_size)
# + id="iULWisgz5s-q"
#membuat model Neural Network
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Embedding(voc_size, 16, input_length=20),
tf.keras.layers.LSTM(64),
# tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
# #membuat model Neural Network
# import tensorflow as tf
# model = tf.keras.Sequential([
# tf.keras.layers.Embedding(voc_size, 16, input_length=20),
# tf.keras.layers.GlobalAveragePooling1D(),
# tf.keras.layers.Dense(24, activation='relu'),
# tf.keras.layers.Dense(1, activation='sigmoid')
# ])
# model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="uhULm8_W6ztq" outputId="f9ef554a-7873-481e-eb5b-36a65a3ea675"
#melakukan pelatihan model NN sebanyak 30 kali epoch
num_epochs = 30
history = model.fit(padded_latih, y_latih, epochs=num_epochs,
validation_data=(padded_test, y_test), verbose=2)
# + id="ZQO6wsvDx7W8"
import pickle

# Persist the trained weights with pickle, then save the full model as HDF5.
weigh = model.get_weights()
pklfile = "modelweights.pkl"
# FIX: use a context manager so the file handle is closed even if dump() raises
# (the original opened/closed the file manually with no error handling).
with open(pklfile, 'wb') as fpkl:
    pickle.dump(weigh, fpkl, protocol=pickle.HIGHEST_PROTOCOL)
model.save("my_model.h5")  # using h5 extension
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="6f-VdJjH66vN" outputId="994a4552-1bae-4b9b-e445-959b172320de"
#Tampilkan loss dari data training dan testing dalam bentuk plot
from matplotlib import pyplot
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="ATL6LphANus9" outputId="d92c9861-565a-4f43-f40e-9a3095668213"
#Tampilkan loss dari data training dan testing dalam bentuk plot
from matplotlib import pyplot
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
# + colab={"base_uri": "https://localhost:8080/"} id="vyhrDd_K7C5v" outputId="673caa14-f243-4512-ac08-cfed30a6480c"
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence
def predict_sentimen(kata):
    """Classify the sentiment of one sentence/tweet and print the result.

    Applies the same cleaning used on the training data (strip "RT" markers,
    newlines and URLs, lowercase), tokenizes with the already-fitted
    tokenizer, and runs the trained model. Prints "Positif" when the model
    score exceeds 0.6, otherwise "Negatif".

    Parameters
    ----------
    kata : str
        Raw sentence/tweet to analyse.
    """
    kata = kata.replace('RT', ' ')
    kata = kata.replace('\n', ' ')
    # BUG FIX: str.replace matches literally, so the original call with a
    # regex pattern never removed URLs; use re.sub for the regex removal.
    kata = re.sub(r'https?:\/\/.*[\r\n]*', ' ', kata)
    kata = kata.lower()
    coba = [kata]
    # NOTE: do NOT refit the tokenizer here -- fitting on unseen text at
    # prediction time shifts the word index the model was trained on.
    # Unknown words are handled by the tokenizer's OOV token ('x') instead.
    sequence_coba = tokenizer.texts_to_sequences(coba)
    # A single redundant second padding pass was removed; one call suffices.
    padded_coba = pad_sequences(sequence_coba)
    hasil_prediksi = model.predict(padded_coba)[0][0]
    if hasil_prediksi > 0.6:
        sentimen = "Positif"
    else:
        sentimen = "Negatif"
    print("Kalimat :\n", kata)
    print("Sentimen :\n", sentimen)
    print("Akurasi prediksi :\n", hasil_prediksi)
input_tweet = input("Masukkan kalimat/tweet yang ingin anda analisa sentimennya :")
predict_sentimen(input_tweet)
| ProgressML/bangkit01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pipeline NotebookNode
# This notebook will represent a single node of a pipeline. It will be completely driven via the following environment variables configured on the node properties dialog of the pipeline:
# - `NODE_FILENAME`: (Required) The filename associated with the node. The extension is used to validate that the node matches the associated file. The basename portion represents the node name - and is used in producing the output files.
# - `INPUT_FILENAMES`: (Optional) A SEMI-COLON-separated list of filenames. Each entry can include a _relative_ path as a prefix to the filename. Each file is expected to exist and contain content. The content will be printed and should appear in the out of a cell.
# - `OUTPUT_FILENAMES`: (Optional) A SEMI-COLON-separated list of filenames. Each entry can include a _relative_ path as a prefix to the filename. Each file is NOT expected to exist, but will be created as a function of the notebook's execution.
import os
from node_util.node_util import NotebookNode
# These getenv calls are here to help seed the environment variables
# dialog in the node properties of the pipeline editor
os.getenv("NODE_FILENAME")  # required: filename associated with this node (see header above)
os.getenv("INPUT_FILENAMES")  # optional: ';'-separated list of expected input files
os.getenv("OUTPUT_FILENAMES")  # optional: ';'-separated list of files this node should create
os.getenv("ELYRA_RUNTIME_ENV")  # presumably set by the Elyra runtime -- TODO confirm
# Execute the node
NotebookNode().run()
| elyra/pipeline/tests/resources/node_util/node.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#NOTE: This must be the first call in order to work properly!
from deoldify import device
from deoldify.device_id import DeviceId
#choices: CPU, GPU0...GPU7
device.set(device=DeviceId.GPU0)
from deoldify.visualize import *
plt.style.use('dark_background')
torch.backends.cudnn.benchmark=True
# NOTE: Set artistic to False if you're having trouble getting a good render. Chances are it will work with the Stable model.
colorizer = get_image_colorizer(artistic=True)
# # Instructions
#
# ### source_url
# Type in a url to a direct link of an image. Usually that means they'll end in .png, .jpg, etc. NOTE: If you want to use your own image, you can set source_url to None and just upload the image to /test_images/ in Jupyter. Just make sure that the source_path parameter matches the file you uploaded.
#
# ### source_path
# Name this whatever sensible image path (plus extension of jpg/png/ext) you want! Sensible means the path exists and the file exists if source_url=None.
#
# ### render_factor
# The default value of 35 has been carefully chosen and should work -ok- for most scenarios (but probably won't be the -best-). This determines resolution at which the color portion of the image is rendered. Lower resolution will render faster, and colors also tend to look more vibrant. Older and lower quality images in particular will generally benefit by lowering the render factor. Higher render factors are often better for higher quality images, but the colors may get slightly washed out.
#
# ### result_path
# Ditto- don't change.
#
# ### How to Download a Copy
# Simply shift+right click on the displayed image and click "Save Image As..."!
#
#
# ## Pro Tips
# 1. You can evaluate how well the image is rendered at each render_factor by using the code at the bottom (that cell under "See how well render_factor values perform on a frame here").
# 2. Keep in mind again that you can go up top and set artistic to False for the colorizer to use the 'Stable' model instead. This will often tend to do better on portraits, and natural landscapes.
#
#
# ## Troubleshooting
# If you get a 'CUDA out of memory' error, you probably have the render_factor too high. The max is 45 on 11GB video cards.
# ## Colorize!!
# +
#NOTE: Max is 45 with 11GB video cards. 35 is a good default
render_factor=35
#NOTE: Make source_url None to just read from file at ./video/source/[file_name] directly without modification
source_url='https://upload.wikimedia.org/wikipedia/commons/e/e4/Raceland_Louisiana_Beer_Drinkers_Russell_Lee.jpg'
source_path = 'test_images/image.png'
result_path = None
if source_url is not None:
result_path = colorizer.plot_transformed_image_from_url(url=source_url, path=source_path, render_factor=render_factor, compare=True)
else:
result_path = colorizer.plot_transformed_image(path=source_path, render_factor=render_factor, compare=True)
show_image_in_notebook(result_path)
# -
# ## See how well render_factor values perform on the image here
#for i in range(10,46):
#colorizer.plot_transformed_image(source_path, render_factor=i, display_render_factor=True, figsize=(10,10))
| ImageColorizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import numpy as np
from gurobipy import *
with open('sample1.csv', 'r') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',', quotechar='|')
data = list(readCSV)
row_count = len(data)
#print(data)
m_a = data[0][0]
n_a = data[0][1]
delta_a = data[0][2]
#get m, n, delta in array form
given = [m_a, n_a, delta_a]
given = [float(i) for i in given]
m = given[0]
n = given[1]
delta = given[2]
#print(given)
#calculate epsilon
eps = delta/4
#get b in array form
k = row_count -1
b = data[4]
b = [float(k) for k in b]
#get A in array form (will need to convert to float later)
A_str = data[1:row_count-1]
A = []
for y in range(len(A_str)):
#print(A_str[y])
A.append([x for x in A_str[y] if x])
for row in range(len(A)):
for col in range(len(A[row])):
#print(A[row][col])
A[row][col] = float(A[row][col])
#print(A)
num_const = len(A) #same as m?
#initialize weights
w = np.ones(int(m))
w_sum = 0
for element in range(len(w)):
w_sum = w_sum + w[element]
p = np.ones(int(m))
try:
#Oracle--> solve -M <= (Ax -b)i <= M say M = 100?
m = Model("Sample1")
x1 = m.addVar(vtype=GRB.CONTINUOUS, name = 'x1')
x2 = m.addVar(vtype=GRB.CONTINUOUS, name = 'x2')
m.setObjective(0, GRB.MAXIMIZE)
m.addConstr(-x2 + 1 <= 100)
m.addConstr(-x2 + 1 >= -100)
m.addConstr(-2*x1 -x2 + 2 <= 100)
m.addConstr(-2*x1 -x2 + 2 >= -100)
m.addConstr(x1 + x2 - 1.5 <= 100)
m.addConstr(x1 + x2 - 1.5 >= -100)
m.optimize()
for v in m.getVars():
print('%s %g' % (v.varName, v.x))
#losses--> (Ax -b )i for all x^t, need x from oracle
#update weights
for i in range(num_const):
w[i] = w[i]*(1- eps) #need to add loss function
print(w)
#to get p value
for j in range(num_const):
p[i] = w[i]/ w_sum
print(p)
except GurobiError as e:
print('Error code ' + str(e.errno) + ": " + str(e))
except AttributeError:
print('Encountered an attribute error')
# -
| .ipynb_checkpoints/Assignment 2 Q4-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import copy
import unittest
from unittest import TestCase
from src.multiple_trees.compare_trees import get_distances_by_files
from src.single_tree.development_tree_reader import read_all_trees
from src.single_tree.global_params import GlobalParams
global_params = GlobalParams(max_level=11, param_a=0.6, g_weight=0.1, chain_length_weight=0.1)
class TestDistance(TestCase):
    """Regression tests for pairwise tree-distance computation on *.xtg fixtures.

    Each test feeds a glob of tree files to get_distances_by_files and checks
    the resulting distance matrix against hand-computed expected values, both
    with and without tree reduction.
    """

    # expected_matr = top-right matr (diagonal is excluded)
    def compare(self, path, is_reducing, expected_matr, g_weight=0.0):
        # Compute the full pairwise distance matrix for all trees matching
        # `path`, then compare its upper-right triangle with `expected_matr`:
        # expected_matr[i][j] corresponds to matr[i][j + i + 1] because the
        # diagonal (and lower triangle) are excluded.
        [_trees, matr] = get_distances_by_files(f"test/test_input/{path}",
                                                GlobalParams(max_level=11, is_test_nodes=True, g_weight=g_weight),
                                                is_reducing=is_reducing,
                                                is_test_nodes=True)

        for (i, expected_row) in enumerate(expected_matr):
            for (j, expected_value) in enumerate(expected_row):
                actual_value = matr[i][j + i + 1]
                self.assertAlmostEqual(expected_value, actual_value)

    def test_chain(self):
        # if reduce - all trees are the same
        # distance between chain_13 and chain_13_with_division_at_12 = 0
        distance_13_13_with_div_at_12 = 0
        self.compare("chains/test_chain*.xtg", True, [[0, 0, 0],
                                                      [0, 0],
                                                      [distance_13_13_with_div_at_12]])

        # if NO reduce
        dist_level_10 = pow(0.5, 9)  # d(Leave, Growth)
        dist_level_11 = pow(0.5, 10)  # d(Leave, Null)
        dist_chain_10_chain_11 = dist_level_10 + dist_level_11
        dist_chain_10_chain_13 = dist_chain_10_chain_11  # equal because of cut at level 11
        dist_chain_11_chain_13 = 0  # equal because of cut at level 11
        self.compare("chains/test_chain*.xtg", False, [[dist_chain_10_chain_11, dist_chain_10_chain_13, dist_chain_10_chain_13],
                                                       [dist_chain_11_chain_13, dist_chain_11_chain_13],
                                                       [0]])

    def test_growth_chain(self):
        # if growth > 0, then distance[0][1] > 0
        # then distance[1][2] tests producing growths in the chain
        d_growth_and_no_growth = pow(0.5, 1) * (256 - 1)
        self.compare("chains/test_*chain10.xtg", True, [[d_growth_and_no_growth, d_growth_and_no_growth],
                                                        [0]],
                     g_weight=1.0)

        # if growth = 0, then distance[0][1] = 0
        self.compare("chains/test_*chain10.xtg", True, [[0, 0],
                                                        [0]],
                     g_weight=0.0)

    def test_m2(self):
        # cannot reduce, because shown null nodes aren't completely null - they're shown for test purposes
        #self.compare("paper_m/M2_*.xtg", True, [[0]])
        self.compare("paper_m/M2_*.xtg", False, [[0]])

    def test_m3(self):
        self.compare("paper_m/M3_*.xtg", True, [[0, 0],
                                                [0]])
        self.compare("paper_m/M3_*.xtg", False, [[0, 0],
                                                 [0]])

    def test_m4(self):
        self.compare("paper_m/M4_*.xtg", True, [[0.5, 2.0],
                                                [2.0]])
        self.compare("paper_m/M4_*.xtg", False, [[0.5, 2.0],
                                                 [2.0]])

    def test_m5(self):
        self.compare("paper_m/M5_*.xtg", False, [[0.00, 0.50, 2.00],
                                                 [0.50, 2.00],
                                                 [2.00]])

    def test_sofa_reduce(self):
        # Reduction collapses the difference between the two sofa trees.
        self.compare("sofa/test_reduce*.xtg", True, [[0.00]])
        self.compare("sofa/test_reduce*.xtg", False, [[1.00]])

    # def test_m6(self):
    #     self.compare("paper_m/M6_*.xtg", True, [[1.25]])
    #     self.compare("paper_m/M6_*.xtg", False, [[1.25]])
    #
    # def test_patt(self):
    #     self.compare("patt_*.xtg", True, [[0.75]])
    #     self.compare("patt_*.xtg", False, [[1.00]])

    def test_to_standard_form_growth(self):
        # read trees from *.xtg files in xtg folder
        src_trees = read_all_trees(pattern="test/test_input/test_standard_form_growth_*.xtg", is_test_nodes=True)

        # create a copy of trees to modify
        trees = [copy.deepcopy(src_tree) for src_tree in src_trees]

        self.assertEqual("XGN X XGN".replace(" ", ""), trees[0].to_string(3))
        self.assertEqual("XGN X XGN".replace(" ", ""), trees[1].to_string(3))

        trees[0].to_standard_form(3)
        trees[1].to_standard_form(3)

        self.assertEqual(trees[1].root.to_array(3), trees[0].root.to_array(3))  # equal after standartization
        self.assertNotEqual(src_trees[1].root.to_array(3), trees[1].root.to_array(3))  # changed
        self.assertEqual(src_trees[0].root.to_array(3), trees[0].root.to_array(3))  # not changed during standartization

    def test_to_standard_form_completion(self):
        # read trees from *.xtg files in xtg folder
        src_trees = read_all_trees(pattern="test/test_input/test_standard_form_compl_*.xtg", is_test_nodes=True)

        # create a copy of trees to modify
        trees = [copy.deepcopy(src_tree) for src_tree in src_trees]

        self.assertEqual("ZDG X NAN".replace(" ", ""), trees[0].to_string(3))
        self.assertEqual("NAN X GDZ".replace(" ", ""), trees[1].to_string(3))

        trees[0].to_standard_form(3)
        trees[1].to_standard_form(3)

        self.assertEqual("ZDG X NAN".replace(" ", ""), trees[0].to_string(3))
        self.assertEqual(trees[1].root.to_array(3), trees[0].root.to_array(3))  # equal after standartization
        self.assertNotEqual(src_trees[1].root.to_array(3), trees[1].root.to_array(3))  # changed
        self.assertEqual(src_trees[0].root.to_array(3), trees[0].root.to_array(3))  # not changed during standartization
unittest.main(argv=[''], verbosity=2, exit=False)
# -
| TestDistance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ROI Classification
# --
# Info about classes:
# This scripts includes a pretrained model to classify ROIs as one of the following categories:
# - **Label 0:** Neuron + In-plane ("Great")
# - **Label 1:** Neuron + Semi-in-plane ("Okay")
# - **Label 4:** Neuron + Out of plane ("Poor")
# - **Label 5:** Dendrite
#
# (*note that during the manual labeling, 0-5 is actually 1-6*) <br>
# (*label 2 is for weird blobs and shadows of neurons*) <br>
# (*label 3 is empty on purpose*) <br>
# ___
#
# Info about neural network:
# The network is trained on ~4700 ROIs from one FOV
# (mouse 6.28 , recorded on 20200903)
# - **Net_ROIClassifier_20200917.pth** state_dict file must be in path
# - This script is specifically for the above .pth parameters file. Changes to the architecture cannot be made here.
# ___
#
# Input data requirements:
# ---
# - 36x36 images
# - dim1: image number
# - dim 2,3: y,x
# - should be normalized between 0-1
#
#
# - **There are cell blocks to use the 'stat.npy' output from Suite2p and convert to the required format**
#
#
# - Full FOV size (in pixels) should be known. Default=512x1024
# ___
#
#
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# +
# import sys
# import copy
import numpy as np
import scipy
import scipy.signal
from scipy.io import savemat
import matplotlib.pyplot as plt
import pathlib
# +
## lOAD & PREPROCESS stat.npy file
## outputs: 'images' (input into CNN)
# dir_load = '\\\\research.files.med.harvard.edu\\Neurobio\\MICROSCOPE\\Rich\\data\\res2p\\scanimage data\\round 4 experiments\\mouse 6.28\\20200815\\suite2p\\plane0\\'
fileName_load = 'stat.npy'
# PATH_absolute = pathlib.Path('.').absolute()
# PATH_load_dir_statFile = f'{PATH_absolute}/'
# PATH_load_dir_statFile = '/media/rich/Home_Linux_partition/GoogleDrive_ocaml_cache/Research/Sabatini Lab Stuff - working/Code/PYTHON/ROI_Classifiers/test data_ mouse2_5 _ 20200308/'
# PATH_load_dir_statFile = '/media/rich/Home_Linux_partition/GoogleDrive_ocaml_cache/Research/Sabatini Lab Stuff - working/Code/PYTHON/ROI_Classifiers/label data/mouse 6_28 _ day 20200903/'
PATH_load_dir_statFile = pathlib.Path(r'D:\RH_local\data\scanimage data\round 5 experiments\mouse 11_20_N\20200325\suite2p\plane0') / fileName_load
print(PATH_load_dir_statFile)
# +
stat = np.load(PATH_load_dir_statFile, allow_pickle=True)
print('stat file loaded')
num_ROI = stat.shape[0]
print(f'Number of ROIs: {num_ROI}')
height = 512
width = 1024
spatial_footprints_centered = np.zeros((num_ROI, 241,241))
for i in range(num_ROI):
spatial_footprints_centered[i , stat[i]['ypix'] - np.int16(stat[i]['med'][0]) + 120, stat[i]['xpix'] - np.int16(stat[i]['med'][1]) + 120] = stat[i]['lam'] # this is formatted for coding ease (dim1: y pix) (dim2: x pix) (dim3: ROI#)
spatial_footprints_centered_crop = spatial_footprints_centered[:, 102:138 , 102:138]
# %matplotlib inline
plt.figure()
plt.imshow(np.max(spatial_footprints_centered_crop , axis=0) ** 0.2);
plt.title('spatial_footprints_centered_crop MIP^0.2');
images = spatial_footprints_centered_crop
# +
## Define model architecture
## DO NOT CHANGE ANYTHING HERE!!!
## This architecture is linked to the .pth parameters file
import torch
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout, Sigmoid, SELU, GELU, PReLU, Softplus, Softmax2d
from torch.optim import Adam, SGD
# +
dropout_prob = 0.4   # dropout rate shared by every Dropout layer below
momentum_val = 0.9   # kept for reference (BatchNorm momentum in earlier experiments)

class Net(Module):
    """CNN classifier for 36x36 centered ROI footprint images.

    The architecture (and the attribute names ``cnn_layers`` /
    ``linear_layers``) must stay in sync with the trained ``.pth``
    parameter file: four conv stages followed by a two-layer
    fully-connected head producing 6 class activations (raw logits,
    no softmax).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Convolutional feature extractor: 1 -> 16 -> 16 -> 32 -> 64 channels.
        # With a 36x36 input, the spatial size collapses to 1x1 at the end.
        self.cnn_layers = Sequential(
            Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0),
            ReLU(),
            Conv2d(in_channels=16, out_channels=16, kernel_size=5, stride=1, padding=0),
            MaxPool2d(kernel_size=2, stride=2),
            ReLU(),
            Dropout(dropout_prob*1),
            Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=1),
            MaxPool2d(kernel_size=2, stride=2),
            ReLU(),
            Dropout(dropout_prob*1),
            Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=0),
            MaxPool2d(kernel_size=2, stride=2),
            ReLU(),
            Dropout(dropout_prob*1),
        )
        # Fully-connected head: 64 features -> 256 -> 6 class logits.
        self.linear_layers = Sequential(
            Linear(in_features=64, out_features=256),
            ReLU(),
            Dropout(dropout_prob*1),
            Linear(in_features=256, out_features=6),
        )

    def forward(self, x):
        """Forward pass; ``x`` is (batch, 1, H, W), returns (batch, 6) logits."""
        features = self.cnn_layers(x)
        flat = torch.flatten(features, 1)
        return self.linear_layers(flat)
# +
#### ENTER FILENAME + PATH OF NETWORK .pth FILE
PATH_load_name_netParams = 'Net_trainedOnAug_20201004_trainingSet_mouse628_20200903and20200815.pth'
# PATH_load_dir_netParams = 'G:/My Drive/Research/Sabatini Lab Stuff - working/Code/PYTHON/ROI_Classifiers/'
PATH_absolute = pathlib.Path('.').absolute()
PATH_load_dir_netParams = f'{PATH_absolute}/'  # load weights from the current working directory
# Restore trained weights and switch to eval mode (disables dropout).
model = Net()
model.load_state_dict(torch.load(f'{PATH_load_dir_netParams}{PATH_load_name_netParams}'))
model.eval()
# +
# Classify every cropped footprint: add a channel axis -> (N, 1, 36, 36).
input_x = torch.tensor(images[:,None,:,:]).type('torch.FloatTensor')
output = model(input_x).cpu().detach().numpy()
# NOTE(review): np.exp alone is NOT a normalized softmax (rows do not sum
# to 1), so 'prob' is only monotonic in the true class probabilities.
# argmax below is unaffected, but treat 'prob' values with care.
softmax = np.exp(output)
prob = softmax
predictions = np.argmax(list(prob), axis=1)
# +
## Show some of the results
# %matplotlib inline
ind_to_plot =np.arange(100,200)  # subset of ROIs to visualize
# Raw network activations (logits) for the selected ROIs.
fig_output = plt.figure();
plt.imshow(output[ind_to_plot,:].T);
plt.title('output activations');
fig_output.set_size_inches(18.5, 10.5)
# Unnormalized 'probabilities' (see note where prob is computed).
fig_prob = plt.figure();
plt.imshow(prob[ind_to_plot,:].T);
plt.title('output probabilities');
fig_prob.set_size_inches(18.5, 10.5)
# Line plot of activations for classes 0, 1, 4 and 5 only.
fig_prob_lines = plt.figure();
plt.plot(output[ind_to_plot, :][:,np.array([0,1,4,5])])
plt.title('output activations');
plt.xlabel('ROI num')
fig_prob_lines.set_size_inches(18.5, 4.5)
plt.legend(('0','1','4','5'))
# Distribution of predicted labels.
fig_predHist = plt.figure();
plt.hist(predictions , 15);
plt.xlabel('label')
# Predicted label per ROI, with a Savitzky-Golay smoothed overlay.
fig_predLines = plt.figure();
plt.plot(predictions);
plt.plot(scipy.signal.savgol_filter(predictions,19,3));
plt.xlabel('ROI num')
plt.ylabel('label')
# Pairwise scatter of activations between selected classes.
fig_scatterGroups0 = plt.figure();
plt.scatter(output[ind_to_plot , np.array([0])] , output[ind_to_plot , np.array([1])])
plt.scatter(output[ind_to_plot , np.array([0])] , output[ind_to_plot , np.array([4])])
plt.scatter(output[ind_to_plot , np.array([0])] , output[ind_to_plot , np.array([5])])
fig_scatterGroups1 = plt.figure();
plt.scatter(output[ind_to_plot , np.array([1])] , output[ind_to_plot , np.array([4])])
plt.scatter(output[ind_to_plot , np.array([1])] , output[ind_to_plot , np.array([5])])
fig_scatterGroups4 = plt.figure();
plt.scatter(output[ind_to_plot , np.array([4])] , output[ind_to_plot , np.array([5])])
# +
## UNCOMMENT to Look at individual cells
## Press ENTER to advance. Press 9 to exit
class_to_test = 1  # which predicted class to page through
# %matplotlib qt
plt.figure()
plt.pause(0.5)
input_val = 0
iter_start = 0
# Interactive review loop: display each ROI predicted as class_to_test;
# ENTER advances, entering a number >= 7 exits both loops.
# NOTE(review): np.int8(input_val) raises on non-numeric input and wraps
# for values outside [-128, 127] — fine for interactive use, but fragile.
while np.int8(input_val) < 7:
    for ii in np.where(predictions ==class_to_test)[0]:
        if ii >= iter_start:
            plt.imshow(images[ii, : , :])
            plt.title(ii)
            plt.show(block=False)
            plt.pause(0.05)
            input_val = input()
            if not input_val:
                continue
            if np.int8(input_val) >=7:
                break
            plt.pause(0.05)
# -
# ## Save data as a .mat file (for CellReg or whatever)
#
# # PATH_save_dir = 'G:/My Drive/Research/Sabatini Lab Stuff - working/Code/PYTHON/ROI_Classifiers/'
# PATH_absolute = pathlib.Path('.').absolute()
# PATH_save_dir = f'{PATH_absolute}/'
# PATH_save_name = 'outputs_CNN.mat'
#
# outputs_CNN = dict()
# outputs_CNN = {
# 'CNN_outputs_raw': output ,
# 'CNN_probabilities': prob ,
# 'CNN_predictions': predictions}
#
# scipy.io.savemat(f'{PATH_save_dir}{PATH_save_name}' , outputs_CNN)
labels_posthoc = np.zeros(num_ROI)  # manual corrections; 0 = not relabeled
# +
# # %matplotlib qt
# Second interactive pass: page through ROIs currently labeled 5 and type
# a replacement label (stored 1-indexed here; 1 is subtracted when merged
# below). ENTER keeps the current label; a number >= 7 exits.
# NOTE(review): this cell reads labels_posthoc_filledIn, which is only
# assigned in a cell further down (and its deepcopy initializer is
# commented out there) — it relies on a previous run. Confirm execution
# order before re-running the notebook top-to-bottom.
plt.figure()
# plt.imshow(spatial_footprints_crop[: , : , 0])
plt.pause(0.5)
input_val = 0
iter_start = 50
num_ROI = images.shape[0]
while np.int8(input_val) < 7:
    for ii in np.where(labels_posthoc_filledIn ==5)[0]:
        if ii >= iter_start:
            # print(ii)
            plt.imshow(images[ii, : , :])
            plt.title(ii)
            plt.show(block=False)
            plt.pause(0.25)
            input_val = input()
            if not input_val:
                continue
            if np.int8(input_val) >=7:
                break
            if input_val:
                labels_posthoc[ii] = np.int8(input_val)
            plt.pause(0.25)
            # print(f'Num labeled: {ii}')
# -
# # %matplotlib inline
# Inspect the manual corrections.
plt.figure()
plt.plot(labels_posthoc)
plt.figure()
plt.hist(labels_posthoc[labels_posthoc > 0]);
import copy
# Merge manual corrections into the CNN predictions: where a nonzero
# correction exists, overwrite with (entered label - 1) to return to the
# 0-indexed label scheme.
labels_posthoc_indOfCorrected_bool = labels_posthoc > 0
# labels_posthoc_filledIn = copy.deepcopy(predictions)
# NOTE(review): the deepcopy initializer above is commented out, so the
# next line mutates a labels_posthoc_filledIn left over from an earlier
# run — confirm this is intentional before rerunning from scratch.
labels_posthoc_filledIn[labels_posthoc_indOfCorrected_bool] = labels_posthoc[labels_posthoc_indOfCorrected_bool] -1
plt.figure()
plt.plot(labels_posthoc_filledIn)
plt.figure()
plt.hist(labels_posthoc_filledIn,20);
# Persist the final (corrected) labels.
np.save('labels_posthoc_all_NEW22222.npy',labels_posthoc_filledIn)
# np.save('labels_posthoc.npy',labels_posthoc)
| ROI_Classifiers/ROI_Classifier_pth_files/ROI_Classifier_20201004/script_ROIClassifier_20201004.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting aggregate variables
#
# Pyam offers many great visualisation and analysis tools. In this notebook we highlight the `aggregate` and `stack_plot` methods of an `IamDataFrame`.
# +
import numpy as np
import pandas as pd
import pyam
# -
# %matplotlib inline
import matplotlib.pyplot as plt
# Here we provide some sample data for this tutorial. This data is for a single model-scenario-region combination but provides multiple subsectors of CO$_2$ emissions. The emissions in the subsectors are both positive and negative and so provide a good test of the flexibility of our aggregation and plotting routines.
# Tutorial sample data in IAMC "wide" format: one row per
# (model, scenario, region, variable, unit), one column per year.
_columns = ['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010, 2015, 2020]
_rows = [
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Oil', 'Mt CO2/yr', 2, 3.2, 2.0, 1.8],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|Gas', 'Mt CO2/yr', 1.3, 1.6, 1.0, 0.7],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Energy|BECCS', 'Mt CO2/yr', 0.0, 0.4, -0.4, 0.3],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Cars', 'Mt CO2/yr', 1.6, 3.8, 3.0, 2.5],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Tar', 'Mt CO2/yr', 0.3, 0.35, 0.35, 0.33],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|Agg', 'Mt CO2/yr', 0.5, -0.1, -0.5, -0.7],
    ['IMG', 'a_scen', 'World', 'Emissions|CO2|LUC', 'Mt CO2/yr', -0.3, -0.6, -1.2, -1.0],
]
df = pyam.IamDataFrame(pd.DataFrame(_rows, columns=_columns))
df.head()
# Pyam's `stack_plot` method plots the stacks in the clearest way possible, even when some emissions are negative. The optional `total` keyword argument also allows the user to include a total line on the plot.
df.stack_plot();
df.stack_plot(total=True);
# The appearance of the stackplot can be simply controlled via ``kwargs``. The appearance of the total line is controlled by passing a dictionary to the `total` keyword argument instead of a boolean.
df.stack_plot(alpha=0.5, total={"color": "grey", "ls": "--", "lw": 2.0});
# If the user wishes, they can firstly filter their data before plotting.
df.filter(variable="Emissions|CO2|Energy*").stack_plot(total=True);
# Using `aggregate`, it is possible to create arbitrary sums of sub-sectors before plotting.
# Work on a copy so the original IamDataFrame is untouched.
pdf = df.copy()
afoluluc_vars = ["Emissions|CO2|LUC", "Emissions|CO2|Agg"]
# Everything that is not AFOLU/LUC counts as fossil here.
fossil_vars = list(set(pdf.variables()) - set(afoluluc_vars))
# Append the two aggregate variables to the dataframe in place.
pdf.aggregate(
    "Emissions|CO2|AFOLULUC",
    components=afoluluc_vars,
    append=True
)
pdf.aggregate(
    "Emissions|CO2|Fossil",
    components=fossil_vars,
    append=True
)
pdf.filter(variable=[
    "Emissions|CO2|AFOLULUC",
    "Emissions|CO2|Fossil"
]).stack_plot(total=True);
| doc/source/tutorials/aggregating_variables_and_plotting_with_negative_values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import os, glob, cv2, random
import seaborn as sns
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow import keras
import numpy as np
# # Preview
# +
path = "./dataset/"
# Prepare the training data: every image in ./dataset/ whose filename
# starts with "close" is labeled 0 (eye closed), everything else 1 (open).
filenames = os.listdir(path)
X=[]
y=[]
categories=[]
for filename in filenames:
    image = Image.open(path + filename)
    image = np.array(image)
    X.append(image)
    category=filename.split("_")[0]
    if category =="close":
        y.append([0])
    else:
        y.append([1])
# -
X = np.array(X)
y = np.array(y)
X.shape, y.shape
# Add an explicit channel dimension: (N, 26, 34) -> (N, 26, 34, 1).
n_total = len(X)
X_result = np.empty((n_total, 26, 34,1))
for i, x in enumerate(X):
    img = x.reshape((26, 34,1))
    X_result[i] = img
# +
from sklearn.model_selection import train_test_split

# Hold out 10% of the data for validation and cache the split as .npy.
x_train, x_val, y_train, y_val = train_test_split(X_result, y, test_size=0.1)
print(x_train.shape, y_train.shape)
print(x_val.shape, y_val.shape)
np.save('dataset/x_train.npy', x_train)
np.save('dataset/y_train.npy', y_train)
np.save('dataset/x_val.npy', x_val)
np.save('dataset/y_val.npy', y_val)
# -
# Sanity-check one sample from each split.
plt.subplot(2, 1, 1)
plt.title(str(y_train[0]))
plt.imshow(x_train[0].reshape((26, 34)), cmap='gray')
plt.subplot(2, 1, 2)
plt.title(str(y_val[3]))
plt.imshow(x_val[3].reshape((26, 34)), cmap='gray')
# Label distributions of the two splits.
sns.distplot(y_train, kde=False)
sns.distplot(y_val, kde=False)
import datetime
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Activation, Conv2D, Flatten, Dense, MaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
plt.style.use('dark_background')
# +
# Reload the freshly saved split...
x_train2 = np.load('dataset/x_train.npy').astype(np.float32)
y_train2 = np.load('dataset/y_train.npy').astype(np.float32)
x_val2 = np.load('dataset/x_val.npy').astype(np.float32)
y_val2 = np.load('dataset/y_val.npy').astype(np.float32)
print(x_train2.shape, y_train2.shape)
print(x_val2.shape, y_val2.shape)
# +
# ...and a second, pre-existing dataset with the same layout.
x_train1 = np.load('make_model/dataset/x_train.npy').astype(np.float32)
y_train1 = np.load('make_model/dataset/y_train.npy').astype(np.float32)
x_val1 = np.load('make_model/dataset/x_val.npy').astype(np.float32)
y_val1 = np.load('make_model/dataset/y_val.npy').astype(np.float32)
print(x_train1.shape, y_train1.shape)
print(x_val1.shape, y_val1.shape)
# +
# Combine both datasets into one training/validation corpus.
x_train = np.concatenate((x_train1,x_train2),axis=0)
y_train = np.concatenate((y_train1,y_train2),axis=0)
x_val = np.concatenate((x_val1,x_val2),axis=0)
y_val = np.concatenate((y_val1,y_val2),axis=0)
print(x_train.shape, y_train.shape)
print(x_val.shape, y_val.shape)
# -
# Visual sanity check of one sample per split.
plt.subplot(2, 1, 1)
plt.title(str(y_train[0]))
plt.imshow(x_train[0].reshape((26, 34)), cmap='gray')
plt.subplot(2, 1, 2)
plt.title(str(y_val[4]))
plt.imshow(x_val[4].reshape((26, 34)), cmap='gray')
# +
# Augment training images (rotation/shift/shear) and rescale to [0, 1];
# validation only gets the rescale.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2
)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(
    x=x_train, y=y_train,
    batch_size=32,
    shuffle=True
)
val_generator = val_datagen.flow(
    x=x_val, y=y_val,
    batch_size=32,
    shuffle=False
)
# +
# Small CNN for binary eye-state classification of 26x34 grayscale crops:
# three conv/pool stages, a 512-unit dense layer, one sigmoid output.
inputs = Input(shape=(26, 34, 1))
net = Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(inputs)
net = MaxPooling2D(pool_size=2)(net)
net = Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(net)
net = MaxPooling2D(pool_size=2)(net)
net = Conv2D(128, kernel_size=3, strides=1, padding='same', activation='relu')(net)
net = MaxPooling2D(pool_size=2)(net)
net = Flatten()(net)
net = Dense(512)(net)
net = Activation('relu')(net)
net = Dense(1)(net)
outputs = Activation('sigmoid')(net)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.summary()
# +
# Timestamped run id used for the checkpoint filename.
start_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')

# Train with on-the-fly augmentation; keep only the best val_acc weights
# and shrink the learning rate when validation accuracy plateaus.
model.fit_generator(
    train_generator, epochs=50, validation_data=val_generator,
    callbacks=[
        ModelCheckpoint('./gaze_tracking/trained_models/%s.h5' % (start_time), monitor='val_acc', save_best_only=True, mode='max', verbose=1),
        ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=10, verbose=1, mode='auto', min_lr=1e-05)
    ]
)
# +
from sklearn.metrics import accuracy_score, confusion_matrix
import seaborn as sns

# Evaluate the best checkpoint on the validation set.
model = load_model('./gaze_tracking/trained_models/%s.h5' % (start_time))

y_pred = model.predict(x_val/255.)
# Threshold the sigmoid outputs at 0.5. Use the builtin int here: the
# np.int alias was deprecated in NumPy 1.20 and removed in 1.24, so
# astype(np.int) crashes on current NumPy.
y_pred_logical = (y_pred > 0.5).astype(int)

print ('test acc: %s' % accuracy_score(y_val, y_pred_logical))
cm = confusion_matrix(y_val, y_pred_logical)
sns.heatmap(cm, annot=True)
# -
# Distribution of the raw sigmoid scores.
ax = sns.distplot(y_pred, kde=False)
| train_custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# gmail_spam_detection:
#
# - our goal for this competition is to build a spam filter by predicting whether an email message is spam (junk email) or ham (good email). This is a classic data set derived from a *bag-of-words* model applied 4601 email messages collected at Hewlett-Packard Labs. The features consist of the relative frequencies of 57 of the most commonly occurring words and punctuation marks in all the training the email messages. For this problem, not all errors are equal; misclassifying spam is not as bad as misclassifying ham!
#
#
# Importing the dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
# Load the labeled email corpus (columns include the raw text, the string
# label 'ham'/'spam', and the numeric label_num).
data=pd.read_csv("spam_ham_dataset.csv")
data.head()
data.tail()
data.shape
data.columns
data.info()
data.describe()
# Class balance: label_num 0 = ham (non spam), 1 = spam.
Vis = data['label_num'].value_counts()
Vis.plot(kind="bar")
plt.xticks(np.arange(2), ('Non spam', 'spam'),rotation=0)
plt.show()
# NOTE(review): this 'Category' column is created but never used below —
# the models train on the original 'label' column. Kept for compatibility.
data.loc[data['label'] == 'spam', 'Category',] = 0
data.loc[data['label'] == 'ham', 'Category',] = 1

# Features are the raw email text; targets are the 'ham'/'spam' strings.
X = data['text']
Y = data['label']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=3)

# TF-IDF features. lowercase must be the boolean True, not the string
# 'True': recent scikit-learn versions validate parameter types and
# reject the string form.
feature_extraction = TfidfVectorizer(min_df = 1, stop_words='english', lowercase=True)
X_train_transformed = feature_extraction.fit_transform(X_train)
X_test_transformed = feature_extraction.transform(X_test)

# Baseline: logistic regression, scored on the training data.
model = LogisticRegression()
model.fit(X_train_transformed, Y_train)
prediction_on_training_data = model.predict(X_train_transformed)
accuracy_on_training_data = accuracy_score(Y_train, prediction_on_training_data)
print('Accuracy on training data : ', accuracy_on_training_data *100)
# k-nearest neighbours on the TF-IDF features (training accuracy only).
knn_cv = KNeighborsClassifier(n_neighbors=3)
knn_cv.fit(X_train_transformed, Y_train)
prediction_on_training_data = knn_cv.predict(X_train_transformed)
accuracy_on_training_data = accuracy_score(Y_train, prediction_on_training_data)
print('Accuracy on training data : ', accuracy_on_training_data *100)

rfc = RandomForestClassifier(random_state = 42)
# Cross-validate on the vectorized features: the original
# cross_val_score(rfc, X, Y) fit the forest on the raw text Series,
# which raises because estimators cannot consume unvectorized strings.
cv_scores = cross_val_score(rfc, X_train_transformed, Y_train, cv=5)
rfc.fit(X_train_transformed, Y_train)
prediction_on_training_data =rfc.predict(X_train_transformed)
accuracy_on_training_data = accuracy_score(Y_train, prediction_on_training_data)
print('Accuracy on training data : ', accuracy_on_training_data *100)
# # Conclusion:
# Hence, the accuracy of random forest is more, this model is used to build the model.
| Datascience_With_Python/Natural Language Processing/Videos/Gmail Spam Detection/gmail_spam_detection.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Diferente de um extrator de caracteristicas, nesse arquivo apenas focamos em entender se apenas contarmos com apenas alguns lags anteriores é suficiente para realizar uma classifciação de qualidade.
#
# Basicamente, apenas pegamos os últimos momentos capturados pelo percurso e tranformamos esses momentos finais em atributos de uma instância de um possível modelo. Nessa simples abordagem é muito utilizada em regressão para previsão dos próximos passos ou valores.
#
# Com essa abordagem podemos simplificar e deixar por conta dos classificadores extrair ou linearmente separar as classes binárias, como é o nosso caso.
#
# Abaixo apenas consideramos os últimos **2 lags** e assim fazer com que o classificador, SVM, separe cada instância.
# +
library(caret)
library(kernlab)
library(pROC)

# Sequence-to-group mapping and binary movement targets for the
# MovementAAL dataset.
groups <- read.csv(file="./MovementAAL/groups/MovementAAL_DatasetGroup.csv",head=TRUE,sep=",")
targetAll <- read.csv(file="./MovementAAL/dataset/MovementAAL_target.csv",head=TRUE,sep=",")
# +
#Group 1
# Read every RSS sequence belonging to dataset group 1 together with its
# binary movement target.
# NOTE(review): this cell is duplicated (nearly verbatim) for groups 2, 3
# and the combined dataset below — a helper function would remove the
# copy/paste.
allDataGroup1<-list()
allDataGroup1Target<-list()
groups1 = groups[groups$dataset_ID==1, ]
index<-1
for (id in groups1$X.sequence_ID){
    caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="")
    allDataGroup1[[index]]<-read.csv(file=caminho,head=TRUE,sep=",")
    allDataGroup1Target[index]<-targetAll[[2]][id]
    index<-index+1
}

# Keep only the last (minStepsBack + 1) time steps of each sequence and
# flatten them into one feature row per sequence (lag features).
wtData <- NULL
minStepsBack = 1
for (i in 1:length(allDataGroup1)){
    aa<- t(unlist(allDataGroup1[[i]][(nrow(allDataGroup1[[i]])-minStepsBack):nrow(allDataGroup1[[i]]),]))
    wtData <- rbind(wtData, aa)
}
wtData <- as.data.frame(wtData)
data = unlist(allDataGroup1Target)
target = factor(data,labels=c("No","Yes"))
frameDataFinal <- data.frame(cbind(target, wtData))
head(frameDataFinal)
##use only lagged data
# -
# #### Média e Desvio padrão respectivamente.
# ##### Group 1, com validação por holdout repetido (10 repetições de partições 70/30)
# +
# Repeated holdout validation: 10 random 70/30 train/test partitions,
# a linear SVM fit on each, accuracy collected per repetition.
inTraining <- createDataPartition(frameDataFinal$target, p = .7, list = TRUE,times=10)
allAccuracyGroup1 <- c()
for( i in 1:length(inTraining)){
    training <- frameDataFinal[ inTraining[[i]],]
    testing <- frameDataFinal[-inTraining[[i]],]
    fitControl <- trainControl(method = "none", classProbs = TRUE)
    svmLinearFit <- train(target ~ ., data = training,
                    method = "svmLinear",
                    trControl = fitControl,
                    family=binomial)
    preds<- predict(svmLinearFit, newdata = testing)
    matrix <- confusionMatrix(preds,frameDataFinal$target[-inTraining[[i]]])
    allAccuracyGroup1 <- c(allAccuracyGroup1,matrix[3]$overall[[1]])
}
mean(allAccuracyGroup1)
sd(allAccuracyGroup1)
# +
#Group 2
# Same lag-feature construction as group 1, for dataset group 2.
allDataGroup2<-list()
allDataGroup2Target<-list()
groups2 = groups[groups$dataset_ID==2, ]
index<-1
for (id in groups2$X.sequence_ID){
    caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="")
    allDataGroup2[[index]]<-read.csv(file=caminho,head=TRUE,sep=",")
    allDataGroup2Target[index]<-targetAll[[2]][id]
    index<-index+1
}

# Flatten the last two time steps of each sequence into one feature row.
wtData <- NULL
minStepsBack = 1
for (i in 1:length(allDataGroup2)){
    aa<- t(unlist(allDataGroup2[[i]][(nrow(allDataGroup2[[i]])-minStepsBack):nrow(allDataGroup2[[i]]),]))
    wtData <- rbind(wtData, aa)
}
wtData <- as.data.frame(wtData)
data = unlist(allDataGroup2Target)
target = factor(data,labels=c("No","Yes"))
frameDataFinal <- data.frame(cbind(target, wtData))
head(frameDataFinal)
##use only lagged data
# -
# #### Média e Desvio padrão respectivamente.
# ##### Group 2, com validação por holdout repetido (10 repetições de partições 70/30)
# +
# Repeated holdout (10 x 70/30) with a linear SVM for group 2.
inTraining <- createDataPartition(frameDataFinal$target, p = .7, list = TRUE,times=10)
allAccuracyGroup2 <- c()
for( i in 1:length(inTraining)){
    training <- frameDataFinal[ inTraining[[i]],]
    testing <- frameDataFinal[-inTraining[[i]],]
    fitControl <- trainControl(method = "none", classProbs = TRUE)
    svmLinearFit <- train(target ~ ., data = training,
                    method = "svmLinear",
                    trControl = fitControl,
                    family=binomial)
    preds<- predict(svmLinearFit, newdata = testing)
    matrix <- confusionMatrix(preds,frameDataFinal$target[-inTraining[[i]]])
    allAccuracyGroup2 <- c(allAccuracyGroup2,matrix[3]$overall[[1]])
}
mean(allAccuracyGroup2)
sd(allAccuracyGroup2)
# +
#Group 3
# Same lag-feature construction as group 1, for dataset group 3.
allDataGroup3<-list()
allDataGroup3Target<-list()
groups3 = groups[groups$dataset_ID==3, ]
index<-1
for (id in groups3$X.sequence_ID){
    caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="")
    allDataGroup3[[index]]<-read.csv(file=caminho,head=TRUE,sep=",")
    allDataGroup3Target[index]<-targetAll[[2]][id]
    index<-index+1
}

# Flatten the last two time steps of each sequence into one feature row.
wtData <- NULL
minStepsBack = 1
for (i in 1:length(allDataGroup3)){
    aa<- t(unlist(allDataGroup3[[i]][(nrow(allDataGroup3[[i]])-minStepsBack):nrow(allDataGroup3[[i]]),]))
    wtData <- rbind(wtData, aa)
}
wtData <- as.data.frame(wtData)
data = unlist(allDataGroup3Target)
target = factor(data,labels=c("No","Yes"))
frameDataFinal <- data.frame(cbind(target, wtData))
head(frameDataFinal)
##use only lagged data
# -
# #### Média e Desvio padrão respectivamente.
# ##### Group 3, com validação por holdout repetido (10 repetições de partições 70/30)
# +
# Repeated holdout (10 x 70/30) with a linear SVM for group 3.
inTraining <- createDataPartition(frameDataFinal$target, p = .7, list = TRUE,times=10)
allAccuracyGroup3 <- c()
for( i in 1:length(inTraining)){
    training <- frameDataFinal[ inTraining[[i]],]
    testing <- frameDataFinal[-inTraining[[i]],]
    fitControl <- trainControl(method = "none", classProbs = TRUE)
    svmLinearFit <- train(target ~ ., data = training,
                    method = "svmLinear",
                    trControl = fitControl,
                    family=binomial)
    preds<- predict(svmLinearFit, newdata = testing)
    matrix <- confusionMatrix(preds,frameDataFinal$target[-inTraining[[i]]])
    allAccuracyGroup3 <- c(allAccuracyGroup3,matrix[3]$overall[[1]])
}
mean(allAccuracyGroup3)
sd(allAccuracyGroup3)
# +
#All Groups
# Build one combined dataset with the lag features of every sequence,
# regardless of its group.
allData<-list()
allDataTarget<-list()
targetAll <- read.csv(file="./MovementAAL/dataset/MovementAAL_target.csv",head=TRUE,sep=",")
index<-1
for (id in targetAll$X.sequence_ID){
    caminho <-paste("./MovementAAL/dataset/MovementAAL_RSS_",id,".csv",sep="")
    allData[[index]]<-read.csv(file=caminho,head=TRUE,sep=",")
    allDataTarget[index]<-targetAll[[2]][id]
    index<-index+1
}

# Flatten the last two time steps of each sequence into one feature row.
wtData <- NULL
minStepsBack = 1
for (i in 1:length(allData)){
    aa<- t(unlist(allData[[i]][(nrow(allData[[i]])-minStepsBack):nrow(allData[[i]]),]))
    wtData <- rbind(wtData, aa)
}
wtData <- as.data.frame(wtData)
data = unlist(allDataTarget)
target = factor(data,labels=c("No","Yes"))
frameDataFinal <- data.frame(cbind(target, wtData))
head(frameDataFinal)
# -
# #### Média e Desvio padrão respectivamente.
# ##### Todos os Groups em uma base apenas, com validação por holdout repetido (10 repetições de partições 70/30)
# +
# Repeated holdout (10 x 70/30) with a linear SVM on the combined dataset.
inTraining <- createDataPartition(frameDataFinal$target, p = .7, list = TRUE,times=10)
allAccuracy <- c()
for( i in 1:length(inTraining)){
    training <- frameDataFinal[ inTraining[[i]],]
    testing <- frameDataFinal[-inTraining[[i]],]
    fitControl <- trainControl(method = "none", classProbs = TRUE)
    svmLinearFit <- train(target ~ ., data = training,
                    method = "svmLinear",
                    trControl = fitControl,
                    family=binomial)
    preds<- predict(svmLinearFit, newdata = testing)
    matrix <- confusionMatrix(preds,frameDataFinal$target[-inTraining[[i]]])
    allAccuracy <- c(allAccuracy,matrix[3]$overall[[1]])
}
mean(allAccuracy)
sd(allAccuracy)
# -
# #### Matrix de confusão
# #### Todos os Groups em uma base apenas
# +
#All groups datasets Confusion Matrix
# Single 70/30 holdout to produce one confusion matrix for the combined
# dataset (the fitted model is reused for the ROC curve below).
inTraining <- createDataPartition(frameDataFinal$target, p = .7, list = TRUE,times=1)
training <- frameDataFinal[ inTraining[[1]],]
testing <- frameDataFinal[-inTraining[[1]],]
fitControl <- trainControl(method = "none", classProbs = TRUE)
svmLinearFit <- train(target ~ ., data = training,
                method = "svmLinear",
                trControl = fitControl,
                family=binomial)
preds<- predict(svmLinearFit, newdata = testing)
matrix <- confusionMatrix(preds,frameDataFinal$target[-inTraining[[1]]])
matrix
# -
# #### Curva ROC e AUC
# #### Todos os Groups em uma base apenas
#ROC CURVE AND AUC
# Predicted probabilities of the positive class ("Yes") feed the ROC curve
# for the single holdout split fit above.
predsProb<- predict(svmLinearFit, newdata = testing,type="prob")
outcome<- predsProb[,2]
classes <- frameDataFinal$target[-inTraining[[1]]]
rocobj <- roc(classes, outcome,levels=c("No","Yes"))
plot(rocobj)
| No Extractor Only Lags.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <figure>
# <IMG SRC="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/Fachhochschule_Südwestfalen_20xx_logo.svg/320px-Fachhochschule_Südwestfalen_20xx_logo.svg.png" WIDTH=250 ALIGN="right">
# </figure>
#
# # Machine Learning
# ### Sommersemester 2021
# Prof. Dr. <NAME>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.datasets import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import *
from ipywidgets.widgets import IntSlider
from ipywidgets import interact
# # Klassifikation mit Support Vector Machines
# Bisher haben wir in überwachten ML-Agorithmen Daten verwendet, um zu lernen, zu welchen Klassen die Daten gehören:
# - Die **logistische Regression** lernt Abhängigkeiten zwischen Eingabedaten und Zielen
# - **Entscheidungsbäume** verwenden eine Hierarchiestruktur, um die Auswirkungen verschiedener Attribute auf das Ziel abzuwägen
#
# In beiden Verfahren werden Entscheidungsgrenzen gelernt, mit denen sich die Punkte des Trainingsdatensatzes möglichst gut separieren lassen.
# Der konkrete Verlauf der Entscheidungsgrenzen wird dabei nicht berücksichtigt.
# D.h., wenn es mehrere Entscheidungsgrenzen gibt, die die Punkte bestmöglich klassifizieren, liefern die o.g. Verfahren nicht unbedingt die beste Entscheidungsgrenze.
#
# Aber was bedeutet überhaupt *beste* Entscheidungsgrenze?
# Man kann sich leicht vorstellen, dass eine Entscheidungsgrenze umso besser ist, je mehr sie durch die *Mitte* der Raumes verläuft, der zwischen den *Punktwolken* der einzelnen Klassen liegt.
# Wenn die Entscheidungsgrenze so eingezogen wird, ist der Abstand zwischen den *äußeren* Punkten der Trainingsdaten und der Entscheidungsgrenze maximal groß.
# Mathematisch gesprochen erzeugt ein Klassifikator, der nach diesem Prinzip funktioniert, Hyperebenen mit größtmöglicher geometrischer Marge.
# Man spricht daher im Englischen auch von einem *Maximal Margin Classifier*.
#
# Die *Support Vector Machine* (SVM) ist eine Form der *Maximal Margin Classifier*.
# Mit SVMs lassen sich sowohl lineare als auch nichtlineare Klassifikationsaufgaben lösen.
# Ebenfalls können SVMs zur Regression oder Ausreißer-Erkennung eingesetzt werden.
# SVMs zählen zur Klasse der Instanz-basierten Lernverfahren und eignen sich besonders zur Klassifikation komplexer Datensätze (viele Merkmale) mit kleiner oder mittlerer Größe (moderate Anzahl von Datenpunkten).
# Im Gegensatz zu statistischen Ansätzen (wie etwa der der logistischen Regression) basiert die SVM auf den geometrischen Eigenschaften der Daten.
# Dabei ist die Bedeutung der Variablen bei der SVM weniger relevant, weswegen dieses Modell gut mit unstrukturierten und halbstrukturierten Daten wie Texten und Bildern funktioniert.
#
# +
# Number of classes and points for the synthetic demo data set.
n_classes = 2
n_data = 50
# generating two-class dataset (fixed random_state for reproducibility)
X, y = make_blobs(n_samples=n_data, centers=n_classes, random_state=1, center_box=(0, 10))
# Scale both features into [0, 1] so the plots below can use fixed axis limits.
X = MinMaxScaler().fit_transform(X)
# One colour per point: red = class 0, lime = class 1.
y_col = np.array(['red' if i == 0 else 'lime' for i in y])
# plotting the data according to class (circles vs. squares)
for i, s, m in [(0, 100, 'o'), (1, 80, 's')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black')
plt.axis('off')
plt.show()
# -
# Die beiden Datencluster sind eindeutig linear trennbar.
# Dies ist nicht immer der Fall, für den Moment gehen wir aber davon aus, dass dies für unseren Datensatz gilt.
# Wo können wir die Grenze ziehen?
# In der folgenden Abbildung ziehen wir mehrere möglich Hyperebenen zwischen die Punktewolken ein.
# +
xline = np.linspace(0,1)
# plotting the data according to class
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label)
# plotting potential separating hyperplanes: two slanted lines (slope m, intercept b) ...
for m, b in [(-1, 1), (0, 0.45)]:
    plt.plot(xline, m * xline + b)
# ... and one vertical line at x = 0.55
plt.plot(xline*0+0.55, xline)
plt.axis('off')
plt.legend()
plt.show()
# -
# Jede der Hyperebenen separiert die *Positiven* und *Negativen* Datenpunkte optimal.
# Aber nicht alle Hyperebenen sind **optimal generalisierend**.
# Bei der grünen und orangefarbenen Hyperebene gibt es *Bereiche*, die zwar nah an den Punkten einer Klasse liegen, aber dem Bereich der anderen Klasse zugeordnet sind.
#
# Die blaue Hyperebene maximiert auch die Bereiche *um* die Punktwolken, sie schafft also die breiteste Marge (eng. *Margin*) zwischen den Punkten *am Rand* und der trennenden Hyperebene.
# Daher ist sie objektiv besser als die beiden anderen eingezeichneten Möglichkeiten.
# +
xline = np.linspace(0, 1)
yline = -1.5*xline+1.3
delta = 0.35
# Plot the data points of both classes (circles = negative, squares = positive).
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label)
# Draw the separating hyperplane (solid) and the two margin lines (dashed).
# Bug fix: the per-line style was previously ignored and 'k--' was always
# used, so the central hyperplane was drawn dashed instead of solid.
for shift, style in [(1, 'k--'), (0, 'k'), (-1, 'k--')]:
    plt.plot(xline, yline + delta * shift, style, alpha=0.8)
# Annotations: label the hyperplane and the width of the margin.
plt.text(0.26, 0.6, s='Trennende\nHyperebene\n', fontsize=13, rotation=-45)
# `annotate(s=...)` was removed in Matplotlib 3.3; pass the text positionally.
plt.annotate('', xy=(xline[30], yline[30]), xytext=(xline[36], yline[36] + delta),
             arrowprops=dict(arrowstyle='<->'))
plt.text(0.7, 0.3, s='Marge', fontsize=13, rotation=-45)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.legend()
plt.show()
# -
# **SVM Klassifizierer** finden Hyperebenen mit maximaler Marge, sind also als *Maximal Margin Classifier* einzustufen.
# Um eine *Support Vector Machine* mit `sklearn` zu erstellen, benötigen wir ein Objekt vom Typ `SVM`. Dieses Modell trainieren wir auf den entsprechenden Datensatz mit der `fit`-Funktion:
# Linear SVM; the very large penalty C approximates a hard-margin classifier.
model = SVC(kernel='linear', C=1e5).fit(X, y)
# Nun können wir die gefundenen Hyperebenen darstellen:
# +
# plotting data
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label)
# support vectors (marked with large hollow circles)
plt.scatter(model.support_vectors_[:, 0],
            model.support_vectors_[:, 1],
            s=400, edgecolors='black', facecolors='none')
# create grid to evaluate model
xx = np.linspace(0, 1, 30)
yy = np.linspace(0, 1, 30)
# Note the swapped assignment: the meshgrid outputs are arranged so they are
# consistent with the ravel/vstack evaluation below.
yy, xx = np.meshgrid(yy, xx)
xy = np.vstack([xx.ravel(), yy.ravel()]).T
# Signed distance of every grid point to the separating hyperplane.
P = model.decision_function(xy).reshape(xx.shape)
# plotting decision boundary (level 0, solid) and the margins (levels -1/+1, dashed)
plt.contour(xx, yy, P, colors='k', levels=[-1, 0, 1],
            alpha=0.8, linestyles=['--', '-', '--'])
plt.axis('off')
plt.legend()
plt.show()
# -
# Wo die Hyperebene mit der maximalen Marge liegt, wird von den Punkten am äußeren *Rand* der Punktwolken bestimmt (in der Abbildung zu erkennen durch die schwarze Umrandung).
# Diese Punkte nennt man auch **Support Vektoren**.
# Wie sich leicht erkennen lässt, haben diese Punkte eine herausragende Bedeutung für SVMs.
# Im Folgenden wollen daher betrachten, wie man diese Punkte identifizieren kann.
# ## Support Vector Machines
# Wenn wir von einem linearen Modell ausgehen, lässt sich unsere Hyperebene im $n$-dimensionalen Raum mit der Gleichung
# $$\langle\textbf{w},\textbf{x}\rangle+b=0$$
# beschreiben.
#
# Dabei ist $\langle \textbf{w}, \textbf{x} \rangle$ das Standardskalarprodukt des Paramter-Vektors $\mathbf{w}$ und des Eingabe-Vektors $\mathbf{x}$. Wenn wir davon ausgehen, dass $\textbf{w}$ und $\textbf{x}$ Spaltenvektoren sind, erhalten wir das Skalarprodukt, wenn wir $\textbf{w}$ durch Transponieren zum Zeilenvektor machen und dann die generelle Matrizenmultiplkation anwenden. Die obige Gleichung ist also identisch mit der Form $ \textbf{w}^T\textbf{x}+b$
#
#
#
# Eine solche Hyperebene trennt den $n$-dimensionalen Raum in zwei Halbräume.
# Auf welcher Seite, also in welchem Halbraum ein Punkt mit dem Ortsvektor $\textbf{x}_i$ liegt, erkennt man durch Einsetzen in die Gleichung. Alle Punkte $\textbf{x}_i$, mit
# - $\textbf{w}^T\textbf{x}_i+b<0$ liegen in der ersten Halbebene
# - $\textbf{w}^T\textbf{x}_i+b=0$ liegen auf der Ebene
# - $\textbf{w}^T\textbf{x}_i+b>0$ liegen in der zweiten Halbebene
#
# Wenn wir also die Vorzeichenfunktion verwenden, erhalten wir auf diesem Weg einen Klassifizierer, der die Punkte im Raum in die Klassen $-1$ (Negative) und $1$ (Positive) aufteilt:
# $$ \hat{y}=sign(\textbf{w}^T\textbf{x}_i+b)$$
#
#
# +
# Data points (faded, shown for context only).
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label, alpha=0.2)
# Decision boundary: the 0-level set of the decision function.
plt.contour(xx, yy, P, colors='k', levels= 0,
            alpha=0.8, linestyles='-')
# Normal vector w and bias b.
# Fix: `annotate(s=...)` was removed in Matplotlib 3.3 -- pass the (empty)
# text positionally. LaTeX labels as raw strings to avoid escape warnings.
for xy, xyt, st, lxy, l in [((0,0), (0.42,0.68), '<->', (0.15, 0.4), '$b$'),
                            ((0.52, 0.52), (0.57,0.6), '<|-', (0.5, 0.6), r'$\overrightarrow w$'),
                            ((0.52, 0.52), X[28], '<|-', (0.62, 0.48), r'$\overrightarrow x_{neg}$')]:
    plt.annotate('', xy=xy, xytext=xyt, arrowprops=dict(arrowstyle=st))
    plt.text(lxy[0], lxy[1], s=l, fontsize=14)
plt.axis('on')
plt.xlim([0, 1.1])
plt.ylim([0, 1.1])
plt.legend()
plt.show()
# -
# Um einen Abstand (*Margin*) zur Entscheidungsgrenze festzulegen, fordern wir nun zusätzlich, dass die positiven (Klasse $1$) und negativen (Klasse $-1$) Punkte einen Abstand größer 1 zur Entscheidungsgrenze haben sollen:
# $$
# \begin{array}{ll}
# \textbf{w}^T\textbf{x}_i + b \geq +1, & y_i = +1 (Positive)\\
# \textbf{w}^T\textbf{x}_i + b \leq -1, & y_i = -1 (Negative)
# \end{array}
# $$
#
#
# Mit dieser Forderung erhält man nun 2 weitere Hyperebenen $H_1$ und $H_2$, die parallel zur Entscheidungsgrenze stehen:
#
# $$
# \begin{array}{lll}
# H_1 = &\textbf{w}^T\textbf{x}_i + b = +1, & \text{Hyperebene in der Klasse mit } y_i = +1 (Positive)\\
# H_2 = &\textbf{w}^T\textbf{x}_i + b = -1, & \text{Hyperebene in der Klasse mit } y_i = -1 (Negative)
# \end{array}
# $$
# +
# Data points (faded, shown for context only).
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label, alpha=0.2)
# Decision boundary (solid) plus the two margin hyperplanes H1/H2 (dashed).
plt.contour(xx, yy, P, colors='k', levels= [-1 ,0, 1],
            alpha=0.8, linestyles=['--', '-', '--'])
# Weight vector w and bias b.
# Fix: `annotate(s=...)` was removed in Matplotlib 3.3 -- pass the (empty)
# text positionally. LaTeX labels as raw strings to avoid escape warnings.
for xy, xyt, st, lxy, l in [((0,0), (0.42,0.68), '<->', (0.15, 0.4), '$b$'),
                            ((0.52, 0.52), (0.57,0.6), '<|-', (0.5, 0.6), r'$\overrightarrow w$'),
                            ((0.52, 0.52), X[28], '<|-', (0.62, 0.48), r'$\overrightarrow x_{neg}$')]:
    plt.annotate('', xy=xy, xytext=xyt, arrowprops=dict(arrowstyle=st))
    plt.text(lxy[0], lxy[1], s=l, fontsize=14)
plt.axis('on')
plt.xlim([0, 1.1])
plt.ylim([0, 1.1])
plt.legend()
plt.show()
# -
# Dieser Zusätzliche Abstand zur Entscheidungsgrenze impliziert, dass kein Punkt innerhalb der Marge zwischen $H_1$ und $H_2$ liegt.
# Die meisten der Datenpunkte liegen abseits dieser Hyperebenen innerhalb der Halbräume.
# Die Datenpunkt, die genau auf den Hyperebenen $H_1$ und $H_2$ liegen, nennt man **Support Vektoren**.
# Der Abstand $\delta$ der Hyperplanes $H_1$ und $H_2$ von der Entscheidungsgrenze kann man durch den Abstand der Support Vektoren ausdrücken:
# $$2\delta=(\textbf{x}_{pos}-\textbf{x}_{neg})\cdot\frac{\textbf{w}}{|\textbf{w}|}$$
# $$2\delta=\frac{\textbf{w}^T \textbf{x}_{pos} - \textbf{w}^T \textbf{x}_{neg}}{|\textbf{w}|}$$
# $$2\delta=\frac{(b+1)-(b-1)}{|\textbf{w}|}$$
# $$2\delta=\frac{2}{|\textbf{w}|} \implies \delta=\frac{1}{|\textbf{w}|}$$
# +
# Data points (faded, shown for context only).
for i, s, m, label in [(0, 100, 'o', 'Negative'), (1, 80, 's', 'Positive')]:
    plt.scatter(X[y==i,0], X[y==i,1], c=y_col[y==i], s=s, marker = m, edgecolors='black', label=label, alpha=0.2)
# Highlight the support vector of each class in its class colour.
for i, m, c in [(0, 'o', 'red'),(1, 's', 'lime')]:
    plt.scatter(model.support_vectors_[i, 0],
                model.support_vectors_[i, 1],marker = m,
                s=200, edgecolors='black', facecolors=c)
# Decision boundary (solid) and margin hyperplanes (dashed).
plt.contour(xx, yy, P, colors='k', levels= [-1 ,0, 1],
            alpha=0.8, linestyles=['--', '-', '--'])
# Arrows from the boundary to the two support vectors, each labelled delta.
# Fix: `annotate(s=...)` was removed in Matplotlib 3.3 -- pass the (empty)
# text positionally. LaTeX label as raw string to avoid escape warnings.
for xy, i, lxy in [((0.47,0.61), 1, (0.35, 0.55)),
                   ((0.55,0.46), 0, (0.62, 0.5))]:
    plt.annotate('', xy=xy, xytext=model.support_vectors_[i], arrowprops=dict(arrowstyle='<|-|>'))
    plt.text(lxy[0], lxy[1], s=r'$\delta$', fontsize=14, rotation=45)
plt.xlim(0.1,0.9)
plt.ylim(0.1,0.9)
plt.axis('off')
plt.legend()
plt.show()
# -
# Um $\delta$ zu maximieren, muss $|\textbf{w}|$ minimiert werden.
# Es ergibt sich also das Minimierungsproblem:
# $$
# \text{min: }J(\textbf{w},b) = \frac{1}{2}|\textbf{w}|\\\text{für alle } \textbf{x}_i \text{ mit } y_i(\textbf{w}^T\textbf{x}_i + b) \geq 1
# $$
# **TBC**
# ## Quellen:
# [1] M. Berthold, <NAME>, <NAME> and <NAME>, *Guide to Intelligent Data Analysis*, Springer, 2010.\
# [2] <NAME>, [*Python Data Science Handbook*](https://jakevdp.github.io/PythonDataScienceHandbook), O'Reilly, 2016.\
# [3] <NAME>. *6.034 Artificial Intelligence*. MIT OpenCourseWare, 2010, https://ocw.mit.edu. License: Creative Commons BY-NC-SA.
#
# ### Web-Quellen:
# 1. [Understanding the mathematics behind Support Vector Machines](https://shuzhanfan.github.io/2018/05/understanding-mathematics-behind-support-vector-machines/)
# 2. [Support vector machine](https://en.wikipedia.org/wiki/Support_vector_machine)
| u5/12_SVMs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="https://s3.amazonaws.com/edu-static.mongodb.com/lessons/M220/notebook_assets/screen_align.png" style="margin: 0 auto;">
#
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 style="text-align: center; font-size=58px;">MongoClient</h1>
# + slideshow={"slide_type": "subslide"}
from pymongo import MongoClient
# NOTE(review): credentials are hard-coded in the SRV string -- acceptable
# for this course demo, never for production code.
uri = "mongodb+srv://m220student2:m220password@mflix-xjwqd.mongodb.net/test"
# + [markdown] slideshow={"slide_type": "notes"}
# The MongoClient constructor accepts many different arguments to configure how the driver connects to MongoDB and how many operations will be performed. We'll look at the most basic configuration first, which is passing the SRV string of our Atlas cluster to MongoClient.
# + slideshow={"slide_type": "subslide"}
# Connect with defaults; the Atlas SRV string implies SSL and authSource=admin.
client = MongoClient(uri)
# + slideshow={"slide_type": "fragment"}
# Attribute access on the client; the notebook displays the result.
client.stats
# + [markdown] slideshow={"slide_type": "notes"}
# Note that because we're using an Atlas SRV string, we got an SSL connection for free! It also defaults the **authSource** to the **admin** database.
#
# Now that we've connected to our **mongod**, we can create a database handle. Let's look at the available databases.
# + slideshow={"slide_type": "subslide"}
# List all databases visible to this user on the cluster.
client.list_database_names()
# + [markdown] slideshow={"slide_type": "notes"}
# Let's use the **mflix** database. One useful property of a MongoClient object is we can use property accessors
# + slideshow={"slide_type": "subslide"}
# Database handle via attribute access.
mflix = client.mflix
mflix.list_collection_names()
# + [markdown] slideshow={"slide_type": "notes"}
# or we can use dictionary accessors
# + slideshow={"slide_type": "subslide"}
# Equivalent database handle via dictionary-style access.
mflix = client['mflix']
mflix.list_collection_names()
# + [markdown] slideshow={"slide_type": "notes"}
# Now that we have a database object and have listed available collections, let's create a collection object. As with the database object, we can use either property or dictionary accessors.
# + slideshow={"slide_type": "subslide"}
# Collection handle from the database object.
movies = mflix.movies
# + [markdown] slideshow={"slide_type": "notes"}
# And let's perform a query on our movies collection. We'll just get the count of documents in the collection.
# + slideshow={"slide_type": "subslide"}
# Empty filter {} counts every document in the collection.
movies.count_documents({})
# + [markdown] slideshow={"slide_type": "notes"}
# The MongoClient constructor also accepts many optional keyword parameters. We can set the maximum connection pool, default read and write concerns, whether to retry writes, configuring SSL, authentication, and much more.
#
# A full list and how to use MongoClient for more advanced use cases is available [here](http://api.mongodb.com/python/current/api/pymongo/mongo_client.html)
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is an example setting the **connectTimeoutMS** to 200 milliseconds, how long the driver will allow attempt to connect before erroring, and setting **retryWrites** to True, signaling to the driver to retry a write in the event of a network error.
# + slideshow={"slide_type": "fragment"}
# Same connection, now with a 200 ms connect timeout and retryable writes.
client = MongoClient(uri, connectTimeoutMS=200, retryWrites=True)
# + slideshow={"slide_type": "fragment"}
client.stats
# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
#
# * MongoClient accepts many optional keyword arguments to fine-tune your connection.
# * After instantiating the client, databases handles can be created via property or dictionary accessors on the client object.
# * Collections handles are referenced from the database object.
# * Collection specific operations like querying or updating documents are performed on the collection object.
| mflix-python/notebooks/MongoClient.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <p>"Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) <NAME>, <NAME> 2017. Thanks to NSF for support via CAREER award #1149784."
# <a href="https://twitter.com/LorenaABarba">@LorenaABarba</a></p>
#
# <h1>12 steps to Navier–Stokes</h1><hr>
# For a moment, recall the Navier–Stokes equations for an incompressible fluid, where $\vec{\textbf{v}}$ represents the velocity field:
#
# \begin{eqnarray*}
# \nabla \cdot\vec{\textbf{v}} = 0 \\
# \frac{\partial \vec{\textbf{v}}}{\partial t}+(\vec{\textbf{v}}\cdot\nabla)\vec{\textbf{v}} = -\frac{1}{\rho}\nabla p + \nu \nabla^2\vec{\textbf{v}}
# \end{eqnarray*}
#
# <p>The first equation represents mass conservation at constant density. The second equation is the conservation of momentum. But a problem appears: the continuity equation for incompressible flow does not have a dominant variable and there is no obvious way to couple the velocity and the pressure. In the case of compressible flow, in contrast, mass continuity would provide an evolution equation for the density $\rho$, which is coupled with an equation of state relating $\rho$ and $p$.</p>
# <p>In incompressible flow, the continuity equation $\nabla \cdot\vec{\textbf{v}}=0$ provides a kinematic constraint that requires the pressure field to evolve so that the rate of expansion $\nabla \cdot\vec{\textbf{v}}$ should vanish everywhere. A way out of this difficulty is to construct a pressure field that guarantees continuity is satisfied; such a relation can be obtained by taking the divergence of the momentum equation. In that process, a Poisson equation for the pressure shows up!</p>
#
# <h2>Step 10: 2D Poisson Equation</h2>
# <p><a href="https://en.wikipedia.org/wiki/Poisson's_equation">Poisson's equation</a> is obtained from adding a source term to the right-hand-side of Laplace's equation:</p>
#
# $$\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = b$$
# So, unlike the <a href="https://en.wikipedia.org/wiki/Poisson's_equation">Laplace equation</a>, there is some finite value inside the field (i.e. b) that affects the solution. Poisson's equation acts to "relax" the initial sources in the field.
#
# In discretized form, this looks almost the same as Step 9, except for the source term:
#
# $$\frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2}+\frac{p_{i,j+1}^{n}-2 p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2}=b_{i,j}^{n}$$
#
# <p>As before, we rearrange this so that we obtain an equation for $p$ at point $i,j$. Thus, we obtain:</p>
# $$p_{i,j}^{n}=\frac{(p_{i+1,j}^{n}+p_{i-1,j}^{n})\Delta y^2+(p_{i,j+1}^{n}+p_{i,j-1}^{n})\Delta x^2-b_{i,j}^{n}\Delta x^2\Delta y^2}{2(\Delta x^2+\Delta y^2)}$$
#
# <p>We will solve this equation by assuming an initial state of $p=0$ everywhere, and applying boundary conditions as follows:</p>
#
# $p=0$ at $x=0, \ 2 \, $ and $y=0, \ 1$
#
# <p>and the source term consists of two initial spikes inside the domain, as follows:</p>
#
# $b_{i,j}=100$ at $i=\frac{1}{4}nx, j=\frac{1}{4}ny$
#
# $b_{i,j}=-100$ at $i=\frac{3}{4}nx, j=\frac{3}{4}ny$
#
# $b_{i,j}=0$ everywhere else.
#
# The iterations will advance in pseudo-time to relax the initial spikes. The relaxation under Poisson's equation gets slower and slower as they progress. Why?
#
# Let's look at one possible way to write the code for Poisson's equation. As always, we load our favorite Python libraries. We also want to make some lovely plots in 3D. Let's get our parameters defined and the initialization out of the way. What do you notice of the approach below?
import numpy
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
# +
# Parameters
nx = 50 # How wide is our matrix in x
ny = 50 # How wide is our matrix in y
nt = 100 # How many pseudo-time relaxation iterations
# Boundary Conditions.
xmin = 0
xmax = 2
ymin = 0
ymax = 1
# The size of our incremental steps.
dx = (xmax-xmin) / (nx-1)
dy = (ymax-ymin) / (ny-1)
# Initialize our Arrays
p = numpy.zeros((ny, nx)) #set our initial pressure to 0 everywhere
pd = numpy.zeros((ny, nx)) # scratch copy of p used inside the iteration loop
b = numpy.zeros((ny, nx)) # source term of the Poisson equation
# Grid coordinates (used for plotting).
x = numpy.linspace(xmin, xmax, nx)
y = numpy.linspace(ymin, ymax, ny)
# Now build a source (the +b term) and a sink (the -b term).
b[int(ny / 4), int(nx / 4)] = 100
b[int(3 * ny / 4), int(3 * nx / 4)] = -100
# -
# Copy over the applicable code from step 9 and modify it for our current use. <b>Note:</b> The primary difference being $\textbf{pn}$ has changed to $\textbf{pd}$ and the $\textbf{b[]}$ term has been added. Also the boundaries have changed.
# Jacobi-style relaxation: nt sweeps of the discretized Poisson update.
for it in range(nt):
    pd = p.copy()
    # Five-point stencil solved for p[i, j] (see the equation above); the
    # interior slice updates all inner points at once from the previous sweep.
    p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +
                      (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -
                      b[1:-1, 1:-1] * dx**2 * dy**2) /
                     (2 * (dx**2 + dy**2)))
    # Dirichlet boundary conditions: p = 0 on all four edges.
    p[0, :] = 0
    p[ny-1, :] = 0
    p[:, 0] = 0
    p[:, nx-1] = 0
# Reuse the plotting function code from Step 9.
def plot2D(x, y, p):
    """Surface-plot the 2-D field *p* over the grid spanned by *x* and *y*.

    x, y are 1-D coordinate arrays; p is the (ny, nx) field to display.
    """
    fig = pyplot.figure(figsize=(11, 7), dpi=100)
    # Fix: Figure.gca() no longer accepts a projection argument (deprecated
    # in Matplotlib 3.4, removed in 3.6); create the 3-D axes explicitly.
    ax = fig.add_subplot(projection='3d')
    X, Y = numpy.meshgrid(x, y)
    ax.plot_surface(X, Y, p, cmap=cm.viridis, rstride=1, cstride=1,
                    linewidth=0, antialiased=False)
    ax.set_xlim(0, 2)
    ax.set_ylim(0, 1)
    # Viewing angle chosen to match the lesson's figures.
    ax.view_init(30, 225)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
# Now plot our initial conditions to ensure that we got what we thought we should get: two peaks, one positive and one negative. (This looks like a Source and Sink used in Aerodynamics to compute flow lines.)
# Render the relaxed pressure field; expect one positive and one negative peak.
plot2D(x,y,p)
# As expected the plot shows what we thought we would get, Two peaks, one positive, one negative.
# <br>
# <h2>Learn More</h2>
# <p> To learn more about the role of Poisson's equation in CFD, watch <b>Video Lesson</b> 11 on YouTube.
# Embed the lesson's companion video in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('ZjfxA3qq2Lg')
from IPython.core.display import HTML
def css_styling():
    """Load the course's custom notebook stylesheet and return it as HTML.

    Fix: use a context manager so the file handle is closed even if reading
    fails (the original left the handle open), and read the CSS as UTF-8.
    """
    with open("../styles/custom.css", "r", encoding="utf-8") as styles_file:
        styles = styles_file.read()
    return HTML(styles)
css_styling()
| lessons/13_Step_10-2D_Poisson's_Equation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from torch.utils.tensorboard import SummaryWriter
# Event files are written to ./runs/ by default.
writer = SummaryWriter()
# +
# Toy regression data: y = -5x plus Gaussian noise.
x = torch.arange(-5, 5, 0.1).view(-1, 1)
y = -5 * x + 0.1 * torch.randn(x.size())
model = torch.nn.Linear(1, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
def train_model(iter: int) -> None:
    """Run `iter` epochs of SGD on the module-level model/data, logging the
    training loss to TensorBoard after each epoch.

    NOTE(review): the parameter name `iter` shadows the built-in.
    """
    for epoch in range(iter):
        y1 = model(x)
        loss = criterion(y1, y)
        writer.add_scalar("Loss/train", loss, epoch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
train_model(10)
# Ensure pending events hit disk so TensorBoard can read them.
writer.flush()
# -
writer.close()
# %load_ext tensorboard
# +
# # %tensorboard --logdir=runs
# -
# +
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
# MNIST pipeline: convert to tensor, then normalise with mean/std 0.5.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
# download=True fetches MNIST into ./mnist_train on the first run.
trainset = datasets.MNIST('mnist_train', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# ResNet-50 without pretrained weights.
model = torchvision.models.resnet50(False)
# Have ResNet model take in grayscale rather than RGB
model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
# Log one batch of images and the model graph to TensorBoard.
images, labels = next(iter(trainloader))
grid = torchvision.utils.make_grid(images)
writer.add_image('images', grid, 0)
writer.add_graph(model, images)
writer.close()
# -
import tensorboard
# Fix: `tensorboard --logdir=runs` is shell/IPython-magic syntax and raises a
# SyntaxError in plain Python. Use the notebook magic instead (as the cell
# near the end of this notebook already does):
# %tensorboard --logdir=runs
import torch
# Report the installed PyTorch version (shown as the cell's output).
torch.__version__
# %load_ext tensorboard
# +
# NOTE(review): this cell duplicates the earlier MNIST/ResNet logging cell
# in this notebook -- presumably re-run before launching TensorBoard.
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
trainset = datasets.MNIST('mnist_train', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
model = torchvision.models.resnet50(False)
# Have ResNet model take in grayscale rather than RGB
model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
images, labels = next(iter(trainloader))
grid = torchvision.utils.make_grid(images)
writer.add_image('images', grid, 0)
writer.add_graph(model, images)
writer.close()
# -
# %tensorboard --logdir=runs
| test_tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <b>Image classification batch feature demo</b>
# The AIServiceVisionClient offers the image classification feature in batch mode. This notebook aims to provide overall clarity about the feature to the user in terms of requirements, usage and the output of the batch i.e. asynchronous API.<br>
# <ul>
# <li>The output response files are stored at the object storage specified in <code>data/output_object_image_batch.json</code>. </li>
# <li>The detected classes for a random input image are displayed in the last section of the notebook.</li>
# </ul>
# ### Steps to run the notebook:
# <details>
# <summary>Notebook session setup</summary>
# <ol>
# <li><font size="2">Installing the OCI Vision SDK</font></li>
# <li><font size="2">Installing other dependencies</font></li>
# <li><font size="2">Setup sample input images</font></li>
# <li><font size="2">Setup helper .py files</font></li>
# </ol>
# </details>
#
# <details>
# <summary>Importing the required modules</summary>
# </details>
#
# <details>
# <summary>Setting the input variables</summary>
# <font size="2">The user can change the input variables, if necessary. They have been assigned default values.</font>
# </details>
#
# <details>
# <summary>Running the main pipeline</summary>
# <font size="2">Run all cells to get the output in the <code>output</code> directory. </font><br>
# </details>
# ### Notebook session setup
# <details>
# <summary>Instructions</summary>
# <ul>
# <li><font size="2">The user needs to setup only once.</font></li>
# <li><font size="2">Uncomment the commented cells and run once to setup.</font></li>
# <li><font size="2">Comment back the same cells to avoid running again.</font></li>
# </ul>
# </details>
# #### Installing the OCI Vision SDK
# +
# # !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/vision_service_python_client-0.3.45-py2.py3-none-any.whl"
# # !pip install vision_service_python_client-0.3.45-py2.py3-none-any.whl
# # !rm vision_service_python_client-0.3.45-py2.py3-none-any.whl
# -
# #### Installing other dependencies
# +
# # !pip install matplotlib==3.3.4
# # !pip install pandas==1.1.5
# -
# #### Setup sample input images
# +
# # !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/input_objects_image_batch.json"
# # !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/output_object_image_batch.json"
# # !mkdir data
# # !mv input_objects_image_batch.json data
# # !mv output_object_image_batch.json data
# -
# #### Setup helper .py files
# +
# # !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/analyze_image_batch_utils.py"
# # !mkdir helper
# # !mv analyze_image_batch_utils.py helper
# -
# ### Imports
# +
import time
import json
import io
from random import randint
import oci
from PIL import Image
from vision_service_python_client.models import output_location
from vision_service_python_client.ai_service_vision_client import AIServiceVisionClient
from vision_service_python_client.models.create_image_job_details import CreateImageJobDetails
from vision_service_python_client.models.image_classification_feature import ImageClassificationFeature
from helper.analyze_image_batch_utils import load_input_object_locations, load_output_object_location, display_classes, clean_output
from IPython.display import JSON
# -
# ### Set input variables
# <details>
# <summary><font size="3">input_location_path</font></summary>
# <font size="2">The file <code>data/input_objects_image_batch.json</code> specifies where the input images are to be taken from. Sample file has been provided. The user needs to provide the following in this file:
# <ul>
# <li><code>compartment_id</code> : Compartment ID</li>
# <li><code>input_objects</code>: List with the object locations in the following format-</li>
# <ul>
# <li><code>namespace</code> : Namespace name</li>
# <li><code>bucket</code> : Bucket name</li>
# <li><code>objects</code> : List of object names</li>
# </ul>
# </ul>
# </font>
# </details>
#
# <details>
# <summary><font size="3">output_location_path</font></summary>
# <font size="2">The file <code>data/output_object_image_batch.json</code> specifies where the output files will be stored. Sample file has been provided. The user needs to provide the following in this file:
# <ul>
# <li><code>namespace</code> : Namespace name</li>
# <li><code>bucket</code> : Bucket name</li>
# <li><code>prefix</code> : Prefix name</li>
# </ul>
# </font>
# </details>
#
# <details>
# <summary><font size="3">max_results</font></summary>
# <font size="2">Provide the maximum number of results needed for image classification. This is an upper limit over the output classes, the API may detect lesser classes according to the image.</font><br>
# </details>
# Paths to the JSON files describing the batch input objects and the
# object-storage location for the job's output.
input_location_path = 'data/input_objects_image_batch.json'
output_location_path = 'data/output_object_image_batch.json'
# Upper bound on classes returned per image; the service may detect fewer.
max_results = 5
# ### Authorize user config
# Load OCI credentials from the standard config file (default profile).
config = oci.config.from_file('~/.oci/config')
# ### Load input and output object locations
# Parse compartment id + input object list, and the output object location,
# from the two JSON files via the helper module.
compartment_id, input_location = load_input_object_locations(input_location_path)
output_location = load_output_object_location(output_location_path)
# ### Create AI service vision client and image job
# +
ai_service_vision_client = AIServiceVisionClient(config=config)
# Assemble the asynchronous (batch) image-classification job request.
create_image_job_details = CreateImageJobDetails()
image_classification_feature = ImageClassificationFeature()
image_classification_feature.max_results = max_results
features = [image_classification_feature]
create_image_job_details.features = features
create_image_job_details.compartment_id = compartment_id
create_image_job_details.input_location = input_location
create_image_job_details.output_location = output_location
# Submit the job; `res.data` carries the job metadata (id, lifecycle state, ...).
res = ai_service_vision_client.create_image_job(create_image_job_details=create_image_job_details)
# -
# ### Job submitted
# The job is created and is in <code>ACCEPTED</code> state.
# Convert the SDK model to a plain dict (relies on the OCI SDK emitting
# JSON-formatted repr() output), strip noise, and render it in the notebook.
res_json = json.loads(repr(res.data))
clean_res = clean_output(res_json)
JSON(clean_res)
# ### Job in progress
# The job progress is tracked till completion with an interval of 5 seconds and is in <code>IN_PROGRESS</code> state.
# +
job_id = res.data.id
print("Job ID :", job_id, '\n')
seconds = 0
# Poll the job every 5 seconds until it leaves the ACCEPTED/IN_PROGRESS states.
res = ai_service_vision_client.get_image_job(image_job_id=job_id)
while res.data.lifecycle_state in ["IN_PROGRESS", "ACCEPTED"]:
    print("Job is IN_PROGRESS for " + str(seconds) + " seconds")
    time.sleep(5)
    seconds += 5
    res = ai_service_vision_client.get_image_job(image_job_id=job_id)
# -
# ### Job completed
# The job is completed and is in <code>SUCCEEDED</code> state.
# Render the final job metadata; lifecycle_state should now be SUCCEEDED.
res_json = json.loads(repr(res.data))
clean_res = clean_output(res_json)
JSON(clean_res)
# ### Display detected classes
# The detected classes will be displayed in decreasing order of confidence level for a randomly selected image from the batch input.
# +
object_storage_client = oci.object_storage.ObjectStorageClient(config)
# Pick one input image at random to inspect.
index = randint(0, len(input_location.object_locations) - 1)
object_location = input_location.object_locations[index]
# Output objects are written as <prefix>/<job-id>/<ns>_<bucket>_<object>.json
output_object_name = output_location.prefix + "/" + res.data.id + "/" + \
    object_location.namespace_name + "_" + object_location.bucket_name + "_" + \
    object_location.object_name
res_json = object_storage_client.get_object(output_location.namespace_name, \
    output_location.bucket_name, object_name = output_object_name+".json").data.content
res_dict = json.loads(res_json)
print("Image :", object_location.object_name)
# Show the detected classes, if any.
if res_dict['labels'] is not None:
    display_classes(res_dict['labels'])
else:
    print("No image classes detected.")
# -
| oci-library/oci-hol/oci-artificial-intelligence/ai-vision/ai-vision-datascience/files/image_batch_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import matplotlib.pyplot as plt
import sys
## This function fits a random forest model to the contact data. It takes as an inputa set of labeled contacts
## please refer to Supplementary Note 5 for extra information about data labeling
def contact_ml_model(input_file):
    """Fit a random-forest contact classifier on a labeled contact CSV.

    Returns the fitted model plus its accuracy on the held-out test split
    and on the training split. (See Supplementary Note 5 for how the
    contacts were labeled.)
    """
    data = pd.get_dummies(pd.read_csv(input_file))
    # 'label' is the prediction target; every other column is a feature.
    y = np.array(data['label'])
    X = data.drop('label', axis=1)
    column_names = list(X.columns)  # kept for parity with later bookkeeping
    X = np.array(X)
    # Hold out 25% of the contacts for evaluation (fixed seed for reproducibility).
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
    model = RandomForestClassifier(n_estimators=20, max_depth=8, random_state=1, n_jobs=2, oob_score=True)
    model.fit(X_train, y_train)
    test_accuracy = metrics.accuracy_score(y_test, model.predict(X_test))
    train_accuracy = metrics.accuracy_score(y_train, model.predict(X_train))
    return model, test_accuracy, train_accuracy
## this function uses the random forest model abovve to classify contacts as close or random
def classify_contacts(clf, input_contacts):
    """Label each contact in *input_contacts* as close/random using *clf*.

    The dev_a/dev_b identifier columns and the (placeholder) label column are
    stripped before prediction so only the measured features remain.
    """
    frame = pd.get_dummies(pd.read_csv(input_contacts))
    for unused_column in ('dev_a', 'dev_b', 'label'):
        frame = frame.drop(unused_column, axis=1)
    remaining_columns = list(frame.columns)  # bookkeeping, mirrors training code
    predictions = clf.predict(np.array(frame))
    return predictions
| analyzing-contact-data/classify-contacts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Lecture 6:
#
# - get a first peek at the very useful Python packages called **NumPy** and **matplotlib**
#
#
# In the last lecture we learned how to create modules. These are files that contain one or more functions and variables. They can be imported and used in other programs or notebooks, saving us a lot of time and headache.
#
# A Python _package_ contains a collection of modules that are related to each other. Our first _package_ is one of the most useful ones for us science types: **NumPy**.
#
#
# ### A first look at NumPy
#
# O.K. First of all - how do you pronounce "NumPy"? It should be pronounced "Num" as in "Number" and "Pie" as in, well, pie, or Python. It is way more fun to say Numpee! It's difficult to suppress this urge.
#
# Now with that out of the way, what can we do with **NumPy**? Turns out, a whole heck of a lot! But for now, we will just scratch the surface. For starters, **NumPy** can give us the value of the square root of a number with the function **numpy.sqrt( )**. Note how the package name comes first, then the function we wish to use (just as in our example from the last lecture).
#
# To use **NumPy** functions, we must first **import** the package with the command **import**. It may take a while the first time you use **import** after installing Python, but after that it should load quickly.
#
# We encountered **import** very briefly in the last lecture. Now it is time to go deeper. There are many different ways you can use the **import** command. Each way allows your program to access the functions and variables defined in the imported package, but differs in how you call the function after importing:
# +
import numpy
#This makes all the functions in NumPy available to you,
#but you have to call them with the numpy.FUNC() syntax
numpy.sqrt(2)
# +
# Here is another way to import a module:
import numpy as np # or any other variable e.g.: N
# This does the same as the first, but allows you to set NumPy with a nickname
# In this case, you substitute "np" for numpy:
np.sqrt(2) # or N.pi in the second case.
# Note: Some folks in the NumPy community use N; I use np.
# That seems to be the most common way now.
# -
# To import all the functions from NumPy:
# +
from numpy import *
# now all the functions are available directly, without the initial module name:
sqrt(2)
# -
# The '\*' imports all the functions into the local namespace, which is a heavy load on your computer's memory. Alternatively, you can import the few, specific functions you'll use, for example, **sqrt**:
#
# +
from numpy import sqrt # square root
sqrt(4)
# -
# Did you notice how "sqrt(4)", where 4 was an integer, returned a floating point variable (2.0)?
#
#
# **TIP**: I tend to import the **NumPy** package using the **np** option above. That way I know where the functions I'm using come from. This is useful, because we don't use or know ALL of the functions available in any given package. AND the same function name can mean different things in different packages. So, a function defined in the package could conflict with one defined in your program. It is just good programming practice to specify the origin of the function you are using.
#
# ### NumPy functions
#
# Here is a (partial) list of some useful **NumPy** functions:
#
#
# |function | purpose|
# |------------ |--------------|
# |absolute(x) | absolute value|
# |arccos(x) |arccosine |
# |arcsin(x) | arcsine |
# |arctan(x) | arctangent |
# |arctan2(y,x) |arctangent of y/x in correct quadrant|
# |cos(x) |cosine |
# |cosh(x) | hyperbolic cosine |
# |exp(x) | exponential |
# |log(x) | natural logarithm |
# |log10(x) | base 10 log |
# |sin(x) | sine |
# |sinh(x) | hyperbolic sine |
# |sqrt(x) | square root |
# |tan(x) | tangent |
# |tanh(x) | hyperbolic tangent |
#
#
#
#
# ### Numpy attributes
#
# **NumPy** has more than just _functions_; it also has _attributes_ which are variables stored in the package, for example $\pi$.
#
#
# pi is an attribute (a stored value), not a function -- hence no parentheses
np.pi
# **TIP**: In the trigonometric functions, the argument is in RADIANS!.You can convert between degrees and radians by multiplying by: np.pi/180. OR you can convert using the **NumPy** functions **np.degrees( )** which converts radians to degrees and **np.radians( )** which converts degrees to radians.
#
#
# Also notice how the functions have parentheses, as opposed to **np.pi** which does not. The difference is that **np.pi** is not a function but an _attribute_. It is a variable defined in **NumPy** that you can access. Every time you call the variable **np.pi**, it returns the value of $\pi$.
#
#
#
# ### Using NumPy Functions
#
# As already mentioned, **NumPy** has many math functions. We will use a few to generate some data sets that we can then plot using **matplotlib**, another Python module.
#
# First, let's make a list of angles ($\theta$ or **theta**) around a circle. We begin with the list of angles in degrees, convert them to radians (using **np.radians( )**), then construct a list of sines of those angles.
thetas_in_degrees=range(0,360,5) # list (generator) of angles from 0 to 359 at five degree intervals
# uncomment the following line, if you'd like to print the list
#print (list(thetas_in_degrees))
thetas_in_radians=np.radians(thetas_in_degrees) # convert to radians (trig functions expect radians)
sines=np.sin(thetas_in_radians) # np.sin operates element-wise on the whole array at once
sines
# ### Plotting data
#
# Now that we've generated some data, we can look at them. Yes, we just printed out the values, but it is way more interesting to make a plot. The easiest way to do this is using the package **matplotlib** which has many plotting functions, among them a whole module called **pyplot**. By convention, we **import** the **matplotlib.pyplot** module as **plt**.
#
# We've also included one more line that tells **pyplot** to plot the image within the notebook: The magic command: **%matplotlib inline**. Note that this does not work in other environments, like command line scripts; magic commands are only for Jupyter notebooks (lucky us!).
# +
import matplotlib.pyplot as plt # import the plotting module
# call this magic command to show the plots in the notebook
# %matplotlib inline
plt.plot(thetas_in_degrees,sines); # plot the sines against the angles (x, y order)
# -
# ### Features and styling in matplotlib
#
# Every plot should at least have axis labels and can also have a title, a legend, bounds, etc. We can use **matplotlib.pyplot** to add these features and more.
#
#
#
# I want to plot the sine curve as a green line, so I use 'g-' to do that:
plt.plot(thetas_in_degrees,sines,'g-',label='Sine')
# the "label" argument saves this line for annotation in a legend
# let's add X and Y labels
plt.xlabel('Degrees') # make an X label
plt.ylabel('Sine') # label the Y axis
# and now change the x axis limits:
plt.xlim([0,360]) # set the limits (one full cycle, 0-360 degrees)
plt.title('Sine curve') # set the title
plt.legend(); # put on a legend!
# Now let's add the cosine curve and a bit of style! We'll plot the cosine curve as a dashed blue line ('b--'), move the legend to a different position and plot the sine curve as little red dots ('r.').
# For a complete list of possible symbols (markers), see: http://matplotlib.org/api/markers_api.html
cosines=np.cos(thetas_in_radians)
# plot the sines with the angles as small red dots ('r.')
plt.plot(thetas_in_degrees,sines,'r.',label='Sine')
# plot the cosines with the angles as a dashed blue line
plt.plot(thetas_in_degrees,cosines,'b--',label='Cosine')
plt.xlabel('Degrees')
plt.ylabel('Trig functions')
plt.xlim([0,360]) # set the limits
plt.legend(loc=3); # put the legend in the lower left hand corner this time
# The function **plt.plot( )** in **matplotlib.pyplot** includes many more styling options. Here's a complete list of arguments and keyword arguments that plot accepts:
# print the full signature and keyword options accepted by plt.plot()
help(plt.plot)
# ### Reading in text files
#
# One VERY useful function in **NumPy** is to read data sets into an array. Arrays are a new kind of data container, very much like lists, but with special attributes. Arrays must be all of one data type (e.g., floating point). Arrays can be operated on in one go, unlike lists that must be operated on element by element. I sneakily showed this to you by taking the cosine of the entire array returned by **np.radians( )**. It took a list and quietly turned it into an array, which I could operate on. Also, arrays don't separate the numbers with commas like lists do. We will see more benefits (and drawbacks) of arrays in the coming lectures.
#
# #### A brief comparison of lists and arrays:
#
# The _built-in_ function **range( )** makes a list generator as we have already seen. But the **NumPy** function **np.arange( )** makes an array. Let's compare the two:
#
#
print (list(range(10)))
print (np.arange(10))
# They are superficially similar (except for the missing commas), but try a simple addition trick:
np.arange(10)+2  # arrays support element-wise arithmetic
# versus
range(10)+2  # NOTE: this deliberately raises a TypeError -- ranges don't add integers
# Oh dear! We would have to go through the list one by one to do this addition using a list.
# Time for some SCIENCE!
#
# Let's start with data from an earthquake. We will read in data from an Earthquake available from the IRIS website: http://ds.iris.edu/wilber3/find_event. We can read in the data using the function **np.loadtxt( )**.
#
# I chose the Christmas Day, 2016 magnitude 7.6 Earthquake in Chile (latitude=-43.42, longitude=-73.95). It was recorded at a seismic station run by Scripps Institution of Oceanography called "Pinyon Flat Observatory" (PFO, latitude=33.3, longitude=-115.7).
EQ=np.loadtxt('Datasets/seismicRecord/earthquake.txt') # read in the seismogram data as an array
print (EQ)
# Notice that EQ is NOT a **list** (it would have commas). In fact it is an N-dimensional array (actually only 1 dimensional in this case). You can find out what any object is using the built-in function **type( )**:
type(EQ)
# We'll learn more about the _ndarray_ data structure in the next lecture.
#
# But now, let's plot the earthquake data.
plt.plot(EQ); # the semi-colon suppresses some annoying gibberish,
# try taking it out!
# Here, **plt.plot( )** plots the array **EQ** against the index number for the elements in the array because we didn't pass a second argument.
#
# We can decorate this plot in many ways. For example, we can add axis labels and truncate the data with plt.xlim( ), or change the color of the line to name a few:
plt.plot(EQ,'r-') # plots as a red line
plt.xlabel('Arbitrary Time') # puts a label on the X axis
plt.ylabel('Velocity'); # puts a label on the Y axis
# ### Assignment #2
#
# - Make a notebook and change the name of the notebook to: YourLastNameInitial_HW_02
# (for example, CychB_HW_02)
# - In a **markdown** cell, write a description of what the notebook does
# - Create a **Numpy** array of numbers from 0 to 100
# - Create another list that is empty
# - Write a **for** loop that takes the square root of all the values in your list of numbers (using **np.sqrt**) and appends them to the empty list.
# - Print out all the numbers that are divisible by 4 (using the modulo operator).
# - Plot the square roots against the original list.
# - Create a dictionary with at least 4 key:value pairs
#
# - Write your own module that contains at least four functions and uses a dictionary and a list. Include a doc string in your module and a comment before each function that briefly describes the function. Save it with the magic command %%writefile YOURMODULENAME.py
# - Import the module into your notebook and call all of the functions.
#
# Hint: For the purposes of debugging, you will probably want to 'reload' your module as you refine it. To do this
#
# _from importlib import reload_
#
# then
#
# _reload(YOURMODULENAME)_
#
# Your code must be fully commented.
#
| .ipynb_checkpoints/Lecture_06-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Amazon SageMaker Neo でコンパイルしたモデルを AWS IoT Greengrass V2 を使ってデバイスにデプロイする
#
# このサンプルノートブックは、エッジ推論を行うために学習済みモデルを Amazon SageMaker Neo でコンパイルして AWS Iot Greengrass V2 を使ってデバイスにデプロイするパイプラインを AWS Step Functions を使って自動化する方法をご紹介します。このノートブックを Amazon SageMaker のノートブックインスタンスで使用する場合は、`conda_tensorflow_p36` のカーネルをご利用ください。
#
# このノートブックでは、デプロイしたいモデルや Greengrass アーティファクトファイルに関する情報を yaml 形式の設定ファイルで作成し、ワークフロー実行時にその設定ファイルを入力パラメタとすることで、以下のことを実現しています。
#
# - いつ何をどのデバイスにデプロイしたのかの記録(トレーサビリティ)
# - 同じ設定ファイルを使用することで同じワークフローを実行可能(再現性)
#
# Python コードでワークフローを構築するために、AWS Step Functions Data Science SDK を使用します。詳しい情報は以下のドキュメントをご参照ください。
#
# - [AWS Step Functions](https://aws.amazon.com/step-functions/)
# - [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html)
# - [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io/)
#
# このノートブックの大まかな流れは以下の通りです。
#
# 
#
# 1. 3つの Lambda 関数を作成
# - デプロイしたいコンポーネントに対応するコンテナイメージをデプロイ用 Lambda 関数に適用する Lambda 関数
# - 必要に応じて機械学習モデルを Amazon SageMaker Neo でコンパイルする Lambda 関数
# - 指定されたアーティファクトをコンポーネント化して AWS IoT Greengrass デバイスにデプロイする Lambda 関数
# 1. 作成した Lambda 関数を順に実行するような AWS Step Functions Data Science SDK ワークフローを作成
# 1. デプロイに関する情報が記載された設定ファイルを作成
# 1. Step Functions ワークフローを実行してファイルをデバイスにデプロイ
# 1. デプロイ関連情報を一覧表示
# 1. リソースの削除
#
#
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#[事前準備]-AWS-IoT-サービスとデバイス(AWS-Cloud9)のセットアップ" data-toc-modified-id="[事前準備]-AWS-IoT-サービスとデバイス(AWS-Cloud9)のセットアップ-1"><span class="toc-item-num">1 </span>[事前準備] AWS IoT サービスとデバイス(AWS Cloud9)のセットアップ</a></span></li><li><span><a href="#ノートブックインスタンスの-IAM-ロールに権限を追加" data-toc-modified-id="ノートブックインスタンスの-IAM-ロールに権限を追加-2"><span class="toc-item-num">2 </span>ノートブックインスタンスの IAM ロールに権限を追加</a></span></li><li><span><a href="#Lambda-関数が使用するコンテナイメージを作成" data-toc-modified-id="Lambda-関数が使用するコンテナイメージを作成-3"><span class="toc-item-num">3 </span>Lambda 関数が使用するコンテナイメージを作成</a></span><ul class="toc-item"><li><span><a href="#コンテナイメージ更新用-Lambda-関数" data-toc-modified-id="コンテナイメージ更新用-Lambda-関数-3.1"><span class="toc-item-num">3.1 </span>コンテナイメージ更新用 Lambda 関数</a></span></li><li><span><a href="#モデルコンパイル用-Lambda-関数" data-toc-modified-id="モデルコンパイル用-Lambda-関数-3.2"><span class="toc-item-num">3.2 </span>モデルコンパイル用 Lambda 関数</a></span></li><li><span><a href="#デプロイ用-Lambda-関数" data-toc-modified-id="デプロイ用-Lambda-関数-3.3"><span class="toc-item-num">3.3 </span>デプロイ用 Lambda 関数</a></span></li></ul></li><li><span><a href="#Lambda-関数の作成と権限の設定" data-toc-modified-id="Lambda-関数の作成と権限の設定-4"><span class="toc-item-num">4 </span>Lambda 関数の作成と権限の設定</a></span><ul class="toc-item"><li><span><a href="#タイムアウト時間の設定" data-toc-modified-id="タイムアウト時間の設定-4.1"><span class="toc-item-num">4.1 </span>タイムアウト時間の設定</a></span></li><li><span><a href="#アクセス権限の設定" data-toc-modified-id="アクセス権限の設定-4.2"><span class="toc-item-num">4.2 </span>アクセス権限の設定</a></span></li></ul></li><li><span><a href="#Step-Functions-Data-Science-SDK-でワークフローを作成" data-toc-modified-id="Step-Functions-Data-Science-SDK-でワークフローを作成-5"><span class="toc-item-num">5 </span>Step Functions Data Science SDK でワークフローを作成</a></span><ul class="toc-item"><li><span><a href="#Step-Functions-の実行ロールの作成" data-toc-modified-id="Step-Functions-の実行ロールの作成-5.1"><span class="toc-item-num">5.1 </span>Step Functions 
の実行ロールの作成</a></span></li><li><span><a href="#AWS-Step-Functions-ワークフローの作成" data-toc-modified-id="AWS-Step-Functions-ワークフローの作成-5.2"><span class="toc-item-num">5.2 </span>AWS Step Functions ワークフローの作成</a></span></li><li><span><a href="#Choice-State-と-Wait-State-の作成" data-toc-modified-id="Choice-State-と-Wait-State-の作成-5.3"><span class="toc-item-num">5.3 </span>Choice State と Wait State の作成</a></span></li><li><span><a href="#Fail-状態の作成" data-toc-modified-id="Fail-状態の作成-5.4"><span class="toc-item-num">5.4 </span>Fail 状態の作成</a></span></li><li><span><a href="#Workflow-の作成" data-toc-modified-id="Workflow-の作成-5.5"><span class="toc-item-num">5.5 </span>Workflow の作成</a></span></li></ul></li><li><span><a href="#エッジ推論で使用する学習済みモデルの準備" data-toc-modified-id="エッジ推論で使用する学習済みモデルの準備-6"><span class="toc-item-num">6 </span>エッジ推論で使用する学習済みモデルの準備</a></span></li><li><span><a href="#デプロイ設定ファイルのセットアップ" data-toc-modified-id="デプロイ設定ファイルのセットアップ-7"><span class="toc-item-num">7 </span>デプロイ設定ファイルのセットアップ</a></span><ul class="toc-item"><li><span><a href="#ユーティリティ関数の定義" data-toc-modified-id="ユーティリティ関数の定義-7.1"><span class="toc-item-num">7.1 </span>ユーティリティ関数の定義</a></span></li><li><span><a href="#デプロイ関連情報の定義" data-toc-modified-id="デプロイ関連情報の定義-7.2"><span class="toc-item-num">7.2 </span>デプロイ関連情報の定義</a></span></li><li><span><a href="#学習済みモデルとアーティファクトファイルを-S3-にアップロード" data-toc-modified-id="学習済みモデルとアーティファクトファイルを-S3-にアップロード-7.3"><span class="toc-item-num">7.3 </span>学習済みモデルとアーティファクトファイルを S3 にアップロード</a></span></li><li><span><a href="#設定ファイル(yaml-形式)をファイルに保存" data-toc-modified-id="設定ファイル(yaml-形式)をファイルに保存-7.4"><span class="toc-item-num">7.4 </span>設定ファイル(yaml 形式)をファイルに保存</a></span></li><li><span><a href="#設定ファイルを-S3-にアップロード" data-toc-modified-id="設定ファイルを-S3-にアップロード-7.5"><span class="toc-item-num">7.5 </span>設定ファイルを S3 にアップロード</a></span></li></ul></li><li><span><a href="#AWS-Step-Functions-ワークフローの実行" data-toc-modified-id="AWS-Step-Functions-ワークフローの実行-8"><span class="toc-item-num">8 </span>AWS Step Functions 
ワークフローの実行</a></span><ul class="toc-item"><li><span><a href="#ワークフローの実行" data-toc-modified-id="ワークフローの実行-8.1"><span class="toc-item-num">8.1 </span>ワークフローの実行</a></span></li><li><span><a href="#既存のワークフローを呼び出して実行" data-toc-modified-id="既存のワークフローを呼び出して実行-8.2"><span class="toc-item-num">8.2 </span>既存のワークフローを呼び出して実行</a></span></li></ul></li><li><span><a href="#デプロイされたモデルの情報を一覧表示" data-toc-modified-id="デプロイされたモデルの情報を一覧表示-9"><span class="toc-item-num">9 </span>デプロイされたモデルの情報を一覧表示</a></span></li><li><span><a href="#[重要]-リソースの削除" data-toc-modified-id="[重要]-リソースの削除-10"><span class="toc-item-num">10 </span>[重要] リソースの削除</a></span></li></ul></div>
# -
# ## [事前準備] AWS IoT サービスとデバイス(AWS Cloud9)のセットアップ
#
# このノートブックでは、デバイスとして AWS Cloud9 を使用します。[こちらのワークショップコンテンツ](https://greengrassv2.workshop.aws/ja/) の、以下の部分を、このノートブックの実行を始める前に実施してください。
#
# 1. 「1. GREENGRASSを動かす環境の用意」すべてを実施
# - 「ディスク容量の拡張」の部分でディスク容量拡張コマンドを実行した際にエラーが出た場合、1、2分待ってから再度実行するとうまくいくことがあります
# 1. 「2. GREENGRASSのセットアップ」すべてを実施
# - (同一アカウントの複数名で本ノートブックを実行する場合)「2.2 GREENGRASSのセットアップ」の初めにある「Greengrassコアデバイスのセットアップ」の手順を以下のとおり変更してください
# - 「ステップ 1: Greengrass コアデバイスを登録する」のコアデバイスにご自身の名前など他のデバイスと区別可能な文字列を入れる
# - 「ステップ 2: モノのグループに追加して継続的なデプロイを適用する」の「モノのグループ」で「グループなし」を選択する
# - 「2.2 GREENGRASSのセットアップ」で環境変数を設定する手順では、ご自身の IAM ユーザの認証情報をご利用ください
# - AWS マネジメントコンソールの左上にあるサービスの検索欄にIAMと入力し、IAM サービスを選択します
# - IAM ダッシュボード画面にて、左のペインにあるユーザーをクリックします
# - お客様が用いている IAM ユーザー名をクリックします (IAM アカウントが無い場合はこちらを参考に作成してください)
# - ユーザー管理画面から認証情報タブを開きます
# - アクセスキーの作成をクリックします
# - アクセスキー ID とシークレットアクセスキーをコピーし、ローカルに保存します。こちらがお客様の IAM アカウントの認証情報です。
# 1. 「5.1 (CASE1) コンポーネントの作成とデプロイの準備」の「S3バケットの作成」までを実施
# - 同一アカウントの複数名で本ノートブックを実行する場合、バケットをひとつのみ作成しそれを共有する形でも構いません
# 1. (追加手順)IAM のコンソールの左側のメニューから「アクセス管理」->「ロール」をクリックし、検索窓に「GreengrassV2TokenExchangeRole」と入力して「GreengrassV2TokenExchangeRole」を開く
# 1. (追加手順)「ポリシーをアタッチします」をクリックして `AmazonS3FullAccess` をアタッチ
# ---
#
#
# ## ノートブックインスタンスの IAM ロールに権限を追加
#
# 以下の手順を実行して、ノートブックインスタンスに紐づけられた IAM ロールに、AWS Step Functions のワークフローを作成して実行するための権限と Amazon ECR にイメージを push するための権限を追加してください。
#
# 1. [Amazon SageMaker console](https://console.aws.amazon.com/sagemaker/) を開く
# 1. **ノートブックインスタンス** を開いて現在使用しているノートブックインスタンスを選択する
# 1. **アクセス許可と暗号化** の部分に表示されている IAM ロールへのリンクをクリックする
# 1. IAM ロールの ARN は後で使用するのでメモ帳などにコピーしておく
# 1. **ポリシーをアタッチします** をクリックして `AWSStepFunctionsFullAccess` を検索する
# 1. `AWSStepFunctionsFullAccess` の横のチェックボックスをオンにする
# 1. 同様の手順で以下のポリシーのチェックボックスをオンにして **ポリシーのアタッチ** をクリックする
# - `AmazonEC2ContainerRegistryFullAccess`
# - `AWSGreengrassFullAccess`
# - `AWSIoTFullAccess`
# - `IAMFullAccess`
# - `AWSLambda_FullAccess`
#
# もしこのノートブックを SageMaker のノートブックインスタンス以外で実行している場合、その環境で AWS CLI 設定を行ってください。詳細は [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) をご参照ください。
#
# ---
# 以下のセルの下から二番目の行の `<BUCKET_NAME>` に事前準備で作成した S3 バケット名(ggv2-workshop-xxx)を記載してから実行してください。同一アカウントで複数人でこのノートブックを実行する場合は一番下の行の `user_name` を各自のお名前で置き換えてください。
# +
import boto3
import yaml
import pandas as pd
from dateutil import tz
import sagemaker
import os
JST = tz.gettz('Asia/Tokyo')  # Japan Standard Time, used for human-readable timestamps
region = boto3.session.Session().region_name
# AWS service clients used throughout the notebook.
s3_client = boto3.client('s3', region_name=region)
account_id = boto3.client('sts').get_caller_identity().get('Account')
ggv2_client = boto3.client('greengrassv2', region_name=region)
iot_client = boto3.client('iot', region_name=region)
lambda_client = boto3.client('lambda', region_name=region)
ecr_client = boto3.client('ecr', region_name=region)
s3 = boto3.resource('s3')
sagemaker_session = sagemaker.Session()
sagemaker_role = sagemaker.get_execution_role()
# Replace with the ggv2-workshop-xxx bucket created in the prerequisite steps.
bucket_name = '<BUCKET_NAME>'
# Change per person when several users share one AWS account.
user_name = 'sample'
# -
# ## Lambda 関数が使用するコンテナイメージを作成
#
# ここからは、3つのコンテナイメージを作成していきます。
#
# それぞれの Lambda 関数では、デフォルトの Lambda 環境にないライブラリなどを使用するため、コンテナイメージごとソースコードを Lambda 関数にデプロイします。
#
# ### コンテナイメージ更新用 Lambda 関数
#
# まずは、コンテナイメージ更新用 Lambda 関数が使用するコンテナイメージを作成します。この関数は、デプロイ用の Lambda 関数で使用するコンテナイメージを、ワークフロー実行時に設定したパラメタに応じて切り替えるためのものなので、デプロイ用 Lambda 関数が使用するコンテナイメージを変えることのないユースケースであれば不要です。
#
# Dockerfile と、Lambda 内で動かすスクリプトファイルを作成します。
# !mkdir -p docker/lambda-update/app
# +
# %%writefile ./docker/lambda-update/Dockerfile
FROM public.ecr.aws/lambda/python:3.8
RUN pip3 install --upgrade pip
RUN pip3 install -qU boto3 pyyaml
COPY app/app.py ./
CMD ["app.handler"]
# +
# %%writefile ./docker/lambda-update/app/app.py
import json
import boto3
import yaml
def handler(event, context):
    """Point the Greengrass-deploy Lambda at the container image in the config.

    Reads the YAML deployment config from the S3 URI in event['configPath'],
    then updates the code of the Lambda function named under
    'lambda-deploy-function' to the image under 'lambda-container'.

    Returns a dict with statusCode, the config path (passed downstream in the
    Step Functions workflow), and the raw UpdateFunctionCode response.
    """
    config_path = event['configPath']
    # Parse "s3://<bucket>/<key>" explicitly instead of relying on the
    # original magic character offset (len(bucket_name) + 6), which silently
    # breaks if the URI scheme ever changes.
    remainder = config_path.partition('s3://')[2]
    bucket_name, _, object_key = remainder.partition('/')
    s3 = boto3.resource('s3')
    body = s3.Bucket(bucket_name).Object(object_key).get()['Body'].read()
    config = yaml.safe_load(body)
    container_image = config['lambda-container']
    function_name = config['lambda-deploy-function']
    lambda_client = boto3.client('lambda')
    response = lambda_client.update_function_code(
        FunctionName=function_name,
        ImageUri=container_image
    )
    return {
        'statusCode': 200,
        'configPath': config_path,
        'response': response
    }
# -
# 必要なファイルを作成できたので、コンテナイメージをビルドして Amazon ECR にプッシュします。
# +
import datetime
# NOTE(review): "continer" is a typo, but renaming it would create a new ECR repo.
ecr_repository_lambda_update = 'lambda-update-continer-' + user_name
uri_suffix = 'amazonaws.com'
# JST timestamp tag so every push gets a unique image tag.
tag = ':' + datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y%m%d-%H%M%S')
lambda_update_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository_lambda_update + tag)
print(lambda_update_repository_uri)
# Create ECR repository and push docker image
# !docker build -t {ecr_repository_lambda_update + tag} docker/lambda-update
# !aws ecr get-login-password --region {region} | docker login --username AWS --password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com
# !aws ecr create-repository --repository-name $ecr_repository_lambda_update
# !docker tag {ecr_repository_lambda_update + tag} $lambda_update_repository_uri
# !docker push $lambda_update_repository_uri
# -
# ### モデルコンパイル用 Lambda 関数
#
# 次に、学習済みモデルを Amazon SageMaker Neo でコンパイルする Lambda 関数用のコンテナイメージを作成します。流れは先ほどと同様です。
# !mkdir -p docker/lambda-compile/app
# +
# %%writefile ./docker/lambda-compile/Dockerfile
FROM public.ecr.aws/lambda/python:3.8
RUN pip3 install --upgrade pip
RUN pip3 install -qU boto3 pyyaml
COPY app/app.py ./
CMD ["app.handler"]
# +
# %%writefile ./docker/lambda-compile/app/app.py
import json
import boto3
import yaml
import datetime
import time
import os
def handler(event, context):
    """Kick off a SageMaker Neo compilation job when the config requests one.

    Reads the YAML deployment config from the S3 URI in event['configPath'].
    If 'model-information.compile-model' is true, starts a compilation job for
    the configured model/target device and returns its name so the downstream
    deploy step can poll it; otherwise just passes the config path through.
    """
    print(event)
    config_path = event['configPath']
    # "s3://<bucket>/<key>": bucket is the 3rd '/'-separated field, the key
    # starts after "s3://" (5 chars) + bucket + "/" (1 char).
    bucket_name = config_path.split('/')[2]
    object_key = config_path[len(bucket_name)+6:]
    s3 = boto3.resource('s3')
    region = boto3.session.Session().region_name
    sagemaker_client = boto3.client('sagemaker', region_name=region)
    lambda_client = boto3.client('lambda')
    bucket = s3.Bucket(bucket_name)
    obj = bucket.Object(object_key)
    response = obj.get()
    body = response['Body'].read()
    config = yaml.safe_load(body)
    if config['model-information']['compile-model']:
        framework = config['model-information']['framework']
        # Timestamped (JST) job name so repeated runs never collide.
        timestamp = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y%m%d-%H%M%S')
        job_name = framework + '-compile-' + timestamp
        model_s3_path = config['model-information']['original-model-path']
        input_name = config['model-information']['input-name']
        input_shape = config['model-information']['input-shape']
        output_location = config['model-information']['compiled-model-path']
        target_device = config['model-information']['target-device']
        # Reuse this Lambda's own execution role for the compilation job.
        response = lambda_client.get_function(
            FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME']
        )
        role = response['Configuration']['Role']
        # e.g. {"input0":[1,3,224,224]} -- the shape string comes pre-bracketed.
        data_input_config = '{"' + input_name + '":'+input_shape + '}'
        response = sagemaker_client.create_compilation_job(
            CompilationJobName=job_name,
            RoleArn=role,
            InputConfig={
                'S3Uri': model_s3_path,
                'DataInputConfig':data_input_config,
                'Framework': framework
            },
            OutputConfig={
                'S3OutputLocation': output_location,
                'TargetDevice':target_device
            },
            StoppingCondition={ 'MaxRuntimeInSeconds': 9000 }
        )
        # Give the job a moment to register before the workflow polls it.
        time.sleep(60)
        return {
            'statusCode': 200,
            'configPath': config_path,
            'compileJobName': job_name,
            'response': response
        }
    else:
        return {
            'statusCode': 200,
            'configPath': config_path
        }
# +
import datetime
ecr_repository_lambda_compile = 'lambda-compile-model-' + user_name
uri_suffix = 'amazonaws.com'
# JST timestamp tag so every push gets a unique image tag.
tag = ':' + datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y%m%d-%H%M%S')
lambda_compile_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository_lambda_compile + tag)
print(lambda_compile_repository_uri)
# Create ECR repository and push docker image
# !docker build -t {ecr_repository_lambda_compile + tag} docker/lambda-compile
# Use get-login-password: the old `aws ecr get-login` is deprecated and removed
# in AWS CLI v2. This matches the login command used for the other images.
# !aws ecr get-login-password --region {region} | docker login --username AWS --password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com
# !aws ecr create-repository --repository-name $ecr_repository_lambda_compile
# !docker tag {ecr_repository_lambda_compile + tag} $lambda_compile_repository_uri
# !docker push $lambda_compile_repository_uri
# -
# ### デプロイ用 Lambda 関数
#
# 最後に、デプロイ用 Lambda 関数が使用するコンテナイメージを作成します。
# !mkdir -p docker/lambda/app
# +
# %%writefile ./docker/lambda/Dockerfile
FROM public.ecr.aws/lambda/python:3.8
RUN pip3 install --upgrade pip
RUN pip3 install -qU boto3 pyyaml
COPY app/app.py ./
CMD ["app.handler"]
# +
# %%writefile ./docker/lambda/app/app.py
import json
import boto3
import yaml
import os
import time
region = boto3.session.Session().region_name
sagemaker_client = boto3.client('sagemaker', region_name=region)
ggv2_client = boto3.client('greengrassv2', region_name=region)
def generate_recipe(artifact_files, component_name, component_version,
                    model_name, entrypoint_script_name, pip_libraries, model_input_shape):
    """Render the Greengrass v2 component recipe (a YAML string).

    The recipe installs the pip dependencies, unpacks the model archive into a
    ``model`` sub-directory of the artifact path, and runs the entrypoint
    script with the artifact path and the model input shape as arguments.
    """
    # One "- URI:" entry per artifact, in the order supplied.
    artifacts = ''.join(f'      - URI: {uri}\n' for uri in artifact_files)
    # Literal Greengrass recipe variable; must appear verbatim in the YAML.
    artifacts_path = '{artifacts:path}'
    recipe = f"""---
RecipeFormatVersion: '2020-01-25'
ComponentName: {component_name}
ComponentVersion: {component_version}
ComponentType: "aws.greengrass.generic"
ComponentDescription: Publish MQTT message to AWS IoT Core in Docker image.
ComponentPublisher: Amazon
ComponentConfiguration:
  DefaultConfiguration:
    accessControl:
      aws.greengrass.ipc.pubsub:
        com.example.Publisher:pubsub:1:
          policyDescription: "Allows access to publish to all topics."
          operations:
            - "aws.greengrass#PublishToTopic"
          resources:
            - "*"
Manifests:
  - Platform:
      os: "linux"
      architecture: "amd64"
    Name: "Linux"
    Lifecycle:
      Install:
        RequiresPrivilege: true
        Script: "pip3 install --upgrade pip\\n\
          pip3 install {pip_libraries}\\n\
          pip3 install awsiotsdk numpy\\n\
          apt-get install -y libgl1-mesa-dev\\n\
          mkdir {artifacts_path}/model\\n\
          tar xf {artifacts_path}/{model_name} -C {artifacts_path}/model"
        Run: "python3 {artifacts_path}/{entrypoint_script_name} {artifacts_path} '{model_input_shape}'"
    Artifacts:
{artifacts}
"""
    return recipe
def deploy_component(target_arn, deployment_name, components):
    """Create a Greengrass v2 deployment of *components* onto *target_arn*."""
    deployment_request = dict(
        targetArn=target_arn,  # target IoT thing or thing group
        deploymentName=deployment_name,
        components=components,
        iotJobConfiguration={
            'timeoutConfig': {'inProgressTimeoutInMinutes': 600}
        },
        deploymentPolicies={
            'failureHandlingPolicy': 'ROLLBACK',  # revert devices on failure
            'componentUpdatePolicy': {
                'timeoutInSeconds': 600,
                'action': 'NOTIFY_COMPONENTS'
            },
            'configurationValidationPolicy': {
                'timeoutInSeconds': 600
            }
        },
        tags={'Name': deployment_name}
    )
    return ggv2_client.create_deployment(**deployment_request)
def get_deployment_info(deployment_name):
    """Look up a Greengrass deployment by name.

    Returns (deployment_id, target_group_arn) for the first deployment whose
    name matches, or (-1, -1) when no deployment with that name exists.
    """
    deployments = ggv2_client.list_deployments()['deployments']
    for deployment in deployments:
        if deployment['deploymentName'] == deployment_name:
            # Removed the dead `group_id` local from the original version.
            return deployment['deploymentId'], deployment['targetArn']
    # Keep the original "not found" sentinel so existing callers still work.
    return -1, -1
def run_deployment(group_arn, deployment_name, component_name, component_version):
    """Deploy a single component version to *group_arn*, running it as root."""
    component_spec = {
        'componentVersion': component_version,
        # Root is required so the Install script can pip/apt install.
        'runWith': {'posixUser': 'root'},
    }
    # To ship extra components in the same deployment, add more entries here,
    # e.g. 'aws.greengrass.Cli': {'componentVersion': '2.3.0'}.
    return deploy_component(group_arn, deployment_name, {component_name: component_spec})
def handler(event, context):
    """Lambda entry point: register a Greengrass component version and deploy it.

    Reads a YAML deployment config from S3 (path carried in the Step Functions
    payload), optionally waits for a SageMaker Neo compilation job to finish,
    generates a component recipe, registers the component version, and starts a
    deployment to the target group of the named existing deployment.

    Args:
        event: Step Functions payload. Expects
            event['Payload']['Payload']['configPath'] (s3:// URI of the YAML
            config) and, when compilation is enabled in the config,
            event['Payload']['Payload']['compileJobName'].
        context: Lambda context object (unused).

    Returns:
        Dict with 'statusCode', 'component-name' and the CreateDeployment
        response.
    """
    print(event)
    config_path = event['Payload']['Payload']['configPath']
    # config_path is 's3://<bucket>/<key>': element 2 of the split is the
    # bucket; the key starts after 's3://' (5 chars) + bucket + '/' (1 char).
    bucket_name = config_path.split('/')[2]
    object_key = config_path[len(bucket_name)+6:]
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    obj = bucket.Object(object_key)
    response = obj.get()
    body = response['Body'].read()
    config = yaml.safe_load(body)
    component_name = config['component-name']
    component_version = config['component-version']
    deployment_name = config['deployment-name']
    entrypoint_script_name = config['entrypoint-script-name']
    pip_libraries = config['pip-libraries']
    model_path = config['model-information']['original-model-path']
    compile_model = config['model-information']['compile-model']
    artifact_files = config['artifact-files']
    if compile_model:
        # Poll the Neo compilation job every 30s until it finishes, then use
        # the compiled artifact instead of the original model.
        compile_job_name = event['Payload']['Payload']['compileJobName']
        print(compile_job_name)
        while True:
            status = sagemaker_client.describe_compilation_job(CompilationJobName=compile_job_name)['CompilationJobStatus']
            if status == 'COMPLETED' or status == 'FAILED':
                print(status)
                break
            time.sleep(30)
        # NOTE(review): if status is FAILED this still proceeds and reads
        # ModelArtifacts, which will likely raise — confirm desired behavior.
        model_path = sagemaker_client.describe_compilation_job(CompilationJobName=compile_job_name)['ModelArtifacts']['S3ModelArtifacts']
    model_name = os.path.basename(model_path)
    # The model archive is shipped as one more component artifact.
    artifact_files.append(model_path)
    recipe = generate_recipe(artifact_files, component_name, component_version,
                            model_name, entrypoint_script_name, pip_libraries, config['model-information']['input-shape'])
    print(recipe)
    print(type(recipe.encode()))
    # Create the component version from the inline recipe.
    response = ggv2_client.create_component_version(
        inlineRecipe=recipe.encode('utf-8')
    )
    # NOTE(review): variable name is misspelled ('vesrion') and unused.
    component_vesrion_arn = response['arn']
    deployment_id, group_arn = get_deployment_info(deployment_name)
    response = run_deployment(group_arn, deployment_name, component_name, component_version)
    return {
        'statusCode' : 200,
        'component-name': component_name,
        'response': response
    }
# +
import datetime
ecr_repository_lambda_deploy = 'lambda-deploy-gg-' + user_name
uri_suffix = 'amazonaws.com'
deploy_lambda_container_tag = ':' + datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y%m%d-%H%M%S')
lambda_deploy_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository_lambda_deploy + deploy_lambda_container_tag)
print(lambda_deploy_repository_uri)
# Create ECR repository and push docker image
# !docker build -t {ecr_repository_lambda_deploy + deploy_lambda_container_tag} docker/lambda
# !aws ecr get-login-password --region {region} | docker login --username AWS --password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com
# !aws ecr create-repository --repository-name $ecr_repository_lambda_deploy
# !docker tag {ecr_repository_lambda_deploy + deploy_lambda_container_tag} $lambda_deploy_repository_uri
# !docker push $lambda_deploy_repository_uri
# -
# ## Lambda 関数の作成と権限の設定
#
# コンテナ更新用 (update-container)、モデルコンパイル用 (compile-model)、コンポーネントのデプロイ用 (deploy-components-to-device) の Lambda 関数をそれぞれ作成します。下にある2つのセルでは、関数の作成と以下の操作を API を使って行なっています。
#
# ### タイムアウト時間の設定
#
# 全ての処理が終わるまで関数がタイムアウトしないように、それぞれの関数のタイムアウト時間をすべて 5分に設定します。
#
# ### アクセス権限の設定
#
# それぞれの関数の権限を以下のように設定します。<br>**今回は AmazonS3FullAccess など強い権限を付与していますが、実際の環境で使用する場合は必要最小限の権限のみを追加するようにしてください。**
#
# **update-container 関数**
# - ロールに以下のポリシーを追加
# - AmazonS3FullAccess
# - AWSLambda_FullAccess
#
# **compile-model 関数**
# - ロールに以下のポリシーを追加
# - AmazonS3FullAccess
# - AWSLambda_FullAccess
# - AmazonSageMakerFullAccess
# - 「信頼関係」に以下を設定
# ```
# "Service": [
# "sagemaker.amazonaws.com",
# "lambda.amazonaws.com"
# ]
# ```
#
#
# **deploy-components-to-device 関数**
# - ロールに以下のポリシーを追加
# - AmazonS3FullAccess
# - AmazonSageMakerFullAccess
# - AWSIoTFullAccess
# - AWSGreengrassFullAccess
# - 「信頼関係」に以下を設定
# ```
# "Service": [
# "sagemaker.amazonaws.com",
# "lambda.amazonaws.com"
# ]
# ```
#
# ---
# +
import boto3
import json
from datetime import datetime
from dateutil import tz
from time import sleep
JST = tz.gettz('Asia/Tokyo')
iam_client = boto3.client('iam')
def create_container_lambda_function(function_name, image_uri, policy_list, trust_service_list=[]):
    """Create a container-image Lambda function with a freshly created IAM role.

    Creates a CloudWatch-Logs inline policy and an execution role (timestamped
    names to avoid collisions), attaches the requested AWS-managed policies,
    then creates the Lambda function from the given ECR image.

    Args:
        function_name: Name of the Lambda function to create.
        image_uri: ECR image URI used as the function package.
        policy_list: AWS-managed policy names to attach (e.g. 'AmazonS3FullAccess').
        trust_service_list: Extra service principals to add to the role's trust
            policy, in addition to lambda.amazonaws.com.
            NOTE(review): mutable default argument — harmless here because the
            list is only iterated, never mutated, but a None default is safer.

    Returns:
        Tuple of (role_name, inline_policy_arn) so the caller can clean up later.
    """
    timestamp = datetime.now(tz=JST).strftime('%Y%m%d-%H%M%S')
    lambda_function_name = function_name
    lambda_inference_policy_name = lambda_function_name + '-policy-'+timestamp
    lambda_inference_role_name = lambda_function_name + '-role-'+timestamp
    # Minimal logging permissions scoped to this function's log group.
    inline_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Effect': 'Allow',
                'Action': 'logs:CreateLogGroup',
                'Resource': f'arn:aws:logs:{region}:{account_id}:*'
            },
            {
                'Effect': 'Allow',
                'Action': [
                    'logs:CreateLogStream',
                    'logs:PutLogEvents'
                ],
                'Resource': [
                    f'arn:aws:logs:{region}:{account_id}:log-group:/aws/lambda/{lambda_function_name}:*'
                ]
            }
        ]
    }
    response = iam_client.create_policy(
        PolicyName=lambda_inference_policy_name,
        PolicyDocument=json.dumps(inline_policy),
    )
    policy_arn = response['Policy']['Arn']
    # Trust policy: Lambda plus any extra services (e.g. SageMaker).
    service_list = ["lambda.amazonaws.com"]
    for t in trust_service_list:
        service_list.append(t)
    assume_role_policy = {
        "Version": "2012-10-17",
        "Statement": [{"Sid": "","Effect": "Allow","Principal": {"Service":service_list},"Action": "sts:AssumeRole"}]
    }
    response = iam_client.create_role(
        Path = '/service-role/',
        RoleName = lambda_inference_role_name,
        AssumeRolePolicyDocument = json.dumps(assume_role_policy),
        MaxSessionDuration=3600*12 # 12 hours
    )
    lambda_role_arn = response['Role']['Arn']
    lambda_role_name = response['Role']['RoleName']
    # Attach the logging policy first, then each requested managed policy.
    response = iam_client.attach_role_policy(
        RoleName=lambda_inference_role_name,
        PolicyArn=policy_arn
    )
    for p in policy_list:
        arn = 'arn:aws:iam::aws:policy/' + p
        response = iam_client.attach_role_policy(
            RoleName=lambda_inference_role_name,
            PolicyArn=arn
        )
    # IAM is eventually consistent: give the new role time to propagate
    # before Lambda validates it, otherwise create_function can fail.
    sleep(20) # wait until IAM is created
    response = lambda_client.create_function(
        FunctionName=function_name,
        Role=lambda_role_arn,
        Code={
            'ImageUri':image_uri
        },
        Timeout=60*5, # 5 minutes
        MemorySize=128, # 128 MB
        Publish=True,
        PackageType='Image',
    )
    return lambda_role_name, policy_arn
# +
lambda_policies = []
lambda_roles = []
lambda_function_name_update = 'update-container-' + user_name
policy_list = ['AmazonS3FullAccess', 'AWSLambda_FullAccess']
r, p = create_container_lambda_function(lambda_function_name_update, lambda_update_repository_uri, policy_list)
lambda_policies.append(p)
lambda_roles.append(r)
lambda_function_name_compile = 'compile-model-' + user_name
policy_list = ['AmazonS3FullAccess', 'AWSLambda_FullAccess', 'AmazonSageMakerFullAccess']
trust_list = ["sagemaker.amazonaws.com"]
r, p = create_container_lambda_function(lambda_function_name_compile, lambda_compile_repository_uri, policy_list, trust_list)
lambda_policies.append(p)
lambda_roles.append(r)
lambda_function_name_deploy = 'deploy-components-to-device-' + user_name
policy_list = ['AmazonS3FullAccess', 'AWSIoTFullAccess', 'AmazonSageMakerFullAccess', 'AWSGreengrassFullAccess']
r, p = create_container_lambda_function(lambda_function_name_deploy, lambda_deploy_repository_uri, policy_list, trust_list)
lambda_policies.append(p)
lambda_roles.append(r)
# -
# ## Step Functions Data Science SDK でワークフローを作成
#
# Step Functions で使用する実行ロールを作成します。
#
# ### Step Functions の実行ロールの作成
#
# 作成した Step Functions ワークフローは、AWS の他のサービスと連携するための IAM ロールを必要とします。以下のセルを実行して、必要な権限を持つ IAM Policy を作成し、それを新たに作成した IAM Role にアタッチします。
#
# なお、今回は広めの権限を持つ IAM Policy を作成しますが、ベストプラクティスとしては必要なリソースのアクセス権限と必要なアクションのみを有効にします。
# +
step_functions_policy_name = 'AmazonSageMaker-StepFunctionsWorkflowExecutionPolicy-' + user_name
inline_policy ={
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"events:PutTargets",
"events:DescribeRule",
"events:PutRule"
],
"Resource": [
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule"
]
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": sagemaker_role,
"Condition": {
"StringEquals": {
"iam:PassedToService": "sagemaker.amazonaws.com"
}
}
},
{
"Sid": "VisualEditor2",
"Effect": "Allow",
"Action": [
"batch:DescribeJobs",
"batch:SubmitJob",
"batch:TerminateJob",
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"ecs:DescribeTasks",
"ecs:RunTask",
"ecs:StopTask",
"glue:BatchStopJobRun",
"glue:GetJobRun",
"glue:GetJobRuns",
"glue:StartJobRun",
"lambda:InvokeFunction",
"sagemaker:CreateEndpoint",
"sagemaker:CreateEndpointConfig",
"sagemaker:CreateHyperParameterTuningJob",
"sagemaker:CreateModel",
"sagemaker:CreateProcessingJob",
"sagemaker:CreateTrainingJob",
"sagemaker:CreateTransformJob",
"sagemaker:DeleteEndpoint",
"sagemaker:DeleteEndpointConfig",
"sagemaker:DescribeHyperParameterTuningJob",
"sagemaker:DescribeProcessingJob",
"sagemaker:DescribeTrainingJob",
"sagemaker:DescribeTransformJob",
"sagemaker:ListProcessingJobs",
"sagemaker:ListTags",
"sagemaker:StopHyperParameterTuningJob",
"sagemaker:StopProcessingJob",
"sagemaker:StopTrainingJob",
"sagemaker:StopTransformJob",
"sagemaker:UpdateEndpoint",
"sns:Publish",
"sqs:SendMessage"
],
"Resource": "*"
}
]
}
response = iam_client.create_policy(
PolicyName=step_functions_policy_name,
PolicyDocument=json.dumps(inline_policy),
)
step_functions_policy_arn = response['Policy']['Arn']
# +
step_functions_role_name = 'AmazonSageMaker-StepFunctionsWorkflowExecutionRole-' + user_name
assume_role_policy = {
"Version": "2012-10-17",
"Statement": [{"Sid": "","Effect": "Allow","Principal": {"Service":"states.amazonaws.com"},"Action": "sts:AssumeRole"}]
}
response = iam_client.create_role(
Path = '/service-role/',
RoleName = step_functions_role_name,
AssumeRolePolicyDocument = json.dumps(assume_role_policy),
MaxSessionDuration=3600*12 # 12 hours
)
step_functions_role_arn = response['Role']['Arn']
response = iam_client.attach_role_policy(
RoleName=step_functions_role_name,
PolicyArn=step_functions_policy_arn
)
response = iam_client.attach_role_policy(
RoleName=step_functions_role_name,
PolicyArn='arn:aws:iam::aws:policy/CloudWatchEventsFullAccess'
)
# -
workflow_execution_role = step_functions_role_arn
import sys
# !{sys.executable} -m pip install -qU "stepfunctions==2.1.0"
import stepfunctions
from stepfunctions import steps
from stepfunctions.inputs import ExecutionInput
from stepfunctions.steps import (
Chain,
ChoiceRule,
ModelStep,
ProcessingStep,
TrainingStep,
TransformStep,
)
# from stepfunctions.template import TrainingPipeline
from stepfunctions.template.utils import replace_parameters_with_jsonpath
from stepfunctions.workflow import Workflow
# ### AWS Step Functions ワークフローの作成
#
# まずは、AWS Step Functions ワークフロー実行時に指定するパラメタの定義をします。
execution_input = ExecutionInput(
schema={
"ConfigFilePath": str,
}
)
# ここからは、先ほど作成した 3つの Lambda 関数を順に実行するようなワークフローを作成していきます。
# +
from stepfunctions.steps.states import Retry
lambda_update_step = stepfunctions.steps.compute.LambdaStep(
"Update Container Image",
parameters={
"FunctionName": lambda_function_name_update,
"Payload": {
"configPath": execution_input["ConfigFilePath"],
},
},
)
lambda_compile_step = stepfunctions.steps.compute.LambdaStep(
"Compile Model",
parameters={
"FunctionName": lambda_function_name_compile,
"Payload": {
"configPath": execution_input["ConfigFilePath"],
},
},
)
lambda_deploy_step = stepfunctions.steps.compute.LambdaStep(
"Deploy components",
parameters={
"FunctionName": lambda_function_name_deploy,
"Payload": {
"Payload.$": "$"
},
},
)
lambda_update_step.add_retry(
Retry(error_equals=["States.TaskFailed"], interval_seconds=15, max_attempts=2, backoff_rate=4.0)
)
lambda_compile_step.add_retry(
Retry(error_equals=["States.TaskFailed"], interval_seconds=15, max_attempts=2, backoff_rate=4.0)
)
lambda_deploy_step.add_retry(
Retry(error_equals=["States.TaskFailed"], interval_seconds=15, max_attempts=2, backoff_rate=4.0)
)
# -
# ### Choice State と Wait State の作成
#
# 設定ファイルの `neo-compile` が True か False かによって、モデルコンパイル用 Lambda 関数実行後の待ち時間を調整するために、Choice State と Wait State を使用します。
# +
wait_5_step = stepfunctions.steps.states.Wait(
"Wait for five minutes",
seconds = 300,
)
wait_2_step = stepfunctions.steps.states.Wait(
"Wait for two minutes",
seconds = 120,
)
wait_choice_step = stepfunctions.steps.states.Choice(
"Compile enable"
)
wait_choice_step.add_choice(
rule=ChoiceRule.IsPresent(variable=lambda_compile_step.output()["Payload"]["response"], value=True),
next_step=wait_5_step
)
wait_choice_step.default_choice(
next_step=wait_2_step
)
wait_2_step.next(lambda_deploy_step)
wait_5_step.next(lambda_deploy_step)
# -
# ### Fail 状態の作成
# いずれかのステップが失敗したときにワークフローが失敗だとわかるように Fail 状態を作成します。
#
# エラーハンドリングのために [Catch Block](https://aws-step-functions-data-science-sdk.readthedocs.io/en/stable/states.html#stepfunctions.steps.states.Catch) を使用します。もし いずれかの Step が失敗したら、Fail 状態に遷移します。
# +
failed_state_sagemaker_processing_failure = stepfunctions.steps.states.Fail(
"ML Workflow failed", cause="Failed"
)
catch_state_processing = stepfunctions.steps.states.Catch(
error_equals=["States.TaskFailed"],
next_step=failed_state_sagemaker_processing_failure,
)
lambda_update_step.add_catch(catch_state_processing)
lambda_compile_step.add_catch(catch_state_processing)
lambda_deploy_step.add_catch(catch_state_processing)
# -
# ### Workflow の作成
#
# ここまでで Step Functions のワークフローを作成する準備が完了しました。`branching_workflow.create()` を実行することで、ワークフローが作成されます。一度作成したワークフローを更新する場合は `branching_workflow.update()` を実行します。
#
# Chain を使って各 Step を連結してワークフローを作成します。既存のワークフローを変更する場合は、update() を実行します。ログに ERROR が表示された場合は、以下のセルを再度実行してください。
# +
import time
workflow_graph = Chain([lambda_update_step, lambda_compile_step, wait_choice_step])
branching_workflow = Workflow(
name="gg-deploy-workflow-" + user_name,
definition=workflow_graph,
role=workflow_execution_role,
)
branching_workflow.create()
branching_workflow.update(workflow_graph)
time.sleep(5)
branching_workflow.render_graph(portrait=False)
# -
# ここまででモデルデプロイワークフローが作成できました。ワークフローは初めにいったん作成してしまえば、あとは実行するだけです。
#
# ここからの手順は、新しいモデルをエッジデバイスにデプロイするたびに実行する想定です。
# ---
#
# ## エッジ推論で使用する学習済みモデルの準備
#
# このノートブックでは、Keras の学習済みの MobileNet モデルを使用します。Amazon SageMaker Neo で動作確認済みのモデルは [こちらのドキュメント](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-edge-tested-models.html) で確認できます。また、サポートされているフレームワークのバージョンは [こちらのドキュメント](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html) で確認できます。
#
# 以下のセルを実行して、学習済みの MobileNet モデルをダウンロードし、`mobilenet.h5` として保存します。また、`model.summary()` を実行して、入力レイヤ名を確認します。Keras の MobileNet であればたいてい `input_1` となります。この名前を後ほど設定ファイルに記載するので覚えておいてください。
#
# 自分で学習したモデルをデプロイする場合は、この手順は不要です。
# +
import tensorflow as tf
model = tf.keras.applications.MobileNet()
model.save('mobilenet.h5')
model.summary()
# -
# ---
#
# ## デプロイ設定ファイルのセットアップ
#
# このノートブックの構成では、作成した AWS Step Functions ワークフローを実行する際に、デプロイに関する情報を記載した設定ファイル(yaml 形式)を入力パラメタとして指定します。モデルを学習した際の学習ジョブの情報など、他に記録しておきたい情報があれば、この yaml ファイルに記述を追加してください。ここからは、設定ファイルを作成していきます。
#
# ### ユーティリティ関数の定義
#
# まずは設定ファイルの作成に必要な関数を準備します。
# +
import boto3
# region = boto3.session.Session().region_name
# ggv2_client = boto3.client('greengrassv2', region_name=region)
def get_latest_component_version(component_name):
    """Return the latest registered version string of the named Greengrass
    component, or None when the component is not registered."""
    for component in ggv2_client.list_components()['components']:
        if component['componentName'] == component_name:
            return component['latestVersion']['componentVersion']
def get_target_group_arn(deployment_name):
    """Return the target ARN of the deployment with the given display name.

    Args:
        deployment_name: Display name of the deployment to look up.

    Returns:
        The deployment's target ARN (thing or thing-group ARN), or -1 when
        no deployment with that name exists.
    """
    # Dead locals (deployment_id/group_id) removed — they were assigned but
    # never used; the loop returns directly on the first match.
    for d in ggv2_client.list_deployments()['deployments']:
        if d['deploymentName'] == deployment_name:
            return d['targetArn']
    return -1
def increment_version(current_version, target='revision'):
    """Increment a 'major.minor.revision' version string.

    Args:
        current_version: Existing version string, or None to start a fresh
            version history (returns '0.0.1').
        target: Which part to bump: 'major', 'minor' or 'revision'.
            Bumping a part resets the less significant parts to zero.

    Returns:
        The incremented version string. On an invalid target an error is
        printed and current_version is returned unchanged.
    """
    # Fixed PEP 8 violation: compare to None with 'is', not '=='.
    if current_version is None:
        return '0.0.1'
    major, minor, revision = map(int, current_version.split('.'))
    if target == 'revision':
        revision += 1
    elif target == 'minor':
        minor += 1
        revision = 0
    elif target == 'major':
        major += 1
        minor = 0
        revision = 0
    else:
        print('[ERROR] invalid target value')
        return current_version
    return '{}.{}.{}'.format(major, minor, revision)
# -
# ### デプロイ関連情報の定義
#
# 以下のセルに、デプロイしたいコンポーネントの情報を入力して実行します。**必ず以下の変数をご自身の環境に合わせて書き換えてください。**
#
# - `deployment_name`: [事前準備]の手順で作成したデバイスに紐づいたデプロイメント名
#
#
# 前の手順で確認したモデルの入力名が `input_1` でなかった場合は、`model_input_name` の値も書き換えてください。必要に応じて以下の変数を書き換えてください。
#
# - Amazon SageMaker Neo 用の設定
# - `model_input_name`: 学習済みモデルの入力名。Keras の場合、model.summary() で確認する
# - `model_input_shape`: 学習済みモデルの入力サイズ。Keras の場合、model.summary() で確認する
# - `model_framework`: 学習済みモデルのフレームワーク
# - `target_device`: コンパイルターゲットデバイスの種類
# - AWS IoT Greengrass V2 用の設定
# - `gg_s3_path`: 設定ファイルやアーティファクトを保存する S3 パス(このノートブックを実行するのと同じリージョンのバケットにしてください)
# - `component_name`: コンポーネントの名前(任意の名前)
# - `target_group_arm`: デプロイ先のターゲットグループの ARN
# - `pip_libraries`: Greengrass レシピに記載する、エッジ推論に必要なライブラリ名(複数ある場合はスペース区切り)
# - その他の設定
# - `deploy_lambda_container`: デプロイ用 Lambda 関数で使用するコンテナイメージの URI(コンテナイメージ作成手順の中で Amazon ECR にプッシュしたイメージ)
#
# このノートブックでは、`gg_s3_path` で設定した S3 パス以下に必要なファイルをアップロードします。また、モデルを SageMaker Neo でコンパイルするので、Greengrass コンポーネントに `pip install` するライブラリを指定するための `pip_libraries` を用意しています。
gg_s3_path = 's3://'+bucket_name+'/gg/' + user_name # 設定ファイル、アーティファクトファイルなどを保存する S3 パス
component_name = 'com.example.IoTPublisher.' + user_name
deployment_name = 'Deployment for GreengrassQuickStartGroup'
target_group_arm = get_target_group_arn(deployment_name)
target_device = 'ml_m5'
model_input_name = 'input_1'
model_input_shape = '[1, 3, 224, 224]'
model_framework = 'KERAS'
deploy_lambda_container = lambda_deploy_repository_uri
pip_libraries = 'dlr pillow opencv-python opencv-contrib-python'
# ### 学習済みモデルとアーティファクトファイルを S3 にアップロード
#
# デバイスにデプロイするサンプルとして用意したファイルを S3 にアップロードします。アーティファクトファイルは、`bucket_name`/gg/artifacts/コンポーネント名/コンポーネントバージョン にアップロードされます。
#
# このサンプルノートブックでは、アーティファクトファイルに変更がなくても必ずコンポーネントバージョンごとにファイルを S3 に保存する構成になっています。
# +
# !tar zcvf mobilenet.tar.gz mobilenet.h5
# Determine the next component version from the latest registered one.
latest_version = get_latest_component_version(component_name)
component_version = increment_version(latest_version)
# Artifacts go under gg/<user>/artifacts/<component>/<version>/data so every
# component version keeps its own copy of the artifact files.
path = os.path.join('gg', user_name, 'artifacts', component_name, component_version, 'data')
artifacts_s3_path = sagemaker_session.upload_data(path='artifacts', bucket=bucket_name, key_prefix=path)
# BUG FIX: the original assigned the bare tuple ('gg', user_name, 'models')
# instead of joining it, so key_prefix received a tuple rather than a string.
path = os.path.join('gg', user_name, 'models')
model_s3_path = sagemaker_session.upload_data(path='mobilenet.tar.gz', bucket=bucket_name, key_prefix=path)
print('new component version:', component_version)
# -
# ### 設定ファイル(yaml 形式)をファイルに保存
#
# 以下のセルを実行して、設定ファイルを作成します。Greengrass コンポーネントのバージョンは、最新のバージョンのリビジョン番号を 1インクリメントしたものが自動的にセットされます。メジャー番号やマイナー番号をインクリメントしたい場合は、`get_latest_component_version` の引数に `target='major'` などを指定してください。
#
# 以下のセルでは、学習済みモデルが `{gg_s3_path}/models` という S3 パスに保存されている想定で設定ファイル(yaml 形式)を作成して保存しています。SageMaker 学習ジョブが出力したファイルを直接指定したい場合は、以下のセルの `original-model-path` にモデルが保存されているフルパスを設定してください。また、SageMaker Neo でコンパイル済みのモデルを指定する場合は、`compile-model:` に `False` を設定し、`compiled-model-path` にはコンパイル済みのモデルが保存されている S3 パスを設定してください。
# +
import datetime
timestamp = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))).strftime('%Y%m%d-%H%M%S')
config_string = f"""
component-name: {component_name}
component-version: {component_version}
deployment-name: {deployment_name}
target-group-arn: {target_group_arm}
entrypoint-script-name: 'run.py'
pip-libraries: {pip_libraries}
model-information:
original-model-path: {model_s3_path}
compile-model: True
compiled-model-path: {gg_s3_path}/compiled-models/
target-device: {target_device}
input-name: {model_input_name}
input-shape: '{model_input_shape}'
framework: {model_framework}
lambda-container: {deploy_lambda_container}
lambda-deploy-function: 'deploy-components-to-device-{user_name}'
artifact-files:
- {artifacts_s3_path}/run.py
- {artifacts_s3_path}/inference.py
- {artifacts_s3_path}/classification-demo.png
- {artifacts_s3_path}/image_net_labels.json
"""
config_name = timestamp + '-' + component_version + '.yaml'
with open(config_name, 'w') as f:
f.write(config_string)
# -
# ### 設定ファイルを S3 にアップロード
#
# 作成した設定ファイルを S3 にアップロードします。アップロード先の S3 パスが `upload_path` に保存されます。AWS Step Functions ワークフローを実行する際に、このファイルパスを入力パラメタとして指定します。
# +
import yaml
import os
with open(config_name) as file:
config = yaml.safe_load(file)
component_name = config['component-name']
upload_path = os.path.join(gg_s3_path, 'config', component_name, component_version, config_name)
# !aws s3 cp $config_name $upload_path
# -
# ## AWS Step Functions ワークフローの実行
#
# それではいよいよ、ワークフローを実行して ML モデルやスクリプトをエッジにデプロイします。ワークフローの実行方法は 2通りあります。このノートブックを初めて使用した場合は、1の方法をご利用ください。
#
# 1. ワークフローを作成し、続けて実行する
# 1. 既存のワークフローを呼び出して実行する
#
# ### ワークフローの実行
# 以下のセルを実行してワークフローを開始します。引数の `ConfigFilePath` に設定ファイルが置いてある S3 パスが指定されています。このセルを実行した場合は、次のセルを実行する必要はありません。
#
# 過去に作成した設定ファイルを指定してワークフローを実行すれば、当時と同じ処理を実行することができます。
execution = branching_workflow.execute(
inputs={
'ConfigFilePath': upload_path
}
)
# ### 既存のワークフローを呼び出して実行
#
# すでに作成してあるワークフローを実行する場合は以下のセルの `workflow_arn` にワークフローの ARN を入力し、コメントアウトを解除してから実行してください。ワークフローの ARN は [AWS Step Functions のコンソール](https://ap-northeast-1.console.aws.amazon.com/states/home?region=ap-northeast-1#/statemachines) から確認できます。
# +
# from stepfunctions.workflow import Workflow
# workflow_arn = 'arn:aws:states:ap-northeast-1:420964472730:stateMachine:gg-deploy-workflow'
# existing_workflow = Workflow.attach(workflow_arn)
# execution = existing_workflow.execute(
# inputs={
# 'ConfigFilePath': upload_path
# }
# )
# -
# 以下のセルを実行すると、ワークフローの実行状況を確認できます。
execution.render_progress()
# デプロイが完了したら、Cloud9 のターミナルで以下のコマンドを実行してログを表示します。うまくデプロイできていれば、5秒ごとに推論結果が表示されます。なお、常に同じ画像を使って推論しているため、常に同じ結果が表示されます。
#
# > tail -f /tmp/Greengrass_HelloWorld.log
# ## デプロイされたモデルの情報を一覧表示
#
# どのデバイスにどのモデルがデプロイされたかを知りたいことがあります。その場合は、Greengrass の deployments のリストから各種情報を API やワークフロー実行時の設定ファイルを使って情報を一覧表示することができます。
#
# まずは必要な関数を定義します。
# +
# import boto3
# import yaml
# import pandas as pd
# from dateutil import tz
# JST = tz.gettz('Asia/Tokyo')
# region = boto3.session.Session().region_name
# s3_client = boto3.client('s3', region_name=region)
# ggv2_client = boto3.client('greengrassv2', region_name=region)
# iot_client = boto3.client('iot', region_name=region)
# s3 = boto3.resource('s3')
def get_device_in_deployment_list():
    """Collect core-device status for every Greengrass deployment target.

    For each deployment, resolves the target thing group to its member
    things — falling back to treating the target as a single thing name
    when the group lookup fails — then queries each core device's health.

    Returns:
        List of dicts with deployment name, target ARN, thing group name,
        thing name, device status, and last status update time (JST).
    """
    deployments = ggv2_client.list_deployments()['deployments']
    device_list = []
    for d in deployments:
        thing_group_name = d['targetArn'].split('/')[-1]
        try:
            response = iot_client.list_things_in_thing_group(
                thingGroupName=thing_group_name
            )
            things = response['things']
        # FIX: narrowed the bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit) while keeping the deliberate
        # best-effort fallback for non-group targets.
        except Exception:
            things = [thing_group_name]
        for thing in things:
            response = ggv2_client.get_core_device(
                coreDeviceThingName=thing
            )
            device_list.append({
                'deployment-name': d['deploymentName'],
                'target-arn': d['targetArn'],
                'thing-group-name': thing_group_name,
                'thing-name': thing,
                'status': response['status'],
                'last-status-updated': response['lastStatusUpdateTimestamp'].astimezone(JST)
            })
    return device_list
def get_installed_component_list(device_name):
    """Return [name, version] pairs for components installed on a core device."""
    installed = ggv2_client.list_installed_components(
        coreDeviceThingName=device_name
    )['installedComponents']
    return [[entry['componentName'], entry['componentVersion']] for entry in installed]
def get_config_file_name(bucket_name, prefix):
    """Return the lexicographically greatest object name under prefix/ in S3.

    Config files are named '<timestamp>-<version>.yaml', so the greatest key
    under string comparison is the most recently uploaded one.

    Args:
        bucket_name: S3 bucket to search.
        prefix: Key prefix (without trailing slash) to search under.

    Returns:
        The latest object key relative to the prefix, or the sentinel '0'
        when no objects exist under the prefix (the original returned None
        here, which crashed the caller's os.path.join).
    """
    prefix = prefix + '/'

    def search_component(latest, start_after):
        objects = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=prefix, StartAfter=start_after)
        if "Contents" in objects:
            keys = [content["Key"][len(prefix):] for content in objects["Contents"]]
            for k in keys:
                if k > latest:
                    latest = k
            # BUG FIX: the response key is 'IsTruncated' (the original checked
            # 'isTruncated', so pagination never triggered), and StartAfter
            # must be a full object key, so the prefix is re-attached.
            if objects.get("IsTruncated"):
                return search_component(latest=latest, start_after=prefix + keys[-1])
        return latest

    return search_component('0', '')
def get_config_info_as_yaml(config_file_path, component_info):
    """Load the deployment config YAML for a (component, version) pair from S3.

    Args:
        config_file_path: Base s3:// path under which config files were
            uploaded (e.g. 's3://<bucket>/gg/<user>').
        component_info: [component_name, component_version] pair.

    Returns:
        The parsed config dict, or None when no config object exists (e.g.
        the component was deployed outside this workflow).
    """
    component_name, component_version = component_info
    # config_file_path is 's3://<bucket>/...': element 2 is the bucket; the
    # key prefix starts after 's3://' (5 chars) + bucket + '/' (1 char).
    bucket_name = config_file_path.split('/')[2]
    prefix = os.path.join(config_file_path[len(bucket_name)+6:] , 'config', component_name, component_version)
    config_file_name = os.path.join(prefix, get_config_file_name(bucket_name, prefix))
    bucket = s3.Bucket(bucket_name)
    obj = bucket.Object(config_file_name)
    try:
        response = obj.get()
    # Missing object (or any other S3 error) is treated as "no config".
    # NOTE(review): bare except — would also hide credential/network errors.
    except:
        return None
    body = response['Body'].read()
    config = yaml.safe_load(body)
    return config
def show_deployed_model_info(config_file_path):
    """Build a DataFrame describing ML components deployed via this workflow.

    For every device of every deployment, looks up each installed component's
    config file in S3 and collects deployment/model metadata. Components
    without a config file (deployed outside the workflow) are skipped with a
    console message.

    Args:
        config_file_path: Base s3:// path where config files were uploaded.

    Returns:
        pandas.DataFrame with one row per deployed ML component.
    """
    device_list = get_device_in_deployment_list()
    column_name = [
        'deployment-name',#0
        'target-group-arn',
        'thing-name',
        'thing-status',
        'thing-last-status-updated',#5
        'component-name',
        'deployed-component-version',
        'model-name',
        'model-fullpath',
        'model-framework',#10
        'neo-compile']
    rows = []
    for d in device_list:
        print('Device "', d['thing-name'], '" has these ML components: ')
        component_list = get_installed_component_list(d['thing-name'])
        for c in component_list:
            # FIX: the original fetched the same config from S3 twice per
            # component; fetch once and reuse.
            config = get_config_info_as_yaml(config_file_path, c)
            if config is None:
                print('\t No ML components.')
                continue
            component_name, component_version = c
            print('\t component-name:', component_name, '-- version:', component_version)
            rows.append({
                column_name[0]: config['deployment-name'],
                column_name[1]: config['target-group-arn'],
                column_name[2]: d['thing-name'],
                column_name[3]: d['status'],
                column_name[4]: d['last-status-updated'],
                column_name[5]: config['component-name'],
                column_name[6]: config['component-version'],
                column_name[7]: config['model-information']['original-model-path'].split('/')[-1],
                column_name[8]: config['model-information']['original-model-path'],
                column_name[9]: config['model-information']['framework'],
                column_name[10]: config['model-information']['compile-model']})
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # accumulate plain dicts and build the frame once instead.
    return pd.DataFrame(rows, columns=column_name)
# -
# 以下のセルを実行すると、デプロイパイプラインを使って ML モデルをデプロイしたデバイスの一覧が Pandas の DataFrame 形式で表示されます。作成した Step Functions ワークフローを使わず直接 Greengrass のコンソールからコンポーネントをデプロイした場合は、こちらの一覧には表示されないのでご注意ください。実際のワークロードでは必ずワークフローを使ってデプロイすることをルールとすることをおすすめします。
#
# `config_file_path_list` には、デプロイ設定ファイルが保存されている S3 パスのリストを設定します。
config_file_path_list = [gg_s3_path]
info = None
for config_file_path in config_file_path_list:
df = show_deployed_model_info(config_file_path)
if info is None:
info = df
else:
info = pd.concat([info, df])
info
# [おまけ] DataFrame を html ファイルとして保存することもできます。
# +
pd.set_option('colheader_justify', 'center')
html_string = '''
<html>
<head><title>Pandas DataFrame</title></head>
<link rel="stylesheet" type="text/css" href="dataframe-style.css"/>
<body>
{table}
</body>
</html>.
'''
# OUTPUT AN HTML FILE
with open('index.html', 'w') as f:
f.write(html_string.format(table=df.to_html(classes='table-style')))
# -
# Jupyter ノートブックで以下のセルを実行すると、html ファイルの中身が表示されます。Jupyter Lab や SageMaker Studio では `HTML` が機能しないので、以下のセルを実行するのではなく直接 html ファイルを開いてください。
from IPython.display import HTML
HTML('index.html')
# ## [重要] リソースの削除
#
#
# 不要な課金を避けるために、以下のリソースを削除してください。特に Amazon SageMaker ノートブックインスタンスは削除しない限りコンピュートリソースとストレージの利用料金が継続するので、不要な場合は必ず削除してください。
#
# - [必須] Amazon SageMaker ノートブックインスタンス
# - 事前準備で作成した Cloud9 環境
# - IoT Greengrass リソース
# - S3 に保存したデータ
# - AWS Step Functions ワークフロー
# - Lambda 関数
#
# ### AWS Step Functions ワークフロー関連のリソース削除
#
# 以下のセルを実行して、作成したワークフローと IAM role, IAM policy を削除します。
# +
branching_workflow.delete()
def detach_role_policies(role_name):
    """Detach every managed policy currently attached to the given IAM role."""
    attached = iam_client.list_attached_role_policies(
        RoleName=role_name,
    )['AttachedPolicies']
    for policy in attached:
        iam_client.detach_role_policy(
            RoleName=role_name,
            PolicyArn=policy['PolicyArn']
        )
detach_role_policies(step_functions_role_name)
iam_client.delete_role(RoleName=step_functions_role_name)
iam_client.delete_policy(PolicyArn=step_functions_policy_arn)
# -
# ### Lambda 関数関連のリソース削除
#
# 以下のセルを実行して、作成した Lambda 関数と IAM role, IAM policy, ECR repository を削除します。
# +
for l in lambda_roles:
detach_role_policies(l)
iam_client.delete_role(RoleName=l)
for p in lambda_policies:
iam_client.delete_policy(PolicyArn=p)
lambda_client.delete_function(FunctionName=lambda_function_name_update)
lambda_client.delete_function(FunctionName=lambda_function_name_compile)
lambda_client.delete_function(FunctionName=lambda_function_name_deploy)
ecr_client.delete_repository(
repositoryName=ecr_repository_lambda_update,
force=True
)
ecr_client.delete_repository(
repositoryName=ecr_repository_lambda_compile,
force=True
)
ecr_client.delete_repository(
repositoryName=ecr_repository_lambda_deploy,
force=True
)
# -
| mlops/edge-inference/sagemaker-neo-greengrass-v2-deploy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Covid-19: From model prediction to model predictive control
#
# ## Scenario-analysis with the age-stratified deterministic model
#
# *Original code by <NAME>. Modified by <NAME> in consultation with the BIOMATH research unit headed by prof. <NAME>.*
#
# Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved.
#
# This notebook was made to quickly perform scenario analysis with the age-stratified model implementation.
# ### Load required packages
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
from ipywidgets import interact,fixed,FloatSlider,IntSlider,ToggleButtons
import pandas as pd
import datetime
import scipy
from scipy.integrate import odeint
import matplotlib.dates as mdates
import matplotlib
import scipy.stats as st
import networkx # to install networkx in your environment: conda install networkx
from covid19model.models import models
from covid19model.data import google
from covid19model.data import sciensano
from covid19model.data import polymod
from covid19model.data import parameters
from covid19model.visualization.output import population_status, infected
from covid19model.visualization.optimization import plot_fit
# OPTIONAL: Load the "autoreload" extension so that package code can change
# %load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
# %autoreload 2
import math
# ### Load interaction matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = polymod.get_interaction_matrices()
# ### Load parameter values for age-stratified deterministic model and adjust for stochastic model
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = polymod.get_interaction_matrices()
params = parameters.get_COVID19_SEIRD_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Some required parameters differ from the loaded defaults; the parameter
# loading function should be adapted in the future to provide them directly.
params.update({'theta': 0,
               'beta':0.035,
               'Nc': Nc_total}) # no testing policy (theta=0), fixed beta, total contact matrix
# Drop parameters the stochastic model does not accept.
# NOTE(review): presumably COVID19_SEIRD_sto rejects unknown parameters — confirm.
del params['totalTests']
del params['dq']
del params['psi_FP']
del params['psi_PP']
# ### Initialise model
levels = initN.size
initial_states = {'S': initN, 'E': np.ones(levels)}
model = models.COVID19_SEIRD_sto(initial_states, params)
# ### Scrape high-level Sciensano data
df_sciensano = sciensano.get_sciensano_COVID19_data(update=False)
df_sciensano.head()
# ### Example simulation
# Create checkpoints dictionary
chk = {'time': [45],
'Nc': [0.3*Nc_home]
}
# Run a checkpointed simulation
out=model.sim(100,checkpoints=chk)
# Visualise the number of pre-symptomatic infections over all ages
plt.plot(out["time"],out["I"].sum(dim="stratification"))
# Built-in function to visualise the number of patients in hospitals, in ICU and deaths
infected(out)
# ### Performing a calibration
#
# Use the function fit_pso to obtain a good first estimate for the MCMC sampler.
# +
from covid19model.optimization import MCMC
# define dataset: first week of daily hospital admissions
data=[df_sciensano["H_in"].values[:7]]
# set optimisation settings
parNames = ['sigma_ICU','extraTime','beta'] # must be a list!
states = [["H_in"]]
bounds=((1,50),(20,80),(0.03,0.06)) # one (min, max) pair per parameter
# run optimisation (particle swarm search for a good MCMC starting point)
theta = MCMC.fit_pso(model,data,parNames,states,bounds,maxiter=30,popsize=100)
# -
# Adopt the PSO optimum: theta = [sigma_ICU, extraTime, beta]
model.extraTime = int(round(theta[1]))
model.parameters.update({
    'beta': theta[2],
}
)
plot_fit(model,data,'15-03-2020',states)
# Initialize and run the sampler
model.extraTime = int(round(theta[1]))
# Keep only [sigma_ICU, beta] for the MCMC step below.
theta = [theta[0],theta[2]]
print(model.extraTime,theta)
# +
import emcee
from covid19model.optimization import objective_fcns
parNames = ['sigma_ICU','beta'] # must be a list!
states = [["ICU"]]
bounds=((1,100),(0.01,0.10))
# Initialise 4 walkers in a small Gaussian ball around the PSO optimum.
pos = theta + [5, 1e-2 ]* np.random.randn(4, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_fcns.log_probability,
                args=(model,bounds,data,states,parNames))
sampler.run_mcmc(pos, 300, progress=True);
# +
# Trace plots: one panel per sampled parameter, all walkers overlaid.
fig, axes = plt.subplots(2, figsize=(10, 7), sharex=True)
samples = sampler.get_chain()
labels = ["sigma2_ICU","beta"]
for i in range(ndim):
    ax = axes[i]
    ax.plot(samples[:, :, i], "k", alpha=0.3)
    ax.set_xlim(0, len(samples))
    ax.set_ylabel(labels[i])
    #ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number");
# -
# NOTE(review): emcee raises an error when the chain (300 steps here) is too
# short to estimate the autocorrelation time reliably.
sampler.get_autocorr_time()
# +
import corner

# NOTE: the sampler above ran for only 300 steps, so discarding 500 samples as
# burn-in (the previous discard=500) left an empty flat chain and an empty
# corner plot. Discard a burn-in shorter than the chain length instead.
flat_samples = sampler.get_chain(discard=100, thin=1, flat=True)

fig = corner.corner(
    flat_samples, labels=labels,
);
# -
# To do: quick visualisation of the fit + samples. Can we make extraTime a non-integer variable?
# # Notebook works till here
# ### Calibration of the hospitalization parameters
# #### Calibrate $d_c$, $d_{icu}$ and $d_{icu, rec}$ to the new hospitalizations and hospital discharges
# +
sigma = 0.40
# Lockdown contact matrix: damped home contacts plus partially maintained
# work and transport contacts.
Nc = 0.2*Nc_home+sigma*((1-0.70)*Nc_work+(1-0.70)*Nc_transport)
# Create a dictionary of past policies
chk = {'t': [model.extraTime],
       'Nc': [Nc]
      }
# NOTE(review): H_in, H_out, ICU_tot and H_tot_cumsum are not defined anywhere
# in this notebook — presumably loaded in a removed cell. Verify before running.
data=[np.transpose(H_in[:,0:50]),np.transpose(H_out[:,0:50]),np.transpose(ICU_tot[:,0:50]),np.transpose(H_tot_cumsum[:,0:50])]
# set optimisation settings
parNames = ['dc','dICU','dICUrec'] # must be a list!
# Indices into the model-output tuple corresponding to each fitted dataset.
positions = [np.array([14]),np.array([15]),np.array([6]),np.array([5,6])] # must be a list!
bounds=((5,13),(6,15),(1,14)) # must be a list!
weights = np.array([10,10,1,1])
# run optimisation
theta = model.fit(data,parNames,positions,bounds,weights,checkpoints=chk,setvar=True,maxiter=10,popsize=100)
# plot result
model.plotFit(index[0:50],data,positions,checkpoints=chk,modelClr=['red','orange','blue','black'],legendText=('H_in (model)','H_out (model)','ICU_tot (model)','H_tot (model)'),titleText='Belgium')
# +
# Length of dataset (theano shared variable so it can be fed to the ops below)
n=theano.shared(float(ICU_tot[:,0:30].size))
@as_op(itypes=[tt.dscalar,tt.dscalar,tt.dscalar,tt.dscalar], otypes=[tt.dvector])
def coupleICU_tot2COVID19MODEL(dc,dICU,dICUrec,n):
model.dc = dc
model.dICU = dICU
model.dICUrec = dICUrec
T = n+model.extraTime-1
model.sim(T,checkpoints=chk)
mdl_out = (model.sumS,model.sumE,model.sumI,model.sumA,model.sumM,model.sumCtot,model.sumICU,model.sumR,model.sumD,model.sumSQ,model.sumEQ,model.sumAQ,model.sumMQ,model.sumRQ,model.sumH_in,model.sumH_out)
positions = np.array([6])
som = 0
for idx in positions:
som = som + np.mean(mdl_out[idx],axis=1).reshape(np.mean(mdl_out[idx],axis=1).size,1)
return som[int(model.extraTime):].flatten()
# Define coupling function of pyMC3-Hospital
@as_op(itypes=[tt.dscalar,tt.dscalar,tt.dscalar,tt.dscalar], otypes=[tt.dvector])
def coupleH_tot2COVID19MODEL(dc,dICU,dICUrec,n):
model.dc = dc
model.dICU = dICU
model.dICUrec = dICUrec
T = n+model.extraTime-1
model.sim(T,checkpoints=chk)
mdl_out = (model.sumS,model.sumE,model.sumI,model.sumA,model.sumM,model.sumCtot,model.sumICU,model.sumR,model.sumD,model.sumSQ,model.sumEQ,model.sumAQ,model.sumMQ,model.sumRQ,model.sumH_in,model.sumH_out)
positions = np.array([5,6])
som = 0
for idx in positions:
som = som + np.mean(mdl_out[idx],axis=1).reshape(np.mean(mdl_out[idx],axis=1).size,1)
return som[int(model.extraTime):].flatten()
# Define coupling function of pyMC3-Hospital
@as_op(itypes=[tt.dscalar,tt.dscalar,tt.dscalar,tt.dscalar], otypes=[tt.dvector])
def coupleH_in2COVID19MODEL(dc,dICU,dICUrec,n):
model.dc = dc
model.dICU = dICU
model.dICUrec = dICUrec
T = n+model.extraTime-1
model.sim(T,checkpoints=chk)
mdl_out = (model.sumS,model.sumE,model.sumI,model.sumA,model.sumM,model.sumCtot,model.sumICU,model.sumR,model.sumD,model.sumSQ,model.sumEQ,model.sumAQ,model.sumMQ,model.sumRQ,model.sumH_in,model.sumH_out)
positions = np.array([14])
som = 0
for idx in positions:
som = som + np.mean(mdl_out[idx],axis=1).reshape(np.mean(mdl_out[idx],axis=1).size,1)
return som[int(model.extraTime):].flatten()
# Define coupling function of pyMC3-Hospital
@as_op(itypes=[tt.dscalar,tt.dscalar,tt.dscalar,tt.dscalar], otypes=[tt.dvector])
def coupleH_out2COVID19MODEL(dc,dICU,dICUrec,n):
model.dc = dc
model.dICU = dICU
model.dICUrec = dICUrec
T = n+model.extraTime-1
model.sim(T,checkpoints=chk)
mdl_out = (model.sumS,model.sumE,model.sumI,model.sumA,model.sumM,model.sumCtot,model.sumICU,model.sumR,model.sumD,model.sumSQ,model.sumEQ,model.sumAQ,model.sumMQ,model.sumRQ,model.sumH_in,model.sumH_out)
positions = np.array([15])
som = 0
for idx in positions:
som = som + np.mean(mdl_out[idx],axis=1).reshape(np.mean(mdl_out[idx],axis=1).size,1)
return som[int(model.extraTime):].flatten()
# Define prior distributions of parameters
with pm.Model() as COVID19MODEL:
    db = pm.backends.Text('test')  # text backend: trace is written to ./test
    BoundedNormal = pm.Bound(pm.Normal, lower=1.0)
    # Priors for unknown model parameters (truncated below at 1 day)
    dc = BoundedNormal('dc', mu=model.dc, sigma=1)
    dICU = BoundedNormal('dICU', mu=model.dICU, sigma=1)
    dICUrec = BoundedNormal('dICUrec', mu=model.dICUrec, sigma=1)
    # Observation-noise priors, one per fitted time series
    sigma_ICU_tot = pm.HalfNormal('sigma_ICU_tot', sigma=10)
    sigma_H_tot = pm.HalfNormal('sigma_H_tot', sigma=10)
    sigma_H_in = pm.HalfNormal('sigma_H_in', sigma=10)
    sigma_H_out = pm.HalfNormal('sigma_H_out', sigma=10)
    # Deterministic model predictions via the theano ops defined above
    mu_ICU_tot = coupleICU_tot2COVID19MODEL(dc,dICU,dICUrec,n)
    mu_H_tot = coupleH_tot2COVID19MODEL(dc,dICU,dICUrec,n)
    mu_H_in = coupleH_in2COVID19MODEL(dc,dICU,dICUrec,n)
    mu_H_out = coupleH_out2COVID19MODEL(dc,dICU,dICUrec,n)
    # Likelihood (sampling distribution) of observations
    ICU_tot_obs = pm.Normal('ICU_tot_obs', mu=mu_ICU_tot, sigma=sigma_ICU_tot, observed=ICU_tot[:,0:30].flatten())
    H_tot_obs = pm.Normal('H_tot_obs', mu=mu_H_tot, sigma=sigma_H_tot, observed=H_tot[:,0:30].flatten())
    H_in_obs = pm.Normal('H_in_obs', mu=mu_H_in, sigma=sigma_H_in, observed=H_in[:,0:30].flatten())
    H_out_obs = pm.Normal('H_out_obs', mu=mu_H_out, sigma=sigma_H_out, observed=H_out[:,0:30].flatten())
# -
map_estimate = pm.find_MAP(model=COVID19MODEL, method='L-BFGS-B',tol=1e-5)
map_estimate
with COVID19MODEL:
    # draw 2 posterior samples (smoke test; increase for a real run)
    trace = pm.sample(2,start=map_estimate,step=pm.Slice(),cores=8,trace=db)
# +
# Collect the sampled hospitalisation parameters into plain numpy arrays.
# NOTE: 'beta' was removed here — it is not a free parameter of COVID19MODEL
# (only dc, dICU, dICUrec and the sigma_* noise terms are sampled), so
# trace['beta'] raised a KeyError.
tracedict = {
    'dc': np.asarray(trace['dc']),
    'dICU': np.asarray(trace['dICU']),
    'dICUrec': np.asarray(trace['dICUrec'])
}
# save a copy in the raw folder
# NOTE(review): __file__ is undefined inside a notebook kernel — this only
# works when the file is executed as a script. TODO confirm intended usage.
abs_dir = os.path.dirname(__file__)
rel_dir = os.path.join(abs_dir, '../../data/interim/model_parameters/trace.csv')
# Fixed NameError: the variable defined above is rel_dir, not reldir.
(pd.DataFrame.from_dict(data=tracedict, orient='index')
   .to_csv(rel_dir, header=False))
# -
with COVID19MODEL:
    # draw 10 posterior samples
    trace = pm.sample(10,start=map_estimate,step=pm.Slice(),cores=1)
pm.traceplot(trace,varnames=['dc','dICU','dICUrec'])
tracedict_others = {
    'dc': np.asarray(trace['dc']),
    'dICU': np.asarray(trace['dICU']),
    'dICUrec': np.asarray(trace['dICUrec'])
}
# ### Scenarios
#
# Use the code snippet below to see the correspondence between `'t'` in the `pastPolicy` dictionary and the actual date.
# Define data as a list containing data timeseries
data=[np.transpose(ICU_tot),np.transpose(H_tot)]
print(index[2],index[50],index[57],index[-1])
# The interaction matrices of the 2008 study by Mossong were gathered under a business-as-usual scenario. It is thus not possible to use the interaction matrices without doing a correction for social distancing. Even when using only the interactions at home (`Nc_home`), the virus cannot be stopped.
sigma = 1
# Stringent measures were taken in the evening of March 17th, which corresponds to time-index 3, however, the measures are only 'enforced' on day 8 in the `pastPolicy` dictionary. Why? The change in human behaviour was very gradual, it took between 10-14 days after March 17th before everyone was working at home (see the Google mobility report). In other words, measures were taken on March 17th, but obedience for these measures was gradual, like a ramp. However, in the model we make a step-wise change. The obedience to measures can be addressed in future work.
# Another important issue to address is the home interaction matrix `Nc_home`. All contacts in these matrices are still assumed to be random, during a lockdown, the interactions at home should somehow be corrected for the 'bubble' effect. Since the average household size in Belgium is 2 people, I correct the `Nc_home` matrix with a factor 1/2.
# +
sigma = 0.6
# Create a dictionary of past policies
# NOTE(review): the text above says Nc_home is corrected by 1/2, but the code
# uses a factor 0.3 — confirm which is intended.
pastPolicy = {'t': [1,50,64],
              'Nc': [0.3*Nc_home+sigma*((1-0.70)*Nc_work+(1-0.70)*Nc_transport),
                     0.3*Nc_home+sigma*((1-0.30)*Nc_work+(1-0.40)*Nc_transport+(1-0.75)*Nc_schools),
                     0.3*Nc_home+sigma*((1-0.30)*Nc_work+(1-0.40)*Nc_transport+Nc_schools),
                     ]
              }
# -
# Create a dictionary of future policies
futurePolicy = {'t': [21], # May 21th, June 4th
                'Nc': [0.3*Nc_home+sigma*((1-0.30)*Nc_work+(1-0.40)*Nc_transport)],
               }
positions=[np.array([6]),np.array([5,6])]
# Run realTimeScenario
# NOTE(review): startdate is not defined in this notebook — verify before running.
model.realTimeScenario(startdate,data,positions,pastPolicy,futurePolicy=futurePolicy,trace={'beta': [model.beta]},T_extra=124,
                       modelClr=['red','orange'],legendText=('ICU (model)','Hospital (model)','ICU (data)','Hospital (data)'),
                       titleText='Belgium',filename='test.svg')
# Save your figures by altering the variable `filename = xxxxx.svg`!
| notebooks/scratch/0.1-twallema-scenario-wo-elderlyhomes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('whitegrid')
# %matplotlib inline
# Detector ROC exported by the evaluation script: one row per score threshold.
roc = pd.read_csv('discrete-ROC-step-240000.txt', sep=' ', header=None)
roc.columns = ['tpr', 'fp', 'threshold']
# +
def plot_roc():
    """Plot the discrete-score ROC curve: TPR against absolute false-positive count."""
    _, ax = plt.subplots(nrows=1, ncols=1, figsize=(7, 4), dpi=120)
    ax.plot(roc.fp, roc.tpr, c='r', linewidth=2.0)
    ax.set_title('Discrete Score ROC')
    ax.set_xlim([0, 2000.0])
    ax.set_ylim([0.6, 1.0])
    ax.set_xlabel('False Positives')
    ax.set_ylabel('True Positive Rate')
plot_roc()
# -
import scipy.io
# WIDER FACE evaluation output: precision-recall curve for the 'easy' split.
mat = scipy.io.loadmat('eval_tools/plot/baselines/Val/setting_int/LightHeadRCNN/wider_pr_info_LightHeadRCNN_easy_val.mat')
pr = mat['pr_cruve']  # (sic) the key really is spelled 'pr_cruve' in the .mat file
rec = pr[:, 1]
prec = pr[:, 0]
# NOTE(review): compute_ap is defined in a later cell; executing this file
# top-to-bottom as a script would raise NameError here.
compute_ap(prec, rec)
def voc_ap(rec, prec):
    """Pascal VOC-style average precision.

    Pads the PR curve with sentinel points, makes precision monotonically
    non-increasing (the precision "envelope"), and integrates it over the
    recall steps. ``rec`` must be sorted in increasing order.

    The original body ended in two untranslated MATLAB lines
    (``i=find(mrec(2:end)~=mrec(1:end-1))+1;`` ...), which are a Python
    syntax error; they are translated to numpy below and the result is
    now actually returned.
    """
    mrec = np.concatenate([[0.0], rec, [1.0]])
    mpre = np.concatenate([[0.0], prec, [0.0]])
    # Precision envelope: mpre[i] = max(mpre[i:]) for every i.
    for i in reversed(range(len(mpre) - 1)):
        mpre[i] = max(mpre[i], mpre[i + 1])
    # Indices where recall changes; accumulate precision * recall-step there.
    idx = np.where(mrec[1:] != mrec[:-1])[0] + 1
    return float(np.sum((mrec[idx] - mrec[idx - 1]) * mpre[idx]))
def compute_ap(precision, recall):
    """Average precision via the rectangle rule.

    Integrates the PR curve as sum(p_i * (r_i - r_{i-1})), taking r_0 = 0.
    Assumes ``recall`` is sorted in increasing order.
    """
    recall = list(recall)
    previous = [0.0] + recall[:-1]
    return float(sum(p * (r - r_prev)
                     for p, r, r_prev in zip(precision, recall, previous)))
# Quick visual check: precision (column 0) on x, recall (column 1) on y.
plt.plot(pr[:, 0], pr[:, 1])
| evaluation/plot_roc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Space management
# This notebook contains steps and code to demonstrate how to manage spaces in context of Watson Machine Learning service. It facilitates [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. It introduces commands for creating, updating & deleting spaces, getting list and detailed information about them.
#
# Some familiarity with Python is helpful. This notebook uses Python 3.
# ## Learning goals
#
# The learning goals of this notebook are:
#
# - Create new space
# - List existing spaces
# - Get spaces details
# - Set default space
# - Update existing space
# - Delete space
#
#
# ## Contents
#
# This notebook contains the following parts:
#
# 1. [Set up the environment](#setup)
# 2. [Create new space](#create_space)
# 3. [List all existing spaces](#list_space)
# 4. [Get details about space](#get_space)
# 5. [Set default space](#set_space)
# 6. [Update space metadata](#update_space)
# 7. [Delete existing space](#delete_space)
# 8. [Summary and next steps](#summary)
#
# <a id="setup"></a>
# ## 1. Set up the environment
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# - Contact your Cloud Pak for Data administrator and ask for your account credentials
# ### Connection to WML
#
# Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`.
# Platform credentials — replace all placeholders before running.
username = 'PASTE YOUR USERNAME HERE'
password = '<PASSWORD>'  # anonymised placeholder; never commit real credentials
url = 'PASTE THE PLATFORM URL HERE'
wml_credentials = {
    "username": username,
    "password": password,
    "url": url,
    "instance_id": 'openshift',
    "version": '3.5'  # Cloud Pak for Data version
}
# ### Install and import the `ibm-watson-machine-learning` package
# **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
# !pip install -U ibm-watson-machine-learning
# +
from ibm_watson_machine_learning import APIClient

# Authenticated client used by all space-management calls below.
client = APIClient(wml_credentials)
# -
# <a id="create_space"></a>
# ## 2. Create new space
#
# First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
#
# - Click New Deployment Space
# - Create an empty space
# - Go to space `Settings` tab
# - Copy `space_id` and paste it below
#
# **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb).
#
# **Action**: Assign space ID below
# + pycharm={"name": "#%%\n"}
# Metadata for the new deployment space — replace the placeholders.
space_metadata = {
    'name': 'PUT_YOUR_SPACE_NAME_HERE',
    'description': 'PUT_YOUR_DESCRIPTION_HERE',
}
# + [markdown] pycharm={"name": "#%% md\n"}
# Next you can create the space by executing the following cell.
# + pycharm={"name": "#%%\n"}
space_details = client.spaces.store(space_metadata)
print(space_details)
# + [markdown] pycharm={"name": "#%% md\n"}
# You can get the space ID by executing the following cell.
# + pycharm={"name": "#%%\n"}
space_id = client.spaces.get_id(space_details)
print(space_id)
# + [markdown] pycharm={"name": "#%% md\n"}
# **Tip** In order to check if the space creation is completed successfully change next cell format to code and execute it. It should return 'active'.
# + pycharm={"name": "#%% raw\n"}
client.spaces.get_details(space_id)['entity']['status']['state']
# + [markdown] pycharm={"name": "#%% md\n"}
# **Action**: If you didn't create new space in this notebook by `ibm_watson_machine_learning`, please assign space ID below and change cell format to `code`.
# + pycharm={"name": "#%% raw\n"} active=""
# space_id = 'PASTE YOUR SPACE ID HERE'
# -
# <a id="list_space"></a>
# ## 3. List all existing spaces
# + [markdown] pycharm={"name": "#%% md\n"}
# You can use the `list` method to print all existing spaces.
# + pycharm={"name": "#%%\n"}
client.spaces.list()
# -
# <a id="get_space"></a>
# ## 4. Get details about space
# You can use the `get_details` method to print details about a given space. You need to provide the `space_id` of the desired space.
client.spaces.get_details(space_id)
# <a id="set_space"></a>
# ## 5. Set default space
# + [markdown] pycharm={"name": "#%% md\n"}
# To be able to interact with all resources available in Watson Machine Learning, you need to set the **space** which you will be using.
# + pycharm={"name": "#%%\n"}
client.set.default_space(space_id)
# -
# <a id="update_space"></a>
# ## 6. Update space metadata
# + [markdown] pycharm={"name": "#%% md\n"}
# You can update your space by reassigning space metadata and executing: `client.spaces.update(space_id, space_metadata)`.
# + pycharm={"name": "#%% raw\n"}
updated_space_metadata = {
    client.spaces.ConfigurationMetaNames.NAME: "Updated space name"
}
client.spaces.update(space_id, updated_space_metadata)
# -
# <a id="delete_space"></a>
# ## 7. Delete existing space
# Irreversible: removes the space. You need to provide the space_id of the space you want to delete.
client.spaces.delete(space_id)
# <a id="summary"></a>
# ## 8. Summary and next steps
# You successfully completed this notebook! You learned how to use ibm-watson-machine-learning client for Watson Machine Learning instance space management and clean up.
#
# Check out our <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html" target="_blank" rel="noopener noreferrer">Online Documentation</a> for more samples, tutorials, documentation, how-tos, and blog posts.
# ### Authors
#
# **<NAME>**, Software Engineer at IBM. <br>
# **<NAME>**, Software Engineer at IBM.
# Copyright © 2020, 2021, 2022 IBM. This notebook and its source code are released under the terms of the MIT License.
| cpd3.5/notebooks/python_sdk/instance-management/Space management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Train with Azure Machine Learning Datasets
# Datasets are categorized into TabularDataset and FileDataset based on how users consume them in training.
# * A TabularDataset represents data in a tabular format by parsing the provided file or list of files. TabularDataset can be created from csv, tsv, parquet files, SQL query results etc. For the complete list, please visit our [documentation](https://aka.ms/tabulardataset-api-reference). It provides you with the ability to materialize the data into a pandas DataFrame.
# * A FileDataset references single or multiple files in your datastores or public urls. This provides you with the ability to download or mount the files to your compute. The files can be of any format, which enables a wider range of machine learning scenarios including deep learning.
#
# In this tutorial, you will learn how to train with Azure Machine Learning Datasets:
#
# ☑ Use Datasets directly in your training script
#
# ☑ Use Datasets to mount files to a remote compute
# ## Prerequisites
# If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first if you haven't already established your connection to the AzureML Workspace.
# +
# Check core SDK version number
import azureml.core

print('SDK version:', azureml.core.VERSION)
# -
# ## Initialize Workspace
#
# Initialize a workspace object from persisted configuration.
# +
from azureml.core import Workspace

# Reads the config.json written by ws.write_config() or downloaded from the portal.
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
# -
# ## Create Experiment
#
# **Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.
# +
experiment_name = 'train-with-datasets'

from azureml.core import Experiment
exp = Experiment(workspace=ws, name=experiment_name)
# -
# ## Create or Attach existing compute resource
# By using Azure Machine Learning Compute, a managed service, data scientists can train machine learning models on clusters of Azure virtual machines. Examples include VMs with GPU support. In this tutorial, you create Azure Machine Learning Compute as your training environment. The code below creates the compute clusters for you if they don't already exist in your workspace.
#
# **Creation of compute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace the code will skip the creation process.
# +
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
import os

# choose a name for your cluster (environment variables allow CI overrides)
compute_name = os.environ.get('AML_COMPUTE_CLUSTER_NAME', 'cpu-cluster')
compute_min_nodes = os.environ.get('AML_COMPUTE_CLUSTER_MIN_NODES', 0)
compute_max_nodes = os.environ.get('AML_COMPUTE_CLUSTER_MAX_NODES', 4)

# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6
vm_size = os.environ.get('AML_COMPUTE_CLUSTER_SKU', 'STANDARD_D2_V2')

if compute_name in ws.compute_targets:
    compute_target = ws.compute_targets[compute_name]
    # NOTE(review): `type(...) is AmlCompute` deliberately excludes subclasses;
    # isinstance would be the conventional check — confirm the strictness is intended.
    if compute_target and type(compute_target) is AmlCompute:
        print('found compute target. just use it. ' + compute_name)
else:
    print('creating a new compute target...')
    provisioning_config = AmlCompute.provisioning_configuration(vm_size=vm_size,
                                                                min_nodes=compute_min_nodes,
                                                                max_nodes=compute_max_nodes)

    # create the cluster
    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)

    # can poll for a minimum number of nodes and for a specific timeout.
    # if no min node count is provided it will use the scale settings for the cluster
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)

    # For a more detailed view of current AmlCompute status, use get_status()
    print(compute_target.get_status().serialize())
# -
# ## Use Datasets directly in training
#
# ### Create a TabularDataset
# By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred.
#
# Every workspace comes with a default [datastore](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data) (and you can register more) which is backed by the Azure blob storage account associated with the workspace. We can use it to transfer data from local to the cloud, and create Dataset from it.We will now upload the [Titanic data](./train-dataset/Titanic.csv) to the default datastore (blob) within your workspace.
datastore = ws.get_default_datastore()
datastore.upload_files(files = ['./train-dataset/Titanic.csv'],
target_path = 'train-dataset/tabular/',
overwrite = True,
show_progress = True)
# Then we will create an unregistered TabularDataset pointing to the path in the datastore. You can also create a Dataset from multiple paths. [learn more](https://aka.ms/azureml/howto/createdatasets)
# +
from azureml.core import Dataset
dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'train-dataset/tabular/Titanic.csv')])
# preview the first 3 rows of the dataset
dataset.take(3).to_pandas_dataframe()
# -
# ### Create a training script
#
# To submit the job to the cluster, first create a training script. Run the following code to create the training script called `train_titanic.py` in the script_folder.
script_folder = os.path.join(os.getcwd(), 'train-dataset')
# +
# %%writefile $script_folder/train_titanic.py

# Training script executed on the remote compute: fits a decision tree on the
# Titanic TabularDataset passed in as the named input 'titanic'.
import os

from azureml.core import Dataset, Run
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib

run = Run.get_context()
# get input dataset by name
dataset = run.input_datasets['titanic']
df = dataset.to_pandas_dataframe()

# Features and target used for training.
# NOTE(review): assumes 'Sex' is already numerically encoded in this
# Titanic.csv — sklearn trees cannot fit raw string columns. TODO confirm.
x_col = ['Pclass', 'Sex', 'SibSp', 'Parch']
y_col = ['Survived']
x_df = df.loc[:, x_col]
y_df = df.loc[:, y_col]

# 80/20 train/test split with a fixed seed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(x_df, y_df, test_size=0.2, random_state=223)

data = {'train': {'X': x_train, 'y': y_train},
        'test': {'X': x_test, 'y': y_test}}

clf = DecisionTreeClassifier().fit(data['train']['X'], data['train']['y'])
model_file_name = 'decision_tree.pkl'

print('Accuracy of Decision Tree classifier on training set: {:.2f}'.format(clf.score(x_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'.format(clf.score(x_test, y_test)))

# 'outputs' is auto-uploaded to the run record by Azure ML.
os.makedirs('./outputs', exist_ok=True)
# joblib.dump opens the target file itself; the previous
# `with open(model_file_name, 'wb')` wrapper never used its handle and only
# left an empty stray file in the working directory.
joblib.dump(value=clf, filename='outputs/' + model_file_name)
# -
# ### Configure and use Datasets as the input to Estimator
# You can ask the system to build a conda environment based on your dependency specification. Once the environment is built, and if you don't change your dependencies, it will be reused in subsequent runs.
# +
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

conda_env = Environment('conda-env')
# Fixed malformed pip version specifier: '>=1.1.' (trailing dot) is not a
# valid PEP 440 version; use '>=1.1.0'.
conda_env.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk','azureml-dataprep[pandas,fuse]>=1.1.0','scikit-learn'])
# -
# An estimator object is used to submit the run. Azure Machine Learning has pre-configured estimators for common machine learning frameworks, as well as generic Estimator. Create a generic estimator for by specifying
#
# * The name of the estimator object, `est`
# * The directory that contains your scripts. All the files in this directory are uploaded into the cluster nodes for execution.
# * The training script name, train_titanic.py
# * The input Dataset for training
# * The compute target. In this case you will use the AmlCompute you created
# * The environment definition for the experiment
# +
from azureml.train.estimator import Estimator

est = Estimator(source_directory=script_folder,
                entry_script='train_titanic.py',
                # pass dataset object as an input with name 'titanic'
                inputs=[dataset.as_named_input('titanic')],
                compute_target=compute_target,
                environment_definition= conda_env)
# -
# ### Submit job to run
# Submit the estimator to the Azure ML experiment to kick off the execution.
run = exp.submit(est)
# +
from azureml.widgets import RunDetails

# monitor the run (interactive widget; requires a notebook front-end)
RunDetails(run).show()
# -
# ## Use Datasets to mount files to a remote compute
#
# You can use the Dataset object to mount or download files referred by it. When you mount a file system, you attach that file system to a directory (mount point) and make it available to the system. Because mounting load files at the time of processing, it is usually faster than download.<br>
# Note: mounting is only available for Linux-based compute (DSVM/VM, AMLCompute, HDInsights).
# ### Upload data files into datastore
# We will first load diabetes data from `scikit-learn` to the train-dataset folder.
# +
from sklearn.datasets import load_diabetes
import numpy as np

# Save features/labels locally as .npy files so they can be uploaded.
training_data = load_diabetes()
np.save(file='train-dataset/features.npy', arr=training_data['data'])
np.save(file='train-dataset/labels.npy', arr=training_data['target'])
# -
# Now let's upload the 2 files into the default datastore under a path named `diabetes`:
datastore.upload_files(['train-dataset/features.npy', 'train-dataset/labels.npy'], target_path='diabetes', overwrite=True)
# ### Create a FileDataset
# +
from azureml.core.dataset import Dataset

dataset = Dataset.File.from_files(path = [(datastore, 'diabetes/')])

# see a list of files referenced by dataset
dataset.to_path()
# -
# ### Create a training script
#
# To submit the job to the cluster, first create a training script. Run the following code to create the training script called `train_diabetes.py` in the script_folder.
# +
# %%writefile $script_folder/train_diabetes.py

# Training script executed on the remote compute: sweeps the Ridge alpha
# over [0, 1) and logs alpha/MSE pairs; the FileDataset is mounted and its
# path exposed via run.input_datasets['diabetes'].
import os
import glob

from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
from sklearn.externals import joblib

import numpy as np

# 'outputs' is auto-uploaded to the run record by Azure ML.
os.makedirs('./outputs', exist_ok=True)

run = Run.get_context()
base_path = run.input_datasets['diabetes']

# Locate the mounted .npy files regardless of nesting depth.
X = np.load(glob.glob(os.path.join(base_path, '**/features.npy'), recursive=True)[0])
y = np.load(glob.glob(os.path.join(base_path, '**/labels.npy'), recursive=True)[0])

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)
data = {'train': {'X': X_train, 'y': y_train},
        'test': {'X': X_test, 'y': y_test}}

# list of numbers from 0.0 to 1.0 with a 0.05 interval
alphas = np.arange(0.0, 1.0, 0.05)

for alpha in alphas:
    # use Ridge algorithm to create a regression model
    reg = Ridge(alpha=alpha)
    reg.fit(data['train']['X'], data['train']['y'])

    preds = reg.predict(data['test']['X'])
    mse = mean_squared_error(preds, data['test']['y'])
    run.log('alpha', alpha)
    run.log('mse', mse)

    model_file_name = 'ridge_{0:.2f}.pkl'.format(alpha)
    # joblib.dump opens the target file itself; the previous
    # `with open(model_file_name, 'wb')` wrapper never used its handle and
    # only left an empty stray file in the working directory per alpha.
    joblib.dump(value=reg, filename='outputs/' + model_file_name)

    print('alpha is {0:.2f}, and mse is {1:0.2f}'.format(alpha, mse))
# -
# ### Configure & Run
# +
from azureml.core import ScriptRunConfig

src = ScriptRunConfig(source_directory=script_folder,
                      script='train_diabetes.py',
                      # to mount the dataset on the remote compute and pass the mounted path as an argument to the training script
                      arguments =[dataset.as_named_input('diabetes').as_mount('tmp/dataset')])

src.run_config.framework = 'python'
src.run_config.environment = conda_env
src.run_config.target = compute_target.name
# +
run = exp.submit(config=src)

# monitor the run
RunDetails(run).show()
# -
# ### Display run results
# You now have a model trained on a remote cluster. Retrieve all the metrics logged during the run, including the accuracy of the model:
# NOTE(review): get_metrics() is called twice back to back; the second call
# alone would suffice.
print(run.get_metrics())
metrics = run.get_metrics()
# ### Register Datasets
# Use the register() method to register datasets to your workspace so they can be shared with others, reused across various experiments, and referred to by name in your training script.
dataset = dataset.register(workspace = ws,
                           name = 'diabetes dataset',
                           description='training dataset',
                           create_new_version=True)
# ## Register models with Datasets
# The last step in the training script wrote the model files in a directory named `outputs` in the VM of the cluster where the job is executed. `outputs` is a special directory in that all content in this directory is automatically uploaded to your workspace. This content appears in the run record in the experiment under your workspace. Hence, the model file is now also available in your workspace.
#
# You can register models with Datasets for reproducibility and auditing purpose.
# +
# find the index where MSE is the smallest
indices = list(range(0, len(metrics['mse'])))
min_mse_index = min(indices, key=lambda x: metrics['mse'][x])

print('When alpha is {1:0.2f}, we have min MSE {0:0.2f}.'.format(
    metrics['mse'][min_mse_index],
    metrics['alpha'][min_mse_index]
))
# -
# find the best model
best_alpha = metrics['alpha'][min_mse_index]
model_file_name = 'ridge_{0:.2f}.pkl'.format(best_alpha)
# register the best model with the input dataset
model = run.register_model(model_name='sklearn_diabetes', model_path=os.path.join('outputs', model_file_name),
                           datasets =[('training data',dataset)])
| how-to-use-azureml/work-with-data/datasets/datasets-tutorial/train-with-datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Reading in and wrangling a completely different dataset to use for the Drug Use Predictor
#
# Notebook Author: <NAME>\
# Georgetown University School of Continuing Studies, Certificate in Data Science, Cohort 11 (Spring 2018)
#
# Data Source:
# - Johnson's IPIP-NEO data repository
# - Contributors: <NAME>
# - Date created: 2015-09-22 04:21 PM | Last Updated: 2015-11-04 06:25 PM
# - Description: This project makes available information about International Personality Item Pool (IPIP) versions of the NEO Personality Inventory.
# - URL: https://osf.io/sxeq5/
#
# Specific dataset used: Data from the Johnson (2005) JRP study and documentation for those files. File ipip20993.dat contains 20,993 cases of item responses to the IPIP-NEO-300 in ASCII format. The file also contains facet and domain scale scores and two measures of intra-individual reliability described in the publication. Variables are listed at the top of the file. ipip20993.doc is a Word.doc description of the dataset
#
# Note that, prior to reading into this Notebook, I opened the ASCII file in Excel, took the top 3K some instances and discarded the rest. I deleted 300+ columns I didn't need, added an ID column, and adopted the IMMODERA and EXCITE columns as stand-ins for "Impulsiveness" and "Sensation Seeking". The columns will be renamed below.
# +
import numpy as np
import pandas as pd
from numpy import random
from random import randint
pd.options.mode.chained_assignment = None # get rid of this pesky warning; default='warn'
# -
# ### Project Workflow
#
# This Notebook moves through the following steps to ingest, sort, and wrangle the dataset so it fits into the Drug Use Predictor model:
# 1. Ingest the required xlsx data into a dataframe
# 2. Wrangle the data to provide the right format and column structure, keeping the age, gender and personality test scores
# 3. Use a random number generator to any features needed for the Drug Use Predictor that don't exist in the Johnson dataset
#
# ### Data Ingestion
#
# Grab the dataset from the data subdirectory
# Load the pre-trimmed Johnson IPIP-NEO spreadsheet (columns were reduced and an
# ID column added in Excel before this notebook; see the markdown notes above).
data = pd.read_excel('data/Johnson_ipip3K_partial.xlsx')
data.head()
# There's an order of magnitude difference in the scale of the numbers and df needs normalizing
import sklearn
from sklearn import preprocessing
# +
# I have learned that preprocessing strips the column headings, so create a working array
X = np.array(data)
X = X.astype(np.float64)
# Scale the data in the range of the UCI dataset
# NOTE(review): this rescales every column, including ID -- presumably harmless
# since ID is not a model feature, but confirm downstream use.
X = preprocessing.minmax_scale(X, feature_range=(-3,3))
# Make a df again and restore the headings
df = pd.DataFrame(X, columns = data.columns)
print(df.describe())
# +
# Aaack! How do I avoid scaling the index? I couldn't find the answer through much googling
# Below is the features list I need. So, I'll have to invent data for the missing columns
# Note, this isn't the same order as in the UCI database but that shouldn't matter
# Column layout expected by the Drug Use Predictor model; features absent from
# the Johnson dataset are synthesised further below.
FEATURES = [
    "ID",        # May not be used to identify respondents
    "Age",       # 18-24, 25-34, 35-44, 45-54, 55-64, 65+
    "Gender",    # Female, Male
    "NS",        # Neuroticism Score
    "ES",        # Extroversion Score
    "OS",        # Openness to experience Score
    "AS",        # Agreeableness Score
    "CS",        # Conscientiousness Score
    "Imp",       # Impulsivity, Lickert scale with -3 = least impulsive, +3 = most impulsive
    "SS",        # Sensation seeking, part of the Impulsiveness assessment, -3 < score > +3
    "Cntry",     # Country: AUS, CAN, NZ, Other, IRE, UK, USA
    "Educ",      # Left before age 16, left @ 16, @ 17, @ 18, some college, prof cert, univ degree, masters, doctorate
    "Ethn",      # Ethnicity: Asian, Black, Mixed Bla/As, Mixed Whi/As, Mixed Whi/Bla, Other
    "Alcohol",   # Class of alcohol consumption
    "Caffeine",  # Class of caffeine consumption
    "Choco",     # Class of chocolate consumption
    "Nicotine",  # Class of nicotine consumption
]
print("{} instances with {} features\n".format(*df.shape))
# +
# Rename the two columns I'm adopting to match the Drug Use Predictor format, and correct upper/lower of others
df.rename(columns={'IMMODERA': 'Imp', 'EXCITE': 'SS', 'AGE': 'Age', 'GENDER': 'Gender'}, inplace=True)

# Take a look at the data again
print(df.describe())
# +
# I'll make all these people Americans for Cntry = 3
df['Cntry'] = 3

# Synthesise the features the Johnson dataset lacks.  Drawing each whole column
# at once (size=len(df)) replaces the original row-by-row .loc loop, which made
# one slow pandas indexing call per cell; the per-row values are still
# independent draws from the same distribution.
# NOTE(review): np.random.normal(-3, 3) is mean=-3, std=3 -- values are NOT
# confined to the [-3, 3] range of the scaled columns; kept to match the
# original behaviour, but confirm the intent was not a uniform(-3, 3) draw.
for col in ('Educ', 'Ethn', 'Alcohol', 'Caffeine', 'Choco', 'Nicotine'):
    df[col] = np.random.normal(-3, 3, size=len(df))

print(df.describe())
# -
# Now, save this df in a file that can be read by the Drug Use Predictor
df.to_csv('data/Johnny_data_out.csv', index=False)
| examples/melissabphd/Reading & Wrangling Diff Dataset for Drug Use Predictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pointnet]
# language: python
# name: conda-env-pointnet-py
# ---
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import math
import sys
sys.path.append("..")
import pointnet.model as model
# +
import torch
import torch.utils.data as data
import os
import os.path
#from plyfile import PlyData, PlyElement
from plyfile import PlyData
import numpy as np
def load_ply(file_name, with_faces=False, with_color=False):
    """Read a .ply file and return its vertices as an (N, 3) array.

    ``with_faces`` and ``with_color`` are accepted for API compatibility but
    ignored: only the x/y/z vertex coordinates are loaded.
    """
    vertices = PlyData.read(file_name)['vertex']
    xyz = np.column_stack((vertices['x'], vertices['y'], vertices['z']))
    return xyz
def load_list(root, train = 'train'):
    """Collect (partial-input path, complete-target path) pairs of .ply files.

    Expected layout: <root>/<train|test>/<split>_0 holds the complete clouds,
    while <split>_1..N hold progressively partial inputs.  Returns one list of
    (input_path, target_path) tuples per partial split; pairing is by sorted
    filename order (the j-th input of each split pairs with the j-th target).
    """
    input_dir = []
    rootdir = root
    #rootdir = '/home/cdi0/data/shape_net_core_uniform_samples_2048_split/'
    if train =='train':
        rootdir = os.path.join(rootdir, train)
        for dirs in os.listdir(rootdir):
            if dirs == 'train_0':
                target_dir = os.path.join(rootdir, dirs)  # complete clouds
            elif dirs.startswith('train'):
                input_dir.append(os.path.join(rootdir, dirs))
    else:
        rootdir = os.path.join(rootdir, 'test')
        for dirs in os.listdir(rootdir):
            if dirs == 'test_0':
                target_dir = os.path.join(rootdir, dirs)  # complete clouds
            elif dirs.startswith('test'):
                input_dir.append(os.path.join(rootdir, dirs))
    input_dir.sort()
    input_data_list = []
    target_data_list = []
    # Recursively gather .ply files per partial split; sort for stable pairing.
    for i in input_dir:
        lst = []
        for dirpath, dirnames, filenames in os.walk(i):
            for filename in [f for f in filenames if f.endswith(".ply")]:
                lst.append(os.path.join(dirpath, filename))
        lst.sort()
        input_data_list.append(lst)
    # NOTE(review): target_dir is unbound (NameError) if no '<split>_0'
    # directory exists under rootdir -- relies on the expected layout.
    for dirpath, dirnames, filenames in os.walk(target_dir):
        for filename in [f for f in filenames if f.endswith(".ply")]:
            target_data_list.append(os.path.join(dirpath, filename))
    target_data_list.sort()
    # Pair the j-th input of each split with the j-th target.
    input_set_list = []
    for i in range(len(input_data_list)):
        lst = []
        for j in range(len(input_data_list[i])):
            lst.append((input_data_list[i][j], target_data_list[j]))
        input_set_list.append(lst)
    return input_set_list
class ShapeNetDataset(data.Dataset):
    """Point-cloud completion dataset over pre-split ShapeNet .ply files.

    Pairs each partial input cloud (train_1..N / test_1..N) with its complete
    target cloud (train_0 / test_0), as produced by load_list().  __getitem__
    returns a 4-channel input (xyz + visibility flag), the complete target
    cloud, and the boolean visibility mask.
    """
    def __init__(self, dir, train = 'train', n_points = 2048, augmentation = False, stage = 0, opt = None):
        self.root = dir
        self.loader = load_ply
        self.opt = opt
        self.train = train
        lst = []
        l = load_list(dir, self.train)
        self.l = l
        #print(l)
        # Curriculum: include the partial-input splits up to index `stage`.
        for i in range(stage+1):
            lst = lst + l[i]
        self.lst = lst
        self.loader = load_ply

    def __getitem__(self, idx):
        input_pcd, target_pcd = self.lst[idx]
        input_pcd = self.loader(input_pcd)
        target_pcd = self.loader(target_pcd)
        # Mark which target points are present in the partial input.
        # NOTE(review): np.isin tests each coordinate independently; requiring
        # all three coordinates to be "seen somewhere" (np.all over axis 1) is
        # only an approximation of exact point membership -- confirm acceptable.
        mask = np.isin(target_pcd, input_pcd)
        m = np.all(mask, axis = 1)
        # Build a 4-channel input: xyz plus a visibility flag in channel 3.
        # Missing points get small Gaussian noise instead of real coordinates.
        # NOTE(review): the sequential fill assumes the loader preserves the
        # ordering between input and target points -- confirm.
        t = np.zeros((target_pcd.shape[0],4))
        t[:,3] = m
        n = 0
        for i in range(len(m)):
            if m[i] == 1:
                t[i,:3] = input_pcd[n]
                n +=1
            else:
                t[i,:3] = np.random.randn(1,3) / 3
        input_pcd = t
        return input_pcd, target_pcd, m

    def __len__(self):
        return len(self.lst)
# -
class STN3d(nn.Module):
    """Spatial transformer: regresses a (4, 3) transform from a 4-channel cloud.

    Input is (batch, 4, n_points); output is a per-sample 4x3 matrix biased
    towards [I_3; 0] so the network starts near an identity mapping of the
    xyz channels.
    """

    def __init__(self):
        super(STN3d, self).__init__()
        # Shared per-point MLP, implemented as 1x1 convolutions.
        self.conv1 = torch.nn.Conv1d(4, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        # Regression head: 1024-d global feature -> 12 transform parameters.
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 12)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)

    def forward(self, x):
        n_batch = x.size()[0]
        # Per-point features followed by a symmetric max-pool over points.
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = F.relu(self.bn3(self.conv3(h)))
        h = torch.max(h, 2, keepdim=True)[0].view(-1, 1024)
        # Regress the 12 parameters of the 4x3 transform.
        h = F.relu(self.bn4(self.fc1(h)))
        h = F.relu(self.bn5(self.fc2(h)))
        h = self.fc3(h)
        # Identity bias: a 3x3 identity stacked over a zero row, per sample.
        iden = Variable(torch.cat((torch.eye(3).repeat(n_batch, 1, 1),
                                   torch.zeros(n_batch, 1, 3)), dim = 1))
        if h.is_cuda:
            iden = iden.cuda()
        return h.view(-1, 4, 3) + iden
class PointNetfeat(nn.Module):
    """PointNet feature extractor.

    Returns either a 1024-d global feature (``global_feat=True``) or a
    per-point 1088-d map (64-d local features concatenated with the tiled
    global feature), plus the input transform and the optional feature
    transform.
    """
    def __init__(self, global_feat = True, feature_transform = False):
        super(PointNetfeat, self).__init__()
        # NOTE(review): the STN3d defined above in this notebook consumes
        # 4-channel input and emits a (4, 3) transform, so the bmm in forward()
        # maps 4-channel points down to 3 channels before conv1 (which expects
        # 3 input channels) -- confirm that is the intent.
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        if self.feature_transform:
            # NOTE(review): STNkd is not defined in this notebook; this branch
            # would raise NameError if feature_transform=True.
            self.fstn = STNkd(k=64)

    def forward(self, x):
        n_pts = x.size()[2]
        # Predict and apply the input transform.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        if self.feature_transform:
            trans_feat = self.fstn(x)
            x = x.transpose(2,1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2,1)
        else:
            trans_feat = None
        pointfeat = x  # keep the 64-d per-point features for the dense head
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        # Symmetric function: channel-wise max over points -> global feature.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        if self.global_feat:
            return x, trans, trans_feat
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
            return torch.cat([x, pointfeat], 1), trans, trans_feat
# +
class PointNetfeat(nn.Module):
    """PointNet feature extractor (second definition).

    NOTE(review): this cell redefines the PointNetfeat declared earlier in the
    notebook; unlike that version it returns only the feature tensor (no
    transforms), which is what PointNetDenseCls below consumes -- but the
    PointNetCls above still unpacks a 3-tuple.  Confirm which definition is
    live when each consumer cell runs.
    """
    def __init__(self, global_feat = True, feature_transform = False):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        if self.feature_transform:
            # NOTE(review): STNkd is not defined in this notebook.
            self.fstn = STNkd(k=64)

    def forward(self, x):
        n_pts = x.size()[2]
        # Predict and apply the input transform.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        if self.feature_transform:
            trans_feat = self.fstn(x)
            x = x.transpose(2,1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2,1)
        else:
            trans_feat = None
        pointfeat = x  # 64-d per-point features
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        # Channel-wise max over points -> 1024-d global feature.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        if self.global_feat:
            return x
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
            return torch.cat([x, pointfeat], 1)
class PointNetCls(nn.Module):
    """PointNet classification head: global feature -> k-way log-probabilities."""
    def __init__(self, k=2, feature_transform=False):
        super(PointNetCls, self).__init__()
        self.feature_transform = feature_transform
        self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k)
        self.dropout = nn.Dropout(p=0.3)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu = nn.ReLU()

    def forward(self, x):
        # NOTE(review): this unpacking matches the first PointNetfeat in this
        # notebook (3-tuple return); the later redefinition returns only the
        # feature tensor, which would make this line raise -- confirm which
        # definition is live when this cell runs.
        x, trans, trans_feat = self.feat(x)
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.dropout(self.fc2(x))))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1), trans, trans_feat
class PointNetDenseCls(nn.Module):
    """Dense (per-point) PointNet head.

    Feeds the cloud through PointNetfeat(global_feat=False) to obtain a 1088-d
    feature per point, then a shared 1x1-conv MLP down to 3 channels, returning
    a (batch, n_points, 3) tensor -- a predicted point set, not class scores
    (the reference implementation's log-softmax is commented out).
    """
    def __init__(self, feature_transform=False):
        super(PointNetDenseCls, self).__init__()
        #self.k = k
        self.feature_transform = feature_transform
        self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform)
        # Shared per-point MLP: 1088 -> 512 -> 256 -> 128 -> 3.
        self.conv1 = torch.nn.Conv1d(1088, 512, 1)
        self.conv2 = torch.nn.Conv1d(512, 256, 1)
        self.conv3 = torch.nn.Conv1d(256, 128, 1)
        self.conv4 = torch.nn.Conv1d(128, 3, 1)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(128)

    def forward(self, x):
        batchsize = x.size()[0]
        n_pts = x.size()[2]
        # (B, 1088, N); assumes the single-return PointNetfeat redefinition.
        # (Removed a leftover `print(x)` debug statement here that dumped the
        # whole feature tensor to stdout on every forward pass.)
        x = self.feat(x)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        x = x.transpose(2,1).contiguous()
        #x = F.log_softmax(x.view(-1,self.k), dim=-1)
        x = x.view(batchsize, n_pts, 3)
        return x
# -
pcls = PointNetDenseCls()
pcls.to(device = 'cuda:0')
device = 'cuda:0'
rootdir = '/home/cdi0/data/shape_net_core_uniform_samples_2048_split/'
# +
dataset = ShapeNetDataset(
dir=rootdir,
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=32,
shuffle=True,
num_workers=int(4))
test_dataset = ShapeNetDataset(
dir=rootdir,
train='test',
)
testdataloader = torch.utils.data.DataLoader(
test_dataset, batch_size=32,
shuffle=True,
num_workers=int(4))
print(len(dataset), len(test_dataset))
# -
points, target, mask = iter(testdataloader).next()
print(target.shape)
points = points.transpose(2, 1).contiguous()
points = points.to(device='cuda:0', dtype=torch.float)
target = target.to(device='cuda:0', dtype=torch.float)
target.shape
from .pointnet.model import PointNetDenseCls
# +
from pointnet.model import PointNetDenseCls, feature_transform_regularizer
classifier = PointNetDenseCls()
classifier.to(device = 'cuda:1')
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
optimizer.zero_grad()
pred = classifier(points)
# -
target[0].shape
# +
def pairwise_dist(x, y):
    """Squared Euclidean distance matrix between the rows of x and y.

    P[i, j] = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>.  The expand_as calls rely
    on x and y holding the same number of points (square Gram matrices).
    """
    gram_x = torch.mm(x, x.t())
    gram_y = torch.mm(y, y.t())
    cross = torch.mm(x, y.t())
    sq_x = gram_x.diag().unsqueeze(0).expand_as(gram_x)
    sq_y = gram_y.diag().unsqueeze(0).expand_as(gram_y)
    return sq_x.t() + sq_y - 2 * cross
def NN_loss(x, y, dim=0):
    """Mean nearest-neighbour squared distance (y->x for dim=0, x->y for dim=1)."""
    nearest, _ = pairwise_dist(x, y).min(dim=dim)
    return nearest.mean()
def distChamfer(a, b):
    """Batched squared-distance components of the Chamfer distance.

    Args:
        a, b: (B, N, D) point clouds with the same number of points N.
    Returns:
        Two (B, N) tensors: min-over-a distances for each point of b
        (``P.min(1)``) and min-over-b distances for each point of a
        (``P.min(2)``); their means sum to the (squared) Chamfer distance.
    """
    x, y = a, b
    bs, num_points, points_dim = x.size()
    xx = torch.bmm(x, x.transpose(2, 1))
    yy = torch.bmm(y, y.transpose(2, 1))
    zz = torch.bmm(x, y.transpose(2, 1))
    # Device/dtype-agnostic index: the original hard-coded
    # torch.cuda.LongTensor, which crashes on CPU-only runs.
    diag_ind = torch.arange(0, num_points, dtype=torch.long, device=x.device)
    rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
    ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
    # P[b, i, j] = squared distance between x_i and y_j.
    P = rx.transpose(2, 1) + ry - 2 * zz
    return P.min(1)[0], P.min(2)[0]
    #return torch.min(P, 1)[0], torch.min(P, 2)[0], torch.min(P, 1)[1], torch.min(P, 2)[1]
# -
dist1, dist2 = distChamfer(target, pred)
loss = (torch.mean(dist1)) + (torch.mean(dist2))
# +
mask_ = mask.unsqueeze(2).repeat(1,1,3)
mask__ = ~mask_
mask__ = mask__.to(device, dtype = torch.float32)
mask_ = mask_.to(device, dtype = torch.float32)
pred = (pred * mask__) + (target * mask_)
# -
mask.unsqueeze(2).repeat(1,1,3).shape
mask__.shape
target.shape
target_mask = target * mask__
target_mask[target_mask.sum(dim = 2) != 0].view(32,-1,3).shape
target_mask[target_mask]
# +
from emd import EMDLoss
mask_ = mask.unsqueeze(2).repeat(1,1,3)
mask__ = ~mask_
dist = EMDLoss()
a = pred[mask__].view(32,-1,3)
b = target[mask__].view(32,-1,3)
# +
cost = dist(a, b)
loss = torch.sum(cost)
loss.backward()
# -
loss.item()
label_real = torch.full((32,), 1)
label_fake = torch.full((32,), 0)
label = torch.stack((label_real, label_fake), dim = 1)
torch.stack((torch.ones((6)), torch.zeros(6)), dim = 1).shape
# +
criterion = nn.MSELoss()
cost = criterion(pred, target)
cost.backward()
# -
optimizer.step()
a.size(0)
a[a.nonzero()[0,1]]
a = torch.ones(3,3,3)
b = 3 * torch.ones(3,3,3) - torch.ones(3,3,3)
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind = torch.arange(0, num_points).type(torch.cuda.LongTensor)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = rx.transpose(2, 1) + ry - 2 * zz
diag_ind = torch.arange(0, num_points).type(torch.LongTensor)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ae = model.Autoencoder(device = 'cuda:0')
ae.to(device)
ae.eval()
pred, conf = ae(points)
conf.max()
import torch.distributed as dist
dist.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:01',
world_size=20,
rank=1)
import numpy as np
-0.015 * np.log(0.5)
dataset = ShapeNetDataset(
dir=rootdir,
)
a = dataset.l[-1] + dataset.l[0]
a[-1]
a = [1,2,3]
for i in range(1):
print(i)
| utils/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TechDomani/auth0-react/blob/master/Sound.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="_lxnF6qvbSnN" outputId="50123b4c-5b71-48e9-df4c-3af9c3c899c1"
import numpy as np
import matplotlib.pyplot as plot

# 0..10 ms sampled every 0.1 ms; the amplitude is shifted and scaled into
# roughly 0..8 so it lines up with the 4-bit tick labels below.
time = np.arange(0, 10.1, 0.1);
amplitude = (np.sin(time - 0.75) + 1) * 4
positions = (0, 1, 2, 3, 4, 5, 6, 7, 8)
# NOTE(review): `binary` has 10 labels but `positions` only 9 entries; recent
# matplotlib versions raise on this mismatch -- confirm whether '1001' should
# be dropped or position 9 added.
binary = ('0000', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001')
plot.yticks(positions, binary)
plot.plot(time, amplitude)
plot.title('Wave')
plot.xlabel('Time ms')
plot.xticks(np.arange(0, 11, 1))
plot.ylabel('Amplitude')
plot.grid(True, which='both')
plot.show()
# + id="FHfVZqs9MMyC"
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="kyRKNpcbpJNF" outputId="fc8755bc-d85e-4bd9-d800-7298f445b061"
import pandas as pd
from tabulate import tabulate

# Ask for the sampling interval in ms and convert it to an index stride over
# the 0.1 ms-spaced `time`/`amplitude` arrays from the previous cell.
sample_requested = float(input("Please enter interval between samples "))
sample_rate = int(sample_requested * 10)
# Quantise: keep every sample_rate-th amplitude, rounded to the nearest level.
sample = amplitude[::sample_rate].copy().round()
sample_time = time[::sample_rate].copy()
plot.yticks(positions, binary)
plot.plot(time, amplitude)
plot.plot(sample_time, sample, 'ro')  # sampled points as red dots
plot.title('Wave')
plot.xlabel('Time ms')
plot.xticks(np.arange(0, 11, 1))
plot.ylabel('Amplitude')
plot.grid(True, which='both')
plot.show()

# Tabulate the sample times against their 4-bit binary amplitude codes.
time_series = list(map(lambda val: str(round(val, 1)), sample_time.tolist()))
time_series.insert(0, "Time")
bin_series = list(map(lambda val: format(int(val), '04b'), sample.tolist()))
bin_series.insert(0, "Amplitude")
print("")
print(tabulate([time_series, bin_series], headers="firstrow", tablefmt="psql"))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="I9mWCeW2M0dZ" outputId="5251c06c-ff35-463b-9fd2-5b7fa9109df9"
# Re-plot the sampled signal as a connected line over the original wave to
# visualise the reconstruction error introduced by sampling + quantisation.
sample = amplitude[::sample_rate].copy().round()
sample_time = time[::sample_rate].copy()
plot.yticks(positions, binary)
plot.plot(time, amplitude)
plot.plot(sample_time, sample)
plot.title('Wave')
plot.xlabel('Time ms')
plot.xticks(np.arange(0, 11, 1))
plot.ylabel('Amplitude')
plot.grid(True, which='both')
plot.show()
| Sound.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CPE721 - <NAME> Feedforward
import numpy as np
from sklearn import svm
from sklearn.model_selection import KFold, GridSearchCV
# ## 2. *Baseline*: SVM
# X: feature matrix, one row per sample; Y_cat: the matching class labels.
X = np.loadtxt('X.txt',delimiter=',')
Y_cat = np.loadtxt('Y_cat.txt')
# +
# Hyper-parameter grid for the RBF-kernel SVM: regularisation C and kernel
# width gamma.
C_range = np.arange(1, 10.1, 0.1)
gamma_range = np.logspace(-5, 1, 7)
param_grid = [dict(C=C_range, gamma=gamma_range)]

# Parameters
K = 10  # k-fold parameter

# Cross-validation
cv = KFold(n_splits=K)
grid = GridSearchCV(svm.SVC(kernel='rbf', cache_size=1000), param_grid=param_grid, cv=cv, n_jobs=-1, verbose=1)
# Fix: the labels were loaded as Y_cat above; `y` was never defined and the
# original `grid.fit(X, y)` raised NameError.
grid.fit(X, Y_cat)
# +
# Std-dev of the test score across the K folds for the winning combination.
best_std_score = grid.cv_results_['std_test_score'][grid.best_index_]

# Results:
print("Best parameters: %s \nAccuracy: %0.3f \u00B1 %0.3f"
      % (grid.best_params_, grid.best_score_, best_std_score))
| .ipynb_checkpoints/svm-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
using DrWatson
@quickactivate "MEngProject"
using MEngProject, CUDA, DifferentialEquations, PyPlot, NNlib, ImageFiltering, Images, MEngProject, MEngProject.LamKernels, MEngProject.Laminart, MEngProject.Utils, BenchmarkTools, Test
function reshape2d_4d(img::AbstractArray)
    # Lift a 2-D image to the 4-D (height, width, channels=1, batch=1)
    # layout expected by NNlib-style convolutions.
    h, w = size(img, 1), size(img, 2)
    reshape(img, h, w, 1, 1)
end
# +
img = convert(Array{Float32,2}, load(datadir("Iine_100_100_gs.png")));
img = reshape2d_4d(img)
img = cu(img)
r = similar(img)
p = LaminartGPU.kernels(img, Parameters.parameters);
LaminartGPU.I_u!(r, img, p)
temp_out = (I = img, r = r)
p = merge(p, temp_out);
# -
tspan = (0.0f0, 5f0)
u0 = cu(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1));
x_lgn = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
C = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1))
H_z = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
V_temp_1 = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1));
V_temp_2 = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1));
Q_temp = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
P_temp = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
arr1 = u0[:, :, 1:p.K,:]
arr2 = u0[:, :, 1:1,:];
f = LaminartGPU.MyFunction(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp);
fa = LaminartGPU.MyFunctionnnn(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp);
prob= ODEProblem(f, u0, tspan, p);
prob1= ODEProblem(fa, u0, tspan, p);
sol = solve(prob)
sol = solve(prob)
@benchmark sol = solve(prob)
# changed to ff.x=x_ etc from ff.x= CuArray(x_) in MyFunction
@benchmark sol = solve(prob)
@benchmark sol = solve(prob, save_on=false)
@benchmark sol = solve(prob, save_on=false, alias_u0=true)
@benchmark sol = solve(prob, save_on=false, save_start=false)
f_nm = LaminartGPU.MyFunction_nm(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp);
prob_nm= ODEProblem(f_nm, u0, tspan, p);
@benchmark sol_nm = solve(prob_nm)
f_nm.x
@time f_nm.x .= u0[:, :, 1:p.K,:]
@time f.x .= u0[:, :, 1:p.K,:]
@benchmark sol = solve(prob1)
@benchmark sol = solve(prob1, save_on=false)
CUDA.device_reset!(dev::CuDevice=device())
du = similar(u0)
@time f(du,u0,p,1)
f
f(u0, u0, p, 1)
solcpu= Array(sol)
@code_warntype LaminartGPU.MyFunctionnn(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp)
CUDA.device_reset!(dev::CuDevice=device())
f
f.dv_m
x_ = @view u[:, :, 1:p.K,:]
f.x = CuArray(x_)
sol[5]
Utils.plot_rb(sol[905][:,:,6,1])
sol
# ## Try making struct to hold arrays eg f.x = x[:,:,:,:]
# ### first: BM how long that copy takes
# Minimal mutable holder used in the cells below to benchmark assigning
# views vs copies to an untyped (AbstractArray) struct field.
mutable struct testtt
    y::AbstractArray
end
x= testtt(@view u0[:,:,1:2,:])
yy = @view u0[:,:,1:2,:]
yyy = yy[:,:,:,:]
@benchmark x.y = yyy
x.y
@benchmark begin
# x.y = @view u0[:,:,1:2,:]
x.y = yy[:,:,:,:]
end
# +
@benchmark begin
x.y = @view u0[:,:,1:2,:]
x.y = CuArray(x.y)
end
# -
x.y
typeof(@view u0[:,:,1:2,:])
typeof((@view u0[:,:,1:2,:])[:,:,:,:])
xx = (@view u0[:,:,1:2,:])[:,:,:,:]
@. xx[:,:,1,:] = 1
@view u0[:,:,1:2,:][:,:,:,:]
xx
u0[:,:,1:2,:]
# Two-field variant for the benchmarks below: `ya` holds a view into the
# state array and `yb` a separate copy, to time copy-in/copy-out patterns.
mutable struct testta
    ya::AbstractArray
    yb::AbstractArray
end
yaa = @view u0[:,:,1:2,:]
yba = CuArray(yaa)
x = testta(yaa, yba)
x.ya
x.yb
@. x.yb[:,:,1,:] = 3f0
x.yb
x.ya
@. x.ya = x.yb
x.ya
@benchmark begin
x.ya = @view u0[:,:,1:2,:]
x.yb = x.ya[:,:,:,:]
@. x.yb = 4f0
@. x.ya = x.yb
end
@benchmark begin
x.ya = @view u0[:,:,1:2,:]
x.yb = CuArray(x.ya)
@. x.yb = 4f0
@. x.ya = x.yb
end
@benchmark begin
x.ya = @view u0[:,:,1:2,:]
@. x.yb = CuArray(x.ya)
@. x.yb = 4f0
@. x.ya = x.yb
end
@benchmark begin
yana = @view u0[:,:,1:2,:]
ybna = CuArray(yana)
@. ybna = 4f0
@. yana = ybna
end
yanaa = @view u0[:,:,1:2,:]
@time ybnc = CuArray(yanaa)
x_lgn = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
C = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1))
H_z = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
V_temp_1 = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1));
V_temp_2 = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1));
Q_temp = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
P_temp = cu(reshape(zeros(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K,1));
t= cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
t1= cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
@benchmark ta = @view t[:, :, :,:]
@benchmark t1 = @view t[:, :, :,:]
t1
t = similar(u0)
tt = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
ttt = cu(reshape(zeros(Float32, p.dim_i, p.dim_j), p.dim_i, p.dim_j, 1,1))
@benchmark tp = @view t[:,:,1:1,:]
@benchmark tt = @view t[:,:,1:1,:]
tt = @view t[:,:,1:1,:];
typeof(tt)
@benchmark ttt = CuArray(tt)
typeof(tt)
tttt = similar(tt);
typeof(tttt)
ttttt = tt
tt[1,1,1,1]
@benchmark ttt = tt[:,:,:,:]
ttt = tt[:,:,:,:],
typeof(ttt)
@benchmark @. t = ff.x
du = u0
u = u0
@time f(du, u, p, 1)
ff = f
# @time LaminartGPU.fun_x_lgn!(ff.x_lgn, ff.x, p)
# @time LaminartGPU.fun_v_C!(ff.C, ff.v_p, ff.v_m, ff.V_temp_1, ff.V_temp_2, ff.Q_temp, ff.P_temp, p)
# @time LaminartGPU.fun_H_z!(ff.H_z, ff.z, p)
# @time LaminartGPU.fun_dv!(ff.dv_p, ff.v_p, p.r, ff.x_lgn, p)
# @time LaminartGPU.fun_dv!(ff.dv_m, ff.v_m, .-p.r, ff.x_lgn, p)
# @time LaminartGPU.fun_dx_v1!(ff.dx, ff.x, ff.C, ff.z, p.x_V2, p)
# @time LaminartGPU.fun_dy!(ff.dy, ff.y, ff.C, ff.x, ff.m, p)
# @time LaminartGPU.fun_dm!(ff.dm, ff.m, ff.x, p)
# @time LaminartGPU.fun_dz!(ff.dz, ff.z, ff.y, ff.H_z, ff.s, p)
@time LaminartGPU.fun_ds!(ff.ds, ff.s, ff.H_z, p)
@time x_ = @view u[:, :, 1:p.K,:]
ff.x = CuArray(x_)
@time y_ = @view u[:, :, p.K+1:2*p.K,:]
@time ff.y = CuArray(y_)
x_ = @view u[:, :, 1:p.K,:]
y_ = @view u[:, :, p.K+1:2*p.K,:]
m_ = @view u[:, :, 2*p.K+1:3*p.K,:]
z_ = @view u[:, :, 3*p.K+1:4*p.K,:]
s_ = @view u[:, :, 4*p.K+1:5*p.K,:]
v_p_ = @view u[:, :, 5*p.K+1:5*p.K+1,:]
v_m_ = @view u[:, :, 5*p.K+2:5*p.K+2,:]
dx_ = @view du[:, :, 1:p.K,:]
dy_ = @view du[:, :, p.K+1:2*p.K,:]
dm_ = @view du[:, :, 2*p.K+1:3*p.K,:]
dz_ = @view du[:, :, 3*p.K+1:4*p.K,:]
ds_ = @view du[:, :, 4*p.K+1:5*p.K,:]
dv_p_ = @view du[:, :, 5*p.K+1:5*p.K+1,:]
dv_m_ = @view du[:, :, 5*p.K+2:5*p.K+2,:]
@time begin
x_ = @view u[:, :, 1:p.K,:]
y_ = @view u[:, :, p.K+1:2*p.K,:]
m_ = @view u[:, :, 2*p.K+1:3*p.K,:]
z_ = @view u[:, :, 3*p.K+1:4*p.K,:]
s_ = @view u[:, :, 4*p.K+1:5*p.K,:]
v_p_ = @view u[:, :, 5*p.K+1:5*p.K+1,:]
v_m_ = @view u[:, :, 5*p.K+2:5*p.K+2,:]
dx_ = @view du[:, :, 1:p.K,:]
dy_ = @view du[:, :, p.K+1:2*p.K,:]
dm_ = @view du[:, :, 2*p.K+1:3*p.K,:]
dz_ = @view du[:, :, 3*p.K+1:4*p.K,:]
ds_ = @view du[:, :, 4*p.K+1:5*p.K,:]
dv_p_ = @view du[:, :, 5*p.K+1:5*p.K+1,:]
dv_m_ = @view du[:, :, 5*p.K+2:5*p.K+2,:]
end
@time begin
ff.x = CuArray(x_)
ff.y = CuArray(y_)
ff.m = CuArray(m_)
ff.z = CuArray(z_)
ff.s = CuArray(s_)
ff.v_p = CuArray(v_p_)
ff.v_m = CuArray(v_m_)
ff.dx = CuArray(dx_)
ff.dy = CuArray(dy_)
ff.dm = CuArray(dm_)
ff.dz = CuArray(dz_)
ff.ds = CuArray(ds_)
ff.dv_p = CuArray(dv_p_)
ff.dv_m = CuArray(dv_m_)
end
@time begin
ff.x = CuArray(x_)
ff.y .= CuArray(y_)
ff.m .= CuArray(m_)
ff.z .= CuArray(z_)
ff.s .= CuArray(s_)
ff.v_p .= CuArray(v_p_)
ff.v_m .= CuArray(v_m_)
ff.dx .= CuArray(dx_)
ff.dy .= CuArray(dy_)
ff.dm .= CuArray(dm_)
ff.dz .= CuArray(dz_)
ff.ds .= CuArray(ds_)
ff.dv_p .= CuArray(dv_p_)
ff.dv_m .= CuArray(dv_m_)
end
@time begin
LaminartGPU.fun_x_lgn!(ff.x_lgn, ff.x, p)
LaminartGPU.fun_v_C!(ff.C, ff.v_p, ff.v_m, ff.V_temp_1, ff.V_temp_2, ff.Q_temp, ff.P_temp, p)
LaminartGPU.fun_H_z!(ff.H_z, ff.z, p)
LaminartGPU.fun_dv!(ff.dv_p, ff.v_p, p.r, ff.x_lgn, p)
LaminartGPU.fun_dv!(ff.dv_m, ff.v_m, .-p.r, ff.x_lgn, p)
LaminartGPU.fun_dx_v1!(ff.dx, ff.x, ff.C, ff.z, p.x_V2, p)
LaminartGPU.fun_dy!(ff.dy, ff.y, ff.C, ff.x, ff.m, p)
LaminartGPU.fun_dm!(ff.dm, ff.m, ff.x, p)
LaminartGPU.fun_dz!(ff.dz, ff.z, ff.y, ff.H_z, ff.s, p)
LaminartGPU.fun_ds!(ff.ds, ff.s, ff.H_z, p)
end
@time begin
@. x_ = ff.x
@. y_ =ff.y
@. m_ =ff.m
@. z_ = ff.z
@. s_ = ff.s
@. v_p_ = ff.v_p
@. v_m_ = ff.v_m
@. dx_ = ff.dx
@. dy_ = ff.dy
@. dm_ = ff.dm
@. dz_ = ff.dz
@. ds_ = ff.ds
@. dv_p_ = ff.dv_p
@. dv_m_ = ff.dv_m
end
@time begin
ff.x .= CuArray(x_)
ff.y .= CuArray(y_)
ff.m .= CuArray(m_)
ff.z .= CuArray(z_)
ff.s .= CuArray(s_)
ff.v_p .= CuArray(v_p_)
ff.v_m .= CuArray(v_m_)
ff.dx .= CuArray(dx_)
ff.dy .= CuArray(dy_)
ff.dm .= CuArray(dm_)
ff.dz .= CuArray(dz_)
ff.ds .= CuArray(ds_)
ff.dv_p .= CuArray(dv_p_)
ff.dv_m .= CuArray(dv_m_)
end
@time begin
ff.x = CuArray(x_)
@. ff.y = CuArray(y_)
ff.m = CuArray(m_)
ff.z = CuArray(z_)
ff.s = CuArray(s_)
ff.v_p .= CuArray(v_p_)
ff.v_m .= CuArray(v_m_)
ff.dx .= CuArray(dx_)
ff.dy .= CuArray(dy_)
ff.dm .= CuArray(dm_)
ff.dz .= CuArray(dz_)
ff.ds .= CuArray(ds_)
ff.dv_p .= CuArray(dv_p_)
ff.dv_m .= CuArray(dv_m_)
end
@time ff.y = CuArray(y_)
@time ff.y .= y_
# +
@time begin
@. ff.x = x_
@. ff.y = y_
@. ff.m = m_
@. ff.z = z_
@. ff.s = s_
@. ff.v_p = v_p_
@. ff.v_m = v_m_
@. ff.dx = dx_
@. ff.dy = dy_
@. ff.dm = dm_
@. ff.dz = dz_
@. ff.ds = ds_
@. ff.dv_p = dv_p_
@. ff.dv_m = dv_m_
end
# -
ff.x
x_
@. f.x = @view u0[:, :, 1:p.K,:]
@. f.x = f.x =11f0
u0[:, :, 1:p.K,:]
@time @. f.x = @view u0[:, :, 1:p.K,:]
@time begin
x_ = @view u0[:, :, 1:p.K,:]
@. f.x = x_
@. x_ = f.x
end
@time begin
x_ = @view u0[:, :, 1:p.K,:]
@. f.x = x_
@. x_ = f.x
end
@. @view u0[:, :, 1:p.K,:] = f.x
@time begin
@. f.x = @view u0[:, :, 1:p.K,:]
@. u0[:, :, 1:p.K,:] = f.x
end
f_1 = LaminartGPU.MyFunction_1(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp);
prob_1 = ODEProblem(f_1, u0, tspan, p);
@tsol_1 = solve(prob_1)
@benchmark sol_1 = solve(prob_1)
f_2 = LaminartGPU.MyFunction_2(
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr1),
similar(arr2),
similar(arr2),
x_lgn, C, H_z, V_temp_1, V_temp_2, Q_temp, P_temp);
prob_2 = ODEProblem(f_2, u0, tspan, p);
@benchmark sol_2 = solve(prob_2)
sol_2 = solve(prob_2)
Utils.plot_rb(sol_2[300][:,:,6,1])
a = nothing; GC.gc(true)
CUDA.reclaim()
CUDA.memory_status()
ff = f_1
du = u0;
@time begin
@. ff.dx = @view du[:, :, 1:p.K,:]
@. ff.dx = ff.dx
dx_ = @view du[:, :, 1:p.K,:]
@. dx_ = ff.dx
end
| notebooks/dev/.ipynb_checkpoints/GPU_dev_0729-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import setGPU
import os
# os.environ["CUDA_VISIBLE_DEVICES"]="4"
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats
import tensorflow as tf
from pylab import rcParams
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model
from keras.layers import Input, Dense, Activation
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
from keras.layers.advanced_activations import PReLU, LeakyReLU
from sklearn.utils import shuffle
import h5py
import getpass
import json
# %matplotlib inline
# Global plotting/style configuration for this notebook.
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42  # NOTE(review): presumably used for reproducible splits later -- confirm
LABELS = ["Normal", "Anomalous"]  # NOTE(review): presumably class names for later plots -- confirm
# +
import json
from sklearn.preprocessing import StandardScaler, MinMaxScaler, scale, RobustScaler, normalize, MaxAbsScaler
# -
#Feature names
var_names_reduced5 = ['qPFJetPt', 'qPFJetEta', 'qPFJetPhi', 'qPFJet0Pt', 'qPFJet1Pt', 'qPFJet2Pt', 'qPFJet3Pt', 'qPFJet4Pt', 'qPFJet5Pt', 'qPFJet0Eta', 'qPFJet1Eta', 'qPFJet2Eta', 'qPFJet3Eta', 'qPFJet4Eta', 'qPFJet5Eta', 'qPFJet0Phi', 'qPFJet1Phi', 'qPFJet2Phi', 'qPFJet3Phi', 'qPFJet4Phi', 'qPFJet5Phi', 'qPFJet4CHS0Pt', 'qPFJet4CHS1Pt', 'qPFJet4CHS2Pt', 'qPFJet4CHS3Pt', 'qPFJet4CHS4Pt', 'qPFJet4CHS5Pt', 'qPFJet4CHS0Eta', 'qPFJet4CHS1Eta', 'qPFJet4CHS2Eta', 'qPFJet4CHS3Eta', 'qPFJet4CHS4Eta', 'qPFJet4CHS5Eta', 'qPFJet4CHS0Phi', 'qPFJet4CHS1Phi', 'qPFJet4CHS2Phi', 'qPFJet4CHS3Phi', 'qPFJet4CHS4Phi', 'qPFJet4CHS5Phi', 'qPFJet8CHS0Pt', 'qPFJet8CHS1Pt', 'qPFJet8CHS2Pt', 'qPFJet8CHS3Pt', 'qPFJet8CHS4Pt', 'qPFJet8CHS5Pt', 'qPFJet8CHS0Eta', 'qPFJet8CHS1Eta', 'qPFJet8CHS2Eta', 'qPFJet8CHS3Eta', 'qPFJet8CHS4Eta', 'qPFJet8CHS5Eta', 'qPFJet8CHS0Phi', 'qPFJet8CHS1Phi', 'qPFJet8CHS2Phi', 'qPFJet8CHS3Phi', 'qPFJet8CHS4Phi', 'qPFJet8CHS5Phi', 'qPFJetEI0Pt', 'qPFJetEI1Pt', 'qPFJetEI2Pt', 'qPFJetEI3Pt', 'qPFJetEI4Pt', 'qPFJetEI5Pt', 'qPFJetEI0Eta', 'qPFJetEI1Eta', 'qPFJetEI2Eta', 'qPFJetEI3Eta', 'qPFJetEI4Eta', 'qPFJetEI5Eta', 'qPFJetEI0Phi', 'qPFJetEI1Phi', 'qPFJetEI2Phi', 'qPFJetEI3Phi', 'qPFJetEI4Phi', 'qPFJetEI5Phi', 'qPFJet8CHSSD0Pt', 'qPFJet8CHSSD1Pt', 'qPFJet8CHSSD2Pt', 'qPFJet8CHSSD3Pt', 'qPFJet8CHSSD4Pt', 'qPFJet8CHSSD5Pt', 'qPFJet8CHSSD0Eta', 'qPFJet8CHSSD1Eta', 'qPFJet8CHSSD2Eta', 'qPFJet8CHSSD3Eta', 'qPFJet8CHSSD4Eta', 'qPFJet8CHSSD5Eta', 'qPFJet8CHSSD0Phi', 'qPFJet8CHSSD1Phi', 'qPFJet8CHSSD2Phi', 'qPFJet8CHSSD3Phi', 'qPFJet8CHSSD4Phi', 'qPFJet8CHSSD5Phi', 'qPFJetTopCHS0Pt', 'qPFJetTopCHS1Pt', 'qPFJetTopCHS2Pt', 'qPFJetTopCHS3Pt', 'qPFJetTopCHS4Pt', 'qPFJetTopCHS5Pt', 'qPFJetTopCHS0Eta', 'qPFJetTopCHS1Eta', 'qPFJetTopCHS2Eta', 'qPFJetTopCHS3Eta', 'qPFJetTopCHS4Eta', 'qPFJetTopCHS5Eta', 'qPFJetTopCHS0Phi', 'qPFJetTopCHS1Phi', 'qPFJetTopCHS2Phi', 'qPFJetTopCHS3Phi', 'qPFJetTopCHS4Phi', 'qPFJetTopCHS5Phi', 'qCalJet0Pt', 'qCalJet1Pt', 'qCalJet2Pt', 'qCalJet3Pt', 
'qCalJet4Pt', 'qCalJet5Pt', 'qCalJet0Eta', 'qCalJet1Eta', 'qCalJet2Eta', 'qCalJet3Eta', 'qCalJet4Eta', 'qCalJet5Eta', 'qCalJet0Phi', 'qCalJet1Phi', 'qCalJet2Phi', 'qCalJet3Phi', 'qCalJet4Phi', 'qCalJet5Phi', 'qCalJet0En', 'qCalJet1En', 'qCalJet2En', 'qCalJet3En', 'qCalJet4En', 'qCalJet5En', 'qPho0Pt', 'qPho1Pt', 'qPho2Pt', 'qPho3Pt', 'qPho4Pt', 'qPho5Pt', 'qPho0Eta', 'qPho1Eta', 'qPho2Eta', 'qPho3Eta', 'qPho4Eta', 'qPho5Eta', 'qPho0Phi', 'qPho1Phi', 'qPho2Phi', 'qPho3Phi', 'qPho4Phi', 'qPho5Phi', 'qPho0En', 'qPho1En', 'qPho2En', 'qPho3En', 'qPho4En', 'qPho5En', 'qgedPho0Pt', 'qgedPho1Pt', 'qgedPho2Pt', 'qgedPho3Pt', 'qgedPho4Pt', 'qgedPho5Pt', 'qgedPho0Eta', 'qgedPho1Eta', 'qgedPho2Eta', 'qgedPho3Eta', 'qgedPho4Eta', 'qgedPho5Eta', 'qgedPho0Phi', 'qgedPho1Phi', 'qgedPho2Phi', 'qgedPho3Phi', 'qgedPho4Phi', 'qgedPho5Phi', 'qgedPho0En', 'qgedPho1En', 'qgedPho2En', 'qgedPho3En', 'qgedPho4En', 'qgedPho5En', 'qMu0Pt', 'qMu1Pt', 'qMu2Pt', 'qMu3Pt', 'qMu4Pt', 'qMu5Pt', 'qMu0Eta', 'qMu1Eta', 'qMu2Eta', 'qMu3Eta', 'qMu4Eta', 'qMu5Eta', 'qMu0Phi', 'qMu1Phi', 'qMu2Phi', 'qMu3Phi', 'qMu4Phi', 'qMu5Phi', 'qMu0En', 'qMu1En', 'qMu2En', 'qMu3En', 'qMu4En', 'qMu5En', 'qMuCosm0Pt', 'qMuCosm1Pt', 'qMuCosm2Pt', 'qMuCosm3Pt', 'qMuCosm4Pt', 'qMuCosm5Pt', 'qMuCosm0Eta', 'qMuCosm1Eta', 'qMuCosm2Eta', 'qMuCosm3Eta', 'qMuCosm4Eta', 'qMuCosm5Eta', 'qMuCosm0Phi', 'qMuCosm1Phi', 'qMuCosm2Phi', 'qMuCosm3Phi', 'qMuCosm4Phi', 'qMuCosm5Phi', 'qMuCosm0En', 'qMuCosm1En', 'qMuCosm2En', 'qMuCosm3En', 'qMuCosm4En', 'qMuCosm5En', 'qMuCosmLeg0Pt', 'qMuCosmLeg1Pt', 'qMuCosmLeg2Pt', 'qMuCosmLeg3Pt', 'qMuCosmLeg4Pt', 'qMuCosmLeg5Pt', 'qMuCosmLeg0Eta', 'qMuCosmLeg1Eta', 'qMuCosmLeg2Eta', 'qMuCosmLeg3Eta', 'qMuCosmLeg4Eta', 'qMuCosmLeg5Eta', 'qMuCosmLeg0Phi', 'qMuCosmLeg1Phi', 'qMuCosmLeg2Phi', 'qMuCosmLeg3Phi', 'qMuCosmLeg4Phi', 'qMuCosmLeg5Phi', 'qMuCosmLeg0En', 'qMuCosmLeg1En', 'qMuCosmLeg2En', 'qMuCosmLeg3En', 'qMuCosmLeg4En', 'qMuCosmLeg5En', 'qPFJet4CHSPt', 'qPFJet4CHSEta', 'qPFJet4CHSPhi', 
'qPFJet8CHSPt', 'qPFJet8CHSEta', 'qPFJet8CHSPhi', 'qPFJetEIPt', 'qPFJetEIEta', 'qPFJetEIPhi', 'qPFJet8CHSSDPt', 'qPFJet8CHSSDEta', 'qPFJet8CHSSDPhi', 'qPFJetTopCHSPt', 'qPFJetTopCHSEta', 'qPFJetTopCHSPhi', 'qPFChMetPt', 'qPFChMetPhi', 'qPFMetPt', 'qPFMetPhi', 'qNVtx', 'qCalJetPt', 'qCalJetEta', 'qCalJetPhi', 'qCalJetEn', 'qCalMETPt', 'qCalMETPhi', 'qCalMETEn', 'qCalMETBEPt', 'qCalMETBEPhi', 'qCalMETBEEn', 'qCalMETBEFOPt', 'qCalMETBEFOPhi', 'qCalMETBEFOEn', 'qCalMETMPt', 'qCalMETMPhi', 'qCalMETMEn', 'qSCEn', 'qSCEta', 'qSCPhi', 'qSCEtaWidth', 'qSCPhiWidth', 'qSCEnhfEM', 'qSCEtahfEM', 'qSCPhihfEM', 'qSCEn5x5', 'qSCEta5x5', 'qSCPhi5x5', 'qSCEtaWidth5x5', 'qSCPhiWidth5x5', 'qCCEn', 'qCCEta', 'qCCPhi', 'qCCEn5x5', 'qCCEta5x5', 'qCCPhi5x5', 'qPhoPt', 'qPhoEta', 'qPhoPhi', 'qPhoEn_', 'qPhoe1x5_', 'qPhoe2x5_', 'qPhoe3x3_', 'qPhoe5x5_', 'qPhomaxenxtal_', 'qPhosigmaeta_', 'qPhosigmaIeta_', 'qPhor1x5_', 'qPhor2x5_', 'qPhor9_', 'qgedPhoPt', 'qgedPhoEta', 'qgedPhoPhi', 'qgedPhoEn_', 'qgedPhoe1x5_', 'qgedPhoe2x5_', 'qgedPhoe3x3_', 'qgedPhoe5x5_', 'qgedPhomaxenxtal_', 'qgedPhosigmaeta_', 'qgedPhosigmaIeta_', 'qgedPhor1x5_', 'qgedPhor2x5_', 'qgedPhor9_', 'qMuPt', 'qMuEta', 'qMuPhi', 'qMuEn_', 'qMuCh_', 'qMuChi2_', 'qMuCosmPt', 'qMuCosmEta', 'qMuCosmPhi', 'qMuCosmEn_', 'qMuCosmCh_', 'qMuCosmChi2_', 'qMuCosmLegPt', 'qMuCosmLegEta', 'qMuCosmLegPhi', 'qMuCosmLegEn_', 'qMuCosmLegCh_', 'qMuCosmLegChi2_', 'qSigmaIEta', 'qSigmaIPhi', 'qr9', 'qHadOEm', 'qdrSumPt', 'qdrSumEt', 'qeSCOP', 'qecEn', 'qUNSigmaIEta', 'qUNSigmaIPhi', 'qUNr9', 'qUNHadOEm', 'qUNdrSumPt', 'qUNdrSumEt', 'qUNeSCOP', 'qUNecEn', 'qEBenergy', 'qEBtime', 'qEBchi2', 'qEBiEta', 'qEBiPhi', 'qEEenergy', 'qEEtime', 'qEEchi2', 'qEEix', 'qEEiy', 'qESenergy', 'qEStime', 'qESix', 'qESiy', 'qHBHEenergy', 'qHBHEtime', 'qHBHEauxe', 'qHBHEieta', 'qHBHEiphi', 'qHFenergy', 'qHFtime', 'qHFieta', 'qHFiphi', 'qPreShEn', 'qPreShEta', 'qPreShPhi', 'qPreShYEn', 'qPreShYEta', 'qPreShYPhi']
# Authenticate with Kerberos so the EOS paths below are readable.
# Security fix: the original `os.system("echo %s | kinit" % password)` put the
# password on a shell command line, where any local user could read it via the
# process list (`ps`). Feed it to kinit over stdin instead (echo appends a
# newline, so we do too).
import subprocess
subprocess.run(['kinit'], input=getpass.getpass() + '\n', universal_newlines=True)
# +
# Location of the per-lumisection HDF5 files. Other locations tried during
# development (note: the older files in hdf5_data/ have a 2802-column layout):
#   /eos/cms/store/user/fsiroky/hdf5_data/
#   /eos/cms/store/user/fsiroky/lumih5/
#   /afs/cern.ch/user/f/fsiroky/public/
#   /mnt/hdf5test/
#   /home/test_local/
b_h5 = '/eos/cms/store/user/fsiroky/consistentlumih5/'  # files used for the analysis

# Primary Datasets addressable by a numeric id. Ids 8 (FSQJets),
# 9 (HighMultiplicityEOF) and 13 (MinimumBias) are omitted because they do not
# have enough data (the notebook fails on them); 16 (NoBPTX) is omitted too.
pds = {
    1: 'BTagCSV', 2: 'BTagMu', 3: 'Charmonium', 4: 'DisplacedJet',
    5: 'DoubleEG', 6: 'DoubleMuon', 7: 'DoubleMuonLowMass',
    10: 'HTMHT', 11: 'JetHT', 12: 'MET',
    14: 'MuonEG', 15: 'MuOnia',
    17: 'SingleElectron', 18: 'SingleMuon', 19: 'SinglePhoton',
    20: 'Tau', 21: 'ZeroBias',
}
def get_jets(bg_files, bg_jets, sig_files, sig_jets):
    """Load good (signal) and bad (background) lumisection feature arrays.

    :param bg_files:  list of HDF5 file paths holding background lumisections
    :param bg_jets:   dataset name inside each background file (same order)
    :param sig_files: list of HDF5 file paths holding signal lumisections
    :param sig_jets:  dataset name inside each signal file (same order)
    :return: (good_jets, bad_jets) as 2-D numpy arrays stacked over all files.

    Files whose dataset is missing are reported and skipped (some PDs lack
    certain run eras).
    """
    # 2813 columns for the "consistent lumi" files; the older files in
    # /eos/cms/store/user/fsiroky/hdf5_data/ have 2802 columns instead.
    n_cols = 2813
    good_chunks, bad_chunks = [], []
    # Control which time-interval files per PD to load with this range.
    for i in range(len(bg_files)):
        try:
            # Context managers close the HDF5 handles even on error
            # (the original leaked every opened file).
            with h5py.File(bg_files[i], 'r') as bg_jetfile:
                bg_jet = bg_jetfile[bg_jets[i]][:]
            with h5py.File(sig_files[i], 'r') as sig_jetfile:
                sig_jet = sig_jetfile[sig_jets[i]][:]
            bad_chunks.append(bg_jet)
            good_chunks.append(sig_jet)
            print( "Number of good lumis: ", len(sig_jet), " Number of bad lumis: ", len(bg_jet))
        except OSError as error:
            print("This Primary Dataset doesn't have ", bg_jets[i], error )
            continue
    # Concatenate once at the end instead of re-allocating the whole array on
    # every iteration (the original grew the arrays with O(n^2) copies).
    good_jets = (np.concatenate(good_chunks, axis=0) if good_chunks
                 else np.empty([0, n_cols]))
    bad_jets = (np.concatenate(bad_chunks, axis=0) if bad_chunks
                else np.empty([0, n_cols]))
    return good_jets, bad_jets
# +
# Choose which Primary Dataset to load.
nbr = 11  # JetHT
# One background/signal file and dataset name per run era.
eras = ['C', 'D', 'E', 'F', 'G', 'H']
bg_files = [b_h5 + pds[nbr] + '_' + era + '_background.h5' for era in eras]
bg_jets = [pds[nbr] + '_' + era + '_background' for era in eras]
sig_files = [b_h5 + pds[nbr] + '_' + era + '_signal.h5' for era in eras]
sig_jets = [pds[nbr] + '_' + era + '_signal' for era in eras]

# Load good and bad lumisections for the chosen PD.
good_jets, bad_jets = get_jets(bg_files, bg_jets, sig_files, sig_jets)
# #Choose which PD to load
# nbr = 3 #Charmonium
# bg_files = [b_h5+pds[nbr]+'_C_background.h5',b_h5+pds[nbr]+'_D_background.h5', b_h5+pds[nbr]+'_E_background.h5',
# b_h5+pds[nbr]+'_F_background.h5', b_h5+pds[nbr]+'_G_background.h5', b_h5+pds[nbr]+'_H_background.h5']
# bg_jets = [pds[nbr]+"_C_background", pds[nbr]+"_D_background", pds[nbr]+"_E_background",
# pds[nbr]+"_F_background", pds[nbr]+"_G_background", pds[nbr]+"_H_background"]
# sig_files = [b_h5+pds[nbr]+'_C_signal.h5',b_h5+pds[nbr]+'_D_signal.h5', b_h5+pds[nbr]+'_E_signal.h5',
# b_h5+pds[nbr]+'_F_signal.h5', b_h5+pds[nbr]+'_G_signal.h5', b_h5+pds[nbr]+'_H_signal.h5']
# sig_jets = [pds[nbr]+"_C_signal", pds[nbr]+"_D_signal", pds[nbr]+"_E_signal",
# pds[nbr]+"_F_signal", pds[nbr]+"_G_signal", pds[nbr]+"_H_signal"]
# #Load good and bad jets
# good_jets2, bad_jets2 = get_jets(bg_files, bg_jets, sig_files, sig_jets)
# #Choose which PD to load
# nbr = 15 #
# bg_files = [b_h5+pds[nbr]+'_C_background.h5',b_h5+pds[nbr]+'_D_background.h5', b_h5+pds[nbr]+'_E_background.h5',
# b_h5+pds[nbr]+'_F_background.h5', b_h5+pds[nbr]+'_G_background.h5', b_h5+pds[nbr]+'_H_background.h5']
# bg_jets = [pds[nbr]+"_C_background", pds[nbr]+"_D_background", pds[nbr]+"_E_background",
# pds[nbr]+"_F_background", pds[nbr]+"_G_background", pds[nbr]+"_H_background"]
# sig_files = [b_h5+pds[nbr]+'_C_signal.h5',b_h5+pds[nbr]+'_D_signal.h5', b_h5+pds[nbr]+'_E_signal.h5',
# b_h5+pds[nbr]+'_F_signal.h5', b_h5+pds[nbr]+'_G_signal.h5', b_h5+pds[nbr]+'_H_signal.h5']
# sig_jets = [pds[nbr]+"_C_signal", pds[nbr]+"_D_signal", pds[nbr]+"_E_signal",
# pds[nbr]+"_F_signal", pds[nbr]+"_G_signal", pds[nbr]+"_H_signal"]
# #Load good and bad jets
# good_jets3, bad_jets3 = get_jets(bg_files, bg_jets, sig_files, sig_jets)
# #Choose which PD to load
# nbr = 14
# bg_files = [b_h5+pds[nbr]+'_C_background.h5',b_h5+pds[nbr]+'_D_background.h5', b_h5+pds[nbr]+'_E_background.h5',
# b_h5+pds[nbr]+'_F_background.h5', b_h5+pds[nbr]+'_G_background.h5', b_h5+pds[nbr]+'_H_background.h5']
# bg_jets = [pds[nbr]+"_C_background", pds[nbr]+"_D_background", pds[nbr]+"_E_background",
# pds[nbr]+"_F_background", pds[nbr]+"_G_background", pds[nbr]+"_H_background"]
# sig_files = [b_h5+pds[nbr]+'_C_signal.h5',b_h5+pds[nbr]+'_D_signal.h5', b_h5+pds[nbr]+'_E_signal.h5',
# b_h5+pds[nbr]+'_F_signal.h5', b_h5+pds[nbr]+'_G_signal.h5', b_h5+pds[nbr]+'_H_signal.h5']
# sig_jets = [pds[nbr]+"_C_signal", pds[nbr]+"_D_signal", pds[nbr]+"_E_signal",
# pds[nbr]+"_F_signal", pds[nbr]+"_G_signal", pds[nbr]+"_H_signal"]
# #Load good and bad jets
# good_jets4, bad_jets4 = get_jets(bg_files, bg_jets, sig_files, sig_jets)
# Wrap the arrays in DataFrames and label them:
# class 0 = good (signal) lumisections, class 1 = bad (background).
# 'class' is a Python keyword, hence the **{...} spelling of DataFrame.assign.
df1 = pd.DataFrame(good_jets).assign(**{'class': 0})
df2 = pd.DataFrame(bad_jets).assign(**{'class': 1})
# #Assign good jets class label 0
# df3 = pd.DataFrame(good_jets2)
# df3['class'] = 0
# #Assign bad_jets class label 1
# df4 = pd.DataFrame(bad_jets2)
# df4['class'] = 1
# #Assign good jets class label 0
# df5 = pd.DataFrame(good_jets3)
# df5['class'] = 0
# #Assign bad_jets class label 1
# df6 = pd.DataFrame(bad_jets3)
# df6['class'] = 1
# df7 = pd.DataFrame(good_jets4)
# df7['class'] = 0
# df8 = pd.DataFrame(bad_jets4)
# df8['class'] = 1
# del(good_jets)
# del(bad_jets)
# Stack good and bad lumisections into one DataFrame; the 'class' column
# carries the per-row label (0 = good, 1 = bad).
frames = [df1,df2]
#frames = [df1,df2,df3,df4,df5,df6] #Use something like this if you want to load multiple PDs
# frames = [df1,df2,df3,df4,df5,df6,df7,df8]
data = pd.concat(frames)
del(frames)
# del(df1)
# del(df2)
data.drop(2812, axis=1, inplace=True) #Drop per_pd flags
# NOTE(review): the positional column offsets used below (2807/2808/2812) are
# "+7" relative to the older 2802-column file layout -- confirm they still
# match if b_h5 is pointed at a different file set.
data = data.sort_values([2807,2808], ascending=[True,True]) #Sort by runID and then by lumiID
data = data.reset_index(drop=True) #Reset index to 0..N-1 after the sort
# data = data.reindex(index=range(0,len(data)))
# Optional random shuffle (disabled; the chronological order is kept):
# data = shuffle(data)
# data = data.reset_index(drop=True)
# Optionally save labels and drop them so training cannot cheat:
# labels = data['class'].astype(int)
# del data['class']
# +
#Relabelling incorrect "Fede json" with updated one by current choice
def json_checker(json_file, orig_runid, orig_lumid):
    """Return 0 if (run, lumi) is certified good by the JSON, 1 otherwise.

    :param json_file: certification dict mapping run-id strings to lists of
        inclusive [first_lumi, last_lumi] ranges (golden-JSON format).
    :param orig_runid: run id of the lumisection being checked
    :param orig_lumid: lumisection id within that run
    :return: 0 for a good lumi, 1 for a bad one (values chosen for
        compatibility with anomaly_detection.ipynb's MSE-autoencoder code).
    """
    for run, lumi_ranges in json_file.items():
        if int(run) != orig_runid:
            continue
        for lumi_range in lumi_ranges:
            # O(1) bounds check; the original materialized and scanned
            # range(first, last + 1) for every certified range.
            if lumi_range[0] <= orig_lumid <= lumi_range[1]:
                return 0  # good lumisection
        # Run listed, but this lumi is outside every certified range.
        return 1
    # Run absent from the JSON entirely: treat as bad. (The original returned
    # a sentinel 5 here, which the downstream 0/1 label comparisons such as
    # `qlabel == 0` / `y_test == 1` silently never matched.)
    return 1
#Contains golden json
json_file_path = '/afs/cern.ch/user/f/fsiroky/public/Cert_271036-284044_13TeV_PromptReco_Collisions16_JSON.txt'
def add_flags_from_json(output_json, data):
    """Append a 'preco_json' column with golden-JSON good/bad flags to `data`.

    :param output_json: path to the certification JSON file to use
    :param data: DataFrame with runID in positional column 2807 and lumiID in
        2808; mutated in place and also returned.
    :return: the same DataFrame with 'preco_json' set to 0 (good) / 1 (bad).
    """
    # Bug fix: the original ignored this parameter and re-read the global
    # json_file_path; honor the path the caller passed in, and close the file
    # deterministically (the original `open()` was never closed).
    with open(output_json) as json_fh:
        golden_json = json.load(json_fh)
    new_json_class = np.empty([data.shape[0], 1])
    for i in range(0, data.shape[0]):
        orig_runid = int(data[2807][i])
        orig_lumid = int(data[2808][i])
        new_json_class[i, 0] = int(json_checker(golden_json, orig_runid, orig_lumid))
    data['preco_json'] = new_json_class  # PromptReco GOLDEN json flags
    return data
# add_flags_from_json mutates `data` in place (adds the 'preco_json' column);
# the returned alias is redundant, so discard it immediately.
new_data = add_flags_from_json(json_file_path, data)
del(new_data)
# +
#TODO!
#Check how many good lumis and anomalous ones we have
# print("Laaalelaaa", data)
# anomalies = data[data['class'] == 1]
# normal = data[data['class'] == 0]
# print("Number of anomalies: ", anomalies.shape)
# del(anomalies)
# print("Number of normals: ", normal.shape)
# del(normal)
# +
# Keep runID (col 2807), lumiID (col 2808) and instantaneous luminosity
# (col 2809) aside for later evaluation, since they are dropped before training.
runIDs = data[2807].astype(int)
lumiIDs = data[2808].astype(int)
lumisections = data[2809].astype(float)  # "lumisections" actually holds inst. luminosities -- rename candidate
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/datarunIDs.npy', runIDs)
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/datalumiIDs.npy', lumiIDs)
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/lumisections.npy', lumisections)
print("Save of RunIDs and LumiIDs done")
# print(data)
data.drop(2800+7, axis=1, inplace=True) #drop RunID (2807) before normalizing and training
data.drop(2801+7, axis=1, inplace=True) #drop LumiID (2808) before normalizing and training
print("RunID and LumiID dropped")
# print(data)
# +
#ORIGINAL ONE
# Standardize the features (zero mean, unit variance) to make training easier.
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split below, which is mild information leakage -- confirm this is acceptable.
cutted_data = data.iloc[:, 0:2810]  # first 2810 positional columns = physics features
#classes = data.iloc[:, 2805:2806]
classes = data.iloc[:,-1] #Take PromptReco (golden-JSON) label from the last column
# print(classes.shape)
np_scaled = StandardScaler().fit_transform(cutted_data.values)
# Alternative scalings tried during development:
# np_scaled = MaxAbsScaler().fit_transform(np_scaled)
# print("1111",np_scaled)
# np_scaled = scale(cutted_data, axis = 1, with_mean=True, with_std=True, copy=True)
datas = pd.DataFrame(np_scaled)
# datas = pd.DataFrame(np_scaled, index=cutted_data.index, columns=cutted_data.columns)
# print("2222",datas)
# del(np_scaled)
del(cutted_data)
# print("Datas first: ", datas)
datas[2810] = runIDs #Append runID back (unscaled) after scaling
datas[2811] = lumiIDs #Append lumiID back (unscaled) after scaling
datas['qlabel'] = classes #qlabel is the goldenJSON flag (0 good / 1 bad)
# print("After scale", datas)
# +
# #Normalize the data to make training better
# cutted_data = data.iloc[:, 0:2803+7]
# #classes = data.iloc[:, 2805:2806]
# classes = data.iloc[:,-1] #Take PromptReco json
# # print(classes.shape)
# np_scaled = StandardScaler().fit_transform(cutted_data.values)
# # np_scaled = MaxAbsScaler().fit_transform(np_scaled)
# # print("1111",np_scaled)
# # np_scaled = scale(cutted_data, axis = 1, with_mean=True, with_std=True, copy=True)
# datas = pd.DataFrame(np_scaled)
# # datas = pd.DataFrame(np_scaled, index=cutted_data.index, columns=cutted_data.columns)
# # print("2222",datas)
# # del(np_scaled)
# del(cutted_data)
# # print("Datas first: ", datas)
# datas[2803+7] = runIDs #Append runID back after scaling
# datas[2804+7] = lumiIDs #Append lumiID back after scaling
# datas['qlabel'] = classes #qlabel is goldenJSON now
# +
#ORIGINAL ONE
# Chronological TEST/TRAIN split: the last 20% of the (run,lumi)-sorted rows
# form the test set. A random split is kept commented out for reference.
# X_train, X_test = train_test_split(datas, test_size=0.15, random_state=RANDOM_SEED) # This works when we split rndmly
split_nbr = round(datas.shape[0]*0.20)  # fraction sent to the test set (0.20 = 20%)
print(datas.shape)
X_train = datas.iloc[0:(datas.shape[0] - split_nbr) ,:]
X_test = datas.iloc[(datas.shape[0] - split_nbr): (datas.shape[0]) ,:]
last_train_idx = X_train.shape[0]  # first positional index of the test rows; reused below
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/last_train_idx.npy', last_train_idx)
# print(X_train.shape)
# print(X_test.shape)
del(datas)
# Train the autoencoder on certified-good lumisections only.
X_train = X_train[X_train['qlabel']== 0]
# print(X_train)
X_train = X_train.drop(['qlabel'], axis=1)
# NOTE(review): after scaling, positional column 2807 is a scaled feature, not
# the runID (run/lumi were re-appended as 2810/2811) -- confirm this is intended.
ae_lumis = X_train[2807].astype(float)
# print("ae lumis", ae_lumis, "ae_lumis shape", ae_lumis.shape)
# print("XTEEEEST before PerPD json beginn")
# print(X_test)
# +
# #TEST/TRAIN SPLIT
# datas = data.iloc[:, 0:2803+7]
# classes = data.iloc[:,-1] #Take PromptReco json
# data = datas
# # print(classes)
# data[2803+7] = runIDs #Append runID back after scaling
# data[2804+7] = lumiIDs #Append lumiID back after scaling
# data['qlabel'] = classes #qlabel is goldenJSON now
# # X_train, X_test = train_test_split(datas, test_size=0.15, random_state=RANDOM_SEED) # This works when we split rndmly
# split_nbr = round(data.shape[0]*0.2) #0.10 means 10% to the test set
# # print(datas.shape)
# X_train = data.iloc[0:(data.shape[0] - split_nbr) ,:]
# X_test = data.iloc[(data.shape[0] - split_nbr): (data.shape[0]) ,:]
# last_train_idx = X_train.shape[0]
# np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/last_train_idx.npy', last_train_idx)
# # print(X_train.shape)
# # print(X_test.shape)
# # del(datas)
# X_train = X_train[X_train['qlabel']== 0]
# # print(X_train)
# X_train = X_train.drop(['qlabel'], axis=1)
# # ae_lumis = X_train[2800+7].astype(float)
# # print("ae lumis", ae_lumis, "ae_lumis shape", ae_lumis.shape)
# # print("XTEEEEST before PerPD json beginn")
# # print(X_test)
# +
# #Normalize the data to make training better
# # cutted_data = data.iloc[:, 0:2803+7]
# # #classes = data.iloc[:, 2805:2806]
# # classes = data.iloc[:,-1] #Take PromptReco json
# # print(classes.shape)
# # X_train = X_train.iloc[:, 0:2803+7]
# X_train = StandardScaler().fit_transform(X_train)
# # np_scaled = MaxAbsScaler().fit_transform(np_scaled)
# # print("1111",np_scaled)
# # np_scaled = scale(cutted_data, axis = 1, with_mean=True, with_std=True, copy=True)
# X_train = pd.DataFrame(X_train)
# classes_X_test = X_test.iloc[:,-1] #Take PromptReco json
# # print(classes.shape)
# X_test = StandardScaler().fit_transform(X_test)
# # np_scaled = MaxAbsScaler().fit_transform(np_scaled)
# # print("1111",np_scaled)
# # np_scaled = scale(cutted_data, axis = 1, with_mean=True, with_std=True, copy=True)
# X_test = pd.DataFrame(X_test)
# X_test['qlabel'] = classes_X_test #qlabel is goldenJSON now
# # datas = pd.DataFrame(np_scaled, index=cutted_data.index, columns=cutted_data.columns)
# # print("2222",datas)
# # del(np_scaled)
# # del(cutted_data)
# # print("Datas first: ", datas)
# # datas['qlabel'] = classes #qlabel is goldenJSON now
# +
json_file_path_PD = '/afs/cern.ch/user/f/fsiroky/Documents/gen_config/jsons/JetHT.json' #Specify what per PD json you want to use for test set
def add_flags_from_json_PD(output_json, X_test):
    """Append a 'PD_json' column with per-PD good/bad flags to the test set.

    :param output_json: path to the per-Primary-Dataset certification JSON
        (e.g. json_file_path_PD defined above)
    :param X_test: test-set DataFrame with runID in positional column 2810 and
        lumiID in 2811; rows keep their original index, offset by the global
        last_train_idx. Mutated in place and also returned.
    :return: X_test with 'PD_json' set to 0 (good) / 1 (bad).
    """
    # Bug fix: the original loaded the GOLDEN json (global json_file_path)
    # instead of the per-PD json passed by the caller, so 'PD_json' silently
    # duplicated the golden flags. Also close the file deterministically.
    with open(output_json) as json_fh:
        pd_json = json.load(json_fh)
    new_json_class = np.empty([X_test.shape[0], 1])
    for i in range(0, X_test.shape[0]):
        # X_test keeps the original positional labels, shifted by the train size.
        orig_runid = X_test[2810][i + last_train_idx]
        orig_lumid = X_test[2811][i + last_train_idx]
        new_json_class[i, 0] = int(json_checker(pd_json, orig_runid, orig_lumid))
    X_test['PD_json'] = new_json_class
    return X_test
# add_flags_from_json_PD mutates X_test in place (adds 'PD_json'); the
# returned alias is redundant, so discard it.
new_data = add_flags_from_json_PD(json_file_path_PD, X_test)
del(new_data)
# print("Now new X_test label")
# print(X_test)
# y_test = X_test['qlabel']
y_test = X_test['PD_json']  # evaluate against the per-PD labels, not the golden ones
# +
# Drop the label columns before training (so the model cannot cheat) and save
# the test set for later evaluation.
print("Number of good lumis in X_test: ", len(X_test[y_test==0]))
print("Number of bad lumis in X_test: ", len(X_test[y_test==1]))
X_test.drop(['qlabel'], axis=1, inplace=True)
X_test.drop(['PD_json'], axis=1, inplace=True)
X_train.drop(2810, axis=1, inplace=True) #drop RunID before training
X_train.drop(2811, axis=1, inplace=True) #drop LumiID before training
X_test.drop(2810, axis=1, inplace=True) #drop RunID before training
X_test.drop(2811, axis=1, inplace=True) #drop LumiID before training
# print("X_test before saving: ", X_test)
luminosity_vals = lumisections.iloc[:int(last_train_idx)].values  # inst. lumis of the training rows
X_train = X_train.values  # keras expects plain numpy arrays
X_test = X_test.values
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/X_testfor3pds_model.npy', X_test)
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/data_eval/y_testfor3pds_model.npy', y_test)
# +
# #TRAINING
# from keras.layers import concatenate
# from keras.utils.generic_utils import get_custom_objects
# # def custom_activation(x):
# # return ((((x**2+1)**(.5) - 1) / 2 ) + x)
# # get_custom_objects().update({'custom_activation': custom_activation})
# input_dim = X_train.shape[1]
# encoding_dim = 1000
# input_layer = Input(shape=(input_dim, ))
# # prellll = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
# # prellll = LeakyReLU(alpha=0.3)
# # encoder = Dense(2600, #activation="custom_activation",
# # # kernel_regularizer=regularizers.l2(0.005),
# # activity_regularizer=regularizers.l1(10e-5)
# # )(input_layer)
# # encoder = prellll(encoder)
# # encoder = prellll(encoder)
# # luminosity_neuron = Input(shape=(1,))
# # luminosity_neuron_dense = Dense(1,)(luminosity_neuron)
# # prellll = LeakyReLU(alpha=0.3)
# # encoded = Dense(2200, #activation="relu",
# # # kernel_regularizer=regularizers.l2(0.005),
# # # activity_regularizer=regularizers.l1(10e-5)
# # )(encoder)
# # encoded = prellll(encoded)
# # encoded = Dense(2600, activation='relu')(encoder)
# # x = concatenate([encoded, luminosity_neuron_dense])
# # prellll = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
# prellll = LeakyReLU(alpha=0.3)
# encoded = Dense(encoding_dim, #activation="relu",
# kernel_regularizer=regularizers.l2(10e-5),
# # activity_regularizer=regularizers.l1(10e-5)
# )(input_layer)
# encoded = prellll(encoded)
# # luminosity_neuron = Input(shape=(1,), name='l_neu')
# # decoded = Dense(2600, activation='relu')(encoded)
# # x = concatenate([decoded, luminosity_neuron])
# # prellll = LeakyReLU(alpha=0.3)
# # decoded = Dense(2200, # activation='relu',
# # # activity_regularizer=regularizers.l1(10e-5)
# # )(encoded)
# # decoded = prellll(decoded)
# # prellll = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
# # prellll = LeakyReLU(alpha=0.3)
# # decoded = Dense(2600, # activation='relu',
# # # activity_regularizer=regularizers.l1(10e-5)
# # )(encoded)
# # decoded = prellll(decoded)
# # encoder = Dense(int(encoding_dim / 1.2), activation="relu")(encoder)
# # encoder = Dense(int(encoding_dim / 1.5), activation="relu")(encoder)
# # decoder = Dense(2000, activation='relu')(encoded)
# # prellll = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
# prellll = LeakyReLU(alpha=0.3)
# decoder = Dense(input_dim)(encoded)
# decoder = prellll(decoder)
# # decoder = Dense(input_dim)(encoded)
# autoencoder = Model(inputs=input_layer, outputs=decoder)
# +
# def get_model(input_dim, encoding_dim, activation, activation2, regularizer):
# input_layer = Input(shape=(input_dim, ), name="Input")
# encoded = Dense(encoding_dim, kernel_regularizer=regularizer, name="First_Hidden")(input_layer)
# encoded = activation(encoded)
# decoder = Dense(input_dim, name="Output")(encoded)
# decoder = activation2(decoder)
# return Model(inputs=input_layer, outputs=decoder)
# +
# def get_model_foo(input_dim, encoding_dim, activation, activation2, reg_val):
# models = []
# for x in [None, regularizers.l2(reg_val), regularizers.l1(reg_val)]:
# models.append(get_model(X_train.shape[1], encoding_dim, activation, activation2, x))
# return models
# +
# #TRAINING #THIS IS USED FOR HYPERPARAMETER SEARCH. ASK ADRIAN.
# input_dim = X_train.shape[1]
# # MODEL A
# activation = LeakyReLU(alpha=0.3, name="First_Activation")
# activation2 = LeakyReLU(alpha=0.3, name="Second_Activation")
# autoencoderA = get_model_foo(X_train.shape[1], 100, activation, activation2, 10e-5)
# # MODEL B
# activation = PReLU(alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, name="First_Activation")
# activation2 = PReLU(alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, name="Second_Activation")
# autoencoderB = get_model_foo(X_train.shape[1], 1000, activation, activation2, 10e-5)
# # MODEL C
# activation = LeakyReLU(alpha=0.1, name="First_Activation")
# activation2 = LeakyReLU(alpha=0.1, name="Second_Activation")
# autoencoderC = get_model_foo(X_train.shape[1], 1000, activation, activation2, 10e-5)
# # MODEL D
# activation = LeakyReLU(alpha=0.6, name="First_Activation")
# activation2 = LeakyReLU(alpha=0.6, name="Second_Activation")
# autoencoderD = get_model_foo(X_train.shape[1], 1000, activation, activation2, 10e-5)
# # MODEL E
# from keras.layers import Activation
# activation = Activation("linear", name="First_Activation")
# activation2 = Activation("linear", name="Second_Activation")
# autoencoderE = get_model_foo(X_train.shape[1], 1000, activation, activation2, 10e-5)
# # MODEL F
# activation = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, name="First_Activation")
# activation2 = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, name="Second_Activation")
# autoencoderF = get_model_foo(X_train.shape[1], 1000, activation, activation2, 10e-5)
# +
# for x in autoencoderA:
# x.summary()
# -
from keras.callbacks import EarlyStopping
from keras.callbacks import Callback
class AdditionalValidationSets(Callback):
    """Keras callback that evaluates extra validation sets at the end of every
    epoch and records their losses in ``self.history`` under
    ``'<set_name>_loss'`` (alongside the regular epoch logs)."""

    def __init__(self, validation_sets, verbose=0, batch_size=256):
        """
        :param validation_sets:
        a list of 3-tuples (validation_data, validation_targets, validation_set_name)
        or 4-tuples (validation_data, validation_targets, sample_weights, validation_set_name)
        :param verbose:
        verbosity mode, 1 or 0
        :param batch_size:
        batch size to be used when evaluating on the additional datasets
        """
        super(AdditionalValidationSets, self).__init__()
        self.validation_sets = validation_sets
        for validation_set in self.validation_sets:
            # Bug fix: accept exactly the documented 3- and 4-tuples. The
            # original checked `len(...) not in [2, 3]`, rejecting valid
            # 4-tuples here while accepted 2-tuples crashed in on_epoch_end.
            if len(validation_set) not in [3, 4]:
                raise ValueError()
        self.epoch = []
        self.history = {}
        self.verbose = verbose
        self.batch_size = batch_size

    def on_train_begin(self, logs=None):
        # Reset bookkeeping so the callback can be reused across fit() calls.
        self.epoch = []
        self.history = {}

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epoch.append(epoch)

        # Record the same values as the built-in History callback.
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        # Evaluate on each additional validation set.
        for validation_set in self.validation_sets:
            if len(validation_set) == 3:
                validation_data, validation_targets, validation_set_name = validation_set
                sample_weights = None
            elif len(validation_set) == 4:
                validation_data, validation_targets, sample_weights, validation_set_name = validation_set
            else:
                raise ValueError()

            results = self.model.evaluate(x=validation_data,
                                          y=validation_targets,
                                          verbose=self.verbose,
                                          sample_weight=sample_weights,
                                          batch_size=self.batch_size)

            valuename = validation_set_name + '_loss'
            print("test_loss: ", results)
            self.history.setdefault(valuename, []).append(results)
# +
# nb_epoch = 8192
# batch_size = 256
# from keras.optimizers import Adam, Nadam
# # adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
# early_stopper = EarlyStopping(monitor="val_loss",
# patience=32,
# verbose=True,
# mode="auto")
# for indx1, group in enumerate ([autoencoderA,autoencoderB,autoencoderC,autoencoderD,autoencoderE,autoencoderF]):
# for indx2, autoencoder in enumerate (group):
# name = ("group%s_autoencoder%s" % (indx1, indx2))
# autoencoder.compile(optimizer='Adam',
# loss='mean_squared_error'
# # metrics=['accuracy']
# )
# checkpoint_callback = ModelCheckpoint(("/afs/cern.ch/user/f/fsiroky/models_ae/%s.h5" % name),
# monitor="val_loss",
# verbose=False,
# save_best_only=True,
# mode="min")
# testerror = AdditionalValidationSets([(X_test, X_test, 'test')])
# history = autoencoder.fit(X_train, X_train,
# epochs=nb_epoch,
# batch_size=batch_size,
# shuffle=True,
# validation_split=0.2,
# verbose=2,
# callbacks=[testerror, early_stopper, checkpoint_callback]).history
# #np.save('/eos/cms/store/user/fsiroky/ae_models/%s.npy' % name, history)
# np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_loss.npy' % name , history['loss'])
# np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_valloss.npy' % name, history['val_loss'])
# np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_testloss.npy' % name , testerror.history['test_loss'])
# -
# +
#SINGLE TRAINING
# +
nb_epoch = 8192
batch_size = 256
from keras import optimizers
# Adam-style optimizers in this Keras version do not accept epsilon=0; check
# the documentation matching the installed Keras before changing these.
adamm = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
sgd = optimizers.SGD(lr=0.1, clipnorm=1.)
ada = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
rmsprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

input_dim = X_train.shape[1]
encoding_dim = 500

# Symmetric autoencoder 2000 -> 1000 -> 500 -> 1000 -> 2000 -> input_dim.
# Every Dense layer carries an L1 kernel penalty and its own PReLU activation.
input_layer = Input(shape=(input_dim, ))
hidden = input_layer
for width in (2000, 1000, encoding_dim, 1000, 2000):
    hidden = Dense(width, kernel_regularizer=regularizers.l1(10e-5))(hidden)
    hidden = PReLU(alpha_initializer='zeros', alpha_regularizer=None,
                   alpha_constraint=None, shared_axes=None)(hidden)
decoder = Dense(input_dim)(hidden)
decoder = PReLU(alpha_initializer='zeros', alpha_regularizer=None,
                alpha_constraint=None, shared_axes=None)(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)

name = "Onlykernell1"
early_stopper = EarlyStopping(monitor="val_loss",
                              patience=50,
                              verbose=True,
                              mode="auto")
autoencoder.compile(optimizer=adamm,
                    loss='mean_squared_error')
checkpoint_callback = ModelCheckpoint(("/afs/cern.ch/user/f/fsiroky/models_ae/%s.h5" % name),
                                      monitor="val_loss",
                                      verbose=False,
                                      save_best_only=True,
                                      mode="min")
# Track the (mixed good+bad) test-set loss after every epoch as well.
testerror = AdditionalValidationSets([(X_test, X_test, 'test')])
history = autoencoder.fit(X_train, X_train,
                          epochs=nb_epoch,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_split=0.25,
                          verbose=2,
                          callbacks=[testerror, early_stopper, checkpoint_callback]).history
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_loss.npy' % name , history['loss'])
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_valloss.npy' % name, history['val_loss'])
np.save('/afs/cern.ch/user/f/fsiroky/models_ae/%s_testloss.npy' % name , testerror.history['test_loss'])
# test_loss alone is not conclusive: the test set still contains anomalous lumisections.
# -
autoencoder.summary()
| Autoencoder_prepro_train.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// To be able to achieve maximum height we need to be able to fall into the area vertically, thus we can disregard completely the component x. Just assume it's in a value that will reach 0 between the area.
//
// My target y position was in the negatives. We know that the same speed we will reach upwards will be the speed with which we will reach 0. For example, with speed 3 (speed, new position): (3,3), (2,5), (1,6), (0,6), (-1,5), (-2,3), (-3,0), (-4,-4)....
//
// The higher the initial velocity the higher the height too, thus we need to select the initial velocity so that initial+1 (in negative) falls at the lower end of the target.
//
// So if the target is -100, -50 we need an initial velocity of 99 in the y department. One less than the absolute number of the minimum y target number.
//
// Calculating maximum height is nothing more than the sum of 1+2+...+N-1+N, which is (N+1)*N/2.
// + dotnet_interactive={"language": "csharp"}
// Lowest y of the target area (entered by hand from the puzzle input).
var y = -136;
// On the way back down the probe passes y=0 with speed -(v0+1), so the
// largest usable launch speed makes that next step land exactly on the
// bottom row: v0 = |y_min| - 1.
var initialvelocity = -y - 1;
// The peak is the triangular number 1 + 2 + ... + v0 = v0*(v0+1)/2.
var maxheight = initialvelocity * (initialvelocity + 1) / 2;
Console.WriteLine(maxheight);
// -
// target area: x=150..193, y=-136..-86
//
// https://www.wolframalpha.com/input/?i=x%5E2%2Bx%3D2y+solve+for+x
//
// x=1/2([-+]sqr(8y+1)-1)
//
// We get 16.8 from 150, so 16 is lower, 17 is sligthly higher
//
// x 17 = min velocity to reach area
// x 193 = max velocity to reach area
// y -136 = min velocity to reach area
// y 135 = max velocity to reach area
// + dotnet_interactive={"language": "csharp"}
// Target area bounds: x=150..193, y=-136..-86.
var xmin = 150;
var xmax = 193;
var ymin = -136;
var ymax = -86;

// Simulates a launch with initial velocity (vx, vy) and reports whether the
// probe is ever inside the rectangular target area after some step.
bool InArea(int vx, int vy)
{
    var x = 0;
    var y = 0;
    // Keep stepping until the probe has overshot the target: strictly past
    // its right edge or below its bottom row. The previous condition
    // (x < xmax && y > ymin) exited one step too early when the probe sat
    // exactly on a boundary with vx already drained to 0, which could miss
    // later hits (latent off-by-one; the count for this input is unchanged
    // because no launch parks exactly on x == 193).
    while (x <= xmax && y >= ymin)
    {
        x += vx;
        vx -= (vx > 0 ? 1 : 0); // drag: horizontal speed decays toward 0
        y += vy;
        vy--;                   // gravity
        if (x >= xmin && x <= xmax &&
            y >= ymin && y <= ymax)
            return true;
    }
    return false;
}

// Count every initial velocity (see the markdown above for the bounds)
// that eventually lands in the target area.
var count = 0L;
for (var i = 17; i <= 193; i++)
    for (var j = -136; j <= 135; j++)
        if (InArea(i, j)) count++;
Console.WriteLine(count);
// + dotnet_interactive={"language": "csharp"}
| 2021/day17/day17.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from src.utils import *
# Load day 13's raw input lines via the shared project helper.
puzzle_input = parse_puzzle_input(13)
# Peek at the first few raw lines (notebook cell output).
puzzle_input[:3]
# Worked example from the puzzle statement: 18 dot coordinates, a blank
# separator line, then two fold instructions.
sample_input = [
    '6,10',
    '0,14',
    '9,10',
    '0,3',
    '10,4',
    '4,11',
    '6,0',
    '6,12',
    '4,1',
    '0,13',
    '10,12',
    '3,4',
    '3,0',
    '8,4',
    '1,10',
    '2,14',
    '8,10',
    '9,0',
    '',
    'fold along y=7',
    'fold along x=5'
]
def parse_instructions(puzzle_input):
    """Parse dot coordinates and fold instructions from the raw input lines.

    Returns (origami_dict, fold_list): origami_dict maps every (x, y) cell
    of the paper grid to 1 (dot) or 0 (blank); fold_list holds
    ('x'|'y', value) tuples in the order given.
    """
    dots = []
    folds = []
    for raw in puzzle_input:
        if ',' in raw:
            left, right = raw.split(',')
            dots.append((int(left), int(right)))
        elif 'fold' in raw:
            head, tail = raw.split('=')
            # The axis letter is the last character before the '='.
            folds.append((head[-1], int(tail)))
    # The grid is sized by the furthest dot in each direction.
    width = max(x for x, _ in dots) + 1
    height = max(y for _, y in dots) + 1
    grid = {(x, y): 0 for x in range(width) for y in range(height)}
    for dot in dots:
        grid[dot] = 1
    return grid, folds
def find_origami_dict_dimensions(origami_dict):
    """Return (min_x, max_x_exclusive, min_y, max_y_exclusive) of the grid keys."""
    xs, ys = zip(*origami_dict.keys())
    return min(xs), max(xs) + 1, min(ys), max(ys) + 1
def visualise_origami_dict(origami_dict):
    """Print the grid, drawing '#' for dots and '.' for blanks."""
    x_lo, x_hi, y_lo, y_hi = find_origami_dict_dimensions(origami_dict)
    rows = (
        "".join('#' if origami_dict[(col, row)] else '.'
                for col in range(x_lo, x_hi))
        for row in range(y_lo, y_hi)
    )
    for row_text in rows:
        print(row_text)
def axis_to_index(axis):
    """Map a fold axis name ('x' or 'y') to its coordinate-tuple index."""
    return 0 if axis == 'x' else 1
def split_origami_dict_by_fold(origami_dict, fold):
    """Split the grid into the kept part (before the fold line) and the
    part that gets folded over (after the line).

    Cells lying exactly on the fold line are dropped, matching the puzzle
    rules (the fold line itself never holds dots).
    """
    axis, fold_value = fold
    axis_index = axis_to_index(axis)
    # The original made a defensive copy and pre-initialised two empty
    # dicts; both were dead code — the comprehensions below only read
    # origami_dict and build brand-new dicts.
    dict_1 = {k: v for k, v in origami_dict.items() if k[axis_index] < fold_value}
    dict_2 = {k: v for k, v in origami_dict.items() if k[axis_index] > fold_value}
    return dict_1, dict_2
def reflect_origami_split(split_origami_dict, fold):
    """Mirror the coordinates of a split across the fold line."""
    axis, line = fold
    if axis == 'x':
        mirrored = {(2 * line - x, y): v
                    for (x, y), v in split_origami_dict.items()}
    else:
        mirrored = {(x, 2 * line - y): v
                    for (x, y), v in split_origami_dict.items()}
    return mirrored
# +
def flatten_origami_dicts(split_1, reflected_split_2):
    """Overlay the reflected half onto the kept half.

    Only coordinates present in split_1 survive; a cell shows a dot when
    either layer had one there.
    """
    return {
        coord: max(value, reflected_split_2.get(coord, value))
        for coord, value in split_1.items()
    }
# -
def fold_origami_dict(origami_dict, fold):
    """Perform a single fold and return the resulting (smaller) grid."""
    kept, folded_away = split_origami_dict_by_fold(origami_dict, fold)
    mirrored = reflect_origami_split(folded_away, fold)
    return flatten_origami_dicts(kept, mirrored)
def count_dots(origami_dict):
    """Return how many cells currently hold a dot."""
    # sum() consumes the values view directly; the previous list() wrapper
    # only built a throwaway copy.
    return sum(origami_dict.values())
def part_1_answer(puzzle_input):
    """Number of dots visible after applying only the first fold."""
    grid, folds = parse_instructions(puzzle_input)
    return count_dots(fold_origami_dict(grid, folds[0]))
# Sanity-check against the worked example from the puzzle statement.
part_1_answer(sample_input)
# Part 1 answer for the real input.
part_1_answer(puzzle_input)
# ## Part 2
def part_2_answer(puzzle_input):
    """Apply every fold in order, then print the resulting pattern."""
    grid, folds = parse_instructions(puzzle_input)
    for fold in folds:
        grid = fold_origami_dict(grid, fold)
    visualise_origami_dict(grid)
# Render the fully folded sample sheet.
part_2_answer(sample_input)
# Render the real sheet; the printed letters are the puzzle answer.
part_2_answer(puzzle_input)
| notebooks/day_13.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MindSpore-1.1.1
# language: python
# name: mindspore-1.1.1
# ---
# # 单节点数据缓存
#
# [](https://gitee.com/mindspore/docs/blob/master/docs/programming_guide/source_zh_cn/cache.ipynb) [](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/mindspore_cache.ipynb) [](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9taW5kc3BvcmUtd2Vic2l0ZS5vYnMuY24tbm9ydGgtNC5teWh1YXdlaWNsb3VkLmNvbS9ub3RlYm9vay9tb2RlbGFydHMvcHJvZ3JhbW1pbmdfZ3VpZGUvbWluZHNwb3JlX2NhY2hlLmlweW5i&imagename=MindSpore1.1.1)
# ## 概述
#
# 对于需要重复访问远程的数据集或需要重复从磁盘中读取数据集的情况,可以使用单节点缓存算子将数据集缓存于本地内存中,以加速数据集的读取。
#
# 缓存算子依赖于在当前节点启动的缓存服务器,缓存服务器作为守护进程独立于用户的训练脚本而存在,主要用于提供缓存数据的管理,支持包括存储、查找、读取以及发生缓存未命中时对于缓存数据的写入等操作。
#
# 若用户的内存空间不足以缓存所有数据集,则用户可以配置缓存算子使其将剩余数据缓存至磁盘。
#
# 目前,缓存服务只支持单节点缓存,即客户端和服务器均在同一台机器上。该服务支持以下两类使用场景:
#
# - 缓存加载好的原始数据集
#
# 用户可以在数据集加载算子中使用缓存。这将把加载完成的数据存到缓存服务器中,后续若需相同数据则可直接从中读取,避免从磁盘中重复加载。
#
# 
# - 缓存经过数据增强处理后的数据
#
# 用户也可在`map`算子中使用缓存。这将允许直接缓存数据增强(如图像裁剪、缩放等)处理后的数据,避免数据增强操作重复进行,减少了不必要的计算量。
#
# 
# ## 缓存基础使用
#
# 1.配置环境。
#
# 使用缓存服务前,需要安装MindSpore,并设置相关环境变量。以Conda环境为例,设置环境如下:
# ```bash
# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{path_to_conda}/envs/{your_env_name}/lib/python3.7/site-packages/mindspore:{path_to_conda}/envs/{your_env_name}/lib/python3.7/site-packages/mindspore/lib
# ```
#
# ```bash
# export PATH=$PATH:{path_to_conda}/envs/{your_env_name}/bin
# ```
# 也可以用以下python设置环境
# +
import os
import sys
import mindspore

# Directory that contains the current Python interpreter binary.
python_path = "/".join(sys.executable.split("/")[:-1])
# Installation directory of the mindspore package and its native libraries.
mindspore_path = "/".join(mindspore.__file__.split("/")[:-1])
mindspore_lib_path = os.path.join(mindspore_path, "lib")

# Put the interpreter's bin directory on PATH (idempotently) so the
# cache_admin / cache_server tools shipped with MindSpore can be invoked
# from the notebook's shell cells.
if 'PATH' not in os.environ:
    os.environ['PATH'] = python_path
elif python_path not in os.environ['PATH']:
    os.environ['PATH'] += ":" + python_path
print(os.environ['PATH'])

# NOTE(review): this overwrites any pre-existing LD_LIBRARY_PATH instead of
# appending, and split("python3.7") hard-codes the Python version — confirm
# both match the kernel in use.
os.environ['LD_LIBRARY_PATH'] = "{}:{}:{}".format(mindspore_path, mindspore_lib_path, mindspore_lib_path.split("python3.7")[0])
print(os.environ['LD_LIBRARY_PATH'])
# -
# > 由于使用缓存可能会造成服务器的内存紧张,因此建议用户在使用缓存前增大服务器的交换内存空间至100GB以上,Ubuntu、EulerOS以及CentOS均可参考[相关教程](https://help.ubuntu.com/community/SwapFaq#How_do_I_add_a_swap_file.3F)了解如何增大交换内存空间。
# 2.启动缓存服务器。
#
# 在使用单节点缓存服务之前,首先需要在命令行输入以下命令,启动缓存服务器:
# !cache_admin --start
# 若输出以上信息,则表示缓存服务器启动成功。
# `cache_admin`支持以下命令和参数:
#
# - `--start`:启动缓存服务器,支持通过以下参数进行配置:
# - `--workers`或`-w`:设置缓存服务器的工作线程数量,默认情况下工作线程数量为机器CPU个数的一半。该参数需要根据NUMA架构来设置,若设置值不是机器中NUMA结点数的整数倍,则缓存服务器会对其进行自动调整。
# - `--spilldir`或`-s`:设置若缓存数据的大小超过内存空间,则溢出至磁盘的数据文件路径,默认为空(表示不启用数据溢出功能)。
# - `--hostname`或`-h`:缓存服务器的ip地址,默认为127.0.0.1。
# - `--port`或`-p`:缓存服务器的端口号,默认为50052。
# - `--loglevel`或`-l`:设置日志等级,默认为1(WARNING级别)。若设置为0(INFO级别),会输出过多日志,导致性能劣化。
# - `--stop`:关闭缓存服务器。
# - `--generate_session`或`-g`:生成一个缓存会话。
# - `--destroy_session`或`-d`:删除一个缓存会话。
# - `--list_sessions`:查看当前缓存会话列表和详细信息。
# - `--server_info`:查看当前服务器配置参数及会话列表。
# - `--help`:查看帮助信息。
#
# 以上命令均可使用`-h`和`-p`参数来指定服务器,用户也可通过配置环境变量`MS_CACHE_HOST`和`MS_CACHE_PORT`来指定。若未指定则默认对ip为127.0.0.1且端口号为50052的服务器执行操作。
#
# 用户可通过`ps -ef|grep cache_server`命令来检查服务器是否已启动以及查询服务器参数。
#
# 用户也可通过`cache_admin --server_info`命令查看服务器的详细参数列表。
# !cache_admin --server_info
# 其中,Cache Server Configuration表格分别列出了当前服务器的IP地址、端口号、工作线程数、日志等级、溢出路径等详细配置信息。Active sessions模块展示了当前服务器中已启用的session ID列表。
#
# 缓存服务器日志文件的命名格式为 "cache_server.\<主机名\>.\<用户名\>.log.\<日志等级\>.\<日期-时间\>.\<进程号\>"。当`GLOG_v=0`时,可能会屏显有大量DEBUG日志。
#
# > - 若要启用数据溢出功能,则用户在启动缓存服务器时必须使用`-s`参数对溢出路径进行设置,否则该功能默认关闭。
# 3.创建缓存会话。
#
# 若缓存服务器中不存在缓存会话,则需要创建一个缓存会话,得到缓存会话id:
# !cache_admin -g
# 其中780643335为端口50052的服务器分配的缓存会话id,缓存会话id由服务器分配。
# 通过`cache_admin --list_sessions`命令可以查看当前服务器中现存的所有缓存会话信息。
# !cache_admin --list_sessions
# 输出参数说明:
#
# - `Session`: 缓存会话id。
# - `Cache Id`: 当前缓存会话中的cache实例id,`n/a`表示当前尚未创建缓存实例。
# - `Mem cached`: 缓存在内存中的数据量。
# - `Disk cached`: 缓存在磁盘中的数据量。
# - `Avg cache size`:当前缓存的每行数据的平均大小。
# - `Numa hit`:Numa命中数,该值越高将获得越好的时间性能。
# 4.创建缓存实例。
#
# 在Python训练脚本中使用`DatasetCache` API来定义一个名为`test_cache`的缓存实例,并把上一步中创建的缓存会话id传入`session_id`参数:
# +
import mindspore.dataset as ds

# One cache instance bound to the session created above; size=0 lets the
# cache server manage its own memory budget and spilling=False keeps the
# cache purely in RAM.
test_cache = ds.DatasetCache(session_id=780643335, size=0, spilling=False)
# -
# `DatasetCache`支持以下参数:
#
# - `session_id`:缓存会话的id,通过`cache_admin -g`命令来创建并获取。
# - `size`:缓存最大内存空间占用,该参数以MB为单位,例如512GB的缓存空间应设置`size=524288`,默认为0。
# - `spilling`:当内存空间超出所设置的最大内存空间占用时,是否允许将剩余的数据溢出至磁盘,默认为False。
# - `hostname`:连接至缓存服务器的ip地址,默认为127.0.0.1。
# - `port`:连接至缓存服务器的端口号,默认为50052。
# - `num_connections`:建立的TCP/IP连接数,默认为12。
# - `prefetch_size`:每次预取的数据行数,默认为20。
#
# > - 在实际使用中,通常应当首先使用`cache_admin -g`命令从缓存服务器处获得一个缓存会话id并作为`session_id`的参数,防止发生缓存会话不存在而报错的情况。
# > - 设置`size=0`代表不限制缓存所使用的内存空间,缓存服务器会根据系统的内存资源状况,自动控制缓存服务器的内存空间占用,使其不超过系统总内存的80%。
# > - 用户也可以根据机器本身的空闲内存大小,给`size`参数设置一个合理的取值。注意,当用户自主设置`size`参数时,要先确认系统可用内存和待加载数据集大小,若cache_server的内存空间占用或待加载数据集空间占耗超过系统可用内存时,有可能导致机器宕机/重启、cache_server自动关闭、训练流程执行失败等问题。
# > - 若设置`spilling=True`,则当内存空间不足时,多余数据将写入磁盘中。因此,用户需确保所设置的磁盘路径具有写入权限以及足够的磁盘空间,以存储溢出至磁盘的缓存数据。注意,若启动服务器时未指定溢出路径,则在调用API时设置`spilling=True`将会导致报错。
# > - 若设置`spilling=False`,则缓存服务器在耗尽所设置的内存空间后将不再写入新的数据。
# > - 当使用不支持随机访问的数据集(如`TFRecordDataset`)进行数据加载并启用缓存服务时,需要保证整个数据集均存放于本地。在该场景下,若本地内存空间不足以存放所有数据,则必须启用溢出,将数据溢出至磁盘。
# > - `num_connections`和`prefetch_size`为内部性能调优参数,一般情况下,用户无需设置这两个参数。
# 5.插入缓存实例。
#
# 当前缓存服务既支持对原始数据集的缓存,也可以用于缓存经过数据增强处理后的数据。下例分别展示了两种使用方式。
#
# 需要注意的是,两个例子均需要按照步骤4中的方法分别创建一个缓存实例,并在数据集加载或map算子中将所创建的`test_cache`作为`cache`参数分别传入。
#
# 下面两个样例中使用到CIFAR-10数据集。运行样例前,需参照[数据集加载](https://www.mindspore.cn/doc/programming_guide/zh-CN/master/dataset_loading.html#cifar-10-100)中的方法下载并存放CIFAR-10数据集。
# !wget -N https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/cifar-10-binary.tar.gz --no-check-certificate
# !mkdir -p datasets
# !tar -xzf cifar-10-binary.tar.gz -C datasets
# !mkdir -p datasets/cifar-10-batches-bin/train datasets/cifar-10-batches-bin/test
# !mv -f datasets/cifar-10-batches-bin/test_batch.bin datasets/cifar-10-batches-bin/test
# !mv -f datasets/cifar-10-batches-bin/data_batch*.bin datasets/cifar-10-batches-bin/batches.meta.txt datasets/cifar-10-batches-bin/train
# !tree ./datasets/cifar-10-batches-bin
# - 缓存原始数据集加载的数据。
# +
dataset_dir = "./datasets/cifar-10-batches-bin/train"

# apply cache to dataset: with cache=test_cache the loaded samples are
# written to the cache server on first read and served from it afterwards.
data = ds.Cifar10Dataset(dataset_dir=dataset_dir, num_samples=4, shuffle=False, num_parallel_workers=1, cache=test_cache)

num_iter = 0
for item in data.create_dict_iterator(num_epochs=1):  # each data is a dictionary
    # in this example, each dictionary has a key "image"
    print("{} image shape: {}".format(num_iter, item["image"].shape))
    num_iter += 1
# -
# 通过`cache_admin --list_sessions`命令可以查看当前会话有四条数据,说明数据缓存成功。
# !cache_admin --list_sessions
# - 缓存经过数据增强处理后的数据。
# +
import mindspore.dataset.vision.c_transforms as c_vision

dataset_dir = "./datasets/cifar-10-batches-bin/train"

# Load five samples without caching the raw data.
data = ds.Cifar10Dataset(dataset_dir=dataset_dir, num_samples=5, shuffle=False, num_parallel_workers=1)

# apply cache to map: the cache stores the *rescaled* images, so the
# Rescale op only runs the first time each sample is read.
rescale_op = c_vision.Rescale(1.0 / 255.0, -1.0)
test_cache = ds.DatasetCache(session_id=780643335, size=0, spilling=False)
data = data.map(input_columns=["image"], operations=rescale_op, cache=test_cache)

num_iter = 0
for item in data.create_dict_iterator(num_epochs=1):  # each data is a dictionary
    # in this example, each dictionary has a key "image"
    print("{} image shape: {}".format(num_iter, item["image"].shape))
    num_iter += 1
# -
# 通过`cache_admin --list_sessions`命令可以查看当前会话有五条数据,说明数据缓存成功。
# !cache_admin --list_sessions
# 6.销毁缓存会话。
#
# 在训练结束后,可以选择将当前的缓存销毁并释放内存:
# !cache_admin --destroy_session 780643335
# 以上命令将销毁端口50052服务器中缓存会话id为780643335的缓存。
#
# 若选择不销毁缓存,则该缓存会话中的缓存数据将继续存在,用户下次启动训练脚本时可以继续使用该缓存。
# 7.关闭缓存服务器。
#
# 使用完毕后,可以通过以下命令关闭缓存服务器,该操作将销毁当前服务器中存在的所有缓存会话并释放内存。
# !cache_admin --stop
# 以上命令将关闭端口50052的服务器。
#
# 若选择不关闭服务器,则服务器中已创建的缓存会话将保留,并供下次使用。下次训练时,用户可以新建缓存会话或重复使用已有缓存。
# ## 缓存共享
#
# 对于单机多卡的分布式训练的场景,缓存算子还允许多个相同的训练脚本共享同一个缓存,共同从缓存中读写数据。
# 1.启动缓存服务器。
# ```bash
# $cache_admin --start
# Cache server startup completed successfully!
# The cache server daemon has been created as process id 39337 and listening on port 50052
# Recommendation:
# Since the server is detached into its own daemon process, monitor the server logs (under /tmp/mindspore/cache/log) for any issues that may happen after startup
# ```
# 2.创建缓存会话。
#
# 创建启动Python训练的Shell脚本`cache.sh`,通过以下命令生成一个缓存会话id:
# ```shell
# # #!/bin/bash
# # This shell script will launch parallel pipelines
#
# # get path to dataset directory
# if [ $# != 1 ]
# then
# echo "Usage: sh cache.sh DATASET_PATH"
# exit 1
# fi
# dataset_path=$1
#
# # generate a session id that these parallel pipelines can share
# result=$(cache_admin -g 2>&1)
# rc=$?
# if [ $rc -ne 0 ]; then
# echo "some error"
# exit 1
# fi
#
# # grab the session id from the result string
# session_id=$(echo $result | awk '{print $NF}')
# ```
# 3.将缓存会话id传入训练脚本。
#
# 继续编写Shell脚本,添加以下命令在启动Python训练时将`session_id`以及其他参数传入:
# ```bash
# # make the session_id available to the python scripts
# num_devices=4
#
# for p in $(seq 0 $((${num_devices}-1))); do
# python my_training_script.py --num_devices "$num_devices" --device "$p" --session_id $session_id --dataset_path $dataset_path
# done
# ```
#
# > 直接获取完整样例代码:[cache.sh](https://gitee.com/mindspore/docs/blob/master/tutorials/tutorial_code/cache/cache.sh)
# 4.创建并应用缓存实例。
#
# 下面样例中使用到CIFAR-10数据集。运行样例前,需参照[数据集加载](https://www.mindspore.cn/doc/programming_guide/zh-CN/master/dataset_loading.html#cifar-10-100)中的方法下载并存放CIFAR-10数据集。目录结构如下:
# ```text
# ├─cache.sh
# ├─my_training_script.py
# └─cifar-10-batches-bin
# ├── batches.meta.txt
# ├── data_batch_1.bin
# ├── data_batch_2.bin
# ├── data_batch_3.bin
# ├── data_batch_4.bin
# ├── data_batch_5.bin
# ├── readme.html
# └── test_batch.bin
# ```
# 创建并编写Python脚本`my_training_script.py`,通过以下代码接收传入的`session_id`,并在定义缓存实例时将其作为参数传入。
# ```python
# import argparse
# import mindspore.dataset as ds
#
# parser = argparse.ArgumentParser(description='Cache Example')
# parser.add_argument('--num_devices', type=int, default=1, help='Device num.')
# parser.add_argument('--device', type=int, default=0, help='Device id.')
# parser.add_argument('--session_id', type=int, default=1, help='Session id.')
# parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
# args_opt = parser.parse_args()
#
# # apply cache to dataset
# test_cache = ds.DatasetCache(session_id=args_opt.session_id, size=0, spilling=False)
# dataset = ds.Cifar10Dataset(dataset_dir=args_opt.dataset_path, num_samples=4, shuffle=False, num_parallel_workers=1,
# num_shards=args_opt.num_devices, shard_id=args_opt.device, cache=test_cache)
# num_iter = 0
# for _ in dataset.create_dict_iterator():
# num_iter += 1
# print("Got {} samples on device {}".format(num_iter, args_opt.device))
# ```
#
# > 直接获取完整样例代码:[my_training_script.py](https://gitee.com/mindspore/docs/blob/master/tutorials/tutorial_code/cache/my_training_script.py)
# 5.运行训练脚本。
#
# 运行Shell脚本`cache.sh`开启分布式训练:
# ```bash
# $ sh cache.sh cifar-10-batches-bin/
# Got 4 samples on device 0
# Got 4 samples on device 1
# Got 4 samples on device 2
# Got 4 samples on device 3
# ```
# 通过`cache_admin --list_sessions`命令可以查看当前会话中只有一组数据,说明缓存共享成功。
# ```bash
# $ cache_admin --list_sessions
# Listing sessions for server on port 50052
#
# Session Cache Id Mem cached Disk cached Avg cache size Numa hit
# 3392558708 821590605 16 n/a 3227 16
# ```
# 6.销毁缓存会话。
#
# 在训练结束后,可以选择将当前的缓存销毁并释放内存:
# ```bash
# $ cache_admin --destroy_session 3392558708
# Drop session successfully for server on port 50052
# ```
# 7.关闭缓存服务器。
#
# 使用完毕后,可以选择关闭缓存服务器:
# ```bash
# $ cache_admin --stop
# Cache server on port 50052 has been stopped successfully.
# ```
# ## 当前限制
#
# - 当前`MindDataset`、`GraphDataset`、`GeneratorDataset`、`PaddedDataset`和`NumpySlicesDataset`等数据集类不支持缓存。其中,`GeneratorDataset`、`PaddedDataset`和`NumpySlicesDataset`属于`GeneratorOp`,在不支持的报错信息中会呈现“There is currently no support for GeneratorOp under cache”。
# - 经过`batch`、`concat`、`filter`、`repeat`、`skip`、`split`、`take`和`zip`处理后的数据不支持缓存。
# - 经过随机数据增强操作(如`RandomCrop`)后的数据不支持缓存。
# - 不支持在同个数据管道的不同位置嵌套使用同一个缓存实例。
#
# ## 缓存性能调优
#
# 使用缓存服务能够在一些场景下获得显著的性能提升,例如:
#
# - 缓存经过数据增强处理后的数据,尤其是当数据预处理管道中包含decode等高复杂度操作时。在该场景下,用户不需要在每个epoch重复执行数据增强操作,可节省较多时间。
# - 在简单网络的训练和推理过程中使用缓存服务。相比于复杂网络,简单网络的训练耗时占比更小,因此在该场景下应用缓存,能获得更显著的时间性能提升。
#
# 然而,在以下场景中使用缓存可能不会获得明显的性能收益,例如:
#
# - 系统内存不足、缓存未命中等因素将导致缓存服务在时间性能上提升不明显。因此,可在使用缓存前检查可用系统内存是否充足,选择一个适当的缓存大小。
# - 过多缓存溢出会导致时间性能变差。因此,在使用可随机访问的数据集(如`ImageFolderDataset`)进行数据加载的场景,尽量不要允许缓存溢出至磁盘。
# - 在Bert等NLP类网络中使用缓存,通常不会取得性能提升。因为在NLP场景下通常不会使用到decode等高复杂度的数据增强操作。
# - 使用non-mappable数据集(如`TFRecordDataset`)的pipeline在第一个epoch的时间开销较大。根据当前的缓存机制,non-mappable数据集需要在第一个epoch训练开始前将所有数据写入缓存服务器中,因此这使得第一个epoch时间较长。
| docs/programming_guide/source_zh_cn/cache.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# # Punctuations
#
# Some ways to remove punctuations from strings.
#
# Credit/Source:
# - https://www.programiz.com/python-programming/methods/string/translate
# - https://www.tutorialspoint.com/python/string_translate.htm
# - https://stackoverflow.com/questions/34293875/how-to-remove-punctuation-marks-from-a-string-in-python-3-x-using-translate
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Get the punctuation characters from the standard-library string package.
import string
string.punctuation
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We can use 'maketrans', 'translate' functions from 'str' type.
#
# In Python 2.x, those functions were from:
# from string import maketrans, translate
#
# But in Python 3.x, you can get them directly from 'str' type without having to import.
#
# format:
# maketrans('map this string', 'to this string', 'remove this string')
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# map aeiou to 12345
intable = "aeiou"
outtable = "12345"
# BUG FIX: the table was built from undefined names `intab`/`outtab`
# (copy-paste typos), which raised NameError when the cell ran; use the
# variables actually defined above.
translation = str.maketrans(intable, outtable)
s = "a e i o u hello"
print(s.translate(translation))
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# map aeiou to 12345 and remove 'h'
intable = "aeiou"
outtable = "12345"
remove = 'h'
# BUG FIX: `intab`/`outtab` were typos for the names defined above and
# raised NameError; the third maketrans argument deletes its characters.
translation = str.maketrans(intable, outtable, remove)
s = "a e i o u hello"
print(s.translate(translation))
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Remove punctuation: map every punctuation character to None so that
# translate() deletes it.
translation = str.maketrans(dict.fromkeys(string.punctuation))
s = "hello! what is your name?"
print(s.translate(translation))
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Delete punctuation first, then whitespace-split into word tokens.
translator = str.maketrans('', '', string.punctuation)
cleaned = s.translate(translator)
tokens = cleaned.split()
print(tokens)
# Compare with splitting the raw string (punctuation still attached).
print(s.split())
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# if you want both digits and punctuation
translator = str.maketrans('', '', string.punctuation + string.digits)
# BUG FIX: the Python 2 variant below called string.maketrans, which no
# longer exists in Python 3 (this notebook's kernel) and raised
# AttributeError, clobbering the working table above. Kept for reference:
# translator = string.maketrans((string.punctuation + string.digits), ' ' * 42)
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Built-in documentation for the translation-table builder.
help(str.maketrans)
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Built-in documentation for the translation method itself.
help(str.translate)
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# Substring test: string.punctuation contains a single '*', so the
# two-character "**" is not a substring and this evaluates to False.
"**" in string.punctuation
| *NLP/Punctuation/Removing_Punctuation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dot Dash Plot
#
# How to make the dot-dash plot presented in Edward Tufte’s [Visual Display of Quantitative Information](https://www.edwardtufte.com/tufte/books_vdqi). Based on a JavaScript implementation by [g3o2](https://bl.ocks.org/g3o2/bd4362574137061c243a2994ba648fb8).
# +
import altair as alt
from vega_datasets import data

# Classic cars dataset bundled with vega_datasets.
source = data.cars()

# Configure the options common to all layers: a shared interval ("brush")
# selection attached to one base chart that every layer derives from.
# NOTE(review): alt.selection(type='interval') / add_selection are the
# Altair 4 APIs; Altair 5 renames them to selection_interval / add_params —
# confirm the pinned Altair version.
brush = alt.selection(type='interval')
base = alt.Chart(source).add_selection(brush)

# Configure the points: the central scatter; points outside the brush are
# greyed out via the conditional colour encoding.
points = base.mark_point().encode(
    x=alt.X('Miles_per_Gallon', title=''),
    y=alt.Y('Horsepower', title=''),
    color=alt.condition(brush, 'Origin', alt.value('grey'))
)

# Configure the ticks: the marginal "dash" rug plots, with all axis
# decoration (labels, domain line, tick marks) suppressed.
tick_axis = alt.Axis(labels=False, domain=False, ticks=False)

x_ticks = base.mark_tick().encode(
    alt.X('Miles_per_Gallon', axis=tick_axis),
    alt.Y('Origin', title='', axis=tick_axis),
    color=alt.condition(brush, 'Origin', alt.value('lightgrey'))
)

y_ticks = base.mark_tick().encode(
    alt.X('Origin', title='', axis=tick_axis),
    alt.Y('Horsepower', axis=tick_axis),
    color=alt.condition(brush, 'Origin', alt.value('lightgrey'))
)

# Build the chart: y rug on the left, scatter stacked above the x rug.
y_ticks | (points & x_ticks)
| doc/gallery/dot_dash_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparación del algoritmo Lista de Clusters frente a otros
#
# Esta notebook pretende comparar el algoritmo de **Lista de Clusters** con algoritmos de clustering normalmente utilizados en el aprendizaje automatico. El objetivo es mostrar las características de diferentes algoritmos de clustering en conjuntos de datos que son "interesantes" pero aún en 2D. Si bien estos ejemplos dan cierta intuición acerca de los algoritmos, esta intuición podría no aplicarse a datos dimensionales muy elevados.
# ## Importamos las librerías necesarias
# Si vamos a comparar algoritmos de agrupamiento, necesitaremos algunas cosas; primero algunas bibliotecas para cargar y agrupar los datos, y en segundo lugar algunas herramientas de visualización para que podamos ver los resultados de la agrupación.
# +
import time
import warnings

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import hdbscan
from list_of_clusters import ListOfClusters

# %matplotlib inline

# Fix the RNG seed so the generated datasets are reproducible.
np.random.seed(0)
# Seaborn styling for the comparison figure.
sns.set_context('poster')
sns.set_style('white')
sns.set_color_codes()
# -
# ## Generamos los distintos conjuntos de datos para comparar
# +
def make_var_density_blobs(n_samples=750, centers=None, cluster_std=None):
    """Generate Gaussian blobs whose clusters have different densities.

    Parameters
    ----------
    n_samples : total number of points, split evenly across the centers
        (integer division, so any remainder is dropped).
    centers : list of [x, y] blob centres; defaults to one blob at the origin.
    cluster_std : per-blob standard deviations, parallel to `centers`.

    Returns
    -------
    (points, labels): points stacked into one array, and labels giving each
    point the index of the blob it was drawn from.
    """
    # Avoid mutable default arguments: bind the fallbacks per call instead
    # of sharing list literals across all calls.
    if centers is None:
        centers = [[0, 0]]
    if cluster_std is None:
        cluster_std = [0.5]
    samples_per_blob = n_samples // len(centers)
    blobs = [datasets.make_blobs(n_samples=samples_per_blob, centers=[c],
                                 cluster_std=cluster_std[i])[0]
             for i, c in enumerate(centers)]
    labels = [i * np.ones(samples_per_blob) for i in range(len(centers))]
    return np.vstack(blobs), np.hstack(labels)
# Choose a sample size large enough to show how the algorithms scale, but
# not so large that run times become excessive.
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
                                      noise=.055)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.065)
blobs = datasets.make_blobs(n_samples=n_samples-200, random_state=8)
# Three clean blobs plus 200 uniform noise points labelled -1.
noisy_blobs = np.vstack((blobs[0], 25.0*np.random.rand(200, 2)-[10.0,10.0])),\
              np.hstack((blobs[1], -1*np.ones(200)))
varying_blobs = make_var_density_blobs(n_samples,
                                       centers=[[1, 1],
                                                [-1, -1],
                                                [1, -1]],
                                       cluster_std=[0.2, 0.35, 0.5])
# Apply an anisotropic (shearing) transformation to the blobs.
X, y = datasets.make_blobs(n_samples=n_samples, random_state=170)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
no_structure = np.random.rand(n_samples, 2), None
easy_blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
# Default parameters for every algorithm; overridden per dataset below.
# NOTE(review): 'center_choise' [sic] is the keyword spelling that
# ListOfClusters actually expects, so it must stay misspelled here.
default_base = {'eps': .3,
                'damping': .9,
                'preference': -200,
                'n_neighbors': 10,
                'n_clusters': 3,
                'center_choise':'p1',
                'fixed_size': None,
                'fixed_radius': 1.0,
                'distance_metric':'euclidean'}
# Each entry pairs a dataset with its parameter overrides.
clustering_datasets = [
    (noisy_circles, {'damping': .77, 'preference': -240,'n_clusters': 2,
                     'fixed_size': 750, 'center_choise':'p1'}),
    (noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2,
                   'fixed_radius': 1.75, 'center_choise':'p3'}),
    (noisy_blobs, {'fixed_size': 500, 'center_choise':'p1'}),
    (varying_blobs, {'fixed_radius': 1.4, 'center_choise':'p1'}),
    (aniso, {'eps': .15, 'n_neighbors': 2, 'fixed_size': 500,
             'center_choise':'p4', 'distance_metric':'cityblock'}),
    (easy_blobs,{'fixed_size': 500, 'center_choise':'p3'}),
    (no_structure, {})]
clustering_names = [
    'KMeans', 'AffinityPropagation',
    'SpectralClustering', 'AgglomerativeClustering',
    'GaussianMixture', 'DBSCAN', 'HDBSCAN', 'ListOfClusters']
# -
# ## Graficamos los grupos generados por cada algoritmo.
# +
# Palette reference:
# https://matplotlib.org/examples/color/colormaps_reference.html
my_palette = 'tab10'

plt.figure(figsize=(len(clustering_names) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)

plot_num = 1
for i_dataset, (dataset, algo_params) in enumerate(clustering_datasets):
    # Update the defaults with the dataset-specific parameter values.
    params = default_base.copy()
    params.update(algo_params)

    X, y = dataset

    # Normalise the dataset for easier parameter selection: standardise the
    # features by removing the mean and scaling to unit variance.
    X = StandardScaler().fit_transform(X)

    # Connectivity matrix for structured (agglomerative) clustering.
    connectivity = kneighbors_graph(X, n_neighbors=params['n_neighbors'],
                                    include_self=False)
    # Make the connectivity matrix symmetric.
    connectivity = 0.5 * (connectivity + connectivity.T)

    # Build the estimators for the clustering task.
    two_means = cluster.KMeans(n_clusters=params['n_clusters'])
    spectral = cluster.SpectralClustering(n_clusters=params['n_clusters'],
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=params['eps'])
    affinity_propagation =\
        cluster.AffinityPropagation(damping=params['damping'],
                                    preference=params['preference'])
    average_linkage = \
        cluster.AgglomerativeClustering(linkage="average", affinity="cityblock",
                                        n_clusters=params['n_clusters'],
                                        connectivity=connectivity)
    hdbscanner = hdbscan.HDBSCAN()
    gmm = mixture.GaussianMixture(
        n_components=params['n_clusters'], covariance_type='full')
    lc = ListOfClusters(center_choise=params['center_choise'],
                        fixed_radius=params['fixed_radius'],
                        fixed_size=params['fixed_size'],
                        distance_metric=params['distance_metric'])

    clustering_algorithms = (
        ('KMeans', two_means),
        ('AffinityPropagation', affinity_propagation),
        ('SpectralClustering', spectral),
        ('Agglom.Clustering', average_linkage),
        ('GaussianMixture', gmm),
        ('DBSCAN', dbscan),
        ('HDBSCAN', hdbscanner),
        ('ListOfClusters', lc)
    )

    for name, algorithm in clustering_algorithms:
        # Predict cluster membership, timing the fit for the annotation.
        t0 = time.time()
        # catch warnings related to kneighbors_graph
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message="the number of connected components of the " +
                "connectivity matrix is [0-9]{1,2}" +
                " > 1. Completing it to avoid stopping the tree early.",
                category=UserWarning)
            warnings.filterwarnings(
                "ignore",
                message="Graph is not fully connected, spectral embedding" +
                " may not work as expected.",
                category=UserWarning)
            algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            # NOTE(review): np.int was removed in NumPy >= 1.24; this line
            # only works with the older NumPy the notebook was run on.
            y_pred = algorithm.labels_.astype(np.int)
        else:
            y_pred = algorithm.predict(X)

        # Plot the clustering result; noise labels (-1) are drawn in black.
        ax = plt.subplot(len(clustering_datasets), len(clustering_algorithms),
                         plot_num)
        if i_dataset == 0:
            if name == 'ListOfClusters':
                plt.title(name, size=18, weight='bold')
            else:
                plt.title(name, size=18)
        palette = sns.color_palette(my_palette, np.unique(y_pred).max() + 1)
        colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in y_pred]
        plt.scatter(X[:, 0], X[:, 1], color=colors, s=10)

        if hasattr(algorithm, 'cluster_centers_'):
            # Reverse the order of the centers and their colours so that
            # the first-found center is drawn in front.
            centers = np.flip(algorithm.cluster_centers_, 0)
            center_colors = list(reversed(palette[:len(centers)]))
            plt.scatter(centers[:, 0], centers[:, 1], s=50, c=center_colors,\
                        edgecolors='k', zorder=3)
        if (hasattr(algorithm, 'cluster_radius_')):
            neg_alpha = 0.65
            # Reverse the radii and their colours the same way as the
            # centers above so the pairing stays consistent.
            radius = list(reversed(algorithm.cluster_radius_))
            cluster_colors = list(reversed(palette[:len(radius)]))
            for pos, r in enumerate(radius):
                if algorithm.distance_metric == 'euclidean':
                    # Euclidean balls are circles in 2D.
                    circle = plt.Circle((centers[pos][0],centers[pos][1]),
                                        r, color=cluster_colors[pos],
                                        alpha=0.85-neg_alpha, zorder=1)
                    ax.add_artist(circle)
                elif algorithm.distance_metric == 'cityblock':
                    # Manhattan balls are squares rotated 45 degrees; the
                    # side length follows from the diagonal 2*r.
                    diagonal = 2*r
                    l = diagonal / (2**0.5)
                    rec_x = centers[pos][0]
                    rec_y = centers[pos][1] - r
                    rect = patches.Rectangle(xy=(rec_x, rec_y),
                                             width=l, height=l,
                                             zorder=1, angle=45,
                                             color=cluster_colors[pos],
                                             alpha=0.85-neg_alpha)
                    ax.add_patch(rect)
                # Fade each successive cluster so overlaps stay visible.
                neg_alpha = neg_alpha * 0.80

        plt.xlim(-2.5, 2.5)
        plt.ylim(-2.5, 2.5)
        plt.xticks(())
        plt.yticks(())
        plt.text(.85, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1

plt.show()
| List_of_Clusters_vs_Others.ipynb |