code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Pytorch env
# language: python
# name: pytorchenv
# ---
# + pycharm={"name": "#%%\n"}
import os
import numpy as np
import pandas as pd
from datetime import datetime
import time
import random
#from tqdm.autonotebook import tqdm
#from tqdm.notebook import tqdm
from tqdm import tqdm
#Torch
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
#sklearn
from sklearn.model_selection import StratifiedKFold
#CV
import cv2
# + pycharm={"name": "#%%\n"}
from models.matcher import HungarianMatcher
from models.detr import SetCriterion, PostProcess
# + pycharm={"name": "#%%\n"}
from util import box_ops
from engine import evaluate, train_one_epoch
from datasets.coco_eval import CocoEvaluator
# + pycharm={"name": "#%%\n"}
# Albumenatations
import albumentations as A
import matplotlib.pyplot as plt
from albumentations.pytorch.transforms import ToTensorV2
# -
# for plotting
from torchvision.ops.boxes import box_area
from util.plot_utils import plot_logs
from pathlib import Path
# + [markdown] pycharm={"name": "#%% md\n"}
# Try to plot performance metrics using:
# - the log.txt document
#
# from the latest version of the model
# - on crops
# - from July 1st
# - on 100 epochs
# + pycharm={"name": "#%%\n"}
#location of log.txt output file
# NOTE(review): Windows-specific absolute path — update when running elsewhere.
outDir = "C:\\Users\\Eva.Locusteanu\\PycharmProjects\\detr\\outDir\\July1st"
# + pycharm={"name": "#%%\n"}
# NOTE(review): plot_logs and Path are already imported above; the re-import is
# kept so this cell can run on its own inside the notebook.
from util.plot_utils import plot_logs
from pathlib import Path
# plot_logs expects a list of directories, each containing a log.txt file
log_directory = [Path(outDir)]
print(log_directory)
# + pycharm={"name": "#%%\n"}
# Overall training loss and mean average precision per epoch
fields_of_interest = ("loss", "mAP",)
plot_logs(log_directory, fields_of_interest)
# + pycharm={"name": "#%%\n"}
# Individual loss components: classification, bbox L1 and generalized IoU
fields_of_interest= ('loss_ce', 'loss_bbox', 'loss_giou',)
plot_logs(log_directory, fields_of_interest)
# + pycharm={"name": "#%%\n"}
# Classification error and (unscaled) cardinality error
fields_of_interest = ('class_error', 'cardinality_error_unscaled', )
plot_logs(log_directory, fields_of_interest)
# + pycharm={"name": "#%%\n"}
| Isolate_mAP_error.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import Kriging_testing as KT
import numpy as np
# Build the kriging driver from the model/structure/point/zone files.
# Positional args after the file names are presumably layer, number of
# interpolation points, search radius and a flag — TODO confirm against
# KrigingFactors' signature before reusing.
test_obj = KT.KrigingFactors('freyberg_pp.nam','structure.dat','points1.dat','lay1zones.dat','test_factors.dat',1,8,1500,1)
# Inspect the loaded grid-point and pilot-point tables
test_obj.allpts.head()
test_obj.pp_df.head()
cp =test_obj.allpts.loc[0]  # NOTE(review): cp is never used later in this notebook
# Select interpolation points and compute the kriging weights for each cell
test_obj.get_interp_points()
test_obj.kriging_weights()
test_obj.allpts.head()
# Write the interpolation factors to test_factors.dat
test_obj.write_factor_file()
# Echo the generated factor file. Use a context manager so the file handle
# is closed deterministically (the original `open(...).readlines()` leaked it),
# and iterate the file directly instead of materialising all lines first.
with open('test_factors.dat', 'r') as factor_file:
    for line in factor_file:
        print(line.strip())
| misc/kriging/Test_Kriging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <span style="color:Maroon">Hurst Exponent Analysis
# __Summary:__ <span style="color:Blue">Explore the hurst exponent for various window sizes on given data
# Import required libraries
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import hurst as hs
import os
np.random.seed(0)
# User defined names
# Series being analysed; the input CSV is expected to be named "<index>.csv"
index = "SSE Composite"
filename = index+".csv"
date_col = "Date"
# Rolling window lengths (in trading days) for the Hurst exponent
hurst_windows = [100, 150, 200, 250, 300, 400]
# Get current working directory
mycwd = os.getcwd()
print(mycwd)
# Change to data directory
# NOTE(review): the backslash separator makes this Windows-only; the data is
# assumed to live in a sibling "Data" directory one level above the code.
os.chdir("..")
os.chdir(str(os.getcwd()) + "\\Data")
# Read the data, indexed by the (parsed) date column
df = pd.read_csv(filename, index_col=date_col)
df.index = pd.to_datetime(df.index)
df.head()
#
#
# ## <span style="color:Maroon">Functions
def Calculate_Hurst(df, var, window_size, ser_type):
    """
    Append a rolling Hurst-exponent column ('hurst_<window_size>') to df.

    The exponent is computed with hurst.compute_Hc over every contiguous
    slice of df[var] of length window_size. The first window_size-1 rows
    receive NaN so the new column aligns with the original index.

    ser_type is passed through as compute_Hc's `kind` and can be:
        'change':      the series is raw random changes
        'random_walk': the series is a cumulative sum of changes
        'price':       the series is a cumulative product of changes
    """
    values = list(df[var])
    # NaN padding keeps the output the same length as the input frame.
    exponents = [np.nan] * (window_size - 1)
    for start in range(len(values) - window_size + 1):
        H, _c, _fit_data = hs.compute_Hc(
            values[start:start + window_size], kind=ser_type, simplified=True
        )
        exponents.append(H)
    df['hurst_' + str(window_size)] = exponents
    return df
#
#
# ## <span style="color:Maroon">Hurst Exponent time Plots
# Change to Images directory (plots below are saved there)
os.chdir("..")
os.chdir(str(os.getcwd()) + "\\Images")
# Calculate hurst exponents for given window sizes
# (each call appends a 'hurst_<w>' column to df)
for i in range(0, len(hurst_windows)):
    df = Calculate_Hurst(df, 'Adj Close', hurst_windows[i], 'price')
# +
var_names = ['hurst_'+str(x) for x in hurst_windows]

# Plot hurst exponents: one subplot per rolling window size in a 3x2 grid.
# (The original repeated the same five plotting lines six times; a loop
# produces the identical figure and stays correct if a window is changed.)
plt.figure(figsize=(20,10))
for position, (var, window) in enumerate(zip(var_names, hurst_windows), start=1):
    plt.subplot(3, 2, position)
    plt.plot(df[var].dropna(), 'b-', alpha=0.5)
    plt.xlabel("Date", fontsize=12)
    plt.ylabel("Hurst Exponent", fontsize=12)
    plt.title("Window Size: {}".format(window), fontsize=16)
    plt.grid()
plt.tight_layout()
plt.savefig('Hurst Exponent for different window size for ' + str(index) +'.png')
plt.show()
plt.close()
# -
# __Comments:__ <span style="color:Blue"> Window size 100 gives a volatile Hurst, but the plot for Hurst with window sizes 200, 300 and 400 is very similar. With higher window sizes the trend smoothens
#
#
# ## <span style="color:Maroon">Save the Data
# Move back to the Data directory and persist df — now including the
# hurst_<w> columns — next to the input CSV.
os.chdir("..")
os.chdir(str(os.getcwd()) + "\\Data")
df.to_csv(index +"_hurst"+".csv", index=True)
#
#
| Dev/SSE Composite/Codes/02 Hurst Exponent Calculation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # esempio di aggregazione
#
# Dati tutti gli eventi di gioco avvenuti durante la partita, calcolarne il numero in base al tipo di evento, per ogni squadra.
#
# Soluzione:
#
# 1. lettura delle lista degli eventi dal file json
# 2. scansione della lista e conteggio eventi per squadra
# 3. plot dei risultati
# +
import json
# List of match events supplied in JSON format.
# Use a context manager so the file handle is closed after loading
# (the original `json.load(open(...))` leaked it).
with open('worldCup-final.json') as events_file:
    events = json.load(events_file)
# -

## Example of the data stored for each event
events[0]
# +
## Aggregation: for every team, count how many events of each type occurred
event_count= {}
for event in events:
    squad = event['teamId']
    kind = event['eventName']
    # Nested dict: {team: {event_type: count}}
    per_team = event_count.setdefault(squad, {})
    per_team[kind] = per_team.get(kind, 0) + 1
# -

## Aggregated data
event_count
# +
# %matplotlib inline
import matplotlib.pyplot as plt

## matplotlib is the simplest library to use for quick plots built on plain
## data structures such as dictionaries.

def plot_team_events(team):
    """Bar chart of the per-event-type counts for one team."""
    D = event_count[team]
    plt.figure(figsize=(20,10))
    plt.title("Distribuzione eventi di gioco: %s"%team, fontsize = 20)
    plt.bar(range(len(D)), D.values(), align='center')
    plt.xticks(range(len(D)), D.keys(), fontsize = 14)
    plt.show()

plot_team_events('Croatia')
# +
plot_team_events('France')
# -
| 2018/notebooks/qualificazione_sampleAggregation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Advanced Plotting: Beyond matplotlib
# Alright, so our prior charts were plotted using `matplotlib` which helps us see the data, but the charts don't look amazing and aren't interactive at all. What other options do we have? [seaborn](https://seaborn.pydata.org/index.html) is a wrapper on top of `matplotlib` that makes much prettier plots (with a ton of statistical plotting capabilities) and [plotly](https://plot.ly/) is a great cross-platform plotting library that we can easily set up. Below we'll use `seaborn` and `plotly` to make some pretty pictures.
# First, let's import some libraries.
# +
import pandas as pd
import datetime as dt
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
# %matplotlib inline
# -
# Since we don't have access to some of the data that we do within JPM, we'll have to generate some data to plot. In this case, we're going to generate an approximation of Russell2k implied volatility and HYG (iTraxx High Yield Bond ETF) prices.
# Annualised vols for the two synthetic series (RTY implied vol, HYG price)
vols = [ 0.3, 0.06 ]
# Convert annual vol to daily vol assuming 252 trading days
dailyVols = vols / np.sqrt( 252 )
# Target correlation between the two daily return series
corr = -0.4
covars = [
    [ dailyVols[ 0 ] ** 2, dailyVols[ 0 ] * dailyVols[ 1 ] * corr ],
    [ dailyVols[ 0 ] * dailyVols[ 1 ] * corr, dailyVols[ 1 ] ** 2 ]
]
# 500 correlated daily returns; small positive drift on the first series.
# Transposed to shape (2, 500): one row per series.
randomSeries = np.random.multivariate_normal( ( 0.001, 0 ), covars, 500 ).T
randomSeries
# We've got two return series, but we need to convert them to a time series for what they're meant to represent.
# Compound the returns into level series: vol starting at 0.2, price at 80
rtyVol = 0.2 * ( 1 + randomSeries[ 0 ] ).cumprod()
hygPrice = 80 * ( 1 + randomSeries[ 1 ] ).cumprod()
# Let's see if they make sense... Often the easiest way to do that is to plot them. Many of the plotting libraries set up to operate / plot a `DataFrame` natively.
df = pd.DataFrame(np.array([rtyVol, hygPrice]).T, columns=["RTY.3m.Proxy.Implied.Vol", "HYG.spot"])
df.head()
df.plot()
# So... problem #1, these two series not similar magnitudes. We need to plot these on difference axes and while we're at it let's make it look a little better.
plt.style.use('seaborn')
df.plot(secondary_y=["HYG.spot"], legend=True)
# If we flip around RTY implied vol, we can see this inverse relationship a bit better.
df["RTY.3m.Proxy.Implied.Vol"] = df["RTY.3m.Proxy.Implied.Vol"] * -1
df.plot(secondary_y=["HYG.spot"], legend=True)
# We can look at the scatter on levels pretty easily with `matplotlib` using `matplotlib.pyplot.scatter`.
plt.scatter( df[ df.columns[ 0 ] ], df[ df.columns[ 1 ] ] )
# It looks like there is a relationship there (there should be, we generated the series with a negative correlation). Let's explore that a bit. As we said before, `seaborn` comes with a bunch of good statistical tools. In fact, it has has quick and easy way to generate a regression plot with `sns.regplot`.
fig, ax = plt.subplots( sharex=True )
sns.regplot( x="HYG.spot", y="RTY.3m.Proxy.Implied.Vol", data=df.diff(), ax=ax )
# Unfortunately, the underlying regression data is not exposed in `seaborn`, so we will need to generate it ourselves using `scipy`.
# +
from scipy import stats

# Regress daily changes of RTY implied vol on daily changes of HYG price.
diff = df.diff().dropna()
slope, intercept, rvalue, pvalue, stderr = stats.linregress(diff["HYG.spot"], diff["RTY.3m.Proxy.Implied.Vol"])
# linregress returns the Pearson correlation coefficient r; square it so the
# printed value really is R^2 (the original printed r under an R^2 label).
print( "R^2 = {r:.3f}".format( r=rvalue**2 ) )
print( 'y = {m:.3f}x {sign} {b:.3f}'.format( m=slope, sign="+" if intercept >= 0 else "-", b=abs(intercept) ) )
# -
# What if I want to interact with the plot.... zoom in, inspect the values, etc. This is where `plotly` shines.
# +
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
import plotly.tools as tls
init_notebook_mode(connected=True)
# Here we can convert our matplotlib object to a plotly object
plotlyFig = tls.mpl_to_plotly(fig)
# Add annotations so the interactive plot carries the regression stats.
plotlyFig['layout']['annotations'] = [
    dict(
        x=18,
        y=-2,
        showarrow=False,
        # rvalue is Pearson's r; square it so the label really shows R^2
        text='R^2 = {:.3f}'.format( rvalue**2 )
    ),
    dict(
        x=18,
        y=-2.6,
        showarrow=False,
        text='y = {m:.3f}x {sign} {b:.3f}'.format( m=slope, sign="+" if intercept >= 0 else "-", b=abs(intercept) )
    )
]
iplot(plotlyFig)
# -
# 2d plots are cool but 3d plots.... Below is an example of plotting a vol surface from start to finish.
c = [0.8023,0.814,0.8256,0.8372,0.8488,0.8605,0.8721,0.8837,0.8953,0.907,0.9186,0.9302,0.9419,0.9535,0.9651,0.9767,0.9884,1,1.0116,1.0233,1.0349,1.0465,1.0581,1.0698,1.0814,1.093,1.1047,1.1163,1.1279,1.1395,1.1512,1.1628,1.1744,1.186,1.1977,1.2093]
i = [ dt.datetime(2019,8,2), dt.datetime(2019,8,9), dt.datetime(2019,8,16), dt.datetime(2019,8,23), dt.datetime(2019,8,30), dt.datetime(2019,9,6), dt.datetime(2019,9,20), dt.datetime(2019,10,18), dt.datetime(2019,11,15), dt.datetime(2019,12,20), dt.datetime(2019,12,31), dt.datetime(2020,1,17), dt.datetime(2020,3,20), dt.datetime(2020,3,31), dt.datetime(2020,6,19) ]
d = [ [0.4244,0.4016,0.3796,0.3584,0.3381,0.3187,0.3002,0.2827,0.2662,0.2508,0.2363,0.2229,0.2105,0.1991,0.1888,0.1794,0.171,0.1636,0.157,0.1513,0.1465,0.1425,0.1393,0.1368,0.135,0.1338,0.1331,0.1329,0.133,0.1334,0.1341,0.135,0.136,0.1372,0.1384,0.1396],
[0.4006,0.3777,0.3556,0.3343,0.3139,0.2944,0.2759,0.2583,0.2418,0.2263,0.2118,0.1983,0.1859,0.1745,0.1641,0.1547,0.1463,0.1388,0.1322,0.1265,0.1216,0.1176,0.1143,0.1118,0.11,0.1088,0.1081,0.1078,0.108,0.1084,0.1092,0.1101,0.1112,0.1123,0.1136,0.1149],
[0.3431,0.3257,0.3089,0.2927,0.277,0.2621,0.2477,0.2341,0.2212,0.2089,0.1974,0.1866,0.1765,0.1671,0.1584,0.1504,0.1431,0.1364,0.1303,0.1248,0.1198,0.1154,0.1116,0.1083,0.1055,0.1032,0.1013,0.0998,0.0986,0.0977,0.0971,0.0966,0.0964,0.0963,0.0963,0.0965],
[0.3124,0.298,0.284,0.2705,0.2574,0.2449,0.2328,0.2213,0.2104,0.1999,0.1901,0.1807,0.1719,0.1637,0.156,0.1488,0.1421,0.136,0.1304,0.1253,0.1206,0.1165,0.1129,0.1098,0.1072,0.1049,0.1031,0.1015,0.1003,0.0994,0.0988,0.0983,0.098,0.0979,0.0978,0.0979],
[0.2955,0.283,0.2708,0.259,0.2475,0.2365,0.2259,0.2158,0.206,0.1968,0.1879,0.1795,0.1716,0.1641,0.1571,0.1505,0.1443,0.1385,0.1332,0.1284,0.124,0.1201,0.1167,0.1137,0.1111,0.1089,0.1071,0.1056,0.1044,0.1034,0.1027,0.1022,0.1019,0.1018,0.1017,0.1018],
[0.2866,0.2752,0.264,0.2532,0.2427,0.2326,0.2228,0.2134,0.2044,0.1958,0.1876,0.1798,0.1724,0.1653,0.1587,0.1524,0.1465,0.141,0.1359,0.1313,0.1271,0.1233,0.12,0.1171,0.1145,0.1124,0.1106,0.1091,0.1079,0.1069,0.1062,0.1057,0.1054,0.1052,0.1051,0.1051],
[0.2729,0.2632,0.2538,0.2446,0.2357,0.227,0.2187,0.2106,0.2028,0.1954,0.1882,0.1813,0.1748,0.1685,0.1626,0.1569,0.1516,0.1465,0.1418,0.1376,0.1336,0.1301,0.127,0.1242,0.1218,0.1197,0.1179,0.1164,0.1152,0.1143,0.1135,0.113,0.1126,0.1124,0.1122,0.1122],
[0.2548,0.2473,0.24,0.2329,0.226,0.2192,0.2126,0.2063,0.2001,0.1941,0.1883,0.1827,0.1773,0.1721,0.167,0.1622,0.1576,0.1532,0.149,0.1452,0.1416,0.1383,0.1354,0.1327,0.1303,0.1281,0.1262,0.1246,0.1232,0.122,0.121,0.1202,0.1195,0.119,0.1186,0.1183],
[0.2482,0.242,0.2359,0.2299,0.2241,0.2184,0.2128,0.2074,0.2021,0.1969,0.1919,0.187,0.1823,0.1777,0.1733,0.169,0.1649,0.1608,0.157,0.1535,0.1501,0.1471,0.1442,0.1415,0.1391,0.1369,0.135,0.1332,0.1316,0.1303,0.1291,0.128,0.1271,0.1263,0.1257,0.1252],
[0.2331,0.2278,0.2227,0.2176,0.2127,0.2078,0.2031,0.1984,0.1939,0.1895,0.1851,0.1809,0.1768,0.1728,0.1689,0.1652,0.1615,0.158,0.1547,0.1516,0.1486,0.1459,0.1433,0.1409,0.1387,0.1367,0.1349,0.1332,0.1317,0.1303,0.1291,0.128,0.127,0.1261,0.1254,0.1247],
[0.2313,0.2262,0.2212,0.2163,0.2115,0.2068,0.2022,0.1977,0.1932,0.1889,0.1847,0.1806,0.1766,0.1727,0.1689,0.1653,0.1617,0.1582,0.155,0.152,0.1491,0.1464,0.1438,0.1415,0.1393,0.1373,0.1354,0.1337,0.1322,0.1308,0.1296,0.1284,0.1274,0.1266,0.1258,0.1251],
[0.2302,0.2253,0.2206,0.216,0.2114,0.2069,0.2026,0.1983,0.1941,0.19,0.186,0.1821,0.1783,0.1745,0.1709,0.1674,0.164,0.1607,0.1576,0.1546,0.1518,0.1492,0.1467,0.1444,0.1423,0.1403,0.1384,0.1367,0.1351,0.1337,0.1324,0.1313,0.1302,0.1293,0.1285,0.1277],
[0.229,0.2249,0.2209,0.2169,0.213,0.2091,0.2054,0.2017,0.1981,0.1946,0.1911,0.1877,0.1845,0.1812,0.1781,0.175,0.172,0.1691,0.1664,0.1637,0.1611,0.1587,0.1563,0.1541,0.152,0.15,0.148,0.1462,0.1445,0.143,0.1415,0.1401,0.1388,0.1376,0.1365,0.1355],
[0.2289,0.2249,0.2209,0.2171,0.2133,0.2096,0.2059,0.2023,0.1988,0.1953,0.192,0.1887,0.1855,0.1823,0.1793,0.1763,0.1733,0.1705,0.1678,0.1651,0.1626,0.1602,0.1579,0.1557,0.1535,0.1515,0.1496,0.1478,0.1461,0.1445,0.143,0.1415,0.1402,0.139,0.1379,0.1368],
[0.2239,0.2207,0.2175,0.2143,0.2112,0.2082,0.2051,0.2022,0.1993,0.1964,0.1936,0.1909,0.1882,0.1855,0.1829,0.1804,0.1779,0.1755,0.1731,0.1708,0.1685,0.1663,0.1642,0.1622,0.1601,0.1582,0.1563,0.1545,0.1527,0.151,0.1494,0.1478,0.1464,0.1449,0.1436,0.1423]
]
# Implied-vol grid: rows (index i) = expiry dates, columns (c) = moneyness
df2 = pd.DataFrame(d, columns=c, index=i)
df2.head()
# +
import plotly.graph_objs as go

# 3D vol surface: x = expiry date (as strings), y = moneyness, z = implied vol
fig = go.Figure(
    data=[ go.Surface(
        z=df2.values.tolist(),
        y=df2.columns.values,
        x=df2.index.astype(str).values.tolist()
    )],
    layout=dict(
        title = 'Vol Surface',
        autosize = True,
        width = 900,
        height = 700,
        margin = dict(
            l = 65,
            r = 50,
            b = 65,
            t = 90
        ),
        scene = dict(
            # Flatten the z axis a little so the surface reads better
            aspectratio = dict(
                x = 1,
                y = 1,
                z = 0.667
            )
        )
    ))
go.FigureWidget(fig)
# -
| notebooks/7_advanced_plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="QiPRPofpZ4ka"
# + [markdown] id="IAcS0Ia20ktI"
# ## Lecture 1: The Very Basics of Financial Market
#
# We will discuss how to perform the most basic analysis of stock market.
#
# Starting with how to process price data, we will describe the statistical properies of equity returns. We will then introduce the Sharpe Ratio as very basic measure of portfolio performance and how volatility scaling can improve the performance of a buy-and-hold strategy.
# + [markdown] id="G63BgwxK0kw4"
# #### Price data
#
#
# Price data is determined by two characteristics: frequency of data and type of data. Frequency of data can range from microsecond data (tick level) to end-of-day data (daily resolution). We will focus on data that have a minute resolution or above. Major types of data include bar data, trade data and quote data.
#
# In an exchange, quote data (Orderbook) is displayed for a stock in real time which has the bid and ask price (with size) submitted by different market participants. Market makers are responsible for providing liquidity to different traded assets, by placing quotes continuously. A trade is made if someone is willing the buy the security at the ask price (or sell the security at the bid price). Trade data is simply a collection of all the trades of a stock made at different exchanges, along with delayed data reported from dark pools. Bar data is then aggregrated using the trade price and volume over an interval, which usually is a minute, an hour and a day.
#
# A typical bar would consist of 5 data fields: Open, High, Low, Close and Volume (OHLCV). For free data sources you can find online, volume data will often have the most discrepancies, as off-exchange trades are often ignored/miscounted.
#
# The construction and processing of market data feeds invovles a lot of technical knowledge that is essential for market makers and quants reponsible for trade execution, but not too important for long-term investors.
#
# We will first illustrate some basic concepts in finance using the most famous ETF, SPDR S&P 500 (SPY) which keep tracks of the most important stock market index S&P 500 in the world. This ETF invest in the 500 biggest stocks listed in the US, weighted by market-cap. (The exact rules to construct S&P 500 index is more complicated than this)
# + id="sOv7aiU4y5LD" colab={"base_uri": "https://localhost:8080/"} outputId="2e7dad2c-6099-4386-a563-c82e6510b013"
# ! pip install yfinance
import yfinance as yf
import numpy as np
import pandas as pd
# + id="UGIWsMQn08hm"
# Download the full daily price history for SPY via yfinance
selected_etf = yf.Ticker("SPY")
hist = selected_etf.history(period="max")
# + [markdown] id="bjLMiCcl2RgQ"
# As stocks can pay dividend and have splits, we usually use adjusted price to perform statistical analysis. From the adjusted price, we can derive the total return of the stock (which is the sum of price return and dividends, reinvested).
#
# Always check whether dividend and splits are adjusted before running your models.
#
# Here I provide two examples of why data management is important and how listed companies can use that as their own advantage.
#
# - Example 1: Stock splits
# A stock split in theory do not change the underlying value of the company, but for retail investors looking at the price at their apps, it does look cheaper, so people buy more. Tesla has undergone stock splits more than twice in the last 5 years and stock price rises significantly after splits.
#
# - Example 2: Ticker name change
# GSX Techedu (GOTU) changes its ticker from GSX even without any major changes such as M&A or corporate restructuring. For retail investors, it appears as new stock for them. It also distances itself from the negative press as Muddy Waters and others are holding a short position against them. Search engine results for GSX and GOTU are different. It seems to me like a form of Search Engine Optimisation.
# After the name change, some of the free apps that most retail traders use to keep track of stock prices cannot recognise this change and return incomplete historical data.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="PFnk5yDnzRnA" outputId="1388c871-60df-4d60-9836-d84a272729ba"
hist.tail()
# + [markdown] id="neOtKTrg3RUp"
# Stock price are not stationary as they tend to grow over time. (Exception: Japanese stock market).
#
# For building models, it is better to use the log-return instead. Log-return is better than percentage return since it is additive over different time periods.
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="ADvWF4nA2K27" outputId="a5e4d640-74df-4eb3-b421-7b1b2a300d05"
hist['Close'].plot()
# + id="0ciVXp6-62oL"
## Calculating log-returns: log(P_t) - log(P_{t-1}); the first row is NaN
hist['log_return'] = np.log(hist['Close']) - np.log(hist['Close'].shift(1))
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="I4-VbUNQ62w_" outputId="b1888be1-a792-4fff-958d-0a9579ffc3e2"
hist['log_return'].plot()
# + [markdown] id="3ihaCXb48JP6"
# #### Statistical properties of stock market log-returns
#
# - It has a positive mean, which is bigger than the average 10-year Treasury yield. This is called the risk premium of equity market
#
# - It has a negative skew, which demonstrates the fact the stock market experiences sharp drawdowns over a short period (2008,2020)
#
# - It has a kurtosis greater than 3, suggesting it is more fat-tail than a normal distribution.
#
# - The assumption of log-return following Gaussian distribution(Geometric Brownian Process) is the foundation of Black-Scholes Option pricing model.
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="nZoYtbBl7gDQ" outputId="2dc15e89-1208-4c22-a33e-e7636c316a42"
hist['log_return'].plot.hist(bins=500)
# + colab={"base_uri": "https://localhost:8080/"} id="qfGLoJP57gNr" outputId="038f634b-256a-4cfb-970e-746928795447"
hist['log_return'].describe()
# + colab={"base_uri": "https://localhost:8080/"} id="EUDSitJq8Nxq" outputId="dec36843-2de9-4df5-f2f2-7cec0fd4c302"
hist['log_return'].skew()
# + colab={"base_uri": "https://localhost:8080/"} id="P3n8u5Ds8sP7" outputId="02947079-930a-4766-d0c3-e2bc4a382232"
hist['log_return'].kurtosis()
# + [markdown] id="J8M3LZvw9Tdq"
# ## How to analyse returns in stock market
#
# We can separate the return of stock market into two parts. One as the close-to-open return (overnight return) and open-to-close return (intraday return)
#
# We can see intraday return has a mean close to zero, suggesting the mean-reversion nature of stock price during market hours.
#
# Most of the stock market return, for a buy-and-hold strategy in the long run, comes from holding stocks overnight. This can be considered as compensation of the risk to hold stocks overnight.
#
# Holding stocks over the weekend and holiday will provide a similar risk premium.
#
#
# Calendar effects in stock market are often an artificat of human habits. Examples include stock return before and after the tax filing, end of month/quarter rebalancing of mutual funds, expiry of monthly and quarterly options.
#
# + id="0fNLlirQ9TsL"
# Split the daily close-to-close move into its open-to-close (intraday)
# and close-to-open (overnight) log-return components.
hist['intraday_return'] = np.log(hist['Close']) - np.log(hist['Open'])
hist['overnight_return'] = np.log(hist['Open']) - np.log(hist['Close'].shift(1))
# + colab={"base_uri": "https://localhost:8080/"} id="iDxUX7lR9TzK" outputId="ae24410c-740a-41fd-b3a3-0b6bb4d977a5"
hist['intraday_return'].describe()
# + colab={"base_uri": "https://localhost:8080/"} id="DeLlvVDL9ygL" outputId="b7b83972-ea84-4b22-a19b-e152f9c2fb43"
hist['overnight_return'].describe()
# + [markdown] id="aHj9sY5vDoyb"
# ## Volatility Scaling
#
# We can measure the performance of a trading strategy by the sharpe ratio, which is ratio of excess return over to the volatility.
#
# Sharpe_Ratio = $\frac{r-r_f}{\sigma}$, where r is the return of the portfolio and $\sigma$ the standard deviation of the return of the portfolio.
#
# Volatility is detrimental to the long-term growth of capital as a strategy down 20% needs a 25% growth to recover.
#
# By adjusting our stock holdings so that portfolio is at a constant volatility, so that we buy more during low volatility times and buy less during high volatility times. The Sharpe ratio can be improved compared to a constant buy-and-hold strategy.
#
# Volatility plays an important role in portfolio construction and risk management. Risk parity, a famous hedge fund strategy aims to hold constant volatility exposure to different assets in the portfolio, rather than equal weighted.
#
#
# + id="IyzO1TBkDrc6"
selected_etf = yf.Ticker("SPY")
hist = selected_etf.history(period="max")
hist['return'] = hist['Close'].pct_change()
# Realised vol over a ~1-month (21 trading day) rolling window
hist['volatility'] = hist['return'].rolling(21).std()
# ...and its ~1-year (21*12 day) rolling mean, used as the vol target
hist['volatility_mean'] = hist['volatility'].rolling(21*12).mean()
# + id="1K0cAMMZEnZk"
# Weight portfolio by comparing the current 1-month volatility with that of the most recent year
hist['vol_adj_return'] = hist['return'] / hist['volatility'] * hist['volatility_mean']
hist['vol_port_size'] = hist['volatility_mean'] / hist['volatility']
# + colab={"base_uri": "https://localhost:8080/"} id="wx5_GuytleBT" outputId="18a42081-ce7b-48d6-8904-fd82806bd172"
hist['return'].mean() / hist['return'].std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/"} id="3DvKPnaGDrl6" outputId="9f9ee02e-b553-4519-f492-7fbf6724030a"
## Improvement of Sharpe ratio of constant vol strategy
hist['vol_adj_return'].mean() / hist['vol_adj_return'].std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="pImXy0MekkCI" outputId="f6a652a0-2084-41c9-f208-3d75e69cbf10"
(1+hist['vol_adj_return']).cumprod().plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="gKFZLhyKnBb9" outputId="eea33ad7-3379-4f2e-8603-95726e29daa7"
(1+hist['return']).cumprod().plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="nzh0S2t1oe17" outputId="559d6a36-c15c-4015-93ae-ad45b25e35be"
hist['vol_port_size'].plot()
# + [markdown] id="IwvVaduupBeY"
# To carry out the above volatility adjusted strategy, you need to have access to leverage (up to 3x).
# + id="p9PMXtbsofEH"
selected_etf = yf.Ticker("EEM")
hist = selected_etf.history(period="max")
# + id="MitbFIhQpPcp"
hist['return'] = hist['Close'].pct_change()
hist['volatility'] = hist['return'].rolling(21).std()
hist['volatility_mean'] = hist['volatility'].rolling(21*12).mean()
# Weight portfolio by comparing the current 1-month (21-day) volatility with
# its mean over the most recent year. (The code uses a 21-day window, not
# the "3-month" window the original comment claimed.)
hist['vol_adj_return'] = hist['return'] / hist['volatility'] * hist['volatility_mean']
hist['vol_port_size'] = hist['volatility_mean'] / hist['volatility']
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="tgXnlwiHpPf0" outputId="955ddb68-3738-4f73-b5c4-707bbdcc8ccb"
(1+hist['vol_adj_return']).cumprod().plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="mhb4S5bUpPjG" outputId="997ac20e-e584-4774-c24a-be7a06034761"
(1+hist['return']).cumprod().plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="FJTTRUTHpPxc" outputId="88d2b602-60f3-4ef2-ce6e-65d638b7fed4"
hist['vol_port_size'].plot()
# + [markdown] id="s1A_y4Jx1krd"
# The above strategy also applies to EM stock market. As long as the ETF will generate positive return in the long run, volatility scaling can improve sharpe ratio and return of strategy.
#
# + [markdown] id="96TuDKjT-rV5"
# ## Recommended readings
#
# - Algorithmic Trading: Winning Strategies and Their Rationale, <NAME>
#
# - Advances in Financial Machine Learning, <NAME>
#
# - A Complete Guide to the Futures Market: Technical Analysis, Trading Systems, Fundamental Analysis, Options, Spreads, and Trading Principles (Wiley Trading), <NAME>
# + [markdown] id="lalSK3gkEuir"
# ## Lecture 2 Market Regimes and Volatility
#
# Volatility is undesirable since they erode long term return of the portfolio. Investors are looking for ways to reduce portfolio volatility. For asset managers, holding government bonds and derivatives(put options) are common means to hedge against equity holdings they have.
#
# A put option can be thought as an insurance policy in a informal way. From this perspective, asset managers buy put options (in reality put spreads or other more sophiscated strategies) to reduce drawdown in portfolio during market distress.
#
# The benefit of reducing drawdown and thus volatility in portfolio is that it allows rebalancing after market crash. Investors can sell their bonds or gains from put options to buy cheap stocks.
#
# In this lecture, we will show that the buy-and-hold strategy is inherent a short volatility strategy, which means it benefits when market volatility is low/falling.
#
# + [markdown] id="-j-lilF2GRFM"
# ### Fear Gauge VIX
#
# VIX is considered as a fear gauge of global market.
#
# VIX is derived from the implied volatility of front-month options of SPX index.
#
# It is not possible to invest in VIX directly. There are volatility ETFs which keep tracks of the VIX Futures, where traders buy and sell to express their views on market volatility. VXX is an example of volatility ETF that are widely traded by retail.
#
# Due to rollover costs, VXX will generate a much worse return than VIX.
#
# A crucial feature is that holding VIX futures, and similarly buying Put Options on SPY generates a negative return in the long run.
#
# Considering the correlation between VXX (Long volatility) and SH (Short SPY) we conclude the long volatility strategies are inherently shorting the market.
#
# Assuming we can short VXX (that is short volatility) without additional borrowing cost, it will have a high correlation with the market return (long SPY). This is why we call buy-and-hold equity as inherenetly a short volatility strategy.
#
# Since buy-and-hold VXX will simply cancels the return from holding SPY, therefore VXX alone is not a good portfolio hedge.
#
#
#
# + id="4d9aJTtREt8q"
## VIX index, the fear gauge of the global equity market
selected_etf = yf.Ticker("^VIX")
# + id="Ty8SobzXE0Ly"
vixhist = selected_etf.history(period="max")
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="82D-txXkGBRy" outputId="3f0ad6d8-6f6a-4a4d-cb63-a46a06a6f26a"
vixhist['Close'].loc['2018-01-01':].plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="TmQ6NUdiGBU4" outputId="f2f22044-d1fe-423b-82f3-5a306e3956ec"
selected_etf = yf.Ticker("VXX")
vxxhist = selected_etf.history(period="max")
vxxhist['Close'].loc['2018-01-01':].plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="dWFMm0AcHlCf" outputId="549d60c7-5fa7-4b3e-dd8e-aa84eb3d4e59"
selected_etf = yf.Ticker("SH")
shhist = selected_etf.history(period="max")
shhist['Close'].loc['2018-01-01':].plot()
# + colab={"base_uri": "https://localhost:8080/"} id="tWIR_qI6H9aa" outputId="319689f5-6c9f-4f45-d3f8-9566de6c9264"
## Very high correlation between long volatility (VXX — the data loaded above;
## the original comment said UVXY) and short equity (SH)
np.corrcoef(shhist['Close'].loc['2020-01-01':],vxxhist['Close'].loc['2020-01-01':])[0,1]
# + [markdown] id="LNeYsYpawDBQ"
# There are more than one kind of volatility ETFs
#
# - VXX keep track of the S&P 500 VIX Short-Term Futures Index Total Return
#
# - VXZ keep track of S&P 500 VIX Mid-Term Futures Index.
#
# VXZ has a less significant contango effect than VXX, but keep track of the spot VIX less well.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="FIryVxYgH9jT" outputId="4dba3d7b-44da-43ef-98f5-6b0b0384fac1"
selected_etf = yf.Ticker("VXZ")
vxzhist = selected_etf.history(period="max")
vxzhist['Close'].loc['2018-01-01':].plot()
# + [markdown] id="HXxO80AbYhOk"
# In the following lectures we will introduce different strategy that aims to reduce portfolio drawdown during market drawdowns
#
# - Commodities Trend Following (Uncorrelated return during demand and supply shocks)
# - Risk Parity (Leveraged bets on interest rates going down)
# - Active Long Volatility (Time when to buy call/put options)
#
#
# + [markdown] id="89cgzzZOQxwr"
# ### Example of Market Regime model
#
# We will provide a very simple example of market regime model using Hidden Markov Model, which is equivalent conditioning stock returns on implied volatility (VIX).
#
# In the industry, much more sophiscated models are used. TwoSigma builds a GMM model which classify market into four different states based on the return of 17 risk-premium factors. More details will be provided in Lecture 5
# + colab={"base_uri": "https://localhost:8080/"} id="gUs5MTByVGqu" outputId="c91da2e3-a87c-4eb4-c2b4-8aa49bc8dccd"
# ! pip install yfinance
# ! pip install hmmlearn
# + id="m_40ji2fQ6nA"
from hmmlearn.hmm import GaussianHMM
import yfinance as yf
import numpy as np
# Daily SPY returns, computed here from Open prices (later cells use Close).
selected_etf = yf.Ticker("SPY")
spyhist = selected_etf.history(period="max")
spyhist['Return'] = spyhist['Open'].pct_change()
# Train on 1996-2017, hold out 2018 onwards for evaluation.
trainrets = spyhist.loc['1996-01-01':'2017-01-01']
testrets = spyhist.loc['2018-01-01':'2022-02-25']
# + id="ggCPI4wwRMcS"
# Fit a 2-state Gaussian HMM on the 1-D training return series.
hmm_model = GaussianHMM(
    n_components=2, covariance_type="full", n_iter=1000
).fit(trainrets['Return'].values.reshape(-1, 1))
# + colab={"base_uri": "https://localhost:8080/"} id="iczWj-ZHVtqd" outputId="9dd2a5d9-b91a-4863-a7a8-b437c2672b1c"
# Label each test day with its most likely hidden state.
# NOTE(review): testrets is a .loc slice of spyhist, so these assignments may
# trigger a SettingWithCopyWarning — consider .copy() when slicing.
testrets['State'] = hmm_model.predict(testrets['Return'].values.reshape(-1, 1))
# 'Filtered' zeroes the price out in state 0, so only state-1 days plot.
testrets['Filtered'] = testrets['State'] * testrets['Close']
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="IouNlEACQySv" outputId="71b439ce-be12-4ac8-f2ea-13002c4df750"
testrets['State'].loc['2020-01-01':].plot()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="ndyAi0xRV8m2" outputId="9d22d7e0-12cc-456f-acb1-e6d3066728fa"
testrets['Filtered'].loc['2020-01-01':].plot()
# + colab={"base_uri": "https://localhost:8080/"} id="yKwTe2weV8wX" outputId="75b5fb6e-6287-413e-c662-4394a44a8383"
## State 0 is bear market
## State 1 is bull market
# Mean daily return per hidden state.
testrets.groupby('State').mean()['Return']
# + colab={"base_uri": "https://localhost:8080/"} id="35lv3CNAYzW7" outputId="7b285e6a-b011-461e-ddb2-b2d4e5d46e29"
# Daily return volatility per hidden state.
testrets.groupby('State').std()['Return']
# + [markdown] id="jqTBNuCqfOgc"
# Can we obtain a similar model by considering the value of VIX?
# + id="L3SV4y_Ddaa9"
selected_etf = yf.Ticker("SPY")
spyhist = selected_etf.history(period="max").loc['1996-01-01':'2022-01-01']
selected_etf = yf.Ticker("^VIX")
vixhist = selected_etf.history(period="max").loc['1996-01-01':'2022-01-01']
spyhist['Return'] = spyhist['Close'].pct_change()
spyhist['VIX'] = vixhist['Close']
spyhist['VIX_State'] = np.where(spyhist['VIX']<25,1,0)
trainrets = spyhist.loc['1996-01-01':'2015-01-01']
testrets = spyhist.loc['2018-01-01':'2022-01-01']
# + colab={"base_uri": "https://localhost:8080/"} id="dm8pdx5Edae3" outputId="5885e11c-0d2e-44c8-ba90-5121d9a23d23"
testrets.groupby('VIX_State').mean()['Return']
# + colab={"base_uri": "https://localhost:8080/"} id="UDZHcdCnW2E6" outputId="e8a5d6d6-21f6-4602-e6b2-ea4589256c43"
testrets.groupby('VIX_State').std()['Return']
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="vXe0NVDzdah-" outputId="289b2d06-2d9c-4676-f684-b893e324bcaf"
testrets['Filtered'] = testrets['Close'] * testrets['VIX_State']
testrets['Filtered'].loc['2018-01-01':].plot()
# + [markdown] id="LQVU1JYDXSJS"
# Splitting the test period by the value of VIX provides a similar model as HMM. It suggests machine learning methods, applied on price data only might not outperform simple models based on econometrics.
#
#
# + [markdown] id="cUjFAQA8KcRn"
# Changing the number of of hidden states in HMM model. The returns are clustered in three different states.
#
# 1. Low volatility states with positive return
# 2. Medium volatility states with near zero return
# 3. High volatility states with negative return
#
# + id="H4Vgt8lKV8-l"
hmm_model = GaussianHMM(
n_components=3, covariance_type="full", n_iter=1000
).fit(trainrets['Return'].dropna().values.reshape(-1, 1))
# + colab={"base_uri": "https://localhost:8080/"} id="UdaIkykerxQn" outputId="73f41c06-710d-4745-eaa7-6129cce10630"
testrets = spyhist.loc['2018-01-01':'2021-12-25']
testrets['Shifted_Return'] = testrets['Return'].shift(-1)
testrets['State'] = hmm_model.predict(testrets['Return'].values.reshape(-1, 1))
testrets['Filtered'] = testrets['State'] * testrets['Close']
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="eUDFPquPrxY4" outputId="fda02e6d-6ca6-4cac-e513-f5c9079af205"
testrets['State'].plot()
# + colab={"base_uri": "https://localhost:8080/"} id="LdWbBGkwsMUl" outputId="19912763-b9a7-40dc-9f72-51d0329b510e"
testrets.groupby('State').mean()['Shifted_Return'] * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/"} id="HjUfgEySsMgF" outputId="c96d636a-f2a9-4b5b-bbfd-4525c2b081d5"
testrets.groupby('State').std()['Shifted_Return']
# + colab={"base_uri": "https://localhost:8080/"} id="at2xVs1nrxcR" outputId="bcdad5c5-ae06-41a5-f2d5-5d614f177bfc"
testrets.groupby('State').count()['Return']
# + id="qUUDfSp-Fcep"
# + [markdown] id="T7A6jVVUHReq"
# ## Recommended readings
#
# - Market Regime Identification Using Hidden Markov Models https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3406068
#
# - Regime-Switching Factor Investing with Hidden Markov Models https://www.mdpi.com/1911-8074/13/12/311/htm
#
# - A Machine Learning Approach to Regime Modeling https://www.twosigma.com/articles/a-machine-learning-approach-to-regime-modeling/
#
# + [markdown] id="_D4UaX5zZ-jH"
# ## Lecture 3 Portfolio Optimisation
#
# Examples of portfolio construction
#
# - 60/40
# - mean-variance portfolio optimisation
# - Hierarchical Risk Parity (HRP) portfolio
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="tLryX0RZ2lJG" outputId="0d96e71f-c827-4ac3-bfed-751b4b3a7a1c"
# ! pip install yfinance
import yfinance as yf
import numpy as np
import pandas as pd
# + id="LpoUfR-9I2dc"
# Download long-duration US Treasuries (TLT) and US equities (SPY).
selected_etf = yf.Ticker("TLT")
tlthist = selected_etf.history(period="max")
selected_etf = yf.Ticker("SPY")
spyhist = selected_etf.history(period="max")
# + id="kil8XD4rJJW9"
# Daily returns from 2008 onwards; rows before 2008 stay NaN and are dropped
# below by dropna().
tlthist['return'] = tlthist['Close'].loc['2008-01-01':].pct_change()
spyhist['return'] = spyhist['Close'].loc['2008-01-01':].pct_change()
# + id="JRG4CHy7I2mj"
# 60/40 portfolio
classic = 0.6 * spyhist['return'] + 0.4 * tlthist['return']
classic = classic.dropna()
# + id="FUpRRKqGI26D"
# Cumulative growth of $1 invested in the 60/40 mix.
portfolio_classic = (1 + classic).cumprod()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="zOiQk-kcJilN" outputId="0828db24-bf45-4925-fcab-95dc7f1c6423"
portfolio_classic.plot()
# + colab={"base_uri": "https://localhost:8080/"} id="rU3DdFVOJlPe" outputId="3cc6a1d4-920b-4f79-fe39-eca3c4a1b50c"
# Annualised Sharpe ratio (risk-free rate assumed zero).
classic.mean() / classic.std() * np.sqrt(252)
# + [markdown] id="W4p3hn4pJudp"
# The sharpe ratio of 60/40 portfolio is better than the buy and hold portfolio and volatility-scaled portfolio of SPY. Over the last 20 years, bond is a successful hedge for stocks.
#
# + colab={"base_uri": "https://localhost:8080/"} id="nMsm-u_GJ_B5" outputId="4b7d4b52-d090-4d43-8eaf-785cf934c32b"
# ! pip install PyPortfolioOpt
# + id="vHpxOR-2J_Hh"
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
# + [markdown] id="7Ez9OqFqDkjg"
# We will demonstrate a very basic example of Mean-variance optimization using different ETFs
#
# ETF selection
# - SPY: US Stocks 500 (Core)
# - QQQ: US Tech Stocks 100 (Growth)
# - TLT: US 20Y+ Treasuries (Interest Rates)
# - LQD: US Investment Grade Corp Bonds (Credit)
# - GLD: Gold (Precious Metals)
# - DBC: Invesco DB Commodity Index Tracking Fund (Commodities)
# + id="fYsgbkp-Beem"
prices = list()
ETF_list = ['SPY','QQQ','TLT','LQD','GLD','DBC']
for ETF in ETF_list :
selected_etf = yf.Ticker(ETF)
etfhist = selected_etf.history(period="max")
prices.append(etfhist['Close'].loc['2005-01-01':])
# + id="dK2eVI7sBepC"
merged_prices = pd.DataFrame(pd.concat(prices,axis=1))
merged_prices.columns = ETF_list
# + id="diQufb6XJ_RK"
from pypfopt.expected_returns import mean_historical_return
from pypfopt.risk_models import CovarianceShrinkage
mu = mean_historical_return(merged_prices.loc['2008-01-01':])
S = CovarianceShrinkage(merged_prices).ledoit_wolf()
# + id="adPboLWMK598"
from pypfopt.efficient_frontier import EfficientFrontier
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe()
# + id="z7L5Ry7zK6HT" colab={"base_uri": "https://localhost:8080/"} outputId="c0c466dd-475f-4e11-8c35-e2bffaeca866"
cleaned_weights = ef.clean_weights()
cleaned_weights
# + colab={"base_uri": "https://localhost:8080/"} id="yg1h9qr7CpIM" outputId="13187dcd-01a9-476d-e2e9-d7b75794111c"
ef.portfolio_performance(verbose=True)
# + [markdown] id="Km2a-3GEDhp3"
# Lets use a different portfolio optimisation method, Hierarchical Risk Parity (HRP) portfolio.
#
# Risk Parity portfolio tends to overweights bonds (LQD,TLT).
# + id="d8IOG6JQCpR4"
from pypfopt.hierarchical_portfolio import HRPOpt
# + id="JyR7mdh_EapC"
ef = HRPOpt(merged_prices.loc['2008-01-01':].pct_change(), S)
# + colab={"base_uri": "https://localhost:8080/"} id="T2s8vobPErFk" outputId="09e06492-1148-4a2d-effb-ac28642b8d03"
ef.optimize()
# + colab={"base_uri": "https://localhost:8080/"} id="7VhPDaE1E5zx" outputId="7c0015c3-c8eb-4c81-e3aa-0882574543b4"
ef.portfolio_performance()
# + id="pz7UaVEnIerp"
# + [markdown] id="bX2wwULYGbZ7"
# We can also change the optimisation criterion for the Efficient Frontier to penalise downside volatility only. For long-only portfolios, it is often better to use the Sortino Ratio, which penalises downside volatility only.
# + id="ilVnFuSiGauN"
from pypfopt import expected_returns, EfficientSemivariance
# + colab={"base_uri": "https://localhost:8080/"} id="IycU26QcGa0X" outputId="792f7557-dbe6-463a-aec0-30f8ffb8101a"
mu = expected_returns.mean_historical_return(merged_prices.loc['2008-01-01':])
historical_returns = expected_returns.returns_from_prices(merged_prices.loc['2008-01-01':])
es = EfficientSemivariance(mu, historical_returns)
es.efficient_return(0.1)
# + colab={"base_uri": "https://localhost:8080/"} id="r3TEd7LEGwJ5" outputId="255e92c8-eaaf-4a03-a7c4-05342118a397"
weights = es.clean_weights()
print(weights)
es.portfolio_performance(verbose=True)
# + id="HeddyUyOHBNa"
target_portfolio = list()
for target_return in [0.05,0.1,0.15]:
es = EfficientSemivariance(mu, historical_returns)
es.efficient_return(target_return)
weights = es.clean_weights()
weights['Sortino'] = es.portfolio_performance()[-1]
weights['Target'] = target_return
target_portfolio.append(weights)
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="FPUTMBDIHnnm" outputId="a9b20038-cb27-4ccc-f0f4-1d2a2b28ebc7"
pd.DataFrame(target_portfolio)
# + [markdown] id="CSaM1P4-LEDh"
# There are other portfolio optimisation methods such as Black-Litterman which can incoporate our guess on portfolio returns
# + [markdown] id="TjVGl7Kd3kxY"
#
# #### Volatility Scaling is what you need for portfolio optimisation
#
# We will demonstrate a very simple but effective way to construct portfolio which can be applied to individual assets and strategies.
#
# We decide a lookback period (252 days) to calculate the rolling volatility of each asset. For a given risk level (15% p.a.), we scale the position we will take for each asset so that we will have equal risk in each asset, before taking into account of correlation between strategies.
#
# We then take the simple average over these strategies to form our portfolio.
#
# We can then calculate the overall volatility of the portfolio, which will be lower than the given risk level due to negative correlation between strategies. We can then leverage the whole portfolio to the target risk level.
#
# While this method is different from risk parity, as it does not explicitly consider the correlation between assets. The two-step optimisation process is more robust than risk parity when correlation structure between stratgies/assets changes.
#
# + id="d8hqD7d5LPyI"
merged_returns = merged_prices.pct_change().dropna()
merged_std = merged_returns.rolling(252).std() * np.sqrt(252)
# + id="789QBJbq2eOM"
merged_pos = 0.2 / merged_std
# + id="IEVFqD7Z2eRV"
merged_portfolio = merged_pos * merged_returns
# + id="WRM6BNOP2eUr"
portfolio = (1 + np.mean(merged_portfolio.loc['2008-01-01':,],axis=1)).cumprod()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="o1dKFWNn31Yp" outputId="c398288b-537a-4444-b134-1d992af23b2c"
portfolio.plot()
# + id="z56IfjYt4EaP"
average_return = np.mean(merged_portfolio.loc['2008-01-01':,],axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="xXEyzr1-4EdH" outputId="8e79d0ed-8023-498e-b26d-0e8c237cf33f"
average_return.mean() / average_return.std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/"} id="Uvaf_zW66VK9" outputId="856a5b5e-9c5f-484c-eb25-0d2e7361a39c"
average_return.mean() * 252, average_return.std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="9R4KgYdU60cr" outputId="5093c018-ffa2-4dc8-d4c5-20f0907add6a"
merged_pos.dropna().plot()
# + [markdown] id="g0AJgaET4S9w"
# The sharpe ratio we obtained is better than the one we obtained by Mean-Variance optimisation and other optimisation methods. Our method is also more robust as it is based on simple rules. Some portfolio optimisation methods might fail to converge due to solving a non-convex problem.
#
# It is also better than the traditional 60/40 portfolio.
#
# Taking a simple average of vol-scaled strategies can outperform more sophiscated portfolio optimisation methods unless one has a good understanding on the correlation structure of the strategies.
#
# As above, we assume we can take leverage without incurring additional costs. For our example, we assume we can leverage on US Investment Crop Bonds (LQD) up to 5x, which is unlikely for retail to achieve.
#
#
#
#
#
#
# + [markdown] id="3qiGkuL1rMLQ"
# ### Recent methods in portfolio optimisation
#
# The ETF we considered above are the building blocks of the global financial market. We have not included crypto due to its lack of history.
#
# Pensions funds often diversify into real estates and private equity and other illuquid assets. We will not offer an in-depth discussion here.
#
# Since the financial crisis, due to high fees and under-performance compared to the market, pensions funds are limiting their allocation to hedge funds. There are many hedge fund strategies, some requires sophicated market knowledge such as Merger Arbitrage and Credit Funds so we will not consider these here.
#
# In the following tutorials we will focus on CTA funds and long-short equity funds. As the turnover period of these strategies is often measured in days/months, it is still possible for retail to implement a crude version of these strategies, through the use of different ETFs, which we will demonstrate below.
#
#
# Many market-neutral equity funds to employ machine learning methods to build strategies and models. The data required to build a market-neutral and factor neutral fund is often not accessible to retail investors. Numerai allows retail traders to build their own market-neutral equity strategies with all the data and infrastructure provided.
#
#
# While the strategies themselves can be based on both momentum and mean-reversion, machine learning models works well when correlation is stationary. When there are shocks in the market, machine learning models in general will perform poorly. We suggest during market stress, trend following and other long volatility strategies would act as a good diversifier for machine learning based strategies.
#
#
# In the later lectures, we will explore how to include long volatility strategies as part of the portfolio to achieve long term growth. These ideas are suggested by <NAME> (Artemis Capital Management) and Mutiny Funds. Most of the time these strategies are only available for professional investors.
#
#
#
# An example allocation for retail investors
#
# - 50% Equity Index Fund
# - 15% Commodities/ETF Trend Following
# - 15% Machine-Learning based Market Neutral Fund (Numerai)
# - 20% Defensive Assets (Cash/Government Bonds/Gold)
#
#
# + [markdown] id="FNF3Vf_olt6f"
# ## Recommended Readings
#
#
# - The Allegory of the Hawk and Serpent https://www.artemiscm.com/research-market-views
# - The Cockroach Fund https://mutinyfund.com/cockroach/
# - Numerai https://numer.ai/
#
# + [markdown] id="cF2bLxKLKmH9"
# ## Lecture 4 Technical Analysis
#
# Technical analysis refers to the use of price data to predict future returns.
#
# Under efficient Market Theory Hypothesis, it is impossible to use technical analysis to outperform the market. However, systematic trend following has achieved outsizing returns during 1980s.
#
# In factor models (to be introduced in Lecture 5), we often consider style factors which refers to selecting assets based on certain known price and fundamentals based attributes.
#
# For price-based style factors, mean-reversion and momentum are often considered.
#
# Mean-reversion relies on the assumption that the market over-reacts to news and shocks. Therefore, it is profitable to buy the underperformers and sell them after they recover.
#
#
# Mean-reversion can generate alpha because they often take on liquidity risk (Example: Buying EM credit after some geo-political events).
# Another reason is due to positioning effect, liquidations of hedge funds can often result in high quality assets being sold at a discount. Mean-reversion take advantage of the temporary imbalance of order flow.
# Another example would be retail traders being forced out as stop losses are hit.
#
# The mantra of buy-the-dip is a form of mean-reversion which is implicitly supported by the FED.
#
# Momentum can generate profits because of investor's herding behaviour. Market participants have a tendency to chase winners, such as Tesla. For stocks that have an important weight in the index, a positive reinforcement loop can result as the stock price increases, its weighting increases, resulting a buying demand from passive investing funds.
#
# Momentum can also be consider as a way of avoiding losers. When share price drops continuously, it is often the case where the company or the industry is getting out of favour (Example: High-Street retail). A downward spiral will result as the financial conditions will get worse (Bonds will get downgraded when company cannot make a profit for a few years), which makes recovery even more difficult.
#
#
# For stocks, we often consider cross-sectional momentum, where we buy stocks that have best performance in the last year and sell stocks that have the worst performance in the last year. We can also rank momentum within each sector and industry to eliminate market risk.
#
#
#
#
#
#
#
#
#
#
#
#
# + [markdown] id="w-pf0it8aVGW"
# ### Moving Averages Crossover
#
# We will use Moving Averages Crossover, which is a very simple trend following strategy.
#
# The strategy requires two input parameters: a short-term lookback and a long-term lookback. We compute the moving average of the price over these two lookback periods. We go long the asset when the short-term MA is greater than the long-term MA and short the asset when the short-term MA falls below the long-term MA.
#
# How to make a robust trend following strategy
# - Increase the number of assets traded
# - Use different lookback periods to capture both short and long term trends
# - Use different entry and exit rules
# - Scale asset returns by volatility if leverage can be used
# - Consider to be long-only as it is more difficult to profit from down trends, especially for equities.
#
# + id="G9stvpxAK7NQ"
import pandas as pd
# + id="GXDQR_XmKmax"
prices = list()
ETF_list = ['SPY','TLT','GLD','SLV','DBC','UUP']
for ETF in ETF_list :
selected_etf = yf.Ticker(ETF)
etfhist = selected_etf.history(period="max")
etfhist['Return'] = etfhist['Open'].pct_change()
prices.append(etfhist['Return'])
# + id="kIdrVIv7KmkZ"
merged_prices = pd.DataFrame(pd.concat(prices,axis=1)).dropna()
merged_prices.columns = ETF_list
# + id="o_jQXjwpuGdU"
def Moving_Average(merged_prices,ticker='DBC',fast=5,slow=50,long_only=True,):
    """Backtest a moving-average crossover strategy on one return series.

    Parameters
    ----------
    merged_prices : DataFrame of daily returns, one column per ticker.
    ticker : column to trade.
    fast, slow : lookback windows (rows) for the two moving averages.
    long_only : if True the position is 1/0 (long or flat), otherwise 1/-1.

    Returns
    -------
    DataFrame holding the raw return, the next-day return
    ('Shifted_Return'), the rebuilt price level, both moving averages and
    the strategy return ('Portfolio_return').
    """
    frame = pd.DataFrame(merged_prices[ticker])
    # Today's signal is paid with tomorrow's move, avoiding look-ahead bias.
    frame['Shifted_Return'] = frame[ticker].shift(-1)
    # Rebuild a price index from the returns so moving averages can be taken.
    frame['Price'] = (1 + frame[ticker]).cumprod()
    fast_col = 'MA_{}'.format(fast)
    slow_col = 'MA_{}'.format(slow)
    frame[fast_col] = frame['Price'].rolling(fast).mean()
    frame[slow_col] = frame['Price'].rolling(slow).mean()
    # Long when the fast MA sits above the slow MA; flat (or short) otherwise.
    short_leg = 0 if long_only else -1
    signal = np.where(frame[slow_col] < frame[fast_col], 1, short_leg)
    frame['Portfolio_return'] = signal * frame['Shifted_Return']
    return frame
# + [markdown] id="HGMGJ3RysCBM"
# Example of trend following strategies applied on Commodity ETF (DBC)
#
# Trend Following improves significantly the sharpe ratio compared to buy-and-hold.
# + id="eyZdXN_YK5s4"
stock = Moving_Average(merged_prices,ticker='DBC',fast=5,slow=50,long_only=False)
# + colab={"base_uri": "https://localhost:8080/"} id="zcHj6HI2K0CU" outputId="8b587f97-4789-41b5-cdeb-b6c6144b78f0"
stock['Portfolio_return'].loc['2005-01-01':].mean() / stock['Portfolio_return'].loc['2005-01-01':].std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/"} id="8dMDEpEeb17p" outputId="09be679d-d51a-4725-aee2-5460036dd596"
stock['Shifted_Return'].loc['2005-01-01':].mean() / stock['Shifted_Return'].loc['2005-01-01':].std() * np.sqrt(252)
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="FH9YgINxp832" outputId="cd437a9a-97af-43ad-a460-eb42fae185e8"
(1+stock['Portfolio_return'].loc['2005-01-01':]).cumprod().plot()
# + id="tB7gwP3SxQUV" colab={"base_uri": "https://localhost:8080/"} outputId="fd981133-2a39-4f0d-f119-5fbf60537c1f"
stock['Portfolio_return'].loc['2005-01-01':].describe()
# + colab={"base_uri": "https://localhost:8080/"} id="TvM3vPSdpnvX" outputId="bedbf518-83d3-4244-b41c-15504a9db933"
stock['Portfolio_return'].loc['2005-01-01':].skew()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="k6WFSvE_pnzn" outputId="a76af3fe-262a-4ac7-ec66-6fad7b89e319"
(1+stock['Shifted_Return'].loc['2005-01-01':]).cumprod().plot()
# + colab={"base_uri": "https://localhost:8080/"} id="_VMqUQo7pvIQ" outputId="81efbef6-0ebd-4dfc-b53c-f3e8442aeefb"
stock['Shifted_Return'].loc['2015-01-01':].skew()
# + id="1cpkUeUVySzm" colab={"base_uri": "https://localhost:8080/"} outputId="6f2422f7-311c-4d6b-d397-ae9d8be9b3e9"
stock['Shifted_Return'].loc['2005-01-01':].describe()
# + [markdown] id="r9gQ91jzsKrp"
# ### Statistical property of trend following strategy
#
# Compare the distribution of daily returns of the underlying asset and the trend following version, we conclude trend following improves the skewness of the strategy.
#
# We then compute the rolling 1 year return of both the underlying asset and the trend following strategy. The scatterplot shows a payoff similar to a long straddle. This is why trend following can be considered as a long volatility strategy.
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 352} id="S84AJ4sesw1H" outputId="a144ef9f-b84b-4144-9ed0-f0cad5c54592"
import seaborn as sns
# Rolling 1-year return of the asset (x) versus the strategy (y); the shape
# resembles a long-straddle payoff, as discussed in the markdown above.
# NOTE(review): positional x/y arguments were removed in seaborn >= 0.12;
# newer versions require sns.scatterplot(x=..., y=...).
sns.scatterplot(stock['Shifted_Return'].rolling(252).mean() * 252,stock['Portfolio_return'].rolling(252).mean()* 252)
# + id="3rTWj1DztGS5"
# + [markdown] id="Rs8pDzHbCsFa"
# ### Some reasons why technical analysis works
#
# Technical analysis are often used by retail and professionals in making trade decisions. Due to positioning of market participants, a self-reinforeced feedback loop is created when price follows a certain pattern.
#
# Here, we argue positioning, rather than psychology is the main driver of success for technical analysis. While it is important to analyse market sentiment, from the vast amount of news and social media data generated, sentiment only has an impact on the price after someone decides to take a trade follow/against it.
#
#
# Not all kinds of technical analysis are equally useful, candlestick patterns or some really fancy indicators would not be relevant.
#
# Widely used indicators for trends and mean-reversion would have predictive value due to their use in both strategy and risk management process. For example, market participants will often reduce position size of a long position when prices fall below 200 day moving averages.
#
#
#
#
# Recommendations from Krishnan (Krishnan 2021)
# - Take key support and resistance levels seriously, but verify where they are on your own
# - Build your own trend following, volatility control and risk parity models. Calibrate them to track the performance of individual
# funds or benchmark indices. Pay particular attention to levels where the model signal changes from BUY to SELL, or vice versa. These are your key levels.
# - Check the options markets in the assets you trade for large open interest at specific strikes. Infer whether dealers are likely to be long or short. When dealers are short, expect volatility expansion in a wide range around the strike.
#
#
#
#
#
#
#
#
# + [markdown] id="Pe3Ow4H8-B6Q"
# #### How to obtain data for market positioning
#
# For Commodities Futures, Trading Commission Reports would be a useful indicator for long term price movements as it provides an overview of how hedge funds and other market participants trade at a lag.
# https://data.nasdaq.com/data/CFTC-commodity-futures-trading-commission-reports
#
# For stocks that can be shorted and has options, short interest, borrowing rate and put-call ratios can be used. These data can often be obtained with little lag compared to other regulatory filings. There is also a concept of Gamma Exposure Index (GEX) which aims to the hedging activity of option market-makers based on the current stock price.
#
#
# Insiders and hedge funds with significant holdings need to disclose when they enter trades for stocks. Hedge funds are also required to their holdings each quarter. For US stocks, SEC filings (SEC Form 4, 13F Filings) provided the required data.
#
# There are academic papers suggesting following insiders and hedge funds can generate excess return. In particular, we can extend the definition of insiders to politicians who often has private information and the power to change economic policies.
#
#
#
# + [markdown] id="S1E7CYVgkey9"
# ## Recommended Reading
#
# - Market Tremors: Quantifying Structural Risks in Modern Financial Markets (<NAME> and <NAME> 2021)
# - Understanding the Momentum Risk Premium: An In-Depth Journey Through
# Trend-Following Strategies https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3042173
#
| Notebook/Algosoc_2021_Ch2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hayari/MaskedFace/blob/master/MaskedFaces.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="oG4LwWhpTfbk" colab_type="code" colab={}
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from sklearn.model_selection import train_test_split
from numpy import random
from sklearn.utils import shuffle
import cv2
from sklearn.metrics import confusion_matrix
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from tensorflow.keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Sequential
from keras.models import load_model
np.random.seed(100)
# %matplotlib inline
# + [markdown] id="n3DtskWPhR61" colab_type="text"
# # Load the DataSet
# + id="27JmdQbdMqo9" colab_type="code" colab={}
image_width=128
image_height=128
# + id="V2XIegHkWzd1" colab_type="code" colab={}
dataset=np.load('/content/drive/My Drive/DataSets/dataMasked.npz')
# + id="jqBXqwbBXFHU" colab_type="code" colab={}
data=dataset['arr_0']
labels=dataset['arr_1']
# + id="6afAi586XMCs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="f8213d39-eb29-4c32-8054-11d36504eb98"
print(data.shape)
print(labels.shape)
# + id="IwtTDvPGY-bC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="85974445-01b7-4b5d-929c-c4045ef19939"
type(data)
# + id="Pnt1PKasiK1Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="0454a608-fdd2-40b4-d841-c4af88c0116e"
sns.countplot(x='labels',data=pd.DataFrame(labels,columns=['labels']))
# + id="FTcxIDLAZIA2" colab_type="code" colab={}
X_train, X_test, Y_train, Y_test = train_test_split(data, labels, test_size=0.2, random_state=1)
del(data)
del(labels)
# + id="LUHbqIWoZvIF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="9df46893-4468-4a28-ba20-63b5919bc822"
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# + id="_MtRVKRYZufr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 373} outputId="7083a1f5-e8dc-4a15-f5b8-817e50972f44"
def show_images(X,Y,r,c):
    """Display an r x c grid of randomly chosen images with their labels.

    X : array of images; Y : array of 0/1 labels (1 = masked).
    """
    fig, grid = plt.subplots(r, c, figsize=(15, 6))
    fig.subplots_adjust(hspace=.3, wspace=.001)
    for panel in grid.ravel():
        # numpy's randint upper bound is exclusive, so idx stays in range.
        idx = random.randint(0, len(X))
        panel.axis('off')
        panel.imshow(X[idx])
        panel.set_title('Masked' if Y[idx] == 1 else 'Non Masked')
    plt.show()
# show image of 10 random data points
# (with rows=5 and columns=4 below this actually displays 20 samples)
rows = 5
columns = 4
show_images (X_train,Y_train,rows,columns)
# + id="50sW1c_UZt9k" colab_type="code" colab={}
# normalization the images
def normalize(x):
    """Scale uint8 pixel values into the approximate range [-1, 1)."""
    centered = x.astype(float) - 128
    return centered / 128
#new_X_test = np.apply_along_axis(normalize,3,X_test)
#new_X_train = np.apply_along_axis(normalize,3,X_train)
# + id="FeRDqQKMhOkN" colab_type="code" colab={}
# Shuffle the data
#X_train, Y_train = shuffle(X_train, Y_train, random_state=0)
#X_test, Y_test = shuffle(X_test, Y_test, random_state=0)
# + [markdown] id="Ik4r_9H7qk8a" colab_type="text"
# # Data Augmentation
# + id="Qj7WI-fFhO1t" colab_type="code" colab={}
# Data augmentation
def random_translate(img):
    """Randomly shift *img* by up to ``px`` pixels in x and y.

    Returns the translated image with the same dimensionality as the
    input; the channel axis is restored only when cv2 dropped it.
    """
    rows,cols,_ = img.shape
    # allow translation up to px pixels in x and y directions
    px = 2
    dx,dy = np.random.randint(-px,px,2)
    M = np.float32([[1,0,dx],[0,1,dy]])
    dst = cv2.warpAffine(img,M,(cols,rows))
    # cv2.warpAffine returns a 2-D array only for single-channel input.
    # Unconditionally appending an axis (the previous behaviour) produced a
    # bogus 4-D array for 3-channel images such as the (128, 128, 3) inputs
    # this notebook's model expects, so restore the axis conditionally.
    if dst.ndim == 2:
        dst = dst[:,:,np.newaxis]
    return dst
# + id="6dIbUblVhOod" colab_type="code" colab={}
def random_scaling(img):
    """Randomly zoom *img* in or out by up to 2 pixels at each corner.

    Returns the rescaled image with the same dimensionality as the input;
    the channel axis is restored only when cv2 dropped it.
    """
    rows,cols,_ = img.shape
    # transform limits
    px = np.random.randint(-2,2)
    # ending locations
    pts1 = np.float32([[px,px],[rows-px,px],[px,cols-px],[rows-px,cols-px]])
    # starting locations (4 corners)
    pts2 = np.float32([[0,0],[rows,0],[0,cols],[rows,cols]])
    M = cv2.getPerspectiveTransform(pts1,pts2)
    dst = cv2.warpPerspective(img,M,(rows,cols))
    # cv2.warpPerspective returns a 2-D array only for single-channel input.
    # Unconditionally appending an axis (the previous behaviour) produced a
    # bogus 4-D array for 3-channel (RGB) images, so restore it conditionally.
    if dst.ndim == 2:
        dst = dst[:,:,np.newaxis]
    return dst
# + id="WwlIgTFAhOai" colab_type="code" colab={}
def random_warp(img):
    """Apply a small random affine warp to *img*.

    Three anchor points a quarter of the way in from the edges are jittered
    by up to ~6% of the image size and an affine transform is fitted through
    them.  Returns the warped image with the same dimensionality as the
    input; the channel axis is restored only when cv2 dropped it.
    """
    rows,cols,_ = img.shape
    # random scaling coefficients
    rndx = np.random.rand(3) - 0.5
    rndx *= cols * 0.06   # this coefficient determines the degree of warping
    rndy = np.random.rand(3) - 0.5
    rndy *= rows * 0.06
    # 3 starting points for transform, 1/4 way from edges
    x1 = cols/4
    x2 = 3*cols/4
    y1 = rows/4
    y2 = 3*rows/4
    pts1 = np.float32([[y1,x1],
                       [y2,x1],
                       [y1,x2]])
    pts2 = np.float32([[y1+rndy[0],x1+rndx[0]],
                       [y2+rndy[1],x1+rndx[1]],
                       [y1+rndy[2],x2+rndx[2]]])
    M = cv2.getAffineTransform(pts1,pts2)
    dst = cv2.warpAffine(img,M,(cols,rows))
    # cv2.warpAffine returns a 2-D array only for single-channel input.
    # Unconditionally appending an axis (the previous behaviour) produced a
    # bogus 4-D array for 3-channel (RGB) images, so restore it conditionally.
    if dst.ndim == 2:
        dst = dst[:,:,np.newaxis]
    return dst
# + id="Q9rMaUwcqEvP" colab_type="code" colab={}
def random_brightness(img):
    """Randomly rescale the intensities of an image valued in [-1, 1].

    The image is lifted into the (0, 2) range, multiplied by a random
    coefficient chosen so the result cannot exceed 2.0, then shifted back.
    """
    lifted = img + 1.0  # move from [-1, 1] into the (0, 2) range
    # Largest factor that keeps every pixel at or below 2.0 after scaling.
    upper = 2.0 / lifted.max()
    lower = upper - 0.1
    scale = np.random.uniform(lower, upper)
    return lifted * scale - 1.0
# + [markdown] id="nGbwGIDgqp7E" colab_type="text"
# # Building the Model
# + id="gixgz0CrqpLN" colab_type="code" colab={}
# Small CNN for binary (masked / not masked) face classification.
# NOTE(review): assumes inputs are (image_width, image_height, 3) images
# prepared by the preprocessing above -- confirm upstream cells.
model = Sequential([
    Conv2D(filters=32,padding='same',kernel_size=(3,3), activation='relu',input_shape=(image_width, image_height, 3)),
    BatchNormalization(),
    MaxPooling2D(),
    Flatten(),
    Dense(1, activation='sigmoid')
])
# + id="LyTUjWXtqFB_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 344} outputId="13d28c96-fe3e-46bb-bca3-86c3e6c0f6f3"
model.summary()
# + id="6oXrL2vCqEyg" colab_type="code" colab={}
# Sigmoid output + binary cross-entropy matches the single-unit Dense head.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=["accuracy"])
# + id="iMS5wsnGqEsB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a08d6c0d-15e4-4a1c-8c20-1aeac71d7f9b"
from keras.callbacks import EarlyStopping
# Stop when training loss has not improved by >= 0.001 for 10 epochs.
stopcallback = EarlyStopping(monitor='loss', patience=10,min_delta=0.001)
# 10% of the training data is held out as a validation split during fit.
history_model = model.fit(X_train,Y_train,epochs=100, batch_size=32, validation_split=0.1,callbacks=[stopcallback],shuffle=True)
# + [markdown] id="SxmZkTiD55qX" colab_type="text"
# # Evaluate / Test
# + id="9Z255O1ur7qh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="d52121d5-fad4-42a6-f85b-5c0c42a6ae3b"
# Per-epoch training accuracy and loss curves from the fit history.
train_accuracy = history_model.history['accuracy']
train_loss = history_model.history['loss']
count = range(len(train_accuracy))
plt.plot(count, train_accuracy, label='Training accuracy')
plt.plot(count, train_loss, label='Training Loss')
plt.title('epochs vs Training Loss')
plt.legend()
# + id="lTCB30BQr7TN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="a281f193-e9c3-4bdc-da9a-1f3b63ba62ee"
# Train vs validation accuracy to check for over/under-fitting.
plt.plot(history_model.history['accuracy'])
plt.plot(history_model.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# + id="9E_XR95A6AMB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="65408315-90eb-43a8-ffca-0ca8be923b0e"
# Evaluate loss and accuracy on the held-out test set.
result= model.evaluate(
    X_test,
    Y_test.reshape(-1))
print(result)
# + id="Hbh4PsIE6AgG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="443b09e5-c78b-4368-c9e5-2e8418fe1727"
# Confusion matrix of true vs predicted classes.
# NOTE(review): `Sequential.predict_classes` was removed in TF 2.6; on newer
# TF use (model.predict(X_test) > 0.5).astype("int32") -- confirm TF version.
confusion_matrix(Y_test.reshape(-1),model.predict_classes(X_test))
# + id="oyiCQo2D5_-w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 729} outputId="1c05db1a-1d95-4a60-a6b0-2d5a290a8343"
# Human-readable names for the two classes predicted by the model.
text_labels={
    0: "not masked",
    1: "masked"
}
def plot_prediction_sample(model):
    """Plot a 5x5 grid of random test images with actual vs predicted labels.

    Reads the global X_test/Y_test arrays. Indices are now sampled uniformly
    over the whole test set instead of a hard-coded 0-150 range (which could
    raise IndexError on a smaller test set).
    """
    # `predict_classes` was removed in TF 2.6; thresholding the sigmoid
    # output at 0.5 is the documented replacement for binary classifiers.
    test_predictions = (model.predict(X_test) > 0.5).astype("int32").reshape(-1)
    plt.figure(figsize=(10,10))
    for i in range(25):
        plt.subplot(5,5,i+1)
        plt.xticks([])
        plt.yticks([])
        # grid(False): the string form grid('off') was removed in matplotlib 3.5.
        plt.grid(False)
        img_index = np.random.randint(0, len(X_test))
        plt.imshow(X_test[img_index], cmap=plt.cm.gray)
        actual_label = int(Y_test[img_index])
        predicted_label = int(test_predictions[img_index])
        plt.xlabel("image {} \n Actual: {} ({})\n Predicted: {} ({})".format(
            img_index,actual_label, text_labels[actual_label], predicted_label, text_labels[predicted_label]
        ))
    plt.tight_layout()
    plt.show()
plot_prediction_sample(model)
# + id="NNWAh8kXMEzy" colab_type="code" colab={}
model.save('/content/drive/My Drive/DataSets/my_model_128.h5')
# + id="2La0TAYfMErc" colab_type="code" colab={}
# + id="QoSDU2hkMEgD" colab_type="code" colab={}
| MaskedFaces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# Third-party
from astropy.io import ascii
from astropy.table import Table, join, vstack
import astropy.coordinates as coord
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
from pyia import GaiaData
# Load the M-giant catalog; drop its ra/dec so the Gaia cross-match
# coordinates survive the join below.
mgiants = Table.read('../../low-latitude-structures/data/mgiants-only.csv', format='ascii.csv')
mgiants.remove_column('ra')
mgiants.remove_column('dec')
xmatch = Table.read('../../low-latitude-structures/data/mgiants-gaiadr2.fits')
# Rename cross-match columns to match the M-giant table's join key and to
# avoid clobbering the physical distance column added later.
xmatch.rename_column('id', 'ID')
xmatch.rename_column('dist', 'angdist')
tbl = join(mgiants, xmatch, keys='ID')
# Keep only stars tagged as members of the TriAnd structure.
tbl = tbl[tbl['structure'] == 'TriAnd']
c = coord.SkyCoord(ra=tbl['ra'], dec=tbl['dec'])
fk5 = c.transform_to(coord.FK5)
# Calcium-triplet (CaT) measurements for the two TriAnd fields.
ta1 = ascii.read('../../low-latitude-structures/data/TA1_CaT.txt')
ta2 = ascii.read('../../low-latitude-structures/data/TA2_CaT.txt')
ta_cat = vstack((ta1, ta2))
# +
# Parse sky positions out of the 2MASS identifiers: the first 7 characters
# are sliced as HH MM SS.s (hour angle), the rest as sign+DD MM SS (degrees).
# NOTE(review): assumes the ID layout is HHMMSSs(+/-)DDMMSS -- confirm format.
ta_cat_ra = []
ta_cat_dec = []
for row in ta_cat:
    twomass_id = row['2MASSID']
    ra_digits, dec_digits = twomass_id[:7], twomass_id[7:]
    ra = coord.Longitude(f"{ra_digits[:2]}:{ra_digits[2:4]}:{ra_digits[4:6]}.{ra_digits[6]}",
                         unit=u.hourangle)
    dec = coord.Latitude(f"{dec_digits[:3]}:{dec_digits[3:5]}:{dec_digits[5:]}",
                         unit=u.degree)
    ta_cat_ra.append(ra)
    ta_cat_dec.append(dec)
ta_cat_ra = coord.Longitude(ta_cat_ra)
ta_cat_dec = coord.Latitude(ta_cat_dec)
ta_cat_c = coord.SkyCoord(ra=ta_cat_ra, dec=ta_cat_dec)
# +
idx, sep, _ = fk5.match_to_catalog_sky(ta_cat_c)
# Sanity check: separations to the nearest CaT source -- true matches
# should pile up at small separation.
plt.hist(sep.arcmin, bins=np.logspace(-3, 3, 100));
plt.xscale('log')
# Copy [Fe/H] and distance from the CaT tables for matches within 1 arcmin;
# unmatched stars stay NaN.
feh = np.full(len(c), np.nan)
feh[sep < 1*u.arcmin] = ta_cat['[Fe/H]'][idx[sep < 1*u.arcmin]]
dist = np.full(len(c), np.nan)
dist[sep < 1*u.arcmin] = ta_cat['dist'][idx[sep < 1*u.arcmin]]
# Assume a 15% fractional distance uncertainty.
dist_err = 0.15 * dist
tbl['feh'] = feh
tbl['dist'] = dist * u.kpc
tbl['dist_err'] = dist_err * u.kpc
# +
# # My attempt to compute distance using relation from Sheffield et al.
# M_K = (3.8 + 1.3*feh) - 8.4*(tbl['J-Ks'])
# apw_dist = coord.Distance(distmod=tbl['Ks'] - M_K).to(u.kpc)
# apw_dist_err = 0.15 * dist
# -
# Galactocentric cylindrical velocities for the stars that got a distance.
g = GaiaData(tbl[np.isfinite(tbl['dist'])])
all_c = g.get_skycoord(distance=g.dist,
                       radial_velocity=g.v_hel*u.km/u.s)
gal_c = all_c.transform_to(coord.Galactocentric(galcen_distance=8*u.kpc))
cyl = gal_c.represent_as('cylindrical')
vR = cyl.differentials['s'].d_rho.to(u.km/u.s)
vphi = (cyl.rho * cyl.differentials['s'].d_phi).to(u.km/u.s, u.dimensionless_angles())
vz = cyl.differentials['s'].d_z.to(u.km/u.s)
plt.hist(gal_c.transform_to(coord.Galactocentric).velocity.norm().value, bins=np.linspace(120, 250, 32));
plt.xlabel(r'$v_{\rm tot}$')
# +
# Heliocentric radial velocity vs Galactic longitude, with by-eye trend lines.
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
ax.scatter(all_c.galactic.l.degree,
           all_c.radial_velocity, color='#aaaaaa')
# ax.scatter(all_c.galactic.l.degree[mask],
#            all_c.radial_velocity[mask],
#            color='k')
l = np.linspace(95, 250, 128)
ax.plot(l, 2.3*(l-150) - 110)
ax.plot(l, 2.3*(l-150) - 35)
ax.set_ylim(-350, 350)
# +
# Monte Carlo sampling of the observables to propagate uncertainties.
nsamples = 1024
dist_samples = np.random.normal(g.dist, g.dist_err,
                                size=(nsamples, len(g))).T * u.kpc
pm = np.vstack((g.pmra.value, g.pmdec.value)).T
# Sub-block of the Gaia covariance matrix -- presumably the proper-motion
# (pmra, pmdec) entries; verify against pyia's get_cov() column ordering.
pmcov = g.get_cov()[:, 3:5, 3:5]
pm_samples = np.array([np.random.multivariate_normal(pm[i], pmcov[i], size=nsamples)
                       for i in range(len(pm))]) * g.pmra.unit
rv_samples = np.random.normal(g.v_hel, g.v_err, size=(nsamples, len(g))).T * u.km/u.s
c_samples = coord.SkyCoord(ra=g.ra[:, None],
                           dec=g.dec[:, None],
                           distance=dist_samples,
                           pm_ra_cosdec=pm_samples[...,0],
                           pm_dec=pm_samples[...,1],
                           radial_velocity=rv_samples)
gal_c_samples = c_samples.transform_to(coord.Galactocentric(galcen_distance=8*u.kpc))
# +
# Cylindrical velocity components for every Monte Carlo sample.
cyl_samples = gal_c_samples.represent_as('cylindrical')
vR_samples = cyl_samples.differentials['s'].d_rho.to(u.km/u.s)
vphi_samples = (cyl_samples.rho * cyl_samples.differentials['s'].d_phi).to(u.km/u.s, u.dimensionless_angles())
vz_samples = cyl_samples.differentials['s'].d_z.to(u.km/u.s)
vcyl = np.stack((vR_samples.value,
                 vphi_samples.value,
                 vz_samples.value))
# Robust summaries over the sample axis: median, and 1.5 * MAD as an error
# estimate (1.4826 * MAD equals a Gaussian sigma; 1.5 is a rounded version).
med_vcyl = np.median(vcyl, axis=-1)
std_vcyl = 1.5 * np.median(np.abs(vcyl - med_vcyl[..., None]), axis=-1)
med_vtot = np.sqrt(np.sum(med_vcyl**2, axis=0))
# Keep stars with total speeds in a plausible window.
vtot_mask = (med_vtot > 100) & (med_vtot < 320)
# -
# Same robust summaries for the positions (Cartesian and cylindrical).
med_xyz = np.median(gal_c_samples.cartesian.xyz, axis=-1)
std_xyz = 1.5 * np.median(np.abs(gal_c_samples.cartesian.xyz - med_xyz[..., None]), axis=-1).value
med_xyz = med_xyz.value
cyl_pos = np.stack((cyl_samples.rho.to(u.kpc).value,
                    cyl_samples.phi.to(u.deg).value,
                    cyl_samples.z.to(u.kpc).value))
med_cyl = np.median(cyl_pos, axis=-1)
std_cyl = 1.5 * np.median(np.abs(cyl_pos - med_cyl[..., None]), axis=-1)
# Shared matplotlib style for the error-bar plots below.
style = dict(marker='o', ls='none', color='k',
             ecolor='#aaaaaa', alpha=0.6)
# +
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
# Left panel: Galactocentric x-y positions with robust uncertainties.
ax = axes[0]
# BUG FIX: yerr previously used std_xyz[2] (the z uncertainty) against the
# y coordinate med_xyz[1]; the matching y uncertainty is std_xyz[1].
ax.errorbar(med_xyz[0], med_xyz[1],
            xerr=std_xyz[0], yerr=std_xyz[1],
            **style)
ax.scatter(-8, 0, marker='o', color='yellow')  # the Sun at (-8, 0) kpc
ax.set_xlim(-35, 0)
ax.set_ylim(0, 35)
# Reference circles of constant Galactocentric radius.
for R in np.arange(15, 35+1, 5):
    pa = mpl.patches.Circle((0,0), radius=R,
                            facecolor='none', edgecolor='tab:blue', alpha=0.5)
    ax.add_patch(pa)
ax.set_xlabel('$x$ [kpc]')
ax.set_ylabel('$y$ [kpc]')
# ---
# Right panel: cylindrical R-z positions.
ax = axes[1]
ax.errorbar(med_cyl[0], med_cyl[2],
            xerr=std_cyl[0], yerr=std_cyl[2],
            **style)
ax.axhline(0)  # Galactic midplane
ax.set_xlim(10, 40)
ax.set_ylim(-20, 10)
ax.set_xlabel('$R$ [kpc]')
ax.set_ylabel('$z$ [kpc]')
for ax in axes:
    ax.set_aspect('equal')
fig.tight_layout()
fig.savefig('../plots/xy_Rz.png', dpi=250)
# +
# Corner-style plot of the median cylindrical velocity components.
fig, axes = plt.subplots(2, 2, figsize=(7.3, 7),
                         sharex='col', sharey='row')
axes[0, 0].errorbar(med_vcyl[0],
                    np.abs(med_vcyl[1]),
                    xerr=std_vcyl[0],
                    yerr=std_vcyl[1],
                    **style)
axes[1, 0].errorbar(med_vcyl[0],
                    med_vcyl[2],
                    xerr=std_vcyl[0],
                    yerr=std_vcyl[2],
                    **style)
axes[1, 1].errorbar(np.abs(med_vcyl[1]),
                    med_vcyl[2],
                    xerr=std_vcyl[1],
                    yerr=std_vcyl[2],
                    **style)
axes[1, 1].set_xlim(80, 320)
axes[1, 1].set_ylim(-120, 120)
axes[0, 0].set_xlim(-120, 120)
axes[0, 0].set_ylim(80, 320)
axes[1, 0].xaxis.set_ticks([-100, -50, 0, 50, 100])
axes[1, 0].yaxis.set_ticks([-100, -50, 0, 50, 100])
axes[1, 1].xaxis.set_ticks([100, 150, 200, 250, 300])
axes[0, 0].yaxis.set_ticks([100, 150, 200, 250, 300])
axes[0, 1].set_visible(False)
# FIX: raw strings for the labels containing `\p` -- '\p' is an invalid
# escape sequence (SyntaxWarning on Python 3.12+); runtime bytes unchanged.
axes[0, 0].set_ylabel(r'$|v_\phi|$' + ' {0:latex_inline}'.format(u.km/u.s))
axes[1, 0].set_ylabel('$v_z$' + ' {0:latex_inline}'.format(u.km/u.s))
axes[1, 0].set_xlabel('$v_R$' + ' {0:latex_inline}'.format(u.km/u.s))
axes[1, 1].set_xlabel(r'$|v_\phi|$' + ' {0:latex_inline}'.format(u.km/u.s))
axes[0, 0].axhline(220., zorder=-10, lw=3,
                   color='tab:blue', alpha=0.5, marker='',
                   label=r'$v_\phi$ at solar circle')
axes[1, 1].axvline(220., zorder=-10, lw=3,
                   color='tab:blue', alpha=0.5, marker='')
axes[0, 0].axhspan(90, 120, color='tab:purple',
                   linewidth=0, alpha=0.4,
                   label='prediction from\nbaryonic mass')
axes[1, 1].axvspan(90, 120, color='tab:purple',
                   linewidth=0, alpha=0.4)
axes[0, 0].legend(loc='upper right', fontsize=11)
fig.tight_layout()
fig.savefig('../plots/vrphiz.png', dpi=250)
# +
# Histograms of the three median velocity components (vR, vphi, vz);
# the vertical reference line sits at -220 for vphi and at 0 otherwise.
fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
for i in range(3):
    if i == 1:
        bins = np.arange(-350+0.1, 50, 10.)
        axes[i].axvline(-220)
    else:
        bins = np.arange(-150, 150+0.1, 10.)
        axes[i].axvline(0)
    axes[i].hist(med_vcyl[i], bins=bins)
fig.tight_layout()
# +
# v_z vs azimuth phi for the velocity-selected stars.
plt.errorbar(med_cyl[1][vtot_mask],
             med_vcyl[2][vtot_mask],
             xerr=std_cyl[1][vtot_mask],
             yerr=std_vcyl[2][vtot_mask],
             marker='o', ls='none')
plt.xlim(110, 180)
# plt.ylim(-15, 5)
# +
# Same, with points colored by cylindrical radius R.
plt.errorbar(med_cyl[1][vtot_mask],
             med_vcyl[2][vtot_mask],
             xerr=std_cyl[1][vtot_mask],
             yerr=std_vcyl[2][vtot_mask],
             marker='', ls='none')
plt.scatter(med_cyl[1][vtot_mask],
            med_vcyl[2][vtot_mask],
            c=med_cyl[0][vtot_mask],
            vmin=15, vmax=30, zorder=100)
plt.xlim(110, 180)
plt.ylim(-100, 200)
plt.colorbar()
# -
# ---
#
# First do XD to get initial guess
from xdgmm import XDGMM
gmm = XDGMM(n_components=3)
# Per-star diagonal "covariance" matrices from the robust velocity errors.
# BUG FIX: the comprehension previously ranged over vcyl.shape[0] (= 3, the
# number of velocity components) instead of the number of stars, so the
# length-N boolean mask below could not index cov_cyl. Iterate over stars.
# NOTE(review): XD expects variances; std_vcyl looks like a standard
# deviation, so std_vcyl**2 may be intended here -- confirm with xdgmm docs.
cov_cyl = np.array([std_vcyl.T[i] * np.eye(3)
                    for i in range(vcyl.shape[1])])
_ = gmm.fit(med_vcyl.T[vtot_mask], cov_cyl[vtot_mask])
gmm.mu
from astroML.plotting.tools import draw_ellipse
# +
# Plot the fitted Gaussian components projected onto the (v_R, v_z) plane.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
for i in range(gmm.n_components):
    mu = gmm.mu[i][[0, 2]]
    # Drop the v_phi row/column to project the 3D covariance onto (v_R, v_z).
    V = gmm.V[i].copy()
    V = np.delete(V, 1, axis=0)
    V = np.delete(V, 1, axis=1)
    draw_ellipse(mu, V, scales=[1], ax=ax,
                 ec='k', fc='gray', alpha=0.2)
ax.errorbar(med_vcyl[0],
            med_vcyl[2],
            xerr=std_vcyl[0],
            yerr=std_vcyl[2],
            **style)
ax.set_xlim(-150, 150)
ax.set_ylim(-150, 150)
# ax.set_ylim(-400, 0)
# -
| notebooks/Figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Customizing datasets in fastai
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
# -
# In this tutorial, we'll see how to create custom subclasses of [`ItemBase`](/core.html#ItemBase) or [`ItemList`](/data_block.html#ItemList) while retaining everything the fastai library has to offer. To allow basic functions to work consistently across various applications, the fastai library delegates several tasks to one of those specific objects, and we'll see here which methods you have to implement to be able to have everything work properly. But first let's take a step back to see where you'll use your end result.
# ## Links with the data block API
# The data block API works by allowing you to pick a class that is responsible to get your items and another class that is charged with getting your targets. Combined together, they create a pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) that is then wrapped inside a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). The training set, validation set and maybe test set are then all put in a [`DataBunch`](/basic_data.html#DataBunch).
#
# The data block API allows you to mix and match what class your inputs have, what class your targets have, how to do the split between train and validation set, then how to create the [`DataBunch`](/basic_data.html#DataBunch), but if you have a very specific kind of input/target, the fastai classes might not be sufficient for you. This tutorial is there to explain what is needed to create a new class of items and what methods are important to implement or override.
#
# It goes in two phases: first we focus on what you need to create a custom [`ItemBase`](/core.html#ItemBase) class (which is the type of your inputs/targets) then on how to create your custom [`ItemList`](/data_block.html#ItemList) (which is basically a set of [`ItemBase`](/core.html#ItemBase)) while highlighting which methods are called by the library.
# ## Creating a custom [`ItemBase`](/core.html#ItemBase) subclass
# The fastai library contains three basic type of [`ItemBase`](/core.html#ItemBase) that you might want to subclass:
# - [`Image`](/vision.image.html#Image) for vision applications
# - [`Text`](/text.data.html#Text) for text applications
# - [`TabularLine`](/tabular.data.html#TabularLine) for tabular applications
#
# Whether you decide to create your own item class or to subclass one of the above, here is what you need to implement:
# ### Basic attributes
# Those are the more importants attribute your custom [`ItemBase`](/core.html#ItemBase) needs as they're used everywhere in the fastai library:
# - `ItemBase.data` is the thing that is passed to pytorch when you want to create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). This is what needs to be fed to your model. Note that it might be different from the representation of your item since you might want something that is more understandable.
# - `ItemBase.obj` is the thing that truly represents the underlying object behind your item. It should be sufficient to create a copy of your item. For instance, when creating the test set, the basic label is the `obj` attribute of the first label (or y) in the training set.
# - `__str__` representation: if applicable, this is what will be displayed when the fastai library has to show your item.
#
# If we take the example of a [`MultiCategory`](/core.html#MultiCategory) object `o` for instance:
# - `o.obj` is the list of tags that object has
# - `o.data` is a tensor where the tags are one-hot encoded
# - `str(o)` returns the tags separated by ;
#
# If you want to code the way data augmentation should be applied to your custom `Item`, you should write an `apply_tfms` method. This is what will be called if you apply a [`transform`](/vision.transform.html#vision.transform) block in the data block API.
# ### Advanced show methods
# If you want to use methods such a `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods. In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corespoding [`ItemBase`](/core.html#ItemBase) (see below) but it will delegate to the [`ItemBase`](/core.html#ItemBase) the way to display the results.
#
# ``` python
# def show_xys(self, xs, ys, **kwargs)->None:
#
# def show_xyzs(self, xs, ys, zs, **kwargs)->None:
# ```
# In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`Image`](/vision.image.html#Image):
#
# ``` python
# def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
# "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
# rows = int(math.sqrt(len(xs)))
# fig, axs = plt.subplots(rows,rows,figsize=figsize)
# for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
# xs[i].show(ax=ax, y=ys[i], **kwargs)
# plt.tight_layout()
#
# def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
# """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
# `kwargs` are passed to the show method."""
# figsize = ifnone(figsize, (6,3*len(xs)))
# fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
# fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
# for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
# x.show(ax=axs[i,0], y=y, **kwargs)
# x.show(ax=axs[i,1], y=z, **kwargs)
# ```
# ### Example: ImageTuple
# For cycleGANs, we need to create a custom type of items since we feed the model tuples of images. Let's look at how to code this. The basis is to code the `obj` and [`data`](/vision.data.html#vision.data) attributes. We do that in the init. The object is the tuple of images and the data their underlying tensors normalized between -1 and 1.
class ImageTuple(ItemBase):
    "Item holding a pair of images, e.g. the two domains of a cycleGAN."
    def __init__(self, img1, img2):
        self.img1,self.img2 = img1,img2
        # `obj` keeps the original images; `data` holds their tensors
        # rescaled from [0, 1] to [-1, 1] for the model.
        self.obj,self.data = (img1,img2),[-1+2*img1.data,-1+2*img2.data]
# Then we want to apply data augmentation to our tuple of images. That's done by writing an `apply_tfms` method as we saw before. Here we just pass that call to the two underlying images then update the data.
    def apply_tfms(self, tfms, **kwargs):
        # Apply the same transforms to both images, then refresh `data`.
        self.img1 = self.img1.apply_tfms(tfms, **kwargs)
        self.img2 = self.img2.apply_tfms(tfms, **kwargs)
        self.data = [-1+2*self.img1.data,-1+2*self.img2.data]
        return self
# We define a last method to stack the two images next to each other, which we will use later for a customized `show_batch`/ `show_results` behavior.
    def to_one(self): return Image(0.5+torch.cat(self.data,2)/2)
# This is all your need to create your custom [`ItemBase`](/core.html#ItemBase). You won't be able to use it until you have put it inside your custom [`ItemList`](/data_block.html#ItemList) though, so you should continue reading the next section.
# ## Creating a custom [`ItemList`](/data_block.html#ItemList) subclass
# This is the main class that allows you to group your inputs or your targets in the data block API. You can then use any of the splitting or labelling methods before creating a [`DataBunch`](/basic_data.html#DataBunch). To make sure everything is properly working, here is what you need to know.
# ### Class variables
# Whether you're directly subclassing [`ItemList`](/data_block.html#ItemList) or one of the particular fastai ones, make sure to know the content of the following three variables as you may need to adjust them:
# - `_bunch` contains the name of the class that will be used to create a [`DataBunch`](/basic_data.html#DataBunch)
# - `_processor` contains a class (or a list of classes) of [`PreProcessor`](/data_block.html#PreProcessor) that will then be used as the default to create processor for this [`ItemList`](/data_block.html#ItemList)
# - `_label_cls` contains the class that will be used to create the labels by default
#
# `_label_cls` is the first to be used in the data block API, in the labelling function. If this variable is set to `None`, the label class will be guessed between [`CategoryList`](/data_block.html#CategoryList), [`MultiCategoryList`](/data_block.html#MultiCategoryList) and [`FloatList`](/data_block.html#FloatList) depending on the type of the first item. The default can be overriden by passing a `label_cls` in the kwargs of the labelling function.
#
# `_processor` is the second to be used. The processors are called at the end of the labelling to apply some kind of function on your items. The default processor of the inputs can be overriden by passing a `processor` in the kwargs when creating the [`ItemList`](/data_block.html#ItemList), the default processor of the targets can be overriden by passing a `processor` in the kwargs of the labelling function.
#
# Processors are useful for pre-processing some data, but you also need to put in their state any variable you want to save for the call of `data.export()` before creating a [`Learner`](/basic_train.html#Learner) object for inference: the state of the [`ItemList`](/data_block.html#ItemList) isn't saved there, only their processors. For instance `SegmentationProcessor` only reason to exist is to save the dataset classes, and during the process call, it doesn't do anything apart from setting the `classes` and `c` attributes to its dataset.
# ``` python
# class SegmentationProcessor(PreProcessor):
# def __init__(self, ds:ItemList): self.classes = ds.classes
# def process(self, ds:ItemList): ds.classes,ds.c = self.classes,len(self.classes)
# ```
#
# `_bunch` is the last class variable usd in the data block. When you type the final `databunch()`, the data block API calls the `_bunch.create` method with the `_bunch` of the inputs.
# ### Keeping \_\_init\_\_ arguments
# If you pass additional arguments in your `__init__` call that you save in the state of your [`ItemList`](/data_block.html#ItemList), be wary to also pass them along in the `new` method as this one is used to create your training and validation set when splitting. The basic scheme is:
# ``` python
# class MyCustomItemList(ItemList):
# def __init__(self, items, my_arg, **kwargs):
# self.my_arg = my_arg
# super().__init__(items, **kwargs)
#
# def new(self, items, **kwargs):
# return super().new(items, self.my_arg, **kwargs)
# ```
# Be sure to keep the kwargs as is, as they contain all the additional stuff you can pass to an [`ItemList`](/data_block.html#ItemList).
# ### Important methods
# #### - get
# The most important method you have to implement is `get`: this one tells your custom [`ItemList`](/data_block.html#ItemList) how to generate an [`ItemBase`](/core.html#ItemBase) from the thing stored in its `items` array. For instance an [`ImageItemList`](/vision.data.html#ImageItemList) has the following `get` method:
# ``` python
# def get(self, i):
# fn = super().get(i)
# res = self.open(fn)
# self.sizes[i] = res.size
# return res
# ```
# The first line basically looks at `self.items[i]` (which is a filename). The second line opens it since the `open`method is just
# ``` python
# def open(self, fn): return open_image(fn)
# ```
# The third line is there for [`ImagePoints`](/vision.image.html#ImagePoints) or [`ImageBBox`](/vision.image.html#ImageBBox) targets that require the size of the input [`Image`](/vision.image.html#Image) to be created. Note that if you are building a custom target class and you need the size of an image, you should call `self.x.size[i]`.
# + hide_input=true
# Render an admonition box in the generated documentation site.
jekyll_note("""If you just want to customize the way an `Image` is opened, subclass `Image` and just change the
`open` method.""")
# -
# #### - reconstruct
# This is the method that is called in `data.show_batch()`, `learn.predict()` or `learn.show_results()` to transform a pytorch tensor back into an [`ItemBase`](/core.html#ItemBase). In a way, it does the opposite of calling `ItemBase.data`. It should take a tensor `t` and return the same kind of thing as the `get` method.
#
# In some situations ([`ImagePoints`](/vision.image.html#ImagePoints), [`ImageBBox`](/vision.image.html#ImageBBox) for instance) you need to have a look at the corresponding input to rebuild your item. In this case, you should have a second argument called `x` (don't change that name). For instance, here is the `reconstruct` method of [`PointsItemList`](/vision.data.html#PointsItemList):
# ```python
# def reconstruct(self, t, x): return ImagePoints(FlowField(x.size, t), scale=False)
# ```
# #### - analyze_pred
# This is the method that is called in `learn.predict()` or `learn.show_results()` to transform predictions in an output tensor suitable for `reconstruct`. For instance we may need to take the maximum argument (for [`Category`](/core.html#Category)) or the predictions greater than a certain threshold (for [`MultiCategory`](/core.html#MultiCategory)). It should take a tensor, along with optional kwargs and return a tensor.
#
# For instance, here is the `analyze_pred` method of [`MultiCategoryList`](/data_block.html#MultiCategoryList):
# ```python
# def analyze_pred(self, pred, thresh:float=0.5): return (pred >= thresh).float()
# ```
# `thresh` can then be passed as kwarg during the calls to `learn.predict()` or `learn.show_results()`.
# ### Advanced show methods
# If you want to use methods such as `data.show_batch()` or `learn.show_results()` with a brand new kind of [`ItemBase`](/core.html#ItemBase) you will need to implement two other methods. In both cases, the generic function will grab the tensors of inputs, targets and predictions (if applicable), reconstruct the corresponding items (as seen before) but it will delegate to the [`ItemList`](/data_block.html#ItemList) the way to display the results.
#
# ``` python
# def show_xys(self, xs, ys, **kwargs)->None:
#
# def show_xyzs(self, xs, ys, zs, **kwargs)->None:
# ```
# In both cases `xs` and `ys` represent the inputs and the targets, in the second case `zs` represent the predictions. They are lists of the same length that depend on the `rows` argument you passed. The kwargs are passed from `data.show_batch()` / `learn.show_results()`. As an example, here is the source code of those methods in [`ImageItemList`](/vision.data.html#ImageItemList):
#
# ``` python
# def show_xys(self, xs, ys, figsize:Tuple[int,int]=(9,10), **kwargs):
# "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
# rows = int(math.sqrt(len(xs)))
# fig, axs = plt.subplots(rows,rows,figsize=figsize)
# for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
# xs[i].show(ax=ax, y=ys[i], **kwargs)
# plt.tight_layout()
#
# def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
# """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
# `kwargs` are passed to the show method."""
# figsize = ifnone(figsize, (6,3*len(xs)))
# fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
# fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
# for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
# x.show(ax=axs[i,0], y=y, **kwargs)
# x.show(ax=axs[i,1], y=z, **kwargs)
# ```
#
# Linked to this method is the class variable `_show_square` of an [`ItemList`](/data_block.html#ItemList). It defaults to `False` but if it's `True`, the `show_batch` method will send `rows * rows` `xs` and `ys` to `show_xys` (so that it shows a square of inputs/targets), like here for images.
# ### Example: ImageTupleList
# Continuing our custom item example, we create a custom [`ItemList`](/data_block.html#ItemList) class that will wrap those `ImageTuple` properly. The first thing is to write a custom `__init__` method (since we need to list of filenames here) which means we also have to change the `new` method.
class ImageTupleList(ImageItemList):
    "ItemList of `ImageTuple`s: a primary image list plus a second list `itemsB`."
    def __init__(self, items, itemsB=None, **kwargs):
        self.itemsB = itemsB
        super().__init__(items, **kwargs)
    def new(self, items, **kwargs):
        # Propagate `itemsB` so train/valid splits keep the second domain.
        return super().new(items, itemsB=self.itemsB, **kwargs)
# We then specify how to get one item. Here we pass the image in the first list of items, and pick one randomly in the second list.
    def get(self, i):
        img1 = super().get(i)
        fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
        return ImageTuple(img1, open_image(fn))
# We also add a custom factory method to directly create an `ImageTupleList` from two folders.
    @classmethod
    def from_folders(cls, path, folderA, folderB, **kwargs):
        itemsB = ImageItemList.from_folder(path/folderB).items
        res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
        res.path = path
        return res
# Finally, we have to specify how to reconstruct the `ImageTuple` from tensors if we want `show_batch` to work. We recreate the images and denormalize.
    def reconstruct(self, t:Tensor):
        # Undo the [-1, 1] normalization applied in `ImageTuple.__init__`.
        return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
# There is no need to write a `analyze_preds` method since the default behavior (returning the output tensor) is what we need here. However `show_results` won't work properly unless the target (which we don't really care about here) has the right `reconstruct` method: the fastai library uses the `reconstruct` method of the target on the outputs. That's why we create another custom [`ItemList`](/data_block.html#ItemList) with just that `reconstruct` method. The first line is to reconstruct our dummy targets, and the second one is the same as in `ImageTupleList`.
class TargetTupleList(ItemList):
    "Dummy target list whose only job is to `reconstruct` an `ImageTuple`."
    def reconstruct(self, t:Tensor):
        # Scalar tensor means an empty/dummy target: nothing to rebuild.
        if len(t.size()) == 0: return t
        return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
# To make sure our `ImageTupleList` uses that for labelling, we pass it in `_label_cls` and this is what the result looks like.
class ImageTupleList(ImageItemList):
    "Final version: tuple item list with a custom label class and show methods."
    _label_cls=TargetTupleList
    def __init__(self, items, itemsB=None, **kwargs):
        self.itemsB = itemsB
        super().__init__(items, **kwargs)
    def new(self, items, **kwargs):
        # Propagate `itemsB` so train/valid splits keep the second domain.
        return super().new(items, itemsB=self.itemsB, **kwargs)
    def get(self, i):
        # Pair the i-th primary image with a random image from `itemsB`.
        img1 = super().get(i)
        fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
        return ImageTuple(img1, open_image(fn))
    def reconstruct(self, t:Tensor):
        # Undo the [-1, 1] normalization applied in `ImageTuple.__init__`.
        return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5))
    @classmethod
    def from_folders(cls, path, folderA, folderB, **kwargs):
        "Create from two folders: `folderA` is the primary domain, `folderB` the second."
        itemsB = ImageItemList.from_folder(path/folderB).items
        res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
        res.path = path
        return res
# Lastly, we want to customize the behavior of `show_batch` and `show_results`. Remember the `to_one` method just puts the two images next to each other.
    def show_xys(self, xs, ys, figsize:Tuple[int,int]=(12,6), **kwargs):
        "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
        rows = int(math.sqrt(len(xs)))
        fig, axs = plt.subplots(rows,rows,figsize=figsize)
        for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
            xs[i].to_one().show(ax=ax, **kwargs)
        plt.tight_layout()
    def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
        """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
        `kwargs` are passed to the show method."""
        figsize = ifnone(figsize, (12,3*len(xs)))
        fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
        fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
        for i,(x,z) in enumerate(zip(xs,zs)):
            x.to_one().show(ax=axs[i,0], **kwargs)
            z.to_one().show(ax=axs[i,1], **kwargs)
| docs_src/tutorial.itemlist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import urllib3
import certifi
import json
from tqdm.notebook import tqdm
from time import sleep
# -
from requests import get
def open_url(url):
    """Fetch *url* with requests and return the body decoded as UTF-8 text."""
    response = get(url)
    # Force UTF-8 decoding; the Wikidata API serves UTF-8 JSON but the
    # charset may be missing from the response headers.
    response.encoding = "utf-8"
    return response.text
def log_json(js):
    """Pretty-print *js* to stdout as indented JSON (debug helper)."""
    formatted = json.dumps(js, indent=' ')
    print(formatted)
def wikiSearch(item_name):
    """Search Wikidata for *item_name*; return the top hit dict or None.

    NOTE(review): *item_name* is interpolated into the URL without
    percent-encoding — assumes names without special characters; verify.
    """
    query = ("https://www.wikidata.org/w/api.php?action=wbsearchentities"
             "&search=%s&language=en&limit=1&format=json" % (item_name))
    hits = json.loads(open_url(query))["search"]
    return hits[0] if hits else None
# +
# get("https://www.wikidata.org/w/api.php?action=wbgetentities&ids=Q15026&format=json&languages=en").text
# -
def getEntityById(qid):
    """Fetch the full Wikidata entity for *qid* (e.g. 'Q15026') via wbgetentities."""
    endpoint = ("https://www.wikidata.org/w/api.php?action=wbgetentities"
                "&ids=%s&format=json&languages=en" % (qid))
    payload = json.loads(open_url(endpoint))
    # The API wraps results in an 'entities' map keyed by the requested id.
    return payload['entities'][qid]
def getPropertyById(pid):
    """Fetch property *pid* (e.g. 'P186') and return the parsed JSON response.

    BUG FIX: the original requested the human-readable wiki page
    (https://www.wikidata.org/wiki/Property:PID), which returns HTML, so
    json.loads() always raised. Properties are entities too, so use the same
    wbgetentities JSON endpoint as getEntityById().
    """
    url = "https://www.wikidata.org/w/api.php?action=wbgetentities&ids=%s&format=json&languages=en" % (pid)
    data = json.loads(open_url(url))
    return data
# Wikidata property IDs to extract for each item, mapped to readable names.
save_properties = {"P186": "made_from_material", "P279": "subclass_of", "P1552": "has_quality"}
# description
# alias
def get_item_from_wiki(item_name = "mug"):
    """Look up *item_name* on Wikidata and return a dict of its label, id,
    description, English aliases, and the relationships in `save_properties`.

    Raises TypeError if the search returns no hit (wikiSearch returns None).
    """
    found_item = wikiSearch(item_name)
    # Be polite to the Wikidata API: roughly one request per second.
    sleep(0.98)
    item_entity = getEntityById(found_item["id"])
    log_json(item_entity)
    export_relationships = {}
    export_relationships["wiki_label"] = item_entity["labels"]["en"]["value"]
    export_relationships["wiki_id"] = item_entity["id"]
    export_relationships["description"] = item_entity["descriptions"]["en"]["value"]
    # BUG FIX: the original tested `"en" not in "aliases"` — membership in the
    # literal string "aliases", which is always True — so aliases were always
    # exported as []. Test the entity's aliases dict instead.
    if "aliases" in item_entity and "en" in item_entity["aliases"]:
        export_relationships["aliases"] = [i["value"] for i in item_entity["aliases"]["en"]]
    else:
        export_relationships["aliases"] = []
    for propert in save_properties:
        export_relationships[save_properties[propert]] = {}
        # Not every entity carries every property.
        if propert not in item_entity["claims"]:
            continue
        for relationship in item_entity["claims"][propert]:
            tmp_item = relationship["mainsnak"]["datavalue"]["value"]["id"]
            # NOTE(review): resolving a QID via wbsearchentities relies on the
            # search matching the raw id, and issues one uncapped request per
            # claim — confirm rate limits for large entities.
            export_relationships[save_properties[propert]][tmp_item] = wikiSearch(tmp_item)["label"]
    log_json(export_relationships)
    return export_relationships
from google_images_download import google_images_download
from os.path import join
# Single shared downloader instance, reused by download_img_from_google().
response = google_images_download.googleimagesdownload()
def download_img_from_google(img_id,img_name, download_num, show_log=False):
    # Download up to `download_num` Google Images results for `img_name` into
    # a directory named after `img_id`, using the module-level `response`
    # downloader. `show_log=False` runs in silent mode.
    response.download(arguments={"image_directory":img_id, "keywords":img_name, "limit": download_num, "silent_mode":(not show_log)})
# Demo: resolve "bottle" on Wikidata, then fetch 10 sample images for it,
# stored under a folder named after its Wikidata id.
item_from_wiki = get_item_from_wiki("bottle")
download_img_from_google(item_from_wiki["wiki_id"],
                         item_from_wiki["wiki_label"],
                         10,True
                         )
# +
# property_id2name = {}
# +
# for i in tqdm(item_entity["claims"]):
# if i in property_id2name:
# continue
# tmpEntity = getEntityById(i)
# property_id2name[i] = tmpEntity["labels"]["en"]["value"].replace(' ','_')
# sleep(2)
# +
# log_json(property_id2name)
# -
| SearchObjectFromWiki.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#run this everytime before scraping reviews for a place
import pandas as pd
import numpy as py
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import requests #not used as of now
from bs4 import BeautifulSoup
import time
lst = []  # accumulates one dict per scraped review, across all places
# Master sheet: one row per place, with its TripAdvisor link, the number of
# review pages, and the place name.
df=pd.read_excel('D:\MainPage1.xlsx')
df
lst2=[]
lst2=df['link']
lst3=[]
lst3=df['pages']
lst4=[]
lst4=df['name']
link=[]
pages=[]
placename=[]
namePlace={}
# Copy the sheet columns into plain lists for positional access.
# NOTE(review): 328 is hard-coded — presumably the sheet's row count; verify.
for i in range(0,328):
    link.append(lst2[i])
    pages.append(lst3[i])
    placename.append(lst4[i])
def tes(i):
    """Scrape every review on the driver's current page for place index *i*.

    Relies on module-level globals: `driver` (an open Selenium session),
    `placename` (list of place names) and `lst` (accumulator of review dicts).
    TripAdvisor serves two review-list layouts; the TypeError fallback handles
    the second layout when the first set of CSS classes is absent.
    """
    html = driver.page_source
    #print(driver.current_url)
    soup = BeautifulSoup(html, "lxml")
    try:
        # Layout 1. If find() matched nothing, list(None) raises TypeError
        # and we fall through to the responsive layout below.
        body = list(soup.find('div', class_='ppr_rup ppr_priv_location_reviews_list calloutReviewList'))
        rev = (body[0].find_all('div', class_='review-container'))
        for r in rev:
            r_title = r.find('div', class_='quote').a.span.text
            rbody = r.find('div', class_='entry').p.text
            rname = r.find('div', class_='username mo').span.text
            # Rating is encoded in the element's second CSS class.
            rating = r.find('div', class_='rating reviewItemInline').span['class'][1]
            dt = r.find('span', class_='ratingDate relativeDate')['title']
            rv = {'title':r_title, 'body': rbody, 'name': rname, 'ratings': rating, 'date': dt,'place':placename[i]}
            lst.append(rv)
    except TypeError:
        # Layout 2: responsive review list uses different CSS classes.
        body = list(soup.find('div', class_='ppr_rup ppr_priv_location_reviews_list_resp'))
        rev = (body[0].find_all('div', class_='review-container'))
        for r in rev:
            r_title = r.find('div', class_='quote').a.span.text
            rbody = r.find('div', class_='entry').p.text
            rating = r.find('div', class_='ui_column is-9').span['class'][1]
            rname = r.find('div', class_='info_text').div.text
            dt = r.find('span', class_='ratingDate')['title']
            rv = {'title':r_title, 'body': rbody, 'name': rname, 'ratings': rating, 'date': dt,'place':placename[i]}
            lst.append(rv)
    #print(rbody)
# Scrape places 69..327.
# NOTE(review): the start index 69 looks like a manual resume point from an
# earlier partial run — confirm before re-running from scratch.
for i in range(69,328):
    page=requests.get(link[i])
    nameurl=BeautifulSoup(page.content,'lxml')  # NOTE(review): unused below
    url = link[i]
    # Fresh Firefox session per place (geckodriver path is machine-local).
    driver = webdriver.Firefox(executable_path = 'D:/ChromeDriver/geckodriver.exe')#driver path
    driver.get(url)
    for j in range(0, pages[i]):#total review pages to iterate(here total 84 pages)
        # Expand truncated reviews by clicking the first "More" link, if any.
        b1 = driver.find_elements_by_xpath('//p/span[contains(., "More")]')
        if len(b1) > 0:
            b1[0].click()
            time.sleep(1)
        tes(i)
        #time.sleep(1)
        # Advance to the next review page unless the place has only one.
        if pages[i]!=1:
            b2 = driver.find_element_by_xpath('.//a[contains(text(), "Next")]')
            b2.click()
            time.sleep(1)
    driver.close()
#a={'title':' ', 'body': ' ', 'name': ' ', 'ratings': ' ', 'date': ' '}
#lst.append(a)
df1 = pd.DataFrame(lst)
df1
df1.to_excel('D:\Testing.xlsx')
placename[0]
| Reviews/final_nb_for_reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#The first cell is just to align our markdown tables to the left vs. center
# + language="html"
# <style>
# table {float:left}
# </style>
# -
# # Python Dictionaries
# ## Student Notes
# ***
# ## Learning Objectives
# In this lesson you will:
#
# 1. Learn the fundamentals of dictionaries in Python
# 2. Work with dictionaries in Python
# 3. Access data that is stored in a dictionary data structure
# 4. Analyze data that is stored in dictionaries
#
# ## Modules covered in this lesson:
# >- `pprint`, used to "pretty print" a dictionary's values
#
# ## Links to topics and functions:
# >- <a id='Lists'></a>[Dictionary Notes](#Initial-Notes-on-Dictionaries)
# >- <a id='methods'></a>[Dictionary methods](#Methods)
# >- <a id='pretty'></a>[Pretty Print with pprint](#pprint)
# >- <a id='sort'></a>[Sorting Dictionaries](#Sorting)
# >- <a id='lambda1'></a>[lambda Function intro](#lambda)
# >- <a id='analytics'></a>[Analytics with Dictionaries](#Analytics-with-Dictionaries)
# >- <a id='markdown'></a>[Markdown Exec Summary](#Markdown)
# >>- This is a handy markdown install that allows us to create nicely formatted reports within jupyter
# >- <a id='HW'></a> [Homework](#Homework)
#
# ### References: Sweigart(2015, pp. 105-121)
# #### Don't forget about the Python visualizer tool: http://pythontutor.com/visualize.html#mode=display
#
# ## Dictionary Methods and New Functions covered in this lesson:
# |Dict Methods | Functions |
# |:-----------: |:----------:|
# |keys() | pprint() |
# |values() | pformat() |
# |items() | |
# |get() | |
# |setdefault() | |
#
# ### Narration videos:
#
# - https://youtu.be/tlk0GNA6JYA
# - https://youtu.be/uXR2nL3Tc14
# - https://youtu.be/IE9mFU69cDs
# - https://youtu.be/S4sefldpq9Q
# - https://youtu.be/PhQXwvXUhsU
# - https://youtu.be/5quaodwBRB8
# - https://youtu.be/qMxXeXn6KpU
# - https://youtu.be/XmTEeEQtSts
# + [markdown] slideshow={"slide_type": "slide"}
# # Initial Notes on Dictionaries
# >- Dictionaries offer us a way to store and organize data in Python programs much like a database
# >>- `Dictionary Definition`: a *dictionary* is a data structure that allows almost any data type to be used as an index (key)
# >>- *Dictionaries* use a *key* vs an index as in lists to make *key-value* pairs
# >>- Unlike lists, the items are unordered meaning there is no "first" item like we see with a list at index 0.
# >>>- Because dictionaries are unordered we can't slice them like we do with lists
# >>- However, because we can use virtually any value as a key we have much more flexibility in how we can organize our data
# >>- The key-value pairs in a dictionary are similar to how databases are used to store and organize data
# >>- Dictionaries start with a `{` and end with a `}`
# >>- Dictionaries can be nested within other dictionaries
#
# # When do we typically use dictionaries?
# >- When you want to map (associate) some value to another
# >>- For example, states full name to abbreviation: states = {'Oregon': 'OR'}
# >>- Or customers of a company: customers = {'fName':'Micah','lName':'McGee', 'email':'<EMAIL>'}
# >- Dictionaries can be used when we need to "look up" a value ('Micah') from another value ('fName')
# >>- We can think of dictionaries as "look up" tables
#
#
# ## What are the main difference between lists and dictionaries?
# >- A list is an ordered list of items that we can access and slice by the index numbers
# >- A dictionary is used for matching some items (keys) to other items (values)
#
# -
# #### Let's work through some examples to get familiar with dictionaries
# ### Another way to get values with the `get()` method
# ### What if we want to add a city key with a value to our customers dictionary?
#
# ### Can we add integer key values?
# ### Note: end of video 1
# # Methods
# ## Some common dictionary methods
# ### How can we print all the values in a dictionary?
# ### How can we print all the keys in a dictionary?
# ### How about printing out the `key:value` pairs?
# ### Another way to print out `key:value` pairs
# ### How do we check if a key or value is already in a dictionary?
# ### If a key in a dictionary doesn't have a value what can we do so we don't get error codes?
# >- The `setdefault()` method is used to set a default value for a key so that all keys will have a value
#
# ## An example of why using `setdefault()` comes in handy
# >- We will write a short program to count the number of occurrences for each letter in a given string
# #### Commented out code for the previous example
# +
# Sample text whose per-letter frequencies we want to tally
text = "I wonder how many times each letter comes up in this short text string"
# Accumulator mapping each letter to its number of occurrences
count = {}
# Walk the string one character at a time, skipping spaces.
# setdefault() guarantees the key exists before the increment, so the first
# occurrence of a letter cannot raise a KeyError.
for letter in text:
    if letter != ' ':
        count.setdefault(letter, 0)
        count[letter] += 1
print(count)
# -
# #### And here is why we set a default value using `setdefault`
# >- Note the error code that is returned when we run the next cell
# ## Let's a look at the previous program in the visualizer tool
# http://pythontutor.com/visualize.html#mode=display
#
# ### Note: end of video 2
# # `pprint`
# ## Now, how do we get our dictionary of counted letters to print in an easier to read format?
# >- "Pretty" printing using the pprint module and its functions
# # Sorting
# ## We can sort dictionaries using the `sorted()` function
#
# >- The general syntax for `sorted()` is: sorted(*iterable*, key = *key*, reverse=*reverse*)
# where,
# >>- *iterable* is the sequence to sort: list, dictionary, tuple, etc.
# >>- *key* is optional and represents a function to execute which decides the order. Default is None
# >>- *reverse* is optional where False will sort ascending and True will sort descending. Default is False
#
# ### Sort by keys
# ### Sort by values using a `lambda` function in the *key* argument
# >- Here we will introduce `lambda` functions
# >- `lambda` functions are small anonymous functions which can take any number of arguments but can only have one expression
# >>- The general syntax is: lambda *arguments* : *expression*
# >- Usually lambda functions are used inside of other functions
# ### `lambda`
# #### Some quick examples using `lambda` functions
#
# 1. Using a lambda to add 10 to any number passed in
# 2. Using a lambda to multiply two numbers
# 3. Using a lambda to add three numbers
# ### Now back to our example of sorting a dictionary by the values
# #### Sort in descending order
# #### Note: the `sorted()` function did not change our dictionary in place
# >- If we want to store the sorted dictionary we would need to assign a new dictionary variable
# ### Note: end of video 3
# # Analytics with Dictionaries
# ### Let's do some analytics on our `count3` dictionary
# >- Q: How many unique letters were in our text3 string?
# >- Q: How many total letters were in our text3 string?
# >- Q: What is the average number of occurrences of letters in our text3 string?
#
# After answering these questions print out a message in a full sentences describing the results
# #### How many unique letters were in our `text3` string?
# #### How many total letters were in our `text3` string?
# #### What is the average number of occurrences of letters in the `text3` string?
# #### Good analytics never ends with simple output or tables but with a written report/statement
# >- So lets write a summary statement for our findings
# ### Note: End of video 4
# ## Dictionaries with lists embedded in them
# >- We will create a dictionary to store product prices
# >>- The general format of our dictionary will be record number (as the key)
# >>- The list will store product type, product brand, and price data
#
# #### What is the value of the 3rd item in the dictionary `products`?
# #### Why is a list the value returned from the previous cell?
# #### What is the value of the 6th item in the dictionary?
#
# #### How many total products are in the products dictionary? (pretend you can't count them manually)
#
# ### Q: How do we return values of a list that is embedded in a dictionary?
# #### What is the price of the 5th item in the dictionary?
# #### Return the price of the 3rd item in the dictionary
# #### Return the item type of the 4th item in products
# #### Return the brand of the 2nd item in products
# ### Now write out what was going on in the previous cells:
# 1. First, we list the dictionary name: `products`
# 2. Next, the first value in brackets refers to the key value to look up in `products`
# 3. Finally, the second value in brackets refers to the index number to look up in the embedded list
# >- On your own, write out what using the syntax `products[5][2]` tells Python to do
#
#
# ### What could our product dictionary look like in a database for a company?
#
# |prodID | prodType | prodBrand | prodPrice |
# |:-----------: |:----------:|:---------:|:----------|
# |1 | TV | TCL |200 |
# |2 | PC | HP |500 |
# |3 | TV | Visio |250 |
# |4 | Fridge | Samsung |1000 |
# |5 | TV | LG |850 |
# ### Note: End of video 5
# ## Let's do some analytics for the company that sells items from products
# ### First, analytics always starts with questions so let's write some
# 1. How many total products do we have?
# 2. Whats the total of all prices?
# 3. What is the average price of all products?
# 4. What is the average price of TVs?
# #### How many total products do we have?
#
# #### What is the total of all prices?
# #### What is the average price of all products rounded to 2 decimals?
# #### To answer product specific questions like `Q4` we need to do a bit more
# >- Let's break that question into subquestions
# >>- How many total TVs are in products?
# >>- What is the total price of the TVs?
# >>- Then what is the average price of all TVs?
# #### First, how many total TVs are there?
# #### Next, what is the total price of all TVs?
# #### Now, we can find average price for all TVs?
# ## Ok, we got the answer in multiple steps but can we do this in one cell?
# >- Let's use the individual cells we used above to help us answer our question in one cell
# ### But we aren't done yet... analytics doesn't stop at simple output
# ### Note: End of video 6
# ## We could also create a TV only price list and then analyze the list data
# #### What is our max TV price?
# #### What is our average TV price?
# ## Our product pricing example in one code cell
# >- Run this code through the Python tutor to help see how the code works
# >- http://pythontutor.com/visualize.html#mode=display
# ### Note: End of video 7
# # Build a dictionary using a for loop
# ## Task: create a dictionary where,
# >- The keys are integers from 1-5
# >- The values are multiples of 10 starting at 10
# # Markdown
# ## A better way to print data using markdown cells
# >- Follow the steps below to install a module that will allow you to make nicely formatted summary reports
# ## We can describe and print our results in a better format using markdown cells
# To be able to do this we have to install some notebook extensions using the Anaconda shell
# 1. If you have installed Anaconda on your machine then...
# 2. Search for "Anaconda Powershell prompt"
# >- On Macs you would use your terminal
# 3. Open up the Anaconda Powershell and type the following commands
# >- `pip install jupyter_contrib_nbextensions`
# >- `jupyter contrib nbextension install --user`
# >- `jupyter nbextension enable python-markdown/main`
# 4. After that all installs on your machine, you will need to reload Anaconda and juptyer
# ### The next cell is a markdown cell that calls the variable values defined in this type-along
# >- To call the values for variables in a markdown cell use double curly braces,`{`{var}`}` around the variable name
# + [markdown] variables={"avgPrice": "560.0", "sumPrice": "2800", "totProds": "5", "tvAvg": "433.33", "tvCount": "3", "tvSum": "1300"}
# Hi boss, here is a summary of our products and TVs:
# >- {{totProds}} total products
# >- \${{sumPrice}} total price of products
# >- \${{avgPrice}} average price of products
# >- {{tvCount}} total TVs
# >- \${{tvSum}} total price of TVs
# >- \${{tvAvg}} average price of TVs
# -
# ### Note: end of video 8
# # Homework
#
# tbd
# <a id='top'></a>[TopPage](#Teaching-Notes)
| Week 6/Dictionaries_Type-Along_student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Utility
import urllib
import os
import datetime
#Data Science
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from hmmlearn import hmm
#Google API
#import google.auth
#from google.cloud import bigquery
#from google.cloud import bigquery_storage_v1beta2
# +
#ACCOUNT IS SUSPENDED AT THE MOMENT ----------------------------------------------------
#Big query, requires API json
#pip install --upgrade google-cloud-bigquery[bqstorage,pandas]
#pip install --upgrade google-auth
# #%load_ext google.cloud.bigquery
#os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="Covid-19-be0adf05131c.json"
# #%%bigquery jhu_summary
#SELECT * FROM `bigquery-public-data.covid19_jhu_csse.summary`
#jhu_summary['country_region'].value_counts()
# -
# ## Mobility
# +
#Import mobility from url
# Google COVID-19 Community Mobility Report: percent change from baseline
# for six place categories, per region and date.
url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
mobility = pd.read_csv(url)
#Transform mobility to time series
# Date window of interest.
# NOTE(review): startdate/enddate and clist are not used in the code visible
# below — confirm whether later cells rely on them.
startdate = datetime.datetime.strptime('2020-02-15','%Y-%m-%d').date()
enddate = datetime.datetime.strptime('2020-04-11','%Y-%m-%d').date()
# The six mobility metric columns, plus shorter aliases for them.
filterlist = ['retail_and_recreation_percent_change_from_baseline',
              'grocery_and_pharmacy_percent_change_from_baseline',
              'parks_percent_change_from_baseline',
              'transit_stations_percent_change_from_baseline',
              'workplaces_percent_change_from_baseline',
              'residential_percent_change_from_baseline']
clist = ['retail_rec_mobility','grocery_pharmacy_mobility','parks_mobility','transit_mobility','workplaces_mobility','residential_mobility']
# +
country = mobility['country_region'].value_counts()
countrylist = country.index.to_list()
# Restrict to US rows and align column names with the JHU cases tables.
usmobility = mobility[mobility['country_region']=='United States']
#Change column names
usmobility = usmobility.rename(columns={"sub_region_1":"Province_State", "sub_region_2": "Admin2"})
#Split county text out of county
# Strip the ' County' suffix so names match JHU's Admin2 values.
countylist = []
txt = usmobility['Admin2'].to_list()
for i in txt:
    i = str(i)
    countylist.append(i.replace(' County', ''))
usmobility['Admin2'] = countylist
# Composite key: county + state + date, used to join against the cases frame.
usmobility['Lookup'] = usmobility['Admin2'] + usmobility['Province_State'] + usmobility['date']
lookuplist = usmobility['Lookup'].to_list()
# Transpose round-trip installs the Lookup strings as the row index.
usmobility = usmobility.transpose()
usmobility.columns = lookuplist
usmobility = usmobility.transpose()
# BUG FIX: the original called `.astype(float)` without assigning the result
# (a no-op) and then re-selected the columns WITHOUT the cast, so the metric
# columns stayed object-typed after the transposes. Select and cast in one
# assigned step instead.
usmobility = usmobility.loc[:, filterlist].astype(float)
# -
# ## Cases from <NAME>
# + code_folding=[]
#Import confirmed cases from url
# JHU CSSE time-series tables: US confirmed and deaths are county-level; the
# recovered table is only available globally (country-level).
confirmed = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv")
deaths = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv")
recovered = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv")
# -
# +
# Work on a copy of the county-level confirmed-cases table.
df = confirmed.copy()
df.columns
# Metadata columns to discard; what remains is Admin2, Province_State and the
# per-day date columns.
xlist = ['UID', 'iso2', 'iso3', 'code3', 'FIPS','Country_Region', 'Lat', 'Long_','Combined_Key']
# County + state concatenated gives the row key used for joins later.
df['Lookup'] = df['Admin2'] + df['Province_State']
includelist = [x for x in df.columns if x not in xlist]
datelist = includelist.copy()
# Remove the two leading identifier columns and the Lookup key, leaving only
# the date columns in datelist.
datelist.pop(0)
datelist.pop(0)
datelist.remove('Lookup')
df = df.loc[:,includelist]
lookuplist = df['Lookup'].to_list()
# Transpose round-trip installs the Lookup strings as the row index.
df = df.transpose()
df.columns = lookuplist
df = df.transpose()
# Deduplicate and sort the Lookup keys for the construction loop below.
lookuplist = df['Lookup'].value_counts().index.to_list()
lookuplist = sorted(lookuplist)
# ## Contructing Main Dataframe
# +
#Construction of maindf
# One row per (county, date): identifiers, confirmed cases, and placeholder
# columns for the six mobility metrics.
colnames = ['Lookup','State','County','Date','Confirmed','retail_and_recreation_percent_change_from_baseline',
            'grocery_and_pharmacy_percent_change_from_baseline',
            'parks_percent_change_from_baseline',
            'transit_stations_percent_change_from_baseline',
            'workplaces_percent_change_from_baseline',
            'residential_percent_change_from_baseline']
maindf = pd.DataFrame(index=range(0,len(lookuplist)*len(datelist)), columns=colnames)
#Start loop for confirmed
# `j` tracks the next free row block in maindf; each county contributes one
# row per date in its time series.
j = 0
for i in lookuplist:
    testdf = df.loc[i,:]
    # Skip the two identifier fields at the front and the trailing Lookup.
    timeseries = testdf[2:-1]
    date = timeseries.index.to_list()
    timeseries = timeseries.to_list()
    length = len(timeseries)
    maindf.iloc[j:(j+length),colnames.index('Lookup')] = i
    maindf.iloc[j:(j+length),colnames.index('State')] = testdf[1]
    maindf.iloc[j:(j+length),colnames.index('County')] = testdf[0]
    maindf.iloc[j:(j+length),colnames.index('Date')] = date
    maindf.iloc[j:(j+length),colnames.index('Confirmed')] = timeseries
    j = j+length
# Parse the m/d/yy date strings and extend Lookup to county+state+ISO date so
# it can be matched against usmobility's Lookup key.
maindf.loc[:,'Date1'] = pd.to_datetime(maindf['Date'],format='%m/%d/%y')
#test = maindf['Date1'].astype(str).tolist()
maindf['Lookup'] = maindf['Lookup'] + maindf['Date1'].astype(str).tolist()
labels = maindf['Lookup'].to_list()
# Transpose round-trip installs the extended Lookup as the row index.
maindf = maindf.transpose()
maindf.columns = labels
maindf = maindf.transpose()
# -
maindf.loc[:, "Confirmed"].value_counts()
# Convenience key for grouping/plotting downstream.
maindf.loc[:, "state-county"] = maindf.loc[:, "State"] + "-" + maindf.loc[:, "County"]
maindf.to_csv("maindf.csv", index = False)
| starter_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
# Google Play Store apps metadata dump.
data = pd.read_csv('../data/external/googleplaystore.csv')
data
## Converting the String Installs to number by removing unwanted characters
# Installs look like '1,000,000+': strip the trailing '+', drop thousands
# separators, and map the stray 'Free' value to 0 before the int cast.
data.Installs = data['Installs'].map(lambda x: x.rstrip('+').replace(',','').replace('Free', '0'))
data.Installs = data.Installs.astype(int)
data.sample(5)
# #### The null values present in the data set are
# +
# Null counts per column.
data.isnull().sum()
# -
# Since the count of null ratings is high, drop those rows; ratings above 5
# are also dropped.
data = data[(~data.Rating.isnull())]
data = data[data.Rating < 5]
data.Rating.describe()
data.info()
# BUG FIX: drop_duplicates() returns a NEW frame; the original discarded the
# result, so duplicates were never actually removed. Assign it back.
data = data.drop_duplicates()
data.shape
# +
# Drop columns not used in this analysis. Use the keyword form: the
# positional `axis` argument (`.drop([...], 1)`) was deprecated in pandas 1.0
# and removed in pandas 2.0.
data = data.drop(columns=["Last Updated", "Android Ver", "Current Ver", "Type", "Size"])
# +
# Sanity checks after cleaning.
data.info()
# -
data.Rating.describe()
data.shape
data.info()
# User reviews with precomputed sentiment scores for a subset of apps.
data2 = pd.read_csv('../data/external/googleplaystore_user_reviews.csv')
data2.sample(10)
data2.info()
# Keep rows with at least 2 non-null values (drops fully empty review rows).
data2.dropna(thresh=2, inplace=True)
data2.reset_index(drop=True, inplace=True)
data2.sample(10)
# Inner join on app name: one row per (app metadata, review) pair.
data3=pd.merge(data, data2, on='App')
data3.sample(10)
# Spot check: positive-review count for one category. where() keeps the
# frame shape and NaNs out non-matching rows, which value_counts ignores.
ss = data3.where( data3.Category == 'ART_AND_DESIGN')
ss.Sentiment.value_counts()['Positive']
# # Analysis
# ### Which apps got highest installs ?
data.sort_values(by='Installs', ascending=False).head(5)
# ### How many apps fall under different content ratings
data.groupby(['Content Rating']).size()
# ## Graph Analysis
# ### Sentiments vs Catagory
#
# We can see the relative sentiment of users for a given category of apps using this.
# +
## Data modification for sentiment vs Category
# Count reviews per (Category, Sentiment) pair into a tidy frame.
# NOTE: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0 —
# collect plain dicts and build the frame once instead.
rows = []
for eachCategory in data3['Category'].unique():
    # Hoisted out of the inner loop: the category filter and its value
    # counts do not depend on which sentiment is being read.
    dfOfCategory = data3.where(data3.Category == eachCategory)
    sentimentCounts = dfOfCategory.Sentiment.value_counts()
    for sent in ['Neutral', 'Positive', 'Negative']:
        # NOTE(review): indexing raises KeyError when a category has zero
        # reviews with this sentiment — same behavior as the original.
        rows.append({'Category': eachCategory, 'Sentiment': sent,
                     'Count': sentimentCounts[sent]})
finalFrame = pd.DataFrame(rows, columns=['Category', 'Sentiment', 'Count'])
finalFrame.sample(10)
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(20,9))
# seaborn needs a numeric dtype for the bar heights.
finalFrame.Count = finalFrame.Count.astype(float)
g = sns.barplot(x=finalFrame.Category, y=finalFrame.Count, hue=finalFrame.Sentiment)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
g
# +
## Category: Game is dominating the graph lets try by removing that out
# NOTE(review): rows 54-56 are assumed to be the three GAME sentiment rows;
# this depends on category order in data3 — verify before reuse.
finalFrameWithoutGame = finalFrame.drop([54,55,56])
plt.figure(figsize=(15,9))
g = sns.barplot(x=finalFrameWithoutGame.Category, y=finalFrameWithoutGame.Count, hue=finalFrameWithoutGame.Sentiment)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
g
# -
# ### Ratings Vs Installs
# +
# Average install count per whole-star rating band (0-1, 1-2, ..., 4-5).
# NOTE: DataFrame.append was removed in pandas 2.0 — build the frame from a
# list of dicts instead (consistent with the sentiment-count cell above).
rows = []
for i in range(0, 5):
    # Strict < / > comparisons, as in the original: integer-valued ratings
    # fall into neither band.
    tempData = data3[(data3.Rating > i) & (data3.Rating < (i + 1))]
    rows.append({'Range': str(i) + ' - ' + str(i + 1),
                 'AverageInstalls': tempData.Installs.mean()})
graphFrame = pd.DataFrame(rows, columns=['Range', 'AverageInstalls'])
graphFrame
sns.barplot(x=graphFrame.Range, y=graphFrame.AverageInstalls)
# -
# ### Rating vs category
# Rating distribution per category.
plt.figure(figsize=(15,9))
g = sns.boxplot(x=data3.Category, y=data3.Rating)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
g
# ### How many apps in each Category ??
plt.figure(figsize=(15,9))
g = sns.countplot(x='Category', data=data3, palette='Paired')
g.set_xticklabels(g.get_xticklabels(), rotation=90)
g
# NOTE(review): pandas >= 2.0 raises on DataFrame.corr() over non-numeric
# columns — may need corr(numeric_only=True); confirm pandas version.
corr = data3.corr()
# plot the heatmap
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns)
data3.dtypes
# +
plt.figure(figsize=(15,9))
# NOTE(review): positional x/y data args to seaborn plotting functions are
# deprecated in newer seaborn; the keyword form may be required — confirm.
j = sns.scatterplot(data3['Sentiment_Polarity'],data3['Sentiment_Subjectivity'],hue=data3['Sentiment'], edgecolor='red',palette="Paired")
plt.xlabel('Sentiment Polarity of Reviewers', fontsize=20)
plt.ylabel('Sentiment Subjectivity of Reviewers', fontsize=20)
plt.title("Sentiment Analysis of Reviewers", fontsize=20)
plt.show()
# -
| notebooks/playstore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python388jvsc74a57bd0635cacf90e2ebd933d69347cf5f0e366ac0738a02becbb069b8cf9e68d978af5
# ---
# +
from PIL import Image
import numpy as np
import cv2
def convert_to_thresh(img):
    """Binarize a PIL image: return a uint8 array with 1 on ink, 0 elsewhere.

    Steps: PIL RGB -> BGR numpy array (cv2 channel order), grayscale, Otsu
    threshold (inverted so dark glyph pixels become foreground 255), then map
    255 -> 1 so the result can be used directly for pixel counting.
    """
    gray = cv2.cvtColor(np.array(img.convert('RGB'))[:, :, ::-1].copy(), cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    thresh[thresh == 255] = 1
    return thresh
class MyImage:
    """A single-neume image: binarized bitmap plus the neume's bounding box.

    Assumes the source image contains exactly one connected component besides
    the background (asserted in __init__).
    """
    def __init__(self, src):
        self.raw = Image.open(src)
        self.thresh = convert_to_thresh(self.raw)
        self.height, self.width = self.thresh.shape
        # Stats rows are (x, y, w, h, area). With a single glyph we expect
        # exactly two components: background + neume.
        (_, _, self.ccs, _) = cv2.connectedComponentsWithStats(self.thresh, 8, cv2.CV_32S)
        assert len(self.ccs) == 2
        # Pick the component with the SMALLER area as the neume (the other
        # component covers the rest of the image).
        self.neume_loc = self.ccs[0]
        if self.ccs[1][-1] < self.neume_loc[-1]:
            self.neume_loc = self.ccs[1]
        (self.x, self.y, self.w, self.h, self.a) = self.neume_loc
        # Tight binarized crop of just the neume.
        self.neume = self.thresh[self.y : self.y + self.h, self.x : self.x + self.w]
    def resize_neume(self, other):
        """Resize this neume's crop to `other`'s crop size into self.resized."""
        rh = other.h / self.h
        rw = other.w / self.w
        print(f'rh={rh}, rw={rw}, self.a={self.a}, other.a={other.a}')
        # Scale the pixel area by both ratios so it stays comparable.
        self.resized_a = self.a * rh * rw
        # cv2.resize takes (width, height), hence the reversed shape.
        self.resized = cv2.resize(self.neume, other.neume.shape[::-1], interpolation = cv2.INTER_AREA)
    def compute_similarity(self, other):
        """Dissimilarity between self.resized and other.neume (lower = closer):
        sum of mean squared differences of the row- and column-projection
        histograms. Call resize_neume(other) first.

        CLEANUP: the original kept an abandoned FFT cross-correlation variant
        after the return statement (unreachable) — removed, together with its
        scipy.signal import.
        """
        def compute_hist_loss(a):
            # Projection histogram along axis `a` of the binary crops.
            h1 = np.sum(self.resized, axis=a)
            h2 = np.sum(other.neume, axis=a)
            return np.mean(np.square(h1 - h2))
        return compute_hist_loss(0) + compute_hist_loss(1)
    def plot_neume(self):
        """Display just the neume's bounding-box crop."""
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
        # Display the image
        x0, x1 = self.x, self.x + self.w
        y0, y1 = self.y, self.y + self.h
        # Create figure and axes
        fig, ax = plt.subplots(figsize=(self.h / 10, self.w / 10))
        ax.imshow(self.thresh[y0 : y1, x0 : x1])
        plt.show()
    def plot_resized_neume(self):
        """Display the crop produced by resize_neume()."""
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
        # Create figure and axes
        print(f'shape={self.resized.shape}')
        fig, ax = plt.subplots(figsize=(self.resized.shape[0] / 10, self.resized.shape[1] / 10))
        ax.imshow(self.resized)
        plt.show()
    def plot_ccs(self):
        """Display the raw image with each connected component's box outlined."""
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
        # Create figure and axes
        fig, ax = plt.subplots(figsize=(self.height / 10, self.width / 10))
        # Display the image
        ax.imshow(self.raw)
        # Create a Rectangle patch
        for index, (x, y, w, h, a) in enumerate(self.ccs):
            rect = patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none', label=f'{index}')
            ax.add_patch(rect)
            # rx, ry = rect.get_xy()
            # cx = rx + rect.get_width() / 2.0
            # cy = ry + rect.get_height() / 2.0
            # ax.annotate(f'{index}', (cx, cy), color='green', weight='bold', fontsize=16, ha='center', va='center')
        plt.show()
# Load two sample neumes to compare.
img1 = MyImage('neumes/psiphiston/psiphiston_1.png')
img2 = MyImage('neumes/petasti/petasti_1.png')
# -
img1.plot_neume()
img2.plot_neume()
# Bring img2's neume crop to img1's crop size, then score the pair.
img2.resize_neume(img1)
img2.plot_resized_neume()
# import matplotlib.pyplot as plt
# from scipy import signal
# print(resized.shape)
# print(img1[:,:,np.newaxis].shape)
# new_img = signal.fftconvolve(resized, img1, mode='same')
# index = np.unravel_index(np.argmax(new_img), new_img.shape)
# print(f'index={index}')
# plt.figure()
# plt.imshow(new_img)
print(img2.compute_similarity(img1))
| Resize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Column layouts for the yoochoose (RecSys 2015) event files: clicks and
# the test set share one schema; the buy events have price/quantity.
header_click = ['Session ID','Timestamp','Item ID','Category']
header_buys = ['Session ID','Timestamp','Item ID','Price','Quantity']
# The .dat files ship without a header row, so supply the column names.
buys_df = pd.read_csv('./yoochoose-data/yoochoose-buys.dat',names=header_buys)
buys_df.head()
click_df = pd.read_csv('./yoochoose-data/yoochoose-clicks.dat',names=header_click)
click_df.head()
test_df = pd.read_csv('./yoochoose-data/yoochoose-test.dat',names=header_click)
test_df.head()
# Frequency of each click category.
# Fixed typo: `value_countscounts()` raised AttributeError.
click_df.Category.value_counts()
| Recomend/Untitled.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.0
# language: julia
# name: julia-1.3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Factor Graphs
# + [markdown] slideshow={"slide_type": "slide"}
#
# ### Preliminaries
#
# - Goal
# - Introduction to Forney-style factor graphs and message passing-based inference
# - Materials
# - Mandatory
# - These lecture notes
# - Loeliger (2007), [The factor graph approach to model based signal processing](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Loeliger-2007-The-factor-graph-approach-to-model-based-signal-processing.pdf), pp. 1295-1302 (until section V)
# - Optional
# - <NAME> (2015), [Probabilistic graphical models: Factor graphs and more](https://www.youtube.com/watch?v=Fv2YbVg9Frc&t=31) video lecture (**highly recommended**)
# - References
# - Forney (2001), [Codes on graphs: normal realizations](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Forney-2001-Codes-on-graphs-normal-realizations.pdf)
# - Zhang et al. (2017), [Unifying Message Passing Algorithms Under the Framework of Constrained Bethe Free Energy Minimization](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Zhang-2017-Unifying-Message-Passing-Algorithms.pdf)
# - Dauwels (2007), [On variational message passing on factor graphs](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Dauwels-2007-on-variational-message-passing-on-factor-graphs)
# - Caticha (2010), [Entropic Inference](https://arxiv.org/abs/1011.0723)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Why Factor Graphs?
#
# - A probabilistic inference task gets its computational load mainly through the need for marginalization (i.e., computing integrals). E.g., for a model $p(x_1,x_2,x_3,x_4,x_5)$, the inference task $p(x_2|x_3)$ is given by
#
# $$\begin{align*}
# p(x_2|x_3) = \frac{\idotsint p(x_1,x_2,x_3,x_4,x_5) \, \mathrm{d}x_1 \mathrm{d}x_4 \mathrm{d}x_5}{\idotsint p(x_1,x_2,x_3,x_4,x_5) \, \mathrm{d}x_1 \mathrm{d}x_2 \mathrm{d}x_4 \mathrm{d}x_5}
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Since these computations suffer from the "curse of dimensionality", we often need to solve a simpler problem in order to get an answer.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Factor graphs provide a computationally efficient approach to solving inference problems **if the generative distribution can be factorized**.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Factorization helps. For instance, if $p(x_1,x_2,x_3,x_4,x_5) = p(x_1)p(x_2,x_3)p(x_4)p(x_5|x_4)$, then
#
# $$\begin{align*}
# p(x_2|x_3) &= \frac{\idotsint p(x_1)p(x_2,x_3)p(x_4)p(x_5|x_4) \, \mathrm{d}x_1 \mathrm{d}x_4 \mathrm{d}x_5}{\idotsint p(x_1)p(x_2,x_3)p(x_4)p(x_5|x_4) \, \mathrm{d}x_1 \mathrm{d}x_2 \mathrm{d}x_4 \mathrm{d}x_5}
# = \frac{p(x_2,x_3)}{\int p(x_2,x_3) \mathrm{d}x_2}
# \end{align*}$$
#
# which is computationally much cheaper than the general case above.
# + [markdown] slideshow={"slide_type": "fragment"}
# - In this lesson, we discuss how computationally efficient inference in *factorized* probability distributions can be automated by message passing-based inference in factor graphs.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Factor Graph Construction Rules
#
# - Consider a function
# $$
# f(x_1,x_2,x_3,x_4,x_5) = f_a(x_1,x_2,x_3) \cdot f_b(x_3,x_4,x_5) \cdot f_c(x_4)
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - The factorization of this function can be graphically represented by a **Forney-style Factor Graph** (FFG):
#
# <img src="./figures/ffg-example-1.png" width="400px">
# + [markdown] slideshow={"slide_type": "fragment"}
# - An FFG is an **undirected** graph subject to the following construction rules ([Forney, 2001](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Forney-2001-Codes-on-graphs-normal-realizations.pdf))
#
# 1. A **node** for every factor;
# 1. An **edge** (or **half-edge**) for every variable;
# 1. Node $g$ is connected to edge $x$ **iff** variable $x$ appears in factor $g$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Some FFG Terminology
#
# - $f$ is called the **global function** and $f_\bullet$ are the **factors**.
#
# - A **configuration** is an assignment of values to all variables.
#
# - The **configuration space** is the set of all configurations, i.e., the domain of $f$
#
# - A configuration $\omega=(x_1,x_2,x_3,x_4,x_5)$ is said to be **valid** iff $f(\omega) \neq 0$
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Equality Nodes for Branching Points
#
#
# - Note that a variable can appear in maximally two factors in an FFG (since an edge has only two end points).
# + [markdown] slideshow={"slide_type": "fragment"}
# - Consider the factorization (where $x_2$ appears in three factors)
#
# $$
# f(x_1,x_2,x_3,x_4) = f_a(x_1,x_2)\cdot f_b(x_2,x_3) \cdot f_c(x_2,x_4)
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - For the factor graph representation, we will instead consider the function $g$, defined as
# $$\begin{align*}
# g(x_1,x_2&,x_2^\prime,x_2^{\prime\prime},x_3,x_4)
# = f_a(x_1,x_2)\cdot f_b(x_2^\prime,x_3) \cdot f_c(x_2^{\prime\prime},x_4) \cdot f_=(x_2,x_2^\prime,x_2^{\prime\prime})
# \end{align*}$$
# where
# $$
# f_=(x_2,x_2^\prime,x_2^{\prime\prime}) \triangleq \delta(x_2-x_2^\prime)\, \delta(x_2-x_2^{\prime\prime})
# $$
#
# <img src="./figures/ffg-wEquality-node.png" width="400px">
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Equality Nodes for Branching Points, cont'd
#
# - Note that through introduction of auxiliary variables $X_2^\prime$ and $X_2^{\prime\prime}$ and a factor $f_=(x_2,x_2^\prime,x_2^{\prime\prime})$ each variable in $g$ appears in maximally two factors.
# + [markdown] slideshow={"slide_type": "fragment"}
# - The constraint $f_=(x,x^\prime,x^{\prime\prime})$ enforces that $X=X^\prime=X^{\prime\prime}$ **for every valid configuration**.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Since $f$ is a marginal of $g$, i.e.,
# $$
# f(x_1,x_2,x_3,x_4) = \iint g(x_1,x_2,x_2^\prime,x_2^{\prime\prime},x_3,x_4)\, \mathrm{d}x_2^\prime \mathrm{d}x_2^{\prime\prime}
# $$
# it follows that any inference problem on $f$ can be executed by a corresponding inference problem on $g$, e.g.,
# $$\begin{align*}
# f(x_1 \mid x_2) &\triangleq \frac{\iint f(x_1,x_2,x_3,x_4) \,\mathrm{d}x_3 \mathrm{d}x_4 }{ \idotsint f(x_1,x_2,x_3,x_4) \,\mathrm{d}x_1 \mathrm{d}x_3 \mathrm{d}x_4} \\
# &= \frac{\idotsint g(x_1,x_2,x_2^\prime,x_2^{\prime\prime},x_3,x_4) \,\mathrm{d}x_2^\prime \mathrm{d}x_2^{\prime\prime} \mathrm{d}x_3 \mathrm{d}x_4 }{ \idotsint g(x_1,x_2,x_2^\prime,x_2^{\prime\prime},x_3,x_4) \,\mathrm{d}x_1 \mathrm{d}x_2^\prime \mathrm{d}x_2^{\prime\prime} \mathrm{d}x_3 \mathrm{d}x_4} \\
# &= g(x_1 \mid x_2)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - $\Rightarrow$ **Any factorization of a global function $f$ can be represented by a Forney-style Factor Graph**.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Probabilistic Models as Factor Graphs
#
# - FFGs can be used to express conditional independence (factorization) in probabilistic models.
# + [markdown] slideshow={"slide_type": "fragment"}
# - For example, the (previously shown) graph for
# $f_a(x_1,x_2,x_3) \cdot f_b(x_3,x_4,x_5) \cdot f_c(x_4)$
# could represent the probabilistic model
# $$
# p(x_1,x_2,x_3,x_4,x_5) = p(x_1,x_2|x_3) \cdot p(x_3,x_5|x_4) \cdot p(x_4)
# $$
# where we identify
# $$\begin{align*}
# f_a(x_1,x_2,x_3) &= p(x_1,x_2|x_3) \\
# f_b(x_3,x_4,x_5) &= p(x_3,x_5|x_4) \\
# f_c(x_4) &= p(x_4)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - This is the graph
# <img src="./figures/ffg-example-prob-model.png" width="400px">
# + [markdown] slideshow={"slide_type": "slide"}
# ### Inference by Closing Boxes
#
# - Factorizations provide opportunities to cut on the amount of needed computations when doing inference. In what follows, we will use FFGs to process these opportunities in an automatic way by message passing between the nodes of the graph.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Assume we wish to compute the marginal
# $$
# \bar{f}(x_3) = \sum_{x_1,x_2,x_4,x_5,x_6,x_7}f(x_1,x_2,\ldots,x_7)
# $$
# where $f$ is factorized as given by the following FFG (we will discuss the usage of directed edges below).
#
# <img src="./figures/ffg-message-passing-1.png" width="500px">
# + [markdown] slideshow={"slide_type": "subslide"}
# - Due to the factorization, we can decompose this sum by the **distributive law** as
# $$\begin{align*}
# \bar{f}(x_3) = & \underbrace{ \left( \sum_{x_1,x_2} f_a(x_1)\,f_b(x_2)\,f_c(x_1,x_2,x_3)\right) }_{\overrightarrow{\mu}_{X_3}(x_3)} \\
# & \underbrace{ \cdot\left( \sum_{x_4,x_5} f_d(x_4)\,f_e(x_3,x_4,x_5) \cdot \underbrace{ \left( \sum_{x_6,x_7} f_f(x_5,x_6,x_7)\,f_g(x_7)\right) }_{\overleftarrow{\mu}_{X_5}(x_5)} \right) }_{\overleftarrow{\mu}_{X_3}(x_3)}
# \end{align*}$$
# which is computationally (much) lighter than executing the full sum $\sum_{x_1,\ldots,x_7}f(x_1,x_2,\ldots,x_7)$
#
# <img src="./figures/ffg-message-passing-2.png" width="600px">
# + [markdown] slideshow={"slide_type": "fragment"}
# - Note that $\overleftarrow{\mu}_{X_5}(x_5)$ is obtained by multiplying all enclosed factors ($f_f$, $f_g$) by the green dashed box, followed by marginalization over all enclosed variables ($x_6$, $x_7$).
# + [markdown] slideshow={"slide_type": "fragment"}
# - This is the **Closing the Box**-rule, which is a general recipe for marginalization of hidden variables and leads to a new factor with outgoing (sum-product) **message**
# $$ \mu_{\text{SP}} = \sum_{ \stackrel{ \textrm{enclosed} }{ \textrm{variables} } } \;\prod_{\stackrel{ \textrm{enclosed} }{ \textrm{factors} }}
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# - Crucially, all message update rules can be computed from information that is **locally available** at each node.
# + [markdown] slideshow={"slide_type": "fragment"}
# - We drew _directed edges_ in the FFG in order to distinguish forward messages $\overrightarrow{\mu}_\bullet(\cdot)$ (in the same direction as the arrow of the edge) from backward messages $\overleftarrow{\mu}_\bullet(\cdot)$ (in opposite direction). This is just a notational convenience since an FFG is computationally an undirected graph.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Evaluating the Closing-the-Box Rule for Individual Nodes
#
# - Terminal nodes can be used to represent observations, e.g., use a factor $f(y)=\delta(y-3)$ to terminate the edge for variable $Y$ if $y=3$ is observed.
#
# - The message out of a terminal node is the factor itself. For instance, closing a box around the **terminal nodes** leads to $\overrightarrow{\mu}_{X_1}(x_1) \triangleq f_a(x_1)$, $\overrightarrow{\mu}_{X_2}(x_2) \triangleq f_b(x_2)$ etc.
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - The messages from **internal nodes** evaluate to:
# $$\begin{align*}
# \overrightarrow{\mu}_{X_3}(x_3) &= \sum_{x_1,x_2} f_a(x_1) \,f_b(x_2) \,f_c(x_1,x_2,x_3) \\
# &= \sum_{x_1,x_2} \overrightarrow{\mu}_{X_1}(x_1) \overrightarrow{\mu}_{X_2}(x_2) \,f_c(x_1,x_2,x_3) \\
# \overleftarrow{\mu}_{X_5}(x_5) &= \sum_{x_6,x_7} f_f(x_5,x_6,x_7)\,f_g(x_7) \\
# &= \sum_{x_6,x_7} \overrightarrow{\mu}_{X_7}(x_7)\, f_f(x_5,x_6,x_7) \\
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sum-Product Algorithm
#
# - This recursive pattern for computing messages applies generally and is called the **Sum-Product update rule**, which is really just a special case of the closing-the-box rule: For any node, the outgoing message is obtained by taking the product of all incoming messages and the node function, followed by summing out (marginalization) all incoming variables. What is left (the outgoing message) is a function of the outgoing variable only ([Loeliger (2007), pg.1299](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Loeliger-2007-The-factor-graph-approach-to-model-based-signal-processing.pdf)):
#
# $$ \boxed{
# \overrightarrow{\mu}_{Y}(y) = \sum_{x_1,\ldots,x_n} \overrightarrow{\mu}_{X_1}(x_1)\cdots \overrightarrow{\mu}_{X_n}(x_n) \,f(y,x_1,\ldots,x_n) }
# $$
#
# <img src="./figures/ffg-sum-product.png" width="400px">
# + [markdown] slideshow={"slide_type": "fragment"}
# - If the factor graph for a function $f$ has **no cycles**, then the marginal $\bar{f}(x_3) = \sum_{x_1,x_2,x_4,x_5,x_6,x_7}f(x_1,x_2,\ldots,x_7)$ is given by the **Sum-Product Theorem**:
#
# $$
# \bar{f}(x_3) = \overrightarrow{\mu}_{X_3}(x_3)\cdot \overleftarrow{\mu}_{X_3}(x_3)
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# - It follows that the marginal $\bar{f}(x_3) = \sum_{x_1,x_2,x_4,x_5,x_6,x_7}f(x_1,x_2,\ldots,x_7)$ can be efficiently computed through sum-product messages. Executing inference through SP message passing is called the **Sum-Product Algorithm**.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Example: Bayesian Linear Regression by Message Passing
#
# - Recall: the goal of regression is to estimate an unknown function from a set of (noisy) function values.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Assume we want to estimate some function $f: \mathbb{R}^D \rightarrow \mathbb{R}$ from data set $D = \{(x_1,y_1), \ldots, (x_N,y_N)\}$, where $y_i = f(x_i) + \epsilon_i$.
# + [markdown] slideshow={"slide_type": "fragment"}
# ##### model specification
#
# - We will assume a linear model with white Gaussian noise and a Gaussian prior on the coefficients $w$:
# $$\begin{align*}
# y_i &= w^T x_i + \epsilon_i \\
# \epsilon_i &\sim \mathcal{N}(0, \sigma^2) \\
# w &\sim \mathcal{N}(0,\Sigma)
# \end{align*}$$
# or equivalently
# $$\begin{align*}
# p(D,w) &= \overbrace{p(w)}^{\text{weight prior}} \prod_{i=1}^N \overbrace{p(y_i\,|\,x_i,w,\epsilon_i)}^{\text{regression model}} \overbrace{p(\epsilon_i)}^{\text{noise model}} \\
# &= \mathcal{N}(w\,|\,0,\Sigma) \prod_{i=1}^N \delta(y_i - w^T x_i - \epsilon_i) \mathcal{N}(\epsilon_i\,|\,0,\sigma^2)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# ##### Inference
#
# - We are interested in inferring the posterior $p(w|D)$. We will execute inference by message passing on the FFG for the model.
# + [markdown] slideshow={"slide_type": "subslide"}
# - The left figure shows the factor graph for this model.
# - The right figure shows the message passing scheme. Terminal nodes that carry observations are denoted by small black boxes.
#
# <img src="./figures/ffg-bayesian-linear-regression.png" width="500px">
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### CODE EXAMPLE
#
# Let's solve this problem by message passing-based inference with Julia's FFG toolbox [ForneyLab](http://forneylab.org).
# + slideshow={"slide_type": "subslide"}
# Activate and instantiate the course's package environment so the
# required packages (ForneyLab, PyPlot, ...) resolve to pinned versions.
using Pkg;Pkg.activate("probprog/workspace/");Pkg.instantiate();
# Clear the verbose package-manager output from the notebook cell.
IJulia.clear_output();
# + slideshow={"slide_type": "subslide"}
using PyPlot, ForneyLab, LinearAlgebra
# Parameters
Σ = 1e5 * Diagonal(I,3) # Covariance matrix of prior on w
σ2 = 2.0 # Noise variance
# Generate data set
# True weights of the quadratic polynomial used to synthesize the data.
w = [1.0; 2.0; 0.25]
N = 30
z = 10.0*rand(N)
x_train = [[1.0; z; z^2] for z in z] # Feature vector x = [1.0; z; z^2]
# Noise-free regression function f(x) = w'x (scalar).
f(x) = (w'*x)[1]
y_train = map(f, x_train) + sqrt(σ2)*randn(N) # y[i] = w' * x[i] + ϵ
# Scatter the noisy targets against the scalar input z.
scatter(z, y_train); xlabel(L"z"); ylabel(L"f([1.0, z, z^2]) + \epsilon")
# + [markdown] slideshow={"slide_type": "slide"}
#
# Now build the factor graph in ForneyLab, perform sum-product message passing and plot results (mean of posterior).
# + slideshow={"slide_type": "subslide"}
# Build factorgraph
fg = FactorGraph()
@RV w ~ GaussianMeanVariance(constant(zeros(3)), constant(Σ, id=:Σ), id=:w) # p(w)
# One likelihood section per data point: d_t = w'x_t, y_t ~ N(d_t, σ2),
# with placeholders so x/y data can be bound at execution time.
for t=1:N
x_t = Variable(id=:x_*t)
d_t = Variable(id=:d_*t) # d=w'*x
DotProduct(d_t, x_t, w) # p(f|w,x)
@RV y_t ~ GaussianMeanVariance(d_t, constant(σ2, id=:σ2_*t), id=:y_*t) # p(y|d)
placeholder(x_t, :x, index=t, dims=(3,))
placeholder(y_t, :y, index=t);
end
# Build and run message passing algorithm
# sumProductAlgorithm generates Julia source for the schedule; eval compiles it.
eval(Meta.parse(sumProductAlgorithm(w)))
data = Dict(:x => x_train, :y => y_train)
w_posterior_dist = step!(data)[:w]
# Plot result
println("Posterior distribution of w: $(w_posterior_dist)")
scatter(z, y_train); xlabel(L"z"); ylabel(L"f([1.0, z, z^2]) + \epsilon");
z_test = collect(0:0.2:12)
x_test = [[1.0; z; z^2] for z in z_test]
# Overlay the regression curves induced by 10 posterior samples of w.
for sample=1:10
w = ForneyLab.sample(w_posterior_dist)
f_est(x) = (w'*x)[1]
plot(z_test, map(f_est, x_test), "k-", alpha=0.3);
end
# + [markdown] slideshow={"slide_type": "slide"}
# ### Other Message Passing Algorithms
#
# - The Sum-Product (SP) update rule implements perfect Bayesian inference.
# - Sometimes, the SP update rule is not analytically solvable.
# - Fortunately, for many well-known Bayesian approximation methods, a message passing update rule can be created, e.g. [Variational Message Passing](https://en.wikipedia.org/wiki/Variational_message_passing) (VMP) for variational inference.
# - In general, all of these message passing algorithms can be interpreted as minimization of a constrained free energy (e.g., see [Zhang et al. (2017)](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Zhang-2017-Unifying-Message-Passing-Algorithms.pdf)), and hence these message passing schemes comply with [Caticha's Method of Maximum Relative Entropy](https://arxiv.org/abs/1011.0723), which, as discussed in the [variational Bayes lesson](https://nbviewer.jupyter.org/github/bertdv/BMLIP/blob/master/lessons/notebooks/09-Latent-Variable-Models-and-VB.ipynb), is the proper way to update beliefs.
# - Different message passing updates rules can be combined to get a hybrid inference method in one model.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Summary
#
# - The foregoing message update rules can be worked out in closed-form and put into tables (e.g., see Tables 1 through 6 in [Loeliger, 2007](./files/Loeliger-2007-The-factor-graph-approach-to-model-based-signal-processing.pdf) for many standard factors such as additions, fixed-gain multiplications and branching (equality nodes)), thus creating a completely **automatable inference framework**.
# + [markdown] slideshow={"slide_type": "fragment"}
# - If the update rules for all node types in a graph have been tabulated, then inference by message passing comes down to executing a set of table-lookup operations. This also works for large graphs (where 'manual' inference becomes intractable).
# + [markdown] slideshow={"slide_type": "fragment"}
# - If the graph contains no cycles, the Sum-Product Algorithm computes **exact** marginals for all hidden variables.
# + [markdown] slideshow={"slide_type": "fragment"}
# - If the graph contains cycles, we have in principle an infinite tree without terminals. In this case, the SP Algorithm is not guaranteed to find exact marginals. In practice, if we apply the SP algorithm for just a few iterations we often find satisfying approximate marginals.
# + [markdown] slideshow={"slide_type": "slide"}
# ## <center>OPTIONAL SLIDES</center>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sum-Product Messages for the Equality Node
#
# - Let´s compute the SP messages for the **equality node** $f_=(x,y,z) = \delta(z-x)\delta(z-y)$:
#
# <img src="./figures/ffg-equality-node.png" width="200px">
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\begin{align*}
# \overrightarrow{\mu}_{Z}(z) &= \iint \overrightarrow{\mu}_{X}(x) \overrightarrow{\mu}_{Y}(y) \,\delta(z-x)\delta(z-y) \,\mathrm{d}x \mathrm{d}y \\
# &= \overrightarrow{\mu}_{X}(z) \int \overrightarrow{\mu}_{Y}(y) \,\delta(z-y) \,\mathrm{d}y \\
# &= \overrightarrow{\mu}_{X}(z) \overrightarrow{\mu}_{Y}(z)
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - By symmetry, this also implies (for the same equality node) that
#
# $$\begin{align*}
# \overleftarrow{\mu}_{X}(x) &= \overrightarrow{\mu}_{Y}(x) \overleftarrow{\mu}_{Z}(x) \quad \text{and} \\
# \overleftarrow{\mu}_{Y}(y) &= \overrightarrow{\mu}_{X}(y) \overleftarrow{\mu}_{Z}(y)\,.
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# - Let us now consider the case of Gaussian messages $\overrightarrow{\mu}_{X}(x) = \mathcal{N}(\overrightarrow{m}_X,\overrightarrow{V}_X)$, $\overrightarrow{\mu}_{Y}(y) = \mathcal{N}(\overrightarrow{m}_Y,\overrightarrow{V}_Y)$ and $\overrightarrow{\mu}_{Z}(z) = \mathcal{N}(\overrightarrow{m}_Z,\overrightarrow{V}_Z)$. Let´s also define the precision matrices $\overrightarrow{W}_X \triangleq \overrightarrow{V}_X^{-1}$ and similarly for $Y$ and $Z$. Then applying the SP update rule leads to multiplication of two Gaussian distributions, resulting in
#
# $$\begin{align*}
# \overrightarrow{W}_Z &= \overrightarrow{W}_X + \overrightarrow{W}_Y \\
# \overrightarrow{W}_Z \overrightarrow{m}_z &= \overrightarrow{W}_X \overrightarrow{m}_X + \overrightarrow{W}_Y \overrightarrow{m}_Y
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - It follows that **message passing through an equality node is similar to applying Bayes rule**, i.e., fusion of two information sources. Does this make sense?
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sum-Product Messages for the Addition Node
#
# - Next, consider an **addition node** $f_+(x,y,z) = \delta(z-x-y)$:
# <img src="./figures/ffg-addition-node.png" width="200px">
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\begin{align*}
# \overrightarrow{\mu}_{Z}(z) &= \iint \overrightarrow{\mu}_{X}(x) \overrightarrow{\mu}_{Y}(y) \,\delta(z-x-y) \,\mathrm{d}x \mathrm{d}y \\
# &= \int \overrightarrow{\mu}_{X}(x) \overrightarrow{\mu}_{Y}(z-x) \,\mathrm{d}x \,,
# \end{align*}$$
# i.e., $\overrightarrow{\mu}_{Z}$ is the convolution of the messages $\overrightarrow{\mu}_{X}$ and $\overrightarrow{\mu}_{Y}$.
# + [markdown] slideshow={"slide_type": "fragment"}
# - Of course, for Gaussian messages, these update rules evaluate to
#
# $$\begin{align*}
# \overrightarrow{m}_Z = \overrightarrow{m}_X + \overrightarrow{m}_Y \,,\,\text{and}\,\,\overrightarrow{V}_z = \overrightarrow{V}_X + \overrightarrow{V}_Y \,.
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - <div class="exercise"><b>Exercise</b>: For the same summation node, work out the SP update rule for the *backward* message $\overleftarrow{\mu}_{X}(x)$ as a function of $\overrightarrow{\mu}_{Y}(y)$ and $\overleftarrow{\mu}_{Z}(z)$? And further refine the answer for Gaussian messages. </div>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sum-Product Messages for Multiplication Nodes
# - Next, let us consider a **multiplication** by a fixed (invertible matrix) gain $f_A(x,y) = \delta(y-Ax)$
#
# <img src="./figures/ffg-gain-node.png" width="200px">
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\begin{align*}
# \overrightarrow{\mu}_{Y}(y) = \int \overrightarrow{\mu}_{X}(x) \,\delta(y-Ax) \,\mathrm{d}x = \overrightarrow{\mu}_{X}(A^{-1}y) \,.
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "fragment"}
# - For a Gaussian input message $\overrightarrow{\mu}_{X}(x) = \mathcal{N}(\overrightarrow{m}_{X},\overrightarrow{V}_{X})$, the output message is also Gaussian with
# $$\begin{align*}
# \overrightarrow{m}_{Y} = A\overrightarrow{m}_{X} \,,\,\text{and}\,\,
# \overrightarrow{V}_{Y} = A\overrightarrow{V}_{X}A^T
# \end{align*}$$
# since
# $$\begin{align*}
# \overrightarrow{\mu}_{Y}(y) &= \overrightarrow{\mu}_{X}(A^{-1}y) \\
# &\propto \exp \left( -\frac{1}{2} \left( A^{-1}y - \overrightarrow{m}_{X}\right)^T \overrightarrow{V}_{X}^{-1} \left( A^{-1}y - \overrightarrow{m}_{X}\right)\right) \\
# &= \exp \left( -\frac{1}{2} \left( y - A\overrightarrow{m}_{X}\right)^T A^{-T}\overrightarrow{V}_{X}^{-1} A \left( y - A\overrightarrow{m}_{X}\right)\right) \\
# &\propto \mathcal{N}(A\overrightarrow{m}_{X},A\overrightarrow{V}_{X}A^T) \,.
# \end{align*}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# - <div class="exercise"><b>Exercise</b>: Prove that, for the same factor $\delta(y-Ax)$ and Gaussian messages, the (backward) sum-product message $\overleftarrow{\mu}_{X}$ is given by
# $$\begin{align*}
# \overleftarrow{\xi}_{X} &= A^T\overleftarrow{\xi}_{Y} \\
# \overleftarrow{W}_{X} &= A^T\overleftarrow{W}_{Y}A
# \end{align*}$$
# where $\overleftarrow{\xi}_X \triangleq \overleftarrow{W}_X \overleftarrow{m}_X$ and $\overleftarrow{W}_{X} \triangleq \overleftarrow{V}_{X}^{-1}$ (and similarly for $Y$).</div>
# + [markdown] slideshow={"slide_type": "slide"}
#
# #### CODE EXAMPLE
#
# Let's calculate the Gaussian forward and backward messages for the addition node in ForneyLab. <img src="./figures/ffg-addition-node.png" width="200px">
# + slideshow={"slide_type": "subslide"}
# Forward message towards Z
# Graph: z = x + y with Gaussian priors on x and y; the sum-product
# forward message on z is the convolution of the two incoming Gaussians.
fg = FactorGraph()
@RV x ~ GaussianMeanVariance(constant(1.0), constant(1.0), id=:x)
@RV y ~ GaussianMeanVariance(constant(2.0), constant(1.0), id=:y)
@RV z = x + y; z.id = :z
eval(Meta.parse(sumProductAlgorithm(z, name="_z_fwd")))
msg_forward_Z = step_z_fwd!(Dict())[:z]
print("Forward message on Z: $(msg_forward_Z)")
# Backward message towards X
# Here x has no prior; z is given a Gaussian node instead, and the
# schedule computes the message flowing back through the addition to x.
fg = FactorGraph()
@RV x = Variable(id=:x)
@RV y ~ GaussianMeanVariance(constant(2.0), constant(1.0), id=:y)
@RV z = x + y
GaussianMeanVariance(z, constant(3.0), constant(1.0), id=:z)
eval(Meta.parse(sumProductAlgorithm(x, name="_x_bwd")))
msg_backward_X = step_x_bwd!(Dict())[:x]
print("Backward message on X: $(msg_backward_X)")
# + [markdown] slideshow={"slide_type": "slide"}
# #### CODE EXAMPLE
#
# In the same way we can also investigate the forward and backward messages for the gain node <img src="./figures/ffg-gain-node.png" width="200px">
# + slideshow={"slide_type": "subslide"}
# Forward message towards Y
# Gain node y = 4.0 * x: per the update rules above, the forward message
# is Gaussian with mean A*m_x and variance A*V_x*A' (here A = 4.0).
fg = FactorGraph()
@RV x ~ GaussianMeanVariance(constant(1.0), constant(1.0), id=:x)
@RV y = constant(4.0) * x; y.id = :y
eval(Meta.parse(sumProductAlgorithm(y, name="_y_fwd")))
msg_forward_Y = step_y_fwd!(Dict())[:y]
print("Forward message on Y: $(msg_forward_Y)")
# + [markdown] slideshow={"slide_type": "fragment"}
#
# ### Example
#
# - Consider a generative model
# $$p(x,y_1,y_2) = p(x)\,p(y_1|x)\,p(y_2|x) .$$
# - This model expresses the assumption that $Y_1$ and $Y_2$ are independent measurements of $X$.
#
# <img src="./figures/ffg-observations.png" width="300px">
# + [markdown] slideshow={"slide_type": "subslide"}
#
# - Assume that we are interested in the posterior for $X$ after observing $Y_1= \hat y_1$ and $Y_2= \hat y_2$. The posterior for $X$ can be inferred by applying the sum-product algorithm to the following graph:
#
# <img src="./figures/ffg-observations-2.png" width="450px">
# + [markdown] slideshow={"slide_type": "fragment"}
# - (Note that) we usually draw terminal nodes for observed variables in the graph by smaller solid-black squares. This is just to help the visualization of the graph, since the computational rules are no different than for other nodes.
# + [markdown] slideshow={"slide_type": "slide"}
# #### CODE EXAMPLE
#
# We'll use ForneyLab, a factor graph toolbox for Julia, to build the above graph, and perform sum-product message passing to infer the posterior $p(x|y_1,y_2)$. We assume $p(y_1|x)$ and $p(y_2|x)$ to be Gaussian likelihoods with known variances:
# $$\begin{align*}
# p(y_1\,|\,x) &= \mathcal{N}(y_1\,|\,x, v_{y1}) \\
# p(y_2\,|\,x) &= \mathcal{N}(y_2\,|\,x, v_{y2})
# \end{align*}$$
# Under this model, the posterior is given by:
# $$\begin{align*}
# p(x\,|\,y_1,y_2) &\propto \overbrace{p(y_1\,|\,x)\,p(y_2\,|\,x)}^{\text{likelihood}}\,\overbrace{p(x)}^{\text{prior}} \\
# &=\mathcal{N}(x\,|\,\hat{y}_1, v_{y1})\, \mathcal{N}(x\,|\,\hat{y}_2, v_{y2}) \, \mathcal{N}(x\,|\,m_x, v_x)
# \end{align*}$$
# so we can validate the answer by solving the Gaussian multiplication manually.
# + slideshow={"slide_type": "subslide"}
using ForneyLab
# Data
y1_hat = 1.0
y2_hat = 2.0
# Construct the factor graph
# Model: x ~ N(0, 4); y1 ~ N(x, 1); y2 ~ N(x, 2); y1, y2 observed.
fg = FactorGraph()
@RV x ~ GaussianMeanVariance(constant(0.0), constant(4.0), id=:x) # Node p(x)
@RV y1 ~ GaussianMeanVariance(x, constant(1.0)) # Node p(y1|x)
@RV y2 ~ GaussianMeanVariance(x, constant(2.0)) # Node p(y2|x)
Clamp(y1, y1_hat) # Terminal (clamp) node for y1
Clamp(y2, y2_hat) # Terminal (clamp) node for y2
# draw(fg) # draw the constructed factor graph
# Perform sum-product message passing
eval(Meta.parse(sumProductAlgorithm(x, name="_algo1"))) # Automatically derives a message passing schedule
x_marginal = step_algo1!(Dict())[:x] # Execute algorithm and collect marginal distribution of x
println("Sum-product message passing result: p(x|y1,y2) = 𝒩($(mean(x_marginal)),$(var(x_marginal)))")
# Calculate mean and variance of p(x|y1,y2) manually by multiplying 3 Gaussians (see lesson 4 for details)
# Precision-weighted combination: 1/v = sum of 1/v_i; m = v * sum of m_i/v_i.
v = 1 / (1/4 + 1/1 + 1/2)
m = v * (0/4 + y1_hat/1.0 + y2_hat/2.0)
println("Manual result: p(x|y1,y2) = 𝒩($(m), $(v))")
# + slideshow={"slide_type": "subslide"}
# Backward message towards X
# Same gain construction as above, but now a Gaussian node is attached to
# y and the schedule targets x, yielding the backward message through the
# gain. (The schedule label "_x_fwd2" is just a name; the computed
# quantity is the backward message on x.)
fg = FactorGraph()
x = Variable(id=:x)
@RV y = constant(4.0) * x
GaussianMeanVariance(y, constant(2.0), constant(1.0))
eval(Meta.parse(sumProductAlgorithm(x, name="_x_fwd2")))
msg_backward_X = step_x_fwd2!(Dict())[:x]
print("Backward message on X: $(msg_backward_X)")
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Local Free Energy in a Factor Graph
#
# - Consider an edge $x_j$ in a Forney-style factor graph for a generative model $p(x) = p(x_1,x_2,\ldots,x_N)$.
#
#
#
# - Assume that the graph structure (factorization) is specified by
# $$
# p(x) = \prod_{a=1}^M p_a(x_a)
# $$
# where $a$ is a set of indices.
# - Also, we assume a mean-field approximation for the posterior:
# $$
# q(x) = \prod_{i=1}^N q_i(x_i)
# $$
# and consequently a corresponding free energy functional
# $$\begin{align*}
# F[q] &= \sum_x q(x) \log \frac{q(x)}{p(x)} \\
# &= \sum_{x} \left(\prod_{i=1}^N q_i(x_i)\right) \log \frac{\prod_{i=1}^N q_i(x_i)}{\prod_{a=1}^M p_a(x_a)}
# \end{align*}$$
#
# - With these assumptions, it can be shown that the FE evaluates to (exercise)
# $$
# F[q] = \sum_{a=1}^M \underbrace{\sum_{x_a} \left( \prod_{j\in N(a)} q_j(x_j)\cdot \left(-\log p_a(x_a)\right) \right) }_{\text{node energy }U[p_a]} - \sum_{i=1}^N \underbrace{\sum_{x_i} q_i(x_i) \log \frac{1}{q_i(x_i)}}_{\text{edge entropy }H[q_i]}
# $$
#
# - In words, the FE decomposes into a sum of (expected) energies for the nodes minus the entropies on the edges.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Variational Message Passing
#
# - Let us now consider the local free energy that is associated with edge corresponding to $x_j$.
#
# <img src="./figures/VMP-two-nodes.png" width="600">
#
#
# - Apparently (see previous slide), there are three contributions to the free energy for $x_j$:
# - one entropy term for the edge $x_j$
# - two energy terms: one for each node that attaches to $x_j$ (in the figure: nodes $p_a$ and $p_b$)
#
# - The local free energy for $x_j$ can be written as (exercise)
# $$
# F[q_j] \propto \sum_{x_j} q(x_j) \log \frac{q_j(x_j)}{\nu_a(x_j)\cdot \nu_b(x_j)}
# $$
# where
# $$\begin{align*}
# \nu_a(x_j) &\propto \exp\left( \mathbb{E}_{q_{k}}\left[ \log p_a(x_a)\right]\right) \\
# \nu_b(x_j) &\propto \exp\left( \mathbb{E}_{q_{l}}\left[ \log p_b(x_b)\right]\right)
# \end{align*}$$
# and $\mathbb{E}_{q_{k}}\left[\cdot\right]$ is an expectation w.r.t. all $q(x_k)$ with $k \in N(a)\setminus {j}$.
#
# - $\nu_a(x_j)$ and $\nu_b(x_j)$ can be locally computed in nodes $a$ and $b$ respectively and can be interpreted as colliding messages over edge $x_j$.
#
# - Local free energy minimization is achieved by setting
# $$
# q_j(x_j) \propto \nu_a(x_j) \cdot \nu_b(x_j)
# $$
#
# - Note that message $\nu_a(x_j)$ depends on posterior beliefs over incoming edges ($k$) for node $a$, and in turn, the message from node $a$ towards edge $x_k$ depends on the belief $q_j(x_j)$. I.o.w., direct mutual dependencies exist between posterior beliefs over edges that attach to the same node.
#
# - These considerations lead to the [Variational Message Passing](https://en.wikipedia.org/wiki/Variational_message_passing) procedure, which is an iterative free energy minimization procedure that can be executed completely through locally computable messages.
#
# - Procedure VMP, see [Dauwels (2007), section 3](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Dauwels-2007-on-variational-message-passing-on-factor-graphs)
# > 1. Initialize all messages $q$ and $ν$, e.g., $q(\cdot) \propto 1$ and $\nu(\cdot) \propto 1$. <br/>
# > 2. Select an edge $z_k$ in the factor graph of $f(z_1,\ldots,z_m)$.<br/>
# > 3. Compute the two messages $\overrightarrow{\nu}(z_k)$ and $\overleftarrow{\nu}(z_k)$ by applying the following generic rule:
# $$
# \overrightarrow{\nu}(y) \propto \exp\left( \mathbb{E}_{q}\left[ \log g(x_1,\dots,x_n,y)\right] \right)
# $$
# > 4. Compute the marginal $q(z_k)$
# $$
# q(z_k) \propto \overrightarrow{\nu}(z_k) \overleftarrow{\nu}(z_k)
# $$
# and send it to the two nodes connected to the edge $z_k$.<br/>
# > 5. Iterate 2–4 until convergence.
# + [markdown] slideshow={"slide_type": "slide"}
# ### The Bethe Free Energy and Belief Propagation
#
# - We showed that, under mean field assumptions, the FE can be decomposed into a sum of local FE contributions for the nodes ($a$) and edges ($i$):
# $$
# F[q] = \sum_{a=1}^M \underbrace{\sum_{x_a} \left( \prod_{j\in N(a)} q_j(x_j)\cdot \left(-\log p_a(x_a)\right) \right) }_{\text{node energy }U[p_a]} - \sum_{i=1}^N \underbrace{\sum_{x_i} q_i(x_i) \log \frac{1}{q_i(x_i)}}_{\text{edge entropy }H[q_i]}
# $$
#
# - The mean field assumption is very strong and may lead to large inference costs ($\mathrm{KL}(q(x),p(x|\text{data}))$). A more relaxed assumption is to allow joint posterior beliefs over the variables that attach to a node. This idea is expressed by the _Bethe_ Free Energy:
# $$
# F_B[q] = \sum_{a=1}^M \left( \sum_{x_a} q_a(x_a) \log \frac{q_a(x_a)}{p_a(x_a)} \right) - \sum_{i=1}^N (d_i - 1) \sum_{x_i} q_i(x_i) \log {q_i(x_i)}
# $$
# where $q_a(x_a)$ is the posterior *joint* belief over the variables $x_a$ (i.e., the set of variables that attach to node $a$), $q_i(x_i)$ is the posterior marginal belief over the variable $x_i$ and $d_i$ is the number of factor nodes that link to edge $i$. Moreover, $q_a(x_a)$ and $q_i(x_i)$ are constrained to obey the following equalities:
# $$
# \sum_{x_a \backslash x_i} q_a(x_a) = q_i(x_i), ~~~ \forall i, \forall a \\
# \sum_{x_i} q_i(x_i) = 1, ~~~ \forall i
# $$
#
# - Given these constraints, we form the Lagrangian by augmenting the _Bethe_ Free Energy functional with the constraints
# $$
# L[q] = F_B[q] + \sum_{a,i} \lambda_{ai}(x_i) \left(q_i(x_i) - \sum_{x_a\backslash x_i} q(x_a) \right) + \sum_{i} \lambda_i \left( 1 - \sum_{x_i}q_i(x_i) \right)
# $$
#
# - The stationary solutions for this Lagrangian are given by
# $$
# q_a(x_a) \propto f_a(x_a) \exp\left(\sum_{i \in N(a)} \lambda_{ai}(x_i)\right) \\
# q_i(x_i) \propto \exp\left(\sum_{a \in N(i)} \lambda_{ai}(x_i)\right) ^{\frac{1}{d_i - 1}}
# $$
# where $N(i)$ denotes the factor nodes that have $x_i$ in their arguments and $N(a)$ denotes the set of variables in the argument of $f_a$.
#
# - <font color=red> TO BE FINISHED ...</font>
#
# - For a more complete overview of message passing as Bethe Free Energy minimization, see [Zhang (2017)](https://github.com/bertdv/BMLIP/blob/master/lessons/notebooks/files/Zhang-2017-Unifying-Message-Passing-Algorithms.pdf).
# + slideshow={"slide_type": "skip"}
open("../../styles/aipstyle.html") do f display("text/html", read(f, String)) end
# -
| lessons/notebooks/10-Factor-Graphs.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.2
# language: julia
# name: julia-1.7
# ---
# # Why Julia?
# Julia is a general-purpose, open-source, dynamic, and high-performance language. By leveraging a clever design around a just-in-time <b>(JIT) compiler</b>, Julia manages to <span style="color:red">combine the speed of languages like C or Fortran</span> with the <span style="color:red">ease of use of Matlab or Python</span>.
#
# ## Some Features of Julia
#
# Julia has many interesting features, but here is a small sample of what I think are the most basic characteristics that make Julia attractive for Science and Engineering.
# <ul>
# <li>JIT Compiled. Write code that looks interpreted, yet runs just as fast as compiled code. No need to vectorize code for performance; devectorized code is fast.</li>
# <li>Optionally Typed. Do some rapid prototyping with maximum flexibility, and then optimize for performance.</li>
# <li> Nice Mathematical Syntax. Builds upon and goes much further than classical mathematical languages like Fortran, Matlab, and Mathematica</li>
# <li>General purpose. Get code from the package manager to perform all sort of tasks, from reading multiple types of databases, to data visualization, or running an HTTP server </li>
# </ul>
# ### Alright..... Let's say Hello!
println("Hello World!")
# ## Installing packages
# One of the salient characteristics of Julia is its excellent package distribution system, and the open-source community built around it.
#
# ### Packages can be easily installed from the <span style="color:blue"> REPL</span>.
#
# Let’s install, for example, the Linear Algebra package. Simply do:
#
#
import Pkg; Pkg.add("LinearAlgebra")
# ### For using the packages in our projects (analogous to importing in python), we need to include them with a using statement:
using LinearAlgebra
| Notebooks/Julia Time 1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Objective: Retrieving a simulated dataset
# - scPOST is a simulation framework that estimates a study design's power to detect differentially abundant cell states (e.g. an expansion of a cell state in disease samples compared to healthy).
# - scPOST allows users to control the simulated data's characteristics, including: effect size, the number of samples, the number of cells per sample, the batch multiplexing structure, and the magnitude of simulated noise.
# - Users may retrieve simulated datasets (without performing association testing) to be used in multiple analyses
#
# Here, we'll simulate a realistic dataset by using a rheumatoid arthritis (RA) dataset described in <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, *et al.*, *Nature Immunol* (2020). The metadata and PC embeddings for this batch-corrected dataset are provided in the pre-loaded data: ra_HarmObj.
#
# In this tutorial, we'll:
# - Retrieve the simulated dataset and visualize how it compares to the original input data
# - See how cf_scale affects the observed fold-change in a cell state between cases/ctrls
# ## Load packages
# +
suppressPackageStartupMessages({
# imports for analyses
library(scpost)
library(dplyr)
# imports for figures
library(ggplot2)
library(RColorBrewer)
library(patchwork)
})
# Set the display size (in inches) of plots rendered in this notebook.
fig.size <- function (height, width) {
    options(
        repr.plot.height = height,
        repr.plot.width  = width
    )
}
# -
# ## Minimum input
# To simulate data, scPOST uses an input prototype dataset (such as public or pilot data). scPOST estimates two types of variation often found in multi-sample single-cell data:
#
# 1. Variation in a cell state's frequency across samples (Cell state frequency variation)
# 1. Variation in a cell state's gene expression. We estimate and simulate gene expression with principal components (PCs), because PCs are a summary of gene expression that also takes into account gene covariation. This also reduces computational burden.
#
# scPOST requires the following inputs for each cell:
#
# 1. Cell state annotations (in single-cell these are often obtained from clustering algorithms, such as the Louvain method)
# 1. Sample annotations (the sample each cell comes from)
# 1. Batch annotations (if no batch information for the data is available, it is sufficient to treat every sample as its own batch)
# 1. Principal component values (these are obtained from PCA)
#
# Let's take a look at a prototype dataset that we will apply scPOST to.
ra_HarmObj$meta %>% str
ra_HarmObj$embeddings %>% head(2)
# In the metadata, we have:
#
# - Cell state annotations (harmClus)
# - Sample annotations (sample)
# - Batch annotations (batch)
#
# In the embeddings, we have:
#
# - Principal component values for 20 PCs
# # Step 1: Parameter estimation
system.time({
ra_freqEstimates <- estimateFreqVar(meta = ra_HarmObj$meta, clusCol = 'harmClus', sampleCol = 'sample', logCov = TRUE)
})
system.time({
ra_pcEstimates <- estimatePCVar(pca = ra_HarmObj$embeddings, npcs = 20, meta = ra_HarmObj$meta, clusCol = 'harmClus',
sampleCol = 'sample', batchCol = 'batch')
})
# # Step 2: Simulate dataset
# Here, we'll simulate a realistic dataset like we did in the "Getting Started" tutorial. However, we now use the simDataset.base function, which simulates a dataset based on the estimated parameters; it does not perform association testing like the simDataset.withMASC function.
# +
set.seed(23)
# Set the number of samples, number of cells per sample, and create batch structure
ncases <- 17
nctrls <- 4
nbatches <- 4
batchStructure <- distribSamples(ncases = ncases, nctrls = nctrls, nbatches = nbatches)
ncells <- rep(250, times = ncases + nctrls)
names(ncells) <- batchStructure$sample_names
# -
# Next, we'll set up a parameter table with the "createParamTable" function that we'll use to run multiple simulations:
# - We'll simulate realistic levels of variation by setting "b_scale", "s_scale", and "cf_scale" equal to 1.
# - We won't induce a fold-change, so we'll set fc = 1, and just choose a random cluster to induce the fold-change into
# - We'll set up a folder where we will save our results
# +
params <- createParamTable(
nreps = 1,
clus = "clus0",
fc = 1,
ncases = ncases,
nctrls = nctrls,
nbatches = nbatches,
b_scale = 1,
s_scale = 1,
cf_scale = 1,
res_use = 0.6,
cond_induce = "cases",
save_path = file.path(getwd(), "scpostSims/retrievingSimulations")
)
params %>% head(2)
# -
# Here, we want to return the dataset PC embeddings, so we set the "returnPCs" argument to TRUE. We do not need to re-cluster the simulated data, so we set the "clusterData" argument to FALSE.
suppressWarnings({
lapply(seq(nrow(params)), function(x){
simDataset.base(
save_path = params[x, 'save_path'],
rep = params[x, 'rep'],
seed = params[x, 'seed'],
ncases = params[x, 'ncases'],
nctrls = params[x, 'nctrls'],
nbatches = params[x, 'nbatches'],
batchStructure = batchStructure,
ncells = ncells,
centroids = ra_pcEstimates$centroids,
pc_cov_list = ra_pcEstimates$pc_cov_list,
batch_vars = ra_pcEstimates$batch_vars,
b_scale = params[x, 'b_scale'],
sample_vars = ra_pcEstimates$sample_vars,
s_scale = params[x, 's_scale'],
cfcov = ra_freqEstimates$cfcov,
cf_scale = params[x, 'cf_scale'],
meanFreqs = ra_freqEstimates$meanFreq,
clus = params[x, 'clus'],
fc = params[x, 'fc'],
cond_induce = params[x, 'cond_induce'],
res_use = params[x, 'res_use'],
mc.cores = 1,
clusterData = FALSE,
returnPCs = TRUE
)
})
})
# # Retrieve simulated dataset
# Retrieve the saved simulated dataset(s) from the save folder.
dir <- file.path(getwd(), "scpostSims/retrievingSimulations/")
# list.files() already returns base names when full.names = FALSE (the
# default), so the previous full.names = TRUE %>% basename round-trip
# was redundant.
sim_filenames <- list.files(path = dir)
sim_data <- lapply(sim_filenames, function(x){
    readRDS(file.path(dir, x))
})
sim_data[[1]] %>% str
# From the saved data, we see that we simulated 5,252 cells. Let's visualize how the PC embeddings of our simulated dataset compare to the original data
#
# Now let's combine our simulated data with the real data
# +
ra_pcs <- cbind.data.frame(ra_HarmObj$meta, ra_HarmObj$embeddings) %>% dplyr::select(c(harmClus, sample, batch,
paste0("PC", 1:20)))
sim_pcs <- cbind.data.frame(sim_data[[1]]$meta, sim_data[[1]]$new_pcs) %>% dplyr::select(-condition)
colnames(ra_pcs) <- c("cellstate", "sample", "batch", paste0("PC", 1:20))
colnames(sim_pcs) <- c("cellstate", "sample", "batch", paste0("PC", 1:20))
comb_pcs <- rbind.data.frame(ra_pcs, sim_pcs)
comb_pcs$dataset <- c(rep("Real", nrow(ra_pcs)), rep("Sim", nrow(sim_pcs)))
comb_pcs %>% head(2)
# -
# ## Visualize the real input RA dataset with the simulated dataset
system.time({
umap_comb <- uwot::umap(comb_pcs %>% dplyr::select(paste0("PC", 1:20)))
colnames(umap_comb) <- paste0("UMAP", 1:2)
plot_comb <- cbind.data.frame(umap_comb, comb_pcs)
})
# +
# create color palette
plotPal <- colorRampPalette(brewer.pal(9, 'Set1'))
fig.size(6,16)
plot_comb %>% sample_frac %>% ggplot(aes(x = UMAP1, y = UMAP2, col = cellstate)) +
geom_point(size = 0.6) +
theme_classic() +
labs(title = 'Real and simulated RA datasets in UMAP space', col = 'Cell state') +
scale_color_manual(values = plotPal(plot_comb$cellstate %>% unique %>% length)) +
guides(col = guide_legend(override.aes = list(stroke = 1, alpha = 1, shape = 19, size = 4))) +
facet_wrap(~dataset)
# -
# It looks like the simulated cells were placed into a similar PC space when compared to the real data. A notable difference between the simulated dataset and the input real data is the number of cells in each state. This is because the simulated dataset also generates cell state frequency distributions for each simulated sample, which will be different from the real data. For example, our simulated dataset generated more cells as cell state 2, but fewer cells as cell state 3.
plot_comb %>% filter(dataset == 'Real') %>% pull(cellstate) %>% table
plot_comb %>% filter(dataset == 'Sim') %>% pull(cellstate) %>% table
# # Retrieving the observed fold-change in a simulated dataset
# scPOST simulates cell state frequency distributions for each simulated sample. Because our model includes variance in these distributions, the actual observed fold-change may be slightly different from the fold-change we wanted to induce. This is how cell state frequency variation contributes to a decrease in power; if the variance is high, random sampling can mask the true fold-change by resulting in a smaller observed fold-change.
#
# Let's simulate a few datasets with no cell state frequency variation, and then a few with realistic levels of cell state frequency variation. Then, we can check what the observed fold-change is. We do this by setting "cf_scale" to 0. A realistic level of variation (as estimated from the real data) would be "cf_scale" set to 1.
# +
set.seed(23)
# Set the number of samples, number of cells per sample, and create batch structure
ncases <- 10
nctrls <- 10
nbatches <- 4
batchStructure <- distribSamples(ncases = ncases, nctrls = nctrls, nbatches = nbatches)
ncells <- rep(250, times = ncases + nctrls)
names(ncells) <- batchStructure$sample_names
# -
# Next, we'll set up a parameter table with the "createParamTable" function that we'll use to run multiple simulations:
# - We'll simulate realistic levels of variation by setting "b_scale" and "s_scale" to 1. We'll simulate either zero or realistic levels of cell state frequency variation by setting "cf_scale" equal to 0 or 1 respectively.
# - We'll induce a fold-change of 2 in "clus0"
# - We'll set up a folder where we will save our results
#
# With the following parameter table, we'll run:
#
# - 5 simulations with cf_scale = 0
# - 5 simulations with cf_scale = 1
# +
params <- createParamTable(
nreps = 5,
clus = "clus0",
fc = 2,
ncases = ncases,
nctrls = nctrls,
nbatches = nbatches,
b_scale = 1,
s_scale = 1,
cf_scale = c(0,1),
res_use = 0.6,
cond_induce = "cases",
save_path = file.path(getwd(), "scpostSims/retrievingCFscale")
)
params %>% dim
params %>% head(2)
# -
# Here, we want to return the dataset PC embeddings, so we set the "returnPCs" argument to TRUE. We do not need to re-cluster the simulated data, so we set the "clusterData" argument to FALSE.
suppressWarnings({
lapply(seq(nrow(params)), function(x){
simDataset.base(
save_path = params[x, 'save_path'],
rep = params[x, 'rep'],
seed = params[x, 'seed'],
ncases = params[x, 'ncases'],
nctrls = params[x, 'nctrls'],
nbatches = params[x, 'nbatches'],
batchStructure = batchStructure,
ncells = ncells,
centroids = ra_pcEstimates$centroids,
pc_cov_list = ra_pcEstimates$pc_cov_list,
batch_vars = ra_pcEstimates$batch_vars,
b_scale = params[x, 'b_scale'],
sample_vars = ra_pcEstimates$sample_vars,
s_scale = params[x, 's_scale'],
cfcov = ra_freqEstimates$cfcov,
cf_scale = params[x, 'cf_scale'],
meanFreqs = ra_freqEstimates$meanFreq,
clus = params[x, 'clus'],
fc = params[x, 'fc'],
cond_induce = params[x, 'cond_induce'],
res_use = params[x, 'res_use'],
mc.cores = 1,
clusterData = FALSE,
returnPCs = TRUE
)
})
})
# ## Retrieve simulated datasets
# Retrieve the metadata of each saved simulation from the save folder.
dir <- file.path(getwd(), "scpostSims/retrievingCFscale/")
# list.files() returns base names by default; this also removes the stray
# trailing comma that passed an empty argument to list.files().
sim_filenames <- list.files(path = dir)
sim_data <- lapply(sim_filenames, function(x){
    readRDS(file.path(dir, x))[["meta"]]
})
# Now let's see what our observed fold-changes are. We induced a fold-change of 2 in cell state 0. When cf_scale = 0 (zero cell state frequency variation), we should see an exact fold-change of 2 in cell state 0, while we'll see some variance when cf_scale = 1
# +
# Compute the observed case/ctrl fold-change for one cell state.
#
# meta_data: data.frame with `cellstate` and `condition` ('case'/'ctrl') columns
# cluster:   cell state identifier; coerced to character for the comparison
getFC <- function(meta_data, cluster){
    tbl <- meta_data %>% subset(cellstate == paste0(cluster))
    # Use plain assignment: magrittr's `%<>%` is not attached by dplyr
    # (dplyr only re-exports `%>%`), so `tbl %<>% ...` would error here.
    tbl <- tbl %>% group_by(condition) %>% summarize(freq = dplyr::n(), .groups = 'drop')
    fc <- tbl %>% summarize(obs_fc = freq[condition == 'case'] / freq[condition == 'ctrl'], .groups = 'drop') %>% dplyr::pull(obs_fc)
    return(fc)
}
obs_fcs <- data.frame(obs_fc = sapply(sim_data, function(x){
getFC(x, cluster = 0)
}))
obs_fcs$cf_scale <- c(rep(0, 5), rep(1, 5))
mean_fcs <- obs_fcs %>% group_by(cf_scale) %>% summarise(mean_obs = mean(obs_fc), sd = sd(obs_fc), .groups = 'drop') %>% data.frame
mean_fcs$cf_scale <- factor(mean_fcs$cf_scale, levels = c(0,1))
fig.size(6,8)
mean_fcs %>% ggplot(aes(x = cf_scale, y = mean_obs)) +
geom_bar(stat = 'identity', fill = 'gray') +
geom_errorbar(aes(ymin = mean_obs - sd, ymax = mean_obs + sd), width = 0.2) +
theme_classic() +
labs(x = 'cf_scale', y = 'Observed fold-change')
# -
# As expected, when cf_scale = 0, there is no variance. However, when we have a realistic level of cell state frequency variation, we do see some variance. If variance is higher, it could result in decreased power, because the observed fold-change might be lower than the true fold-change. Over more replicates, our average observed fold-changes would begin to converge to 2.
# # Session information
sessionInfo()
| vignettes/Visualizing_SimulatedData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to Form a Good Cointegrating (and Mean-Reverting) Pair of Stocks
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import coint
from statsmodels.api import OLS
from scipy.stats import pearsonr
df1=pd.read_excel('KO.xls')
df2=pd.read_excel('PEP.xls')
df=pd.merge(df1, df2, on='Date', suffixes=('_KO', '_PEP'))
df.set_index('Date', inplace=True)
df.sort_index(inplace=True)
# ## Run cointegration (Engle-Granger) test
coint_t, pvalue, crit_value=coint(df['Adj Close_KO'], df['Adj Close_PEP'])
(coint_t, pvalue, crit_value) # abs(t-stat) < critical value at 90%. pvalue says probability of null hypothesis (of no cointegration) is 73%
# ## Determine hedge ratio
model=OLS(df['Adj Close_KO'], df['Adj Close_PEP'])
results=model.fit()
hedgeRatio=results.params
hedgeRatio
# ## spread = KO - hedgeRatio*PEP
# Stationary linear combination: spread = KO - hedgeRatio * PEP.
# Use positional .iloc[0]: integer-key lookup on a label-indexed Series
# (hedgeRatio[0]) is deprecated and removed in recent pandas versions.
spread = df['Adj Close_KO'] - hedgeRatio.iloc[0]*df['Adj Close_PEP']
plt.plot(spread) # Figure 7.2
# ## Correlation test
dailyret=df.loc[:, ('Adj Close_KO', 'Adj Close_PEP')].pct_change()
dailyret.corr()
dailyret_clean=dailyret.dropna()
pearsonr(dailyret_clean.iloc[:,0], dailyret_clean.iloc[:,1]) # first output is correlation coefficient, second output is pvalue.
| book1/example7_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spectral Opponency
#
# This notebook plots the distribution of spectrally opponent, non-opponent and unresponsive cells
# in different layers of our model as a function of bottleneck size. It corresponds to Figure 1 in the paper.
#
# **Note**: The easiest way to use this is as a colab notebook, which allows you to dive in with no setup.
#
# ## Load Dependencies - Colab Only
from os.path import exists
if not exists('opponency.zip'):
# !wget -O opponency.zip https://github.com/ecs-vlc/opponency/archive/master.zip
# !unzip -qq opponency.zip
# !mv opponency-master/* ./
# !rm -r opponency-master
# ## Generate Plots
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.font_manager
rc('font',**{'family':'serif','serif':['Computer Modern Roman'],'size':13})
rc('text', usetex=True)
import pandas as pd
import numpy as np
from statistics import load
def plot(ax, frame, cell_type, d_vvs, legend=False):
    """Plot the mean relative amount of `cell_type` cells vs. bottleneck size.

    Draws one curve per layer (Retina 2, Ventral 1, Ventral 2), each with a
    shaded band of +/- one standard deviation around the mean.

    Parameters
    ----------
    ax : matplotlib Axes to draw on (one subplot of the grid).
    frame : DataFrame of statistics (loaded from statistics/devalois.pd).
    cell_type : str label, e.g. 'Spectrally Opponent'; lower-cased for lookup.
    d_vvs : int ventral-stream depth used to filter `frame`.
    legend : if True, draw the layer legend on this subplot.
    """
    # Keep only the rows for the requested ventral depth.
    frame = frame[frame['d_vvs'] == d_vvs]
    opps = load.spectral(frame, cell_type.lower())
    # Split the statistics by network layer.
    retina2 = opps[opps['layer'] == 'retina_relu2']
    ventral0 = opps[opps['layer'] == 'ventral_relu0']
    ventral1 = opps[opps['layer'] == 'ventral_relu1']
    # Mean curve plus +/- 1 std band for each layer; distinct linestyles
    # keep the curves distinguishable in print.
    ax.plot(retina2['n_bn'], retina2['mean_rel_amount'], label='Retina 2', linestyle=':')
    ax.fill_between(
        retina2['n_bn'],
        retina2['mean_rel_amount'] + retina2['std_rel_amount'],
        retina2['mean_rel_amount'] - retina2['std_rel_amount'],
        alpha=0.1
    )
    ax.plot(ventral0['n_bn'], ventral0['mean_rel_amount'], label='Ventral 1', linestyle='--')
    ax.fill_between(
        ventral0['n_bn'],
        ventral0['mean_rel_amount'] + ventral0['std_rel_amount'],
        ventral0['mean_rel_amount'] - ventral0['std_rel_amount'],
        alpha=0.1
    )
    ax.plot(ventral1['n_bn'], ventral1['mean_rel_amount'], label='Ventral 2', linestyle='-.')
    ax.fill_between(
        ventral1['n_bn'],
        ventral1['mean_rel_amount'] + ventral1['std_rel_amount'],
        ventral1['mean_rel_amount'] - ventral1['std_rel_amount'],
        alpha=0.1
    )
    if legend:
        ax.legend(frameon=False)
    # Column titles only on the top row; row labels only in the first column.
    if d_vvs == 0:
        ax.set_title(cell_type, pad=25)
    if cell_type == 'Spectrally Opponent':
        ax.set_ylabel(f'Depth {d_vvs}', labelpad=25, fontsize='large')
    ax.set_xlim(1, 32)
    ax.set_ylim(0, 1)
    plt.draw()
    # Blank the top-most y tick label — presumably so labels of vertically
    # adjacent subplots (hspace=0) don't collide; TODO confirm intent.
    labels = ax.get_yticklabels()
    if len(labels) > 0:
        labels[-1] = ""
    ax.set_yticklabels(labels)
# +
frame = pd.read_pickle('statistics/devalois.pd')
cell_types = ['Spectrally Opponent', 'Spectrally Non-opponent', 'Spectrally Unresponsive']
fig, axs = plt.subplots(5, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
fig.set_size_inches(9, 12.5)
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.xlabel('Bottleneck Size')
plt.ylabel('Percentage')
for d_vvs in range(5):
for c, cell_type in enumerate(cell_types):
plot(axs[d_vvs, c], frame, cell_type, d_vvs, d_vvs == 0 and cell_type == 'Spectrally Unresponsive')
plt.savefig('figures/spectral_opponency.pdf', bbox_inches='tight')
# -
| spectral_opponency.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial for flexx.ui - creating widgets and user interfaces
from flexx import app, ui, react
app.init_notebook()
# ## Displaying widgets
# Widgets can be inserted into the notebook by making them a cell output:
b = ui.Button(text='foo')
b
# Widgets have many input signals to modify their appearance and behavior:
b.text('Push me!')
# ## Layout
# Layout is done using special layout widgets, and written in a structured way using the `with` statement:
with ui.HBox() as hbox:
slider = ui.Slider(flex=0)
label = ui.Label(flex=1, text='xx')
hbox
# ## React
# Defining reactions is easy:
@react.connect('slider.value')
def show_slider_value(v):
label.text(str(v))
# ## Compound widgets
# The above quickly gets messy. You can better create new widgets by wrapping together other widgets:
# +
class MyWidget(ui.Widget):
    """Compound widget: a slider whose current value is mirrored in a label."""

    def init(self):
        # Build the sub-widgets inside an HBox layout; flex=1 lets the
        # label take up the remaining horizontal space.
        with ui.HBox():
            self._slider = ui.Slider(flex=0)
            self._label = ui.Label(flex=1, text='xx')

    @react.connect('_slider.value')
    def show_slider_value(self, v):
        # Reaction: invoked whenever the slider value signal changes.
        self._label.text(str(v))
w = MyWidget()
w
# -
# ## Reactions in JS
# If you want reactions to be handled in JS, use a `JS` nested class:
# +
class MyWidget2(ui.Widget):
    """Like MyWidget, but the slider-to-label reaction is handled in JS."""

    def init(self):
        with ui.HBox():
            self.slider = ui.Slider(flex=0)
            self.label = ui.Label(flex=1, text='xx')

    class JS:
        # Members of the nested JS class run client-side (see the markdown
        # above: reactions placed here are handled in JS).
        @react.connect('slider.value')
        def show_slider_value(self, v):
            self.label.text(str(v))
w
| examples/notebooks/flexx_tutorial_ui.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp preprocessing
# -
# # pre_processing
#
#
# > processing
#hide
from nbdev.showdoc import *
# 
#export
import pydicom,kornia,skimage
from fastai.vision.all import *
from fastai.medical.imaging import *
from torchvision.utils import save_image
# ### Mask & Save
# > Note: View Mask & Save tutorial on Kaggle (click on Kaggle icon)
#
# [](https://www.kaggle.com/avirdee/mask-and-save-tutorial/)
#export
def mask_and_save_path(file: (L), source=None, show=False, window=dicom_windows.lungs, sigma:float=0.1,\
                  thresh:float=0.9, save=False, save_path=None):
    """Crop each dicom in `file` to the bounding box of a window-based mask.

    For every path in `file`: apply the dicom `window`, build a mask from the
    blurred windowed image, crop to the mask's bounding box, optionally save
    the crop as png under `save_path`, and (optionally) show the first 10.
    `source` is unused; kept for signature parity with `mask_and_save_df`.
    """
    image_list = []
    for i in file:
        # Path.stem is platform independent, replacing the per-platform
        # string splitting that previously had to be edited by hand.
        file_name = Path(i).stem
        dcm = dcmread(i)
        wind = dcm.windowed(*window)
        mask = dcm.mask_from_blur(window, sigma=sigma, thresh=thresh, remove_max=False)
        lo, hi = mask2bbox(mask)
        imh = wind[lo[0]:hi[0], lo[1]:hi[1]]
        if save:
            # exist_ok avoids the check-then-create race of the old
            # os.path.exists + os.makedirs pair.
            os.makedirs(save_path, exist_ok=True)
            save_image(imh, f'{save_path}/{file_name}.png')
        image_list.append(imh)
    if show:
        show_images(image_list[:10], nrows=1)
show_doc(mask_and_save_path)
#export
def mask_and_save_df(file: (pd.DataFrame), source=None, show=False, window=dicom_windows.lungs, sigma:float=0.1,\
                  thresh:float=0.9, save=False, save_path=None):
    """Crop each dicom listed in dataframe `file` to a window-mask bounding box.

    `file` must have 'PatientID' and 'InstanceNumber' columns; each row maps
    to `<source>/<PatientID>/<InstanceNumber>.dcm`. Crops are optionally saved
    as png under `save_path` and the first 10 optionally shown.
    """
    image_list = []
    # Iterate positionally: the original mixed `file.index` labels with
    # .iloc positions, which silently breaks on non-RangeIndex frames.
    for i in range(len(file)):
        row = file.iloc[i]
        file_path = f"{source}/{row['PatientID']}/{row['InstanceNumber']}.dcm"
        file_name = row['InstanceNumber']
        dcm = dcmread(file_path)
        wind = dcm.windowed(*window)
        mask = dcm.mask_from_blur(window, sigma=sigma, thresh=thresh, remove_max=False)
        lo, hi = mask2bbox(mask)
        imh = wind[lo[0]:hi[0], lo[1]:hi[1]]
        if save:
            # exist_ok avoids the check-then-create race.
            os.makedirs(save_path, exist_ok=True)
            save_image(imh, f'{save_path}/{file_name}.png')
        image_list.append(imh)
    if show:
        show_images(image_list[:10], nrows=1)
show_doc(mask_and_save_df)
# `fastai` has a handy method `from.dicoms` that can access dicom metadata and display this in a dataframe.
m_items = get_dicom_files('D:/Datasets/osic-pulmonary-fibrosis-progression/train/ID00007637202177411956430')
source = 'D:/Datasets/osic-pulmonary-fibrosis-progression/train/'
dicom_dataframe = pd.DataFrame.from_dicoms(m_items)
dicom_dataframe[:2]
# To see how `mask_and_save` works, here are 10 original images (we can use `get_dicom_image` to view the images). To save the images in `png` format, change save to `True` and set a `save_path`
# Setting the `sigma` value to 0.1 reduces the image area only to the areas that are important
# ### Dicom metadata dict
# Updated `from_dicoms` method to `from_dicoms2` that allows you to choose the window setting
#export
@patch
def updated_dict(self:DcmDataset, windows=dicom_windows.lungs):
    "Dict of dicom metadata plus pixel stats, pct-in-window and a relative file path"
    # Default fixed: `pct_in_window(*windows)` needs the window's values
    # unpacked, so `windows` must be a window tuple. The previous default,
    # `[dicom_windows.lungs]`, unpacked to a single tuple argument and
    # raised a TypeError; all working call sites pass a bare window tuple.
    pxdata = (0x7fe0,0x0010)  # PixelData tag — exclude the raw pixels from the dict
    vals = [self[o] for o in self.keys() if o != pxdata]
    its = [(v.keyword, v.value) for v in vals]
    res = dict(its)
    res['fname'] = self.filename
    # Summary statistics of the raw pixel array.
    stats = 'min', 'max', 'mean', 'std'
    pxs = self.pixel_array
    for f in stats: res['img_'+f] = getattr(pxs, f)()
    res['img_pct_window'] = self.pct_in_window(*windows)
    # Relative path as used by the rest of this notebook: <PatientID>/<InstanceNumber>.dcm
    res['file_path'] = f'{self.PatientID}/{self.InstanceNumber}.dcm'
    return res
#export
def _dcm2dict2(fn, windows, **kwargs): return fn.dcmread().updated_dict(windows, **kwargs)
#export
@delegates(parallel)
def _from_dicoms2(cls, fns, n_workers=0, **kwargs):
    # Build one metadata dict per file (optionally in parallel workers) and
    # stack them into a DataFrame; `windows` is forwarded via **kwargs.
    return pd.DataFrame(parallel(_dcm2dict2, fns, n_workers=n_workers, **kwargs))
# Attach as an alternate constructor alongside fastai's DataFrame.from_dicoms.
pd.DataFrame.from_dicoms2 = classmethod(_from_dicoms2)
# `from_dicoms2` allows you to set the dicom window, for example in this case the `mediastinum` window is used
dicom_dataframe = pd.DataFrame.from_dicoms2(m_items, windows=dicom_windows.mediastinum)
dicom_dataframe[:2]
# In this case the `lungs` window was used
dicom_dataframe = pd.DataFrame.from_dicoms2(m_items, windows=dicom_windows.lungs)
dicom_dataframe[:2]
# ## Move Files
# > Note: View dicom dataframe and `move_files` tutorial on Kaggle (click on Kaggle icon)
#
# [](https://www.kaggle.com/avirdee/dicom-dataframe-tutorial/)
# > Note: Take note of this.
#export
def move_files(df, source, save_path):
    "Copy each image listed in `df` from `source`/train into a per-patient folder under `save_path`"
    for i in df.index:
        # patient ID and window stat, for progress logging only
        patid = str(df.PatientID[i])
        window = str(df.img_pct_window[i])
        # instance number = file name without directory or extension
        filename = str(df.fname[i]).split('/')[-1]
        img = filename.split('.')[0]
        print(f'ID: {patid} window: {window} instance: {img}')
        # os.path.join inserts the separator; the old `save_path + patid`
        # silently produced a merged folder name ("outP1") whenever
        # `save_path` lacked a trailing slash
        folder_path = os.path.join(save_path, patid)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        img_file = Path(f'{source}/train/{patid}/{img}.dcm')
        shutil.copy(img_file, folder_path, follow_symlinks=True)
show_doc(move_files)
# ### dicom convert 3channel
#export
def dicom_convert_3channel(fn:(Path,str), save_dir:(str), win1=dicom_windows.lungs,
                           win2=dicom_windows.liver, win3=dicom_windows.brain):
    "Split a dicom image into 3 windows with one window per channel and saved as jpg"
    dcm = dcmread(fn)
    # file stem: strip the directory (Windows-style separator) and extension
    name = str(fn).split('\\')[-1].split('.')[0]
    # one windowed copy of the image per channel, each with a trailing channel axis
    channels = [np.expand_dims(dcm.windowed(*w), axis=2) for w in (win1, win2, win3)]
    stacked = np.concatenate(channels, axis=2)
    # HWC -> CHW tensor, then write out as jpg under the original stem
    save_image(TensorImage(stacked).permute(2,0,1), f'{save_dir}/{name}.jpg')
show_doc(dicom_convert_3channel)
# To see how `dicom_convert_3channel works`, specify a save directory and choose a test file
save_dir = 'D:/Datasets/osic-pulmonary-fibrosis-progression/test3c/'
test1 = m_items[12]
test1
# Choose 3 windows, one for each channel, in this case `lungs`, `mediastinum` and `pe`
dicom_convert_3channel(test1, save_dir, win1=dicom_windows.lungs, win2=dicom_windows.mediastinum, win3=dicom_windows.pe)
# Load the saved image which saves with the same name as the input image name
saved_image = PILImage.create('D:/Datasets/osic-pulmonary-fibrosis-progression/test3c/20.jpg')
saved_ten = TensorImage(saved_image)
saved_ten.shape
# ### Dicom Splitter
# > Note: View dicom splitter tutorial on kaggle (click Kaggle icon)
#
# [](https://www.kaggle.com/avirdee/dicom-splitter-tutorial/)
#export
def dicomsplit(valid_pct=0.2, seed=None, **kwargs):
    """Splits `items` between train/val with `valid_pct`
    and checks if identical patient IDs exist in both the train and valid sets.

    Returns a closure that, given an ordered collection of dicom paths `o`,
    prints/displays both splits and returns `(new_list, val)` where `val` is
    the list of validation indices. (The two-string docstring of the original
    left the second sentence as a dead expression; they are merged here.)
    """
    def _inner(o, **kwargs):
        if seed is not None: torch.manual_seed(seed)
        rand_idx = L(int(i) for i in torch.randperm(len(o)))
        cut = int(valid_pct * len(o))
        trn = rand_idx[cut:]; trn_p = o[rand_idx[cut:]]
        val = rand_idx[:cut]; val_p = o[rand_idx[:cut]]
        # collect patient IDs and pixel data for each split; each file is now
        # read once (previously dcmread ran twice per item)
        train_patient = []; train_images = []
        for tfile in trn_p:
            dcm = dcmread(tfile)
            train_patient.append(dcm.PatientID)
            train_images.append(dcm.pixel_array)
        val_patient = []; val_images = []
        for vfile in val_p:
            dcm = dcmread(vfile)
            val_patient.append(dcm.PatientID)
            val_images.append(dcm.pixel_array)
        print(rand_idx)
        print(f'Train: {trn}, {train_patient}')
        show_images(train_images[:20])
        print(f'Val: {val}, {val_patient}')
        show_images(val_images[:20])
        # patients appearing in both splits would leak data between them
        is_duplicate = set(train_patient) & set(val_patient)
        print(f'Duplicate: {set(train_patient) & set(val_patient)}')
        if is_duplicate:
            print('duplicate exists')
            # NOTE(review): this branch returns patient IDs, not indices,
            # unlike the no-duplicate branch - confirm callers expect the
            # asymmetry
            new_list = [elem for elem in train_patient if elem not in val_patient]
            print(f'New List: {new_list}')
        else:
            print('duplicate does NOT exist')
            new_list = trn
        return new_list, val
    return _inner
show_doc(dicomsplit)
#export
def check_duplicate(items, seed=5):
    "Run `dicomsplit` once over `items` and return its (train, valid) result."
    return dicomsplit(valid_pct=0.2, seed=seed)(items)
#export
def dicom_splitter(items, valid_pct=0.2, seed=77):
    """Build a fastai-style splitter from a patient-aware `dicomsplit` of `items`.

    The returned `_inner` maps any collection `o` of the same length onto
    `(train_idx, valid_idx)` lists, where the valid indices are those chosen
    by `dicomsplit`. `seed` is now forwarded: previously it was accepted but
    silently ignored, so the split was never reproducible.
    """
    trn, val = dicomsplit(valid_pct=valid_pct, seed=seed)(items)
    valid_idx = val
    def _inner(o):
        # train indices = everything not in the validation set
        train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
        print(f'train:{train_idx} val:{valid_idx}')
        return L(train_idx, use_list=True), L(valid_idx, use_list=True)
    return _inner
show_doc(dicom_splitter)
# Check to see how `dicom_splitter` works. First create a random generating function that will choose 10 random numbers between a range of 0 and the length of the number of items
def random_(items, value=10):
    """Return `value` randomly chosen elements of `items` (with replacement).

    Indices are drawn with `random.randrange(len(items))`. The previous
    `random.randint(0, len(items))` had an inclusive upper bound, so it could
    return `len(items)` itself and raise an IndexError.
    """
    rand_indices = [random.randrange(len(items)) for _ in range(value)]
    return items[rand_indices]
items = get_dicom_files(source)
rand_items = random_(items)
rand_items
# `check_duplicate` shows the indices of the random 10 images chosen above. By default the train/valid split is 80/20 so the train set has 8 images and the valid set has 2 images. We can view the 8 images in the train set and the 2 images in the valid set.
#hide
from nbdev.export import notebook2script
notebook2script()
| 03_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
# %matplotlib inline
# Load one play's tracking data: one row per (time, player role) with
# position, distance, direction and speed columns.
play = pd.read_csv('../working/playlevel/during_play/2016-5-3129.csv', index_col=[0])
# combine x/y into a single coordinate tuple per row
play['xy'] = list(zip(play.x, play.y))
# pivot so each role becomes a column group indexed by time
pp = play.pivot(index='time', columns='role', values=['xy','dis','dir','mph'])
# flatten the (value, role) MultiIndex into 'role_value' column names, sorted
pp.columns = [col[1]+'_'+col[0] for col in pp.columns.values]
pp = pp[np.sort(pp.columns.values)]
# print each role's coordinate series
for xy_col in pp.columns:
    if xy_col[-2:] == 'xy':
        print(pp[xy_col])
pp.iloc[0]
| notebooks/Play_Danger_Level.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sleepless-se/Colab/blob/master/ImageMaker.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="r8nppy4sbDIE" colab_type="code" outputId="be52b235-edd9-4642-a0ce-883fb35e65c2" colab={"base_uri": "https://localhost:8080/", "height": 139}
import google.colab
import keras
google.colab.drive.mount('/content/gdrive')
# + id="oxNCeFNkzcyh" colab_type="code" outputId="9e2ea9ea-8a4d-48ce-e200-cb5ab58561d5" colab={"base_uri": "https://localhost:8080/", "height": 4117}
# !git clone https://github.com/matterport/Mask_RCNN.git
# %cd Mask_RCNN
# !pip install -r requirements.txt
# %run -i setup.py install
# !wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5
# !git clone https://github.com/waleedka/coco.git
# %cd coco/PythonAPI
# %run -i setup.py build_ext --inplace
# %run -i setup.py build_ext install
# %cd /content/Mask_RCNN
# + [markdown] id="ioMWpLv02_Ei" colab_type="text"
# https://github.com/matterport/Mask_RCNN/blob/master/samples/demo.ipynb
# + id="S-qz3JMo20mR" colab_type="code" colab={}
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("/content/Mask_RCNN")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
# sys.path.append(os.path.join(ROOT_DIR, "coco/PythonAPI")) # To find local version
# import coco
from samples.coco import coco
# %matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# + id="fMMYdMsi3K6e" colab_type="code" outputId="d20922eb-a78e-45b1-e6d5-c43a6628ba9c" colab={"base_uri": "https://localhost:8080/", "height": 921}
class InferenceConfig(coco.CocoConfig):
    """Mask R-CNN COCO config specialised for single-image inference."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# + id="ZhBV5qVx53y8" colab_type="code" outputId="fa5952ee-e80c-456f-9ccc-c5e6b54ab1ad" colab={"base_uri": "https://localhost:8080/", "height": 241}
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# + id="hiUpUgMC7tWV" colab_type="code" colab={}
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# + id="2IqQ_Br-72Ad" colab_type="code" colab={}
# + [markdown] id="_XZ3zz--Hlti" colab_type="text"
# # 共通関数
# + id="vAmKXderutnZ" colab_type="code" colab={}
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import google.colab
import os
def show_results(results):
    """Print the boxes, class ids and scores of the first detection result."""
    first = results[0]
    for key in ('rois', 'class_ids', 'scores'):
        print(first[key])
def resize(img, new_hight, rate=1):
    """Scale `img` to height `new_hight`, preserving its aspect ratio.

    NOTE(review): the `rate` argument is immediately overwritten below and
    has no effect; it is kept only for signature compatibility.
    """
    width, height = img.size
    rate = new_hight / height
    scaled_width = int(width * rate)
    return img.resize((scaled_width, int(new_hight)), Image.LANCZOS)
def resize_to_target(img, new_size):
    """Scale `img` so its shorter side equals `new_size`, keeping aspect ratio."""
    width, height = img.size
    if width < height:
        # width is the short side: pin it to new_size
        factor = new_size / width
        target = (int(new_size), int(height * factor))
    else:
        # height is the short (or equal) side: pin it to new_size
        factor = new_size / height
        target = (int(width * factor), int(new_size))
    return img.resize(target, Image.LANCZOS)
def get_center(img):
    """Return the horizontal midpoint (in whole pixels) of `img`."""
    half_width = img.size[0] / 2
    return int(half_width)
def shift_to(img_center, target_position):
    """Return the signed x offset that moves `img_center` onto `target_position`."""
    offset = target_position - img_center
    print('shift', offset)
    return offset
def predict(image):
    """Run the global Mask R-CNN `model` on a single PIL `image` and return its detections."""
    as_array = keras.preprocessing.image.img_to_array(image)
    return model.detect([as_array], verbose=1)
def get_object_center(image):
    """Return the x coordinate of the centre of the largest detected object.

    Runs `predict` on `image` and scans the returned boxes (each `rois` row
    is [y1, x1, y2, x2]); the centre of the largest-area box wins. Falls back
    to the horizontal midpoint of the image when nothing is detected. The
    original unpacked `image.size` with width and height swapped (PIL's
    `size` is (width, height)), so the fallback centred on height/2.
    """
    results = predict(image)
    rois = results[0]['rois']
    # PIL convention: size == (width, height)
    img_width, img_height = image.size
    center = img_width / 2  # fallback when no objects are detected
    max_area = 0
    for roi in rois:
        start_y, start_x, end_y, end_x = roi[0], roi[1], roi[2], roi[3]
        box_height = end_y - start_y
        box_width = end_x - start_x
        area = box_height * box_width
        if area < max_area: continue
        max_area = area
        center = start_x + box_width / 2
    return int(center)
def to_RGB(image):
    """Return an RGB version of `image`, flattening alpha onto white.

    Non-RGB inputs are composited onto a white background, saved to
    'tmp.jpg' at quality 80, and reopened. NOTE(review): the save/reopen
    introduces JPEG compression and leaves 'tmp.jpg' behind; downstream
    pixel values depend on this round-trip, so it is preserved as-is.
    Assumes non-RGB inputs have an alpha band at index 3 (e.g. RGBA) -
    mode 'P' or 'L' inputs would fail the split - TODO confirm inputs.
    """
    if image.mode == 'RGB':return image
    image.load() # required for png.split()
    background = Image.new("RGB", image.size, (255, 255, 255))
    background.paste(image, mask=image.split()[3]) # 3 is the alpha channel
    file_name = 'tmp.jpg'
    background.save(file_name, 'JPEG', quality=80)
    img = Image.open(file_name)
    return img
# + [markdown] id="frOg8h2euTnk" colab_type="text"
# # 左右の画像加工
# + id="3RChLsGGve_X" colab_type="code" colab={}
def left_right_image(left_img_path,right_img_path, canvas_size = 500):
    """Compose two photos side by side on a square `canvas_size` canvas.

    Each image is resized to the canvas height, the horizontal centre of its
    main object is found with `get_object_center`, and a half-canvas-wide
    vertical strip around that object is pasted so the left object lands at
    1/4 and the right object at 3/4 of the canvas width. Returns the
    composited PIL image.
    """
    # load image
    iml = Image.open(left_img_path)
    imr = Image.open(right_img_path)
    iml = to_RGB(iml)
    imr = to_RGB(imr)
    # make back ground
    bg = Image.new("RGB",(canvas_size,canvas_size),(250,250,250))
    # target position
    left_target = int(canvas_size / 4 * 1)
    right_target = int(canvas_size / 4 * 3)
    # resize image
    iml2 = resize(iml,canvas_size)
    imr2 = resize(imr,canvas_size)
    # get object centor
    iml2_center = get_object_center(iml2)
    imr2_center = get_object_center(imr2)
    # print('left_img_object_centor',iml2_center)
    # print('right_img_object_centor',imr2_center)
    # make mask
    mask_width = canvas_size / 2
    # NOTE(review): the left mask extends to the canvas edge while the right
    # mask is clipped to `mask_width` around its object centre - confirm the
    # asymmetry is intentional
    mask1 = Image.new("L",iml2.size,0)
    draw1 = ImageDraw.Draw(mask1)
    draw1.rectangle((iml2_center - mask_width / 2, 0, canvas_size, canvas_size), fill=255)
    mask2 = Image.new("L",imr2.size,0)
    draw2 = ImageDraw.Draw(mask2)
    draw2.rectangle((imr2_center - mask_width / 2 ,0, imr2_center+ mask_width / 2, canvas_size), fill=255)
    # composite image
    bg.paste(iml2,(shift_to(iml2_center,left_target),0),mask1)
    bg.paste(imr2,(shift_to(imr2_center,right_target),0),mask2)
    return bg
# + id="VzOWidk0hoSQ" colab_type="code" outputId="85e2d78a-0ea7-4597-efe2-3617c8dab3cf" colab={"base_uri": "https://localhost:8080/", "height": 2819}
IMG_ROOT = '/content/gdrive/My Drive/data/fashon_item_images'
EDITED_IMG_ROOT = '/content/gdrive/My Drive/data/edited_images'
images = os.listdir(IMG_ROOT)
for i in range(1,11):#len(images)-1
print('i',i)
left_img_path = os.path.join(IMG_ROOT, images[i])
right_img_path = os.path.join(IMG_ROOT, images[i+1])
print('left_img_path',left_img_path)
print('right_img_path',right_img_path)
bg = left_right_image(left_img_path,right_img_path,500)
plt.imshow(bg)
file_name = f'left_right_{i}.png'
save_file_path = os.path.join(EDITED_IMG_ROOT,file_name)
bg.save(save_file_path)
saved_image = Image.open(save_file_path)
saved_image.show()
# + [markdown] id="d13GZ4RLuXIu" colab_type="text"
# # 3枚 画像加工
# + id="pgmHDBbwi-TJ" colab_type="code" colab={}
def left_right_up_down_image(left_img_path,right_up_img_path,right_down_img_path, canvas_size = 500):
    """Compose three photos: one full-height on the left, two stacked on the right.

    The left image is resized to the canvas height and centred at 3/10 of the
    width; the two right images are resized to half the canvas and centred at
    8/10 of the width, the second one pasted in the lower half. Object
    centres come from `get_object_center`. Returns the composited PIL image.
    """
    # make back ground
    bg = Image.new("RGB",(canvas_size,canvas_size),(250,250,250))
    # target position
    left_target = int(canvas_size / 10 * 3)
    right_target = int(canvas_size /10 * 8)
    # load image
    im1 = Image.open(left_img_path)
    im2 = Image.open(right_up_img_path)
    im3 = Image.open(right_down_img_path)
    im1 = to_RGB(im1)
    im2 = to_RGB(im2)
    im3 = to_RGB(im3)
    # resize image
    im1 = resize(im1,canvas_size)
    im2 = resize_to_target(im2,canvas_size/20*10)
    im3 = resize_to_target(im3,canvas_size/20*10)
    # get object centor
    im1_center = get_object_center(im1)
    im2_center = get_object_center(im2)
    im3_center = get_object_center(im3)
    # print('left_img_object_centor',iml2_center)
    # print('right_img_object_centor',imr2_center)
    # make mask
    mask_width = canvas_size / 2
    mask1 = Image.new("L",im1.size,0)
    draw1 = ImageDraw.Draw(mask1)
    draw1.rectangle((im1_center - mask_width / 10 * 6, 0, im1.size[0], im1.size[1]), fill=255)
    mask2 = Image.new("L",im2.size,0)
    draw2 = ImageDraw.Draw(mask2)
    draw2.rectangle((im2_center - mask_width / 10 * 4 ,0, im2.size[0], canvas_size/2), fill=255)
    mask3 = Image.new("L",im3.size,0)
    draw3 = ImageDraw.Draw(mask3)
    draw3.rectangle((im3_center - mask_width / 10 * 4 ,0, im3.size[0], im3.size[1]), fill=255)
    # composite image
    bg.paste(im1,(shift_to(im1_center,left_target),0),mask1)
    bg.paste(im2,(shift_to(im2_center,right_target),0),mask2)
    bg.paste(im3,(shift_to(im3_center,right_target),int(canvas_size/2)),mask3)
    return bg
# + id="jvu_dWA9xqRb" colab_type="code" outputId="26cd4532-849e-48cf-8910-88d8cf80fef7" colab={"base_uri": "https://localhost:8080/", "height": 24976}
images = os.listdir(IMG_ROOT)
for i in range(40,len(images)-2):
left_img_path = os.path.join(IMG_ROOT, images[i])
right_up_img_path = os.path.join(IMG_ROOT, images[i+1])
right_down_img_path = os.path.join(IMG_ROOT, images[i+2])
print('left_img_path',left_img_path)
print('right_up_img_path',right_up_img_path)
print('right_down_img_path',right_down_img_path)
bg = left_right_up_down_image(left_img_path,right_up_img_path,right_down_img_path, canvas_size = 500)
file_name = f'3layout_{i}.png'
save_file_path = os.path.join(EDITED_IMG_ROOT,file_name)
bg.save(save_file_path)
saved_image = Image.open(save_file_path)
saved_image
# + [markdown] id="XhObqilBHqWS" colab_type="text"
# # 横3枚の画像加工
# + id="j8znOYyUHqHS" colab_type="code" colab={}
def left_middle_right_image(left_img_path,middle_img_path,right_img_path, canvas_size = 500):
    """Compose three photos in equal vertical thirds of a square canvas.

    Each image is resized to the canvas height; a third-canvas-wide strip
    around each object centre (from `get_object_center`) is pasted so the
    objects land at 1/6, 3/6 and 5/6 of the canvas width. Returns the
    composited PIL image.
    """
    # make back ground
    bg = Image.new("RGB",(canvas_size,canvas_size),(250,250,250))
    # load image
    im1 = Image.open(left_img_path)
    im2 = Image.open(middle_img_path)
    im3 = Image.open(right_img_path)
    im1 = to_RGB(im1)
    im2 = to_RGB(im2)
    im3 = to_RGB(im3)
    # resize image
    rim1 = resize(im1,canvas_size)
    rim2 = resize(im2,canvas_size)
    rim3 = resize(im3,canvas_size)
    # get object centor
    rim1_center = get_object_center(rim1)
    rim2_center = get_object_center(rim2)
    rim3_center = get_object_center(rim3)
    # print('left_img_object_centor',iml2_center)
    # print('right_img_object_centor',imr2_center)
    # make mask
    mask_width = canvas_size / 3
    # NOTE(review): the left mask extends to the canvas edge; the other two
    # are clipped to `mask_width` strips - confirm intentional
    mask1 = Image.new("L",rim1.size,0)
    draw1 = ImageDraw.Draw(mask1)
    draw1.rectangle((rim1_center - mask_width / 2, 0, canvas_size, canvas_size), fill=255)
    mask2 = Image.new("L",rim2.size,0)
    draw2 = ImageDraw.Draw(mask2)
    draw2.rectangle((rim2_center - mask_width / 2 ,0, rim2_center+ mask_width / 2, canvas_size), fill=255)
    mask3 = Image.new("L",rim3.size,0)
    draw3 = ImageDraw.Draw(mask3)
    draw3.rectangle((rim3_center - mask_width / 2 ,0, rim3_center+ mask_width / 2, canvas_size), fill=255)
    # target position
    left_target = int(canvas_size / 6 * 1)
    middle_target = int(canvas_size / 6 * 3)
    right_target = int(canvas_size / 6 * 5)
    # composite image
    bg.paste(rim1,(shift_to(rim1_center,left_target),0),mask1)
    bg.paste(rim2,(shift_to(rim2_center,middle_target),0),mask2)
    bg.paste(rim3,(shift_to(rim3_center,right_target),0),mask3)
    return bg
# + id="ZNYkLwMSLD9R" colab_type="code" colab={}
images = os.listdir(IMG_ROOT)
for i in range(40,len(images)-2):
left_img_path = os.path.join(IMG_ROOT, images[i])
middle_img_path = os.path.join(IMG_ROOT, images[i+1])
right_img_path = os.path.join(IMG_ROOT, images[i+2])
print('left_img_path',left_img_path)
print('middle_img_path',middle_img_path)
print('right_img_path',right_img_path)
bg = left_middle_right_image(left_img_path,middle_img_path,right_img_path, canvas_size = 500)
file_name = f'3line_{i}.png'
save_file_path = os.path.join(EDITED_IMG_ROOT,file_name)
bg.save(save_file_path)
saved_image = Image.open(save_file_path)
saved_image
# + [markdown] id="s_d44z8fLg6X" colab_type="text"
# # 左右1:2
# + id="MMLGs8-tLgdN" colab_type="code" colab={}
def split1_2(left_img_path,right_img_path, canvas_size = 500):
    """Compose two photos in a 1:2 layout, the left overlapping the right.

    The right image is pasted full, its object centred at 4/6 of the canvas
    width; the left image is pasted on top, masked to the strip up to
    `mask_edge`, its object centred at 1/6. Returns the composited PIL
    image. (An unused `mask_width` local from the original was removed.)
    """
    # make back ground
    bg = Image.new("RGB",(canvas_size,canvas_size),(250,250,250))
    # load image
    im1 = Image.open(left_img_path)
    im3 = Image.open(right_img_path)
    im1 = to_RGB(im1)
    im3 = to_RGB(im3)
    # resize image
    rim1 = resize(im1,canvas_size)
    rim3 = resize(im3,canvas_size)
    # get object centor
    rim1_center = get_object_center(rim1)
    rim3_center = get_object_center(rim3)
    # target position
    left_target = int(canvas_size / 6 * 1)
    right_target = int(canvas_size / 6 * 4)
    shift1 = shift_to(rim1_center,left_target)
    shift3 = shift_to(rim3_center,right_target)
    # right edge of the strip kept from the left image
    mask_edge = rim1_center + left_target
    print('mask edge',mask_edge)
    print('1/3',canvas_size / 3)
    # make mask: the left image only shows up to mask_edge
    mask1 = Image.new("L",rim1.size,0)
    draw1 = ImageDraw.Draw(mask1)
    draw1.rectangle((0, 0, mask_edge, canvas_size), fill=255)
    # composite image: right first, then left on top
    bg.paste(rim3,(shift3,0))
    bg.paste(rim1,(shift1,0),mask1)
    return bg
# + id="FLVBheDbLKIY" colab_type="code" colab={}
# Run the 1:2 layout over consecutive image pairs and save the results.
# (A dead `i = 11` assignment was removed; the loop reassigns `i` immediately.)
images = os.listdir(IMG_ROOT)
for i in range(0,len(images)-1):
    left_img_path = os.path.join(IMG_ROOT, images[i])
    right_img_path = os.path.join(IMG_ROOT, images[i+1])
    print('left_img_path',left_img_path)
    print('right_img_path',right_img_path)
    bg = split1_2(left_img_path,right_img_path, canvas_size = 500)
    file_name = f'split1_2_{i}.png'
    save_file_path = os.path.join(EDITED_IMG_ROOT,file_name)
    bg.save(save_file_path)
    saved_image = Image.open(save_file_path)
    saved_image
# + id="5prBQL_XMOGx" colab_type="code" colab={}
| ImageMaker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
from SmartAnno.utils.ReviewRBInit import ReviewRBInit
from SmartAnno.utils.ReviewRBLoop import ReviewRBLoop
from SmartAnno.utils.ReviewMLInit import ReviewMLInit
from SmartAnno.utils.ReviewMLLoop import ReviewMLLoop
from SmartAnno.gui.Workflow import Workflow
from sqlalchemy_dao import Dao
from SmartAnno.db.ORMs import Document
from SmartAnno.utils.IntroStep import IntroStep
from SmartAnno.gui.PreviousNextWidgets import PreviousNextHTML
import sqlalchemy_dao
import os
from conf.ConfigReader import ConfigReader
from SmartAnno.utils.KeywordsEmbeddingExtender import KeywordsEmbeddingExtender
from SmartAnno.utils.KeywordsEmbeddingExtenderSetup import KeywordsEmbeddingExtenderSetup
from SmartAnno.utils.TreeSet import TreeSet
import logging
logging.getLogger().setLevel(logging.DEBUG)
ConfigReader('../conf/smartanno_conf.json')
from SmartAnno.models.GloveModel import GloveModel
from conf.ConfigReader import ConfigReader
from threading import Thread
def prepareGloveModel():
    """Load the GloVe embedding model configured in smartanno_conf.json.

    Intended to run on a background thread; the loaded model is published
    through the `GloveModel.glove_model` class attribute.
    """
    ConfigReader('../conf/smartanno_conf.json')
    glove_path = ConfigReader.getValue('glove/model_path')
    glove_vocab = ConfigReader.getValue('glove/vocab')
    glove_vector = ConfigReader.getValue('glove/vector')
    GloveModel(word2vec_file=glove_path, vocab=glove_vocab, vect=glove_vector)
# NOTE(review): read at definition time, likely before the thread finishes
# loading - may be None here; confirm intended
gm=GloveModel.glove_model
thread_gm = Thread(target=prepareGloveModel)
thread_gm.start()
wf=Workflow()
wf.filters={'TypeA':TreeSet(['English']),'TypeB':TreeSet(['Spanish'])}
wf.append(KeywordsEmbeddingExtenderSetup(name='w_e_extender_setup'))
wf.append(KeywordsEmbeddingExtender(name='w_e_extender', max_query=40))
wf.append(KeywordsEmbeddingExtenderSetup(name='w_e_extender_setup'))
wf.start()
# -
wf.filters
type(extending[0])
# +
import logging
import sqlalchemy_dao
from sqlalchemy_dao import Dao
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.db.ORMs import Filter
from SmartAnno.gui.Workflow import Workflow
from SmartAnno.utils.AnnotationTypeDef import AnnotationTypeDef
from SmartAnno.utils.IntroStep import IntroStep
from SmartAnno.utils.KeywordsFiltering import KeywordsFiltering
from SmartAnno.utils.KeywordsEmbeddingExtender import KeywordsEmbeddingExtender
from SmartAnno.utils.KeywordsEmbeddingExtenderSetup import KeywordsEmbeddingExtenderSetup
logging.getLogger().setLevel(logging.DEBUG)
ConfigReader('../conf/smartanno_conf.json')
from SmartAnno.models.GloveModel import GloveModel
from threading import Thread
def prepareGloveModel():
    """Load the GloVe embedding model configured in smartanno_conf.json.

    Intended to run on a background thread; the loaded model is published
    through the `GloveModel.glove_model` class attribute.
    """
    ConfigReader('../conf/smartanno_conf.json')
    glove_path = ConfigReader.getValue('glove/model_path')
    glove_vocab = ConfigReader.getValue('glove/vocab')
    glove_vector = ConfigReader.getValue('glove/vector')
    GloveModel(word2vec_file=glove_path, vocab=glove_vocab, vect=glove_vector)
# NOTE(review): read at definition time, likely before the thread finishes
# loading - may be None here; confirm intended
gm=GloveModel.glove_model
thread_gm = Thread(target=prepareGloveModel)
thread_gm.start()
wf = Workflow(config_file=ConfigReader.config_file)
wf.api_key = ConfigReader.getValue("api_key")
wf.dao = Dao('sqlite+pysqlite:///../data/test.sqlite', sqlalchemy_dao.POOL_DISABLED)
wf.task_name = 'language'
# NOTE(review): the original implicit string concatenation both ran two words
# together ("you" + "have") and left an unescaped apostrophe in "won't"
# inside a single-quoted literal, which is a syntax error.
wf.append(AnnotationTypeDef(
    '<h3>Annotation types:</h3><p>List all the types you want to identify below. Each type per line.<br/>If you '
    "have too many types, try set up them separately, so that you won't need to choose from a long list "
    'for each sample. </p>', name='types'))
wf.append(KeywordsFiltering(
name='keywords'))
wf.append(KeywordsEmbeddingExtenderSetup(name='w_e_extender_setup'))
wf.append(KeywordsEmbeddingExtender(name='w_e_extender', max_query=40))
wf.start()
wf.steps[0].complete()
with wf.dao.create_session() as session:
records = session.query(Filter).filter(Filter.task_id == wf.task_id) \
.filter(Filter.type_name == 'Eng')
record = records.first()
record.keyword = 'Eng\nEnglish'
wf.steps[1].complete()
# -
| SmartAnno/test/TestKeywordsEmbeddingExtender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ur8xi4C7S06n"
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="JAPoU8Sm5E6e"
# <table align="left">
#
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/pytorch_lightning_custom_container_training.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/pytorch_lightning_custom_container_training.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="tvgnzT1CKxrO"
# ## Overview
#
# This tutorial demonstrates how to use the Vertex AI SDK for Python to train a ResNet model using custom containers and PyTorch Lightning. The model training code is from the CIFAR-10 training example on PyTorch Lightning's documentation page:
# https://pytorch-lightning.readthedocs.io/en/stable/notebooks/lightning_examples/cifar10-baseline.html
#
# Two training approaches are taken: 1) Multiple GPU training on a single machine 2) Multiple machine training with a single GPU on each
#
# ### Dataset
#
# Here's the description from the website: The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
# https://www.cs.toronto.edu/~kriz/cifar.html
#
# The dataset will be loaded using the Lightning Bolts datamodules
#
# ### Objective
#
# In this notebook, you learn how to take an existing example of a model trained using PyTorch Lighting, and use Vertex AI to distribute training across GPUs and multiple machines
#
# * Install and import libraries to test model training locally
# * Initialize the Vertex AI SDK
# * Create a custom container for training
# * Create a Vertex AI TensorBoard
# * Modify the code for pass in arguments, log to the TensorBoard, and save the model to Cloud Storage
# * Run a Vertex AI training job on a single machine with GPUs
# * Run a Vertex AI training job on multiple machines with single GPUs attached
#
#
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * Cloud Storage
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="ze4-nDLfK4pw"
# ### Set up your local development environment
#
# **If you are using Colab or Google Cloud Notebooks**, your environment already meets
# all the requirements to run this notebook. You can skip this step.
# + [markdown] id="gCuSR8GkAgzl"
# **Otherwise**, make sure your environment meets this notebook's requirements.
# You need the following:
#
# * The Google Cloud SDK
# * Git
# * Python 3
# * virtualenv
# * Jupyter notebook running in a virtual environment with Python 3
#
# The Google Cloud guide to [Setting up a Python development
# environment](https://cloud.google.com/python/setup) and the [Jupyter
# installation guide](https://jupyter.org/install) provide detailed instructions
# for meeting these requirements. The following steps provide a condensed set of
# instructions:
#
# 1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
#
# 1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
#
# 1. [Install
# virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
# and create a virtual environment that uses Python 3. Activate the virtual environment.
#
# 1. To install Jupyter, run `pip3 install jupyter` on the
# command-line in a terminal shell.
#
# 1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
#
# 1. Open this notebook in the Jupyter Notebook Dashboard.
# + [markdown] id="i7EUnXsZhAGF"
# ### Install additional packages
#
# Install additional package dependencies not installed in your notebook environment
# + id="2b4ef9b72d43"
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
# + id="wyy5Lbnzg5fi"
# ! pip3 install {USER_FLAG} --upgrade "torch>=1.6, <1.9"
# ! pip3 install {USER_FLAG} --upgrade "lightning-bolts"
# ! pip3 install {USER_FLAG} --upgrade git+https://github.com/PyTorchLightning/pytorch-lightning
# ! pip3 install {USER_FLAG} --upgrade "torchmetrics>=0.3"
# ! pip3 install {USER_FLAG} --upgrade "torchvision"
# ! pip3 install {USER_FLAG} --upgrade google-cloud-aiplatform
# ! pip3 install {USER_FLAG} --upgrade ipywidgets
# + [markdown] id="hhq5zEbGg0XX"
# ### Restart the kernel
#
# After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="EzrelQZ22IZj"
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
# + [markdown] id="lWEdiXsJg0XY"
# ## Before you begin
#
# ### Select a GPU runtime
#
# **Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select "Runtime --> Change runtime type > GPU"**
# + [markdown] id="BF1j6f9HApxa"
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
#
# 1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com). {TODO: Update the APIs needed for your tutorial. Edit the API names, and update the link to append the API IDs, separating each one with a comma. For example, container.googleapis.com,cloudbuild.googleapis.com}
#
# 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
#
# 1. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + [markdown] id="WReHDGG5g0XY"
# #### Set your project ID
#
# **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
# + id="oM1iC_MfAts1"
PROJECT_ID = ""
import os
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
    # NOTE(review): `shell_output` is created by the `!gcloud` cell magic below
    # when this runs as a notebook; as a plain .py script the name is undefined.
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)
# + [markdown] id="qJYoRfYng0XZ"
# Otherwise, set your project ID here.
# + id="riG_qUokg0XZ"
# Fall back to a placeholder project ID if gcloud did not supply one.
if PROJECT_ID == "" or PROJECT_ID is None:
    PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + id="set_service_account"
SERVICE_ACCOUNT = "[your-service-account]"  # @param {type:"string"}
# + id="575f7c610f83"
# Derive the service account from `gcloud auth list` when not set explicitly.
if (
    SERVICE_ACCOUNT == ""
    or SERVICE_ACCOUNT is None
    or SERVICE_ACCOUNT == "[your-service-account]"
):
    # NOTE(review): `shell_output` comes from the `!gcloud` magic below when run
    # as a notebook; index 2 assumes the account name is on the third line of
    # the command output — confirm against your gcloud version.
    # shell_output = !gcloud auth list 2>/dev/null
    SERVICE_ACCOUNT = shell_output[2].strip().replace("*", "").replace(" ", "")
    print("Service Account:", SERVICE_ACCOUNT)
# + [markdown] id="06571eb4063b"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
# + id="697568e92bd6"
from datetime import datetime
# Timestamp suffix used to keep resource names unique across sessions.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="dr--iN2kAylZ"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebooks**, your environment is already
# authenticated. Skip this step.
# + [markdown] id="sBCra4QMA2wR"
# **If you are using Colab**, run the cell below and follow the instructions
# when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# 1. In the Cloud Console, go to the [**Create service account key**
# page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
#
# 2. Click **Create service account**.
#
# 3. In the **Service account name** field, enter a name, and
# click **Create**.
#
# 4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"
# into the filter box, and select
# **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# 5. Click *Create*. A JSON file that contains your key downloads to your
# local environment.
#
# 6. Enter the path to your service account key as the
# `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
# + id="PyQmSRbKA8r-"
import os
import sys

# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
        # Fix: the branch body is only the `%env` cell magic above, which
        # jupytext stores as a comment — without `pass` the .py form of this
        # notebook is a SyntaxError (empty elif block).
        pass
# + [markdown] id="zgPO1eR3CYjk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
#
# When you submit a training job using the Cloud SDK, you upload a Python package
# containing your training code to a Cloud Storage bucket. Vertex AI runs
# the code from this package. In this tutorial, Vertex AI also saves the
# trained model that results from your job in the same bucket. Using this model artifact, you can then
# create Vertex AI model and endpoint resources in order to serve
# online predictions.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
#
# You may also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. We suggest that you [choose a region where Vertex AI services are
# available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions).
# + id="MzGDU7TWdts_"
BUCKET_URI = "gs://[your-bucket-name]"  # @param {type:"string"}
REGION = "[your-region]"  # @param {type:"string"}
# + id="cf221059d072"
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
    # Fix: separate the project ID from the "aip" suffix with a hyphen so the
    # generated bucket name reads "<project>-aip-<timestamp>" instead of the
    # fused "<project>aip-<timestamp>".
    BUCKET_URI = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
if REGION == "[your-region]":
    REGION = "us-central1"
# + [markdown] id="-EcIXiGsCePi"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="NIq7R4HZCfIc"
# ! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI
# + [markdown] id="ucvCsknMCims"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="vhOb7YnwClBb"
# ! gsutil ls -al $BUCKET_URI
# + [markdown] id="XoEqT2Y4DJmf"
# ### Import libraries and define constants
# + id="pRUOFELefqf1"
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import OneCycleLR
from torchmetrics.functional import accuracy
# Fix RNG seeds (python, numpy, torch) for reproducible runs.
seed_everything(7)
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
# Cap at a single GPU locally; 0 when no CUDA device is present.
AVAIL_GPUS = min(1, torch.cuda.device_count())
BATCH_SIZE = 256 if AVAIL_GPUS else 64
NUM_WORKERS = int(os.cpu_count() / 2)
print(PATH_DATASETS)
print(AVAIL_GPUS)
print(BATCH_SIZE)
print(NUM_WORKERS)
# + [markdown] id="XoEqT2Y4DJmf"
# ### Define training functions for local testing
# + id="bca47ea0e3c3"
# Standard CIFAR-10 augmentation: padded random crop plus horizontal flip.
train_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# Evaluation pipeline: normalization only, no augmentation.
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# Lightning datamodule that downloads/splits CIFAR-10 and builds the loaders.
cifar10_dm = CIFAR10DataModule(
    data_dir=PATH_DATASETS,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    train_transforms=train_transforms,
    test_transforms=test_transforms,
    val_transforms=test_transforms,
)
def create_model():
    """Build a ResNet-18 for 10-class CIFAR images, adapting the ImageNet stem."""
    net = torchvision.models.resnet18(pretrained=False, num_classes=10)
    # 32x32 inputs: swap the 7x7/stride-2 stem for a 3x3/stride-1 conv and
    # drop the initial max-pool so early feature maps are not over-shrunk.
    net.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    net.maxpool = nn.Identity()
    return net
class LitResnet(LightningModule):
    """LightningModule wrapping the CIFAR-10 ResNet-18 classifier.

    Trains with SGD + OneCycleLR and logs NLL loss / accuracy per stage.
    """

    def __init__(self, lr=0.05):
        super().__init__()
        self.save_hyperparameters()
        self.model = create_model()

    def forward(self, x):
        # Log-probabilities over the 10 classes.
        return F.log_softmax(self.model(x), dim=1)

    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        loss = F.nll_loss(self(inputs), targets)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        # Shared val/test logic: loss + accuracy, logged under the stage prefix.
        inputs, targets = batch
        logits = self(inputs)
        loss = F.nll_loss(logits, targets)
        acc = accuracy(torch.argmax(logits, dim=1), targets)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.lr,
            momentum=0.9,
            weight_decay=5e-4,
        )
        # One scheduler step per optimizer step (not per epoch).
        lr_config = {
            "scheduler": OneCycleLR(
                optimizer,
                0.1,
                epochs=self.trainer.max_epochs,
                steps_per_epoch=45000 // BATCH_SIZE,
            ),
            "interval": "step",
        }
        return {"optimizer": optimizer, "lr_scheduler": lr_config}
# + [markdown] id="71da4176a4cc"
# #### Train the model locally
# + id="af65cc43993c"
# Smoke-test the model locally for 5 epochs before containerizing it.
model = LitResnet(lr=0.05)
model.datamodule = cifar10_dm
trainer = Trainer(
    progress_bar_refresh_rate=10,
    max_epochs=5,
    gpus=AVAIL_GPUS,
    logger=TensorBoardLogger("lightning_logs/", name="resnet"),
    callbacks=[LearningRateMonitor(logging_interval="step")],
    strategy="dp",
)
trainer.fit(model, cifar10_dm)
trainer.test(model, datamodule=cifar10_dm)
# + [markdown] id="4eb55916219c"
# ## Vertex AI Training using the Vertex AI SDK and a custom container
# + [markdown] id="e61fc5c1b9e0"
# ### Build the custom container
# + [markdown] id="be10c827e494"
# #### Run these steps once to setup artifact registry and authorize docker to use it
# + id="667b03930fc3"
# ! gcloud config set project $PROJECT_ID
# ! gcloud services enable artifactregistry.googleapis.com
# ! sudo usermod -a -G docker ${USER}
# ! gcloud auth configure-docker us-central1-docker.pkg.dev --quiet
# + id="2647eb53957f"
# Artifact Registry repository that will hold the training container image.
REPOSITORY = "gpu-training-repository"
# + id="37973b9d3ca3"
# ! gcloud artifacts repositories create $REPOSITORY --repository-format=docker \
# --location=$REGION --description="Vertex GPU training repository"
# + [markdown] id="b2cfd36ceacd"
# #### Make a trainer directory
# + id="05fa93664880"
import os

# Create the package directory for the training code. exist_ok makes the cell
# safe to re-run: plain os.mkdir raised FileExistsError the second time.
os.makedirs("trainer", exist_ok=True)
# + [markdown] id="27f1c07650b0"
# #### Build the container
# This code extends the original example and adds argument parsing, TensorBoard logging, ability to choose the training strategy, and model saving to Cloud Storage
# + id="89f63d109973"
# %%writefile trainer/task.py
import os
# Arg parsing and shutil for folder creation
import argparse
import shutil

# Fix: the original repeated the entire third-party import block twice
# (byte-for-byte); the duplicate copy is removed here.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy

# Fix RNG seeds for reproducible runs.
seed_everything(7)
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
# Cap at a single GPU per process; 0 when no CUDA device is present.
AVAIL_GPUS = min(1, torch.cuda.device_count())
BATCH_SIZE = 256 if AVAIL_GPUS else 64
NUM_WORKERS = int(os.cpu_count() / 2)
print(PATH_DATASETS)
print(AVAIL_GPUS)
print(BATCH_SIZE)
print(NUM_WORKERS)
# Standard CIFAR-10 augmentation: padded random crop plus horizontal flip.
train_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# Evaluation pipeline: normalization only, no augmentation.
test_transforms = torchvision.transforms.Compose(
    [
        torchvision.transforms.ToTensor(),
        cifar10_normalization(),
    ]
)
# Lightning datamodule that downloads/splits CIFAR-10 and builds the loaders.
cifar10_dm = CIFAR10DataModule(
    data_dir=PATH_DATASETS,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    train_transforms=train_transforms,
    test_transforms=test_transforms,
    val_transforms=test_transforms,
)
# Added code to read args
def parse_args(argv=None):
    """Parse command-line flags for the training job.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``.
            The parameter is new and backward-compatible — it exists so the
            parser can be exercised in unit tests without touching sys.argv.

    Returns:
        argparse.Namespace with epochs, distribute, num_nodes, model_dir,
        tensorboard_log_dir, and checkpoint_dir attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', dest='epochs',
                        default=10, type=int,
                        help='Number of epochs.')
    parser.add_argument('--distribute', dest='distribute', type=str, default='dp',
                        help='Distributed training strategy.')
    parser.add_argument('--num-nodes', dest='num_nodes',
                        default=1, type=int,
                        help='Number of nodes')
    # The AIP_* environment variables are injected by Vertex AI on the job and
    # serve as defaults when the flags are not passed explicitly.
    parser.add_argument(
        '--model-dir', dest='model_dir', default=os.getenv('AIP_MODEL_DIR'), type=str,
        help='a Cloud Storage URI of a directory intended for saving model artifacts')
    parser.add_argument(
        '--tensorboard-log-dir', dest='tensorboard_log_dir', default=os.getenv('AIP_TENSORBOARD_LOG_DIR'), type=str,
        help='a Cloud Storage URI of a directory intended for saving TensorBoard')
    parser.add_argument(
        '--checkpoint-dir', dest='checkpoint_dir', default=os.getenv('AIP_CHECKPOINT_DIR'), type=str,
        help='a Cloud Storage URI of a directory intended for saving checkpoints')
    return parser.parse_args(argv)
# Cunction to make model directory if it doesn't exist
def makedirs(model_dir):
if os.path.exists(model_dir) and os.path.isdir(model_dir):
shutil.rmtree(model_dir)
os.makedirs(model_dir)
return
def create_model():
    """Return a CIFAR-10 ResNet-18 with a stem suited to 32x32 images."""
    resnet = torchvision.models.resnet18(pretrained=False, num_classes=10)
    # Replace the ImageNet 7x7/stride-2 stem and remove the initial max-pool
    # so the small inputs keep enough spatial resolution.
    resnet.conv1 = nn.Conv2d(
        3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False
    )
    resnet.maxpool = nn.Identity()
    return resnet
class LitResnet(LightningModule):
    """LightningModule wrapping the CIFAR-10 ResNet-18 classifier.

    Trains with SGD + OneCycleLR and logs loss/accuracy for the train,
    validation, and test stages.
    """

    def __init__(self, lr=0.05):
        super().__init__()
        self.save_hyperparameters()
        self.model = create_model()

    # TensorBoard logging at epoch end
    def training_epoch_end(self, outputs):
        # Fix: the original computed avg_loss and built a log dict but then
        # discarded both (the dict was assigned and never used), so nothing
        # reached TensorBoard. Log the epoch-average training loss explicitly.
        avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
        self.log('avg_train_loss', avg_loss)

    def forward(self, x):
        # Log-probabilities over the 10 classes.
        out = self.model(x)
        return F.log_softmax(out, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        # Shared val/test logic: loss + accuracy, logged under the stage prefix.
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.lr,
            momentum=0.9,
            weight_decay=5e-4,
        )
        # 45,000 training images remain after the CIFAR-10 validation split.
        steps_per_epoch = 45000 // BATCH_SIZE
        scheduler_dict = {
            "scheduler": OneCycleLR(
                optimizer,
                0.1,
                epochs=self.trainer.max_epochs,
                steps_per_epoch=steps_per_epoch,
            ),
            # Step the scheduler every optimizer step, not every epoch.
            "interval": "step",
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
def main():
    """Entry point: parse args, resolve output dirs, train, and save the model."""
    # Parse args
    args = parse_args()
    print (f"Args={args}")
    # NOTE(review): the label below says "model directory" but prints epochs.
    print (f"model directory={args.epochs}")
    print (f"model directory={args.model_dir}")
    print (f"distribute strategy={args.distribute}")
    # model, TensorBoard, and checkpoint directories set
    local_model_dir = './tmp/model'
    local_tensorboard_log_dir = './tmp/logs'
    local_checkpoint_dir = './tmp/checkpoints'
    # Fall back to local paths when the flags / AIP_* env vars are absent.
    model_dir = args.model_dir or local_model_dir
    tensorboard_log_dir = args.tensorboard_log_dir or local_tensorboard_log_dir
    checkpoint_dir = args.checkpoint_dir or local_checkpoint_dir
    print ("Model directory" + model_dir)
    print ("TensorBoard directory" + tensorboard_log_dir)
    print ("Checkpoint directory" + checkpoint_dir)
    # Rewrite gs:// URIs to the /gcs/ gcsfuse mount so plain filesystem APIs
    # can write directly to Cloud Storage from inside the Vertex job.
    gs_prefix = 'gs://'
    gcsfuse_prefix = '/gcs/'
    if model_dir and model_dir.startswith(gs_prefix):
        model_dir = model_dir.replace(gs_prefix, gcsfuse_prefix)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
    if tensorboard_log_dir and tensorboard_log_dir.startswith(gs_prefix):
        tensorboard_log_dir = tensorboard_log_dir.replace(gs_prefix, gcsfuse_prefix)
        if not os.path.isdir(tensorboard_log_dir):
            os.makedirs(tensorboard_log_dir)
    if checkpoint_dir and checkpoint_dir.startswith(gs_prefix):
        checkpoint_dir = checkpoint_dir.replace(gs_prefix, gcsfuse_prefix)
        if not os.path.isdir(checkpoint_dir):
            os.makedirs(checkpoint_dir)
    model = LitResnet(lr=0.05)
    model.datamodule = cifar10_dm
    trainer = Trainer(
        progress_bar_refresh_rate=10,
        gpus=AVAIL_GPUS,
        logger=TensorBoardLogger(tensorboard_log_dir, "resnet"),
        callbacks=[LearningRateMonitor(logging_interval="step")],
        # Changes to use args, change default checkpoint dir, and set number of nodes
        max_epochs=args.epochs,
        strategy=args.distribute,
        default_root_dir=checkpoint_dir,
        num_nodes=args.num_nodes,
    )
    trainer.fit(model, cifar10_dm)
    trainer.test(model, datamodule=cifar10_dm)
    # Save model step
    model_name = "pylightning_resnet_state_dict.pth"
    model_save_path = os.path.join(model_dir, model_name)
    # Only rank 0 writes the checkpoint, avoiding concurrent writers in DDP.
    if trainer.global_rank == 0:
        makedirs(model_dir)
        print("Saving model to {}".format(model_save_path))
        torch.save(model.state_dict(), model_save_path)
if __name__ == '__main__':
    main()
# + [markdown] id="d3682bd84fda"
# #### Configure the container name and path to artifact registry
# + id="c70a580cc6d4"
# Name/tag of the training image and its full Artifact Registry path.
content_name = "pytorch-lightning-gpu-training"
hostname = f"{REGION}-docker.pkg.dev"
image_name_train = content_name
tag = "latest"
custom_container_image_uri_train = (
    f"{hostname}/{PROJECT_ID}/{REPOSITORY}/{image_name_train}:{tag}"
)
# + [markdown] id="a45380a286c8"
# #### Create the requirements.txt and Dockerfile
# + id="1cc1a3438482"
# %%writefile trainer/requirements.txt
torch>=1.6, <1.9
lightning-bolts
pytorch-lightning>=1.3
torchmetrics>=0.3
torchvision
# + id="51a958343b27"
# %%writefile trainer/Dockerfile
FROM pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime
COPY . /trainer
WORKDIR /trainer
RUN pip install -r requirements.txt
ENTRYPOINT ["python", "task.py"]
# + [markdown] id="9a7e8f8f534f"
# #### Create an empty __init__.py file required to be in the container
# + id="459b9d5a8965"
import os
# Create an empty __init__.py so `trainer/` is importable as a Python package.
with open(os.path.join("trainer", "__init__.py"), "w") as fp:
    pass
# + [markdown] id="7b36b457aa5d"
# #### Build the container, train the model within the container image locally, and push to Artifact Registry
# + id="e58c75158872"
# ! cd trainer && docker build -t $custom_container_image_uri_train -f Dockerfile .
# + id="bde55966086a"
# ! docker run --rm $custom_container_image_uri_train
# + id="34c407c3be0e"
# ! docker push $custom_container_image_uri_train
# + id="526115eabf35"
# ! gcloud artifacts repositories describe $REPOSITORY --location=$REGION
# + [markdown] id="f026b57d265e"
# ### Initialize Vertex SDK
# + id="a0deece1086e"
from google.cloud import aiplatform
# Point the Vertex AI SDK at the project/bucket/region configured above.
aiplatform.init(
    project=PROJECT_ID,
    staging_bucket=BUCKET_URI,
    location=REGION,
)
# + [markdown] id="1ec68e279ed1"
# ### Create a Vertex AI TensorBoard Instance
# + id="d945f0e32b02"
# Managed TensorBoard instance the training jobs stream their logs to.
tensorboard = aiplatform.Tensorboard.create(
    display_name=content_name,
)
# + [markdown] id="cb13cc82acc5"
# #### Option: Use a previously created Vertex AI TensorBoard instance
#
# ```
# tensorboard_name = "Your TensorBoard Resource Name or TensorBoard ID"
# tensorboard = aiplatform.Tensorboard(tensorboard_name=tensorboard_name)
# ```
# + [markdown] id="0e526882fbda"
# ### Run a Vertex AI SDK custom container training job with multiple GPUs
# + id="93e74e00996c"
from datetime import datetime
# Fresh timestamp for this training run's resource names.
TIMESTAMP = datetime.now().strftime("%Y-%m-%d-%H%M%S")
print(TIMESTAMP)
# + [markdown] id="e38c06b9377f"
# #### Set the parameters for the training. The model/TensorBoard/checkpoint directory uses the Vertex defaults. Uncomment to set your own
# + id="8494564137b0"
gcs_output_uri_prefix = f"{BUCKET_URI}/{content_name}-{TIMESTAMP}"
# + id="6c999fe5df46"
EPOCHS = 30
TRAIN_STRATEGY = "dp"  # Distributed Parallel for single machine multiple GPU
MODEL_DIR = f"{BUCKET_URI}/{content_name}/model"
TB_DIR = f"{BUCKET_URI}/{content_name}/logs"
CHKPT_DIR = f"{BUCKET_URI}/{content_name}/checkpoints"
NUM_NODES = 1
machine_type = "n1-standard-4"
accelerator_count = 2
accelerator_type = "NVIDIA_TESLA_V100"
# NOTE(review): TB_DIR is defined but not passed below — the job's TensorBoard
# dir comes from the AIP_TENSORBOARD_LOG_DIR env var Vertex injects.
CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--distribute=" + TRAIN_STRATEGY,
    "--num-nodes=" + str(NUM_NODES),
    "--model-dir=" + MODEL_DIR,
    "--checkpoint-dir=" + CHKPT_DIR,
]
# + id="db9de8eec4c4"
# Define the single-machine, multi-GPU custom-container training job.
custom_container_training_job = aiplatform.CustomContainerTrainingJob(
    display_name=content_name + "-MultGPU-dp-" + TIMESTAMP,
    container_uri=custom_container_image_uri_train,
)
# + id="fa5f67712789"
# Launch asynchronously (sync=False): one replica with two V100s using "dp".
custom_container_training_job.run(
    args=CMDARGS,
    replica_count=NUM_NODES,
    base_output_dir=gcs_output_uri_prefix,
    machine_type=machine_type,
    accelerator_type=accelerator_type,
    accelerator_count=accelerator_count,
    service_account=SERVICE_ACCOUNT,
    tensorboard=tensorboard.resource_name,
    sync=False,
)
# + id="29b14f2289f3"
print(f"Custom Training Job Name: {custom_container_training_job.resource_name}")
print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
# + [markdown] id="e38c06b9377f"
# #### Delete the training job
# + id="d681091fd23d"
# Delete the finished single-machine training job resource.
custom_container_training_job.delete()
# + [markdown] id="5f2b541299e9"
# ## Run training on multiple machines w/ 1 GPU on each
# + id="b15bd8c318d8"
EPOCHS = 30
TRAIN_STRATEGY = "ddp"  # Distributed Data Parallel across multiple machines
MODEL_DIR = f"{BUCKET_URI}/{content_name}-ddp/model"
TB_DIR = f"{BUCKET_URI}/{content_name}-ddp/logs"
CHKPT_DIR = f"{BUCKET_URI}/{content_name}-ddp/checkpoints"
NUM_NODES = 2
machine_type = "n1-standard-4"
accelerator_count = 1
accelerator_type = "NVIDIA_TESLA_V100"
CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--distribute=" + TRAIN_STRATEGY,
    "--num-nodes=" + str(NUM_NODES),
    "--model-dir=" + MODEL_DIR,
    "--checkpoint-dir=" + CHKPT_DIR,
]
# + id="a4ca2f0e0f00"
# Define the multi-node (2 nodes x 1 GPU) DDP training job.
custom_container_training_job_dist = aiplatform.CustomContainerTrainingJob(
    display_name=content_name + "-MultiCPU-1GPU-ddp-" + TIMESTAMP,
    container_uri=custom_container_image_uri_train,
)
# + id="8fbe3a5549a1"
# NOTE(review): this reuses gcs_output_uri_prefix from the previous job, so
# both jobs write under the same GCS prefix — confirm this is intended.
custom_container_training_job_dist.run(
    args=CMDARGS,
    replica_count=NUM_NODES,
    base_output_dir=gcs_output_uri_prefix,
    machine_type=machine_type,
    accelerator_type=accelerator_type,
    accelerator_count=accelerator_count,
    service_account=SERVICE_ACCOUNT,
    tensorboard=tensorboard.resource_name,
    sync=False,
)
# + id="29b14f2289f3"
print(f"Custom Training Job Name: {custom_container_training_job_dist.resource_name}")
print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
# + [markdown] id="TpV-iwP9qw9c"
# ## Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
# + id="sx_vKniMq9ZX"
# Warning: Setting this to true will delete everything in your bucket
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete TensorBoard
TB_NAME = tensorboard.resource_name
# ! gcloud beta ai tensorboards delete $TB_NAME --quiet
# Delete the training job
custom_container_training_job_dist.delete()
CONTENT_DIR = f"{BUCKET_URI}/{content_name}*"
# Delete Cloud Storage objects that were created
# ! gsutil -m rm -r $CONTENT_DIR
if delete_bucket and "BUCKET_URI" in globals():
    # ! gsutil -m rm -r $BUCKET_URI
    # Fix: the if-body is only the `!gsutil` cell magic above, which jupytext
    # stores as a comment — without `pass` the .py form is a SyntaxError.
    pass
| notebooks/community/sdk/sdk_pytorch_lightning_custom_container_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting Reference Manual
# Use this manual to help you create plots using python. More specifically, we'll be using a library of Python functions called "matplotlib.pyplot", these are where all the tools to create and format plots are held. Below you'll see we've imported this library and decided to call it "plt", that means that whenever we use one of our plotting functions we have to first tell it which library it's in by placing "plt." ahead of the function name. You'll also notice we've imported a second library of functions, "numpy". This one contains lots of common mathematical and scientific computing functions, a few of which we'll use. Don't worry if this seems a little daunting at first, you'll quickly get familiar with the functions and you'll be making your own plots before you know it.
import matplotlib.pyplot as plt
import numpy as np
# ### Plotting A List Of Points
# To plot a list of points the first thing we need to do is create the list of points. Each point will be written in the form (x, y).
# Example points, each written as an (x, y) pair.
myList = [(-2, 10), (5, 6), (13, -8), (0, 4)]
# Now that I've decided on my points I'd like to plot, I need to break them into two lists. One list will be the x-coordinates and the other will be the y-coordinates. Just make sure that they're in the same order so that you're plotting the correct points.
xValues = [-2, 5, 13, 0]
yValues = [10, 6, -8, 4]
# #### Choosing Colour and Marker Shape
# To plot our points successfully we need to let python know what kind of plot we want. We can do this by specifying the colour and shape of the points. To do this we will create a two symbol code to set our preferences.
#
# The first represents colour and the second is the marker shape. Using the legend below you can create a code to format your plot.
#
# **Colour Symbols:** blue = 'b', green = 'g', red = 'r', cyan = 'c', magenta = 'm', yellow = 'y', black = 'k', white = 'w'
#
# **Shape Symbols:** point = '.', pixel = ',', circle = 'o', triangle '^', square = 's', pentagon = 'p', star = '*', plus = '+', cross = 'x', diamond = 'D'
#
# Let's start off by making our points magenta stars, to do this we will use the code 'm*'.
# +
# 'm*' = magenta star markers.
plt.plot(xValues, yValues, 'm*')
plt.show()
# -
# You have two options for how you want to input the coordinate information into the plot function. You can either create lists of the information and use their names in the plot function like we did above, or you can input the x and y coordinates directly into it. Both create the same plot so it's whatever you prefer!
# +
# Same plot, passing the coordinate lists inline instead of by name.
plt.plot([-2, 5, 13, 0], [10, 6, -8, 4], 'm*')
plt.show()
# -
# #### Changing The Size of Points
# Let's say we'd like our markers to be bigger or smaller, we're going to specify a size argument. Let's say we'd like them to be quite a bit larger, we could use size 18. We'll use our same plotting instructions from above but we'll set the 'markersize'.
# +
xValues = [-2, 5, 13, 0]
yValues = [10, 6, -8, 4]
# markersize enlarges the star markers.
plt.plot(xValues, yValues, 'm*', markersize = 18)
plt.show()
# -
# #### Creating Markers That Aren't Filled In
# Let's say you'd like the marker to be just the outline of the shape. Just like when we wanted to specify the markersize, we'll set another parameter. This time it's called 'fillstyle' and we'll set it to 'none'.
# +
xValues = [-2, 5, 13, 0]
yValues = [10, 6, -8, 4]
# fillstyle='none' draws only the marker outline.
plt.plot(xValues, yValues, 'm*', markersize = 18, fillstyle = 'none')
plt.show()
# -
# #### Connecting Points
# We've now learned how to plot lists of points and format them to suit our preferences. But what if we wanted to connect these points? It's actually very simple to do this, we just need to make a slight change to our colour and shape formatting code. To recap, our first symbol determines colour and the second determines the markershape (i.e. 'm*' is magenta stars). If we want to connect these we just need to add a symbol between these two to represent linestyle. Below is the legend for some of the different lines you can use:
#
# **Line Style Symbols:** solid line = '-', dashed line = '--', dash-dot line = '-.', dotted line = ':'
#
# For example, if I wanted to connect my magenta stars with a dashed line, my format code would be 'm--*'. Let's give it a try.
# +
xValues = [-2, 5, 13, 0]
yValues = [10, 6, -8, 4]
# 'm--*' = magenta, dashed connecting line, star markers.
plt.plot(xValues, yValues, 'm--*', markersize = 18)
plt.show()
# -
# You'll notice that they're connected based on the order they're entered into the plot function.
# ### Plotting Functions
# Ok we've seen points plotted on their own and then connected them with lines. Now it's time to think about how we might plot a function.
#
# Let's start off by defining a function we'd like to plot. I'll use a simple parabola. Pay close attention to how I define a function in python.
def f(x):
    """Return the value of the parabola x * (x - 3)."""
    return x * (x - 3)
# I'd now like to plot my parabola from -10 to 10. That means I need to evaluate f(x) at x-values ranging from -10 to 10. We'll start off by creating a list of these values.
#
# To create this list we can use the python function from the "numpy" library: np.linspace(start, stop, numberOfSteps). This will generate any number of points between two numbers. For our range let's have 200 numbers so that we have only slight changes in f(x) between x-values (this will generate a smoother curve when plotted).
# 200 evenly spaced x-values in [-10, 10] for a smooth curve.
X = np.linspace(-10, 10, 200)
# Ok now all that's left is to plot f(x) using our range of values for x. To do this we will replace the x in f(x) with the name of the list of values we created. In this case, that's X.
#
# #### Choosing Colour and Line Style
# Think back to what you just learned about plotting points and the format codes. The first symbol is for the colour, then the linestyle between points, and finally the marker shape. When we're plotting a function, we'll use the first two and leave out marker shape.
#
# For example, I'd like to plot my parabola as a blue solid line so my format code will be 'b-'.
# +
# 'b-' = solid blue line through the points (X, f(X)).
plt.plot(X, f(X), 'b-')
plt.show()
# -
# #### Choosing Line Width
# Just like when we're plotting points we can use other parameters in our plot function to make more formatting decisions. For functions you can change the width of your plotted line/curve. Let's say I'd like my blue parabola to be much thicker, I can set the paramter "lw" to 10.
# +
# lw (line width) thickens the plotted curve.
plt.plot(X, f(X), 'b-', lw = 10)
plt.show()
# -
# ### Adding Text To A Plot
# Sometimes we'd like to be able to label points or even write some information on our plots. We can do that in a similar way to plotting points or functions.
# Place the string 'Hello!' anchored at coordinates (0, 0).
plt.text(0, 0, 'Hello!', size = 40, color = 'green')
# It's as easy as that! The first parameter is the x-coordinate of the position of the text and the second parameter is the y-coordinate. The third parameter is the text itself, it's important that this is in quotations. The last 2 parameters are size and colour (again make sure the name of the colour is in quotations).
# ### Formatting A Plot
# You've now learned how to plot points and functions in python. Let's now work on how to format the canvas of the plot. You can change the axes sizes, markings, titles and gridlines.
# +
# Create a square 7x7-inch canvas. (Fix: the original called plt.figure twice,
# which opened an extra blank figure window; one call is enough.)
plt.figure(figsize = (7, 7))
plt.xlim((-10, 10))
plt.ylim((-10, 10))
plt.xticks([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10])
plt.yticks([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10])
# Draw the x/y axes through the origin.
plt.axhline(0, color = 'black', lw =1)
plt.axvline(0, color = 'black', lw = 1)
plt.xlabel('X-Axis')
plt.ylabel('Y-Axis')
plt.title('Title')
plt.grid()
plt.show()
# -
# ### Putting It All Together
# Now that you have all the pieces to the plotting puzzle, it's time to put them all together. Start by formatting the axes and grid lines, then you can plot as many functions and points as you'd like. Take some time to make some changes to my plot to help understand what each instruction does and how the plotting functions work.
# +
plt.grid()
plt.xlim((-10, 10))
plt.ylim((-10, 10))
plt.xticks([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10])
plt.yticks([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10])
# Draw the x/y axes through the origin.
plt.axhline(0, color = 'black', lw =1)
plt.axvline(0, color = 'black', lw = 1)
def g(x):
    """Parabola with roots at -5 and 5, vertically scaled by 0.15."""
    return 0.15*(x + 5)*(x - 5)
# 100 x-values spanning the parabola's roots at -5 and 5.
X = np.linspace(-5, 5, 100)
plt.plot(X, g(X), 'r-', lw = 4)
points1 = [(-2.5, 5), (2.5, 5)]
plt.plot([-2.5, 2.5], [5, 5], 'bo', markersize = 3)
# Large unfilled circles drawn around the two small dots.
plt.plot([-2.5, 2.5], [5, 5], 'bo', markersize = 50, fillstyle = 'none')
points2 = [(0, 2.5)]
plt.plot([0], [2.5], 'y^', markersize = 20, fillstyle = 'none')
plt.show()
# -
| plotting-reference-manual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Sanity-check that the kernel runs.
print("Hello world")
# baseado em - https://www.kaggle.com/leandrodoze/sentiment-analysis-in-portuguese
#imports
import nltk
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
import numpy as np
# https://www.kaggle.com/luisfredgs/imdb-ptbr
# Load the Portuguese IMDB reviews dataset (movie reviews with sentiment labels).
dataset = pd.read_csv('/home/renato/Documentos/imdb-reviews-pt-br.csv')
dataset.head()
# Load a second, already-labelled tweet dataset and inspect its class balance.
df2 = pd.read_csv('./files test/Tweets_Mg.csv')
df2['Classificacao'].value_counts()
# ## Unir os dois dataframe para teste
# Columns to drop from df2 so both dataframes end up with the same schema.
rm_df2 = [x for x in list(df2.columns) if x not in ['Text','Classificacao']]
# keep both dataframes with the same number of columns
df = dataset.drop(columns=['text_en','id'])
df2 = df2.drop(columns=rm_df2)
# make the sentiment labels of this dataframe match the other one's
df['sentiment'] = df['sentiment'].str.replace('neg', 'Negativo')
df['sentiment'] = df['sentiment'].str.replace('pos', 'Positivo')
df
# concatenate the two dataframes
df.columns = ['Text', 'Polaridade']
df2.columns = ['Text', 'Polaridade']
df_concat = pd.concat([df,df2])
df_concat
df_concat['Polaridade'].value_counts()
# ## Construindo o modelo
# Raw texts and their sentiment labels used for training.
tweets = df_concat["Text"].values
#tweets
classes = df_concat["Polaridade"].values
classes
# +
# Now we train the model using the Bag of Words approach with the Multinomial Naive Bayes algorithm
# - Bag of Words builds a vector over every word in the corpus, then counts how often each
#   word occurs in a given sentence; those counts are used to classify/train the model
# - HYPOTHETICAL example of three sentences vectorized "per word" and classified by
#   word frequency:
#      {0,3,2,0,0,1,0,0,0,1, Positivo}
#      {0,0,1,0,0,1,0,1,0,0, Negativo}
#      {0,1,1,0,0,1,0,0,0,0, Neutro}
# - Looking at these vectors, my guess is that the words at positions 2 and 3 carry the most
#   weight in deciding which class each of the three sentences belongs to
# - fit_transform does exactly that: it fits the model, learns the vocabulary,
#   and transforms the training data into feature vectors, i.e. vectors of word frequencies
vectorizer = CountVectorizer(analyzer = "word")
freq_tweets = vectorizer.fit_transform(tweets)
modelo = MultinomialNB()
modelo.fit(freq_tweets, classes)
# -
# ## Testando o Modelo
# +
# Use a few test sentences to classify with the trained model
testes = ["Bom dia, que Hoje e sexta"]
freq_testes = vectorizer.transform(testes)
modelo.predict(freq_testes)
# -
# Cross-validation of the model: the data is split into 10 folds, trained on 9 and tested on 1
resultados = cross_val_predict(modelo, freq_tweets, classes, cv = 10)
# How accurate is the model on average?
metrics.accuracy_score(classes, resultados)
# ## Testando com outra base de dados
# Load the raw tweets collected through the Twitter streaming API.
df = pd.read_csv('../Dados coletados/Twitter_streaming.csv', low_memory=False)
df.head()
def classifica_tweets(tweet):
    """Predict the sentiment class of a single tweet text.

    Uses the globally fitted `vectorizer` and `modelo` from the training
    cells above; returns the model's prediction array.
    """
    features = vectorizer.transform([tweet])
    return modelo.predict(features)
df_tweets = list(df['TWEET'])
# Spot-check the classifier on a slice of the streamed tweets.
for i in range(1000,1150):
    predict = classifica_tweets(df_tweets[i])
    print(f"{predict} - {df_tweets[i]}\n")
classifica_tweets('e que passou do tempo')[0]
# FIX: the predictions were assigned to `df_tweets` (a plain Python list),
# which raises "TypeError: list indices must be integers"; store them as a
# new column on the DataFrame instead, then display the DataFrame.
df['polaridade'] = np.array([classifica_tweets(tweet)[0] for tweet in df['TWEET'].values.astype('U')])
df
# ## ingnore daqui pra baixo
# FIX: the original `def clean()` was missing the colon (a SyntaxError) and
# its intended body was left at module level; wrap the statements as the body.
def clean():
    """Persist the classified tweets to disk (WIP; column drops kept commented for reference)."""
    #df = df.drop(columns = ['DATA_TWEET'])
    df.to_csv('./files test/tweets_classificados.csv')
    #df['Classificacao'] = '-'
| notebooks/analise-de-sentimentos/Analise de sentimento - Twitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] uuid="f9db1dc0-60de-4114-ac7b-caba1b3a4e63"
# # 使用Jupyter-Notebook快速搭建文本分类应用
#
# 这是一篇介绍如何在PAI-DSW里用EasyTransfer平台训练文本分类器的教程。只需要一份配置文件,一份ipynb文件,您就可以完成对原始数据的特征提取,网络构建,损失函数及分类评估/预测的简单调用。运行本DEMO需要如下的配置信息
#
# - python 3.6+
# - tensorflow 1.12+
#
# ## (一)数据准备
# 下面以一个基于bert的文本分类为例,通过端到端的分布式训练/评估/预测流程,展示平台的易用性。这里的端到端指的是直接读入原始数据就可以训练,而不需要事先转换成Bert特征格式。
#
# -
# !wget -O easytransfer-1.0.0-py3-none-any.whl https://pai-public-data.oss-cn-beijing.aliyuncs.com/public_whl/easytransfer-1.0.0-py3-none-any.whl
# !pip install ./easytransfer-1.0.0-py3-none-any.whl --user
# + uuid="c39089e1-d85a-485a-8711-65586b1ddeac"
# !mkdir data
# !wget -O ./data/train.csv https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/tutorial/dsw/train.csv
# !wget -O ./data/dev.csv https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/tutorial/dsw/dev.csv
# + uuid="16ae9b6a-62ad-4816-bc08-14e3f4727e06"
import pandas as pd
# + uuid="70a8b996-2a2a-4539-8c65-809cffbc314e"
# !head ./data/train.csv
# + uuid="aa8f56c0-db1c-4d0e-803d-976e33786ef2"
# Tab-separated training file with no header: column 0 = label, column 1 = text.
df = pd.read_csv('./data/train.csv', header=None, delimiter='\t', encoding='utf8')
# + uuid="7699e1a8-1d2b-4a11-9e17-0700dae18636"
df.columns = ['label','content']
# + uuid="fe9c6f42-9fd8-4227-9c9e-54a5c6e7ba88"
df.head(2)
# + [markdown] cellType="code" uuid="afb86c76-9082-477c-b93a-c041304cbcf7"
# ## (二)定义配置文件
#
# 如下是我们easytransfe的配置,比如说predict_checkpoint_path是指定验证集上指标最好的checkpoint的路径。
# 详细配置介绍请看easytransfer文档: https://yuque.antfin-inc.com/pai/transfer-learning/zyib3t
# + uuid="2f687623-98d6-4394-b8ce-558070dc8a6d"
# EasyTransfer run configuration: local, single-worker/single-GPU training of
# a 15-class Chinese news-title classifier on the "pai-bert-tiny-zh" backbone.
config_json = {
    # FIX: was "locahost" — a typo; the worker host must be a resolvable hostname.
    "worker_hosts": "localhost",
    "task_index": 1,  # NOTE(review): chief tasks conventionally use index 0 — confirm against EasyTransfer docs
    "job_name": "chief",
    "num_gpus": 1,
    "num_workers": 1,
    "modelZooBasePath": "/home/admin/jupyter/my_model_zoo",
    "preprocess_config": {
        "input_schema": "label:str:1,content:str:1",  # one label column, one text column
        "first_sequence": "content",
        "second_sequence": None,                      # single-sentence task: no sentence pair
        "sequence_length": 16,
        "label_name": "label",
        "label_enumerate_values": "tech,finance,entertainment,world,car,culture,sports,military,edu,game,travel,agriculture,house,story,stock",
        "output_schema": "label,predictions"
    },
    "model_config": {
        "pretrain_model_name_or_path": "pai-bert-tiny-zh",
        "num_labels": 15                              # must match label_enumerate_values above
    },
    "train_config": {
        "train_input_fp": "./data/train.csv",
        "train_batch_size": 2,
        "num_epochs": 1,
        "model_dir": "model_dir",
        "optimizer_config": {
            "learning_rate": 1e-5
        },
        "distribution_config": {
            "distribution_strategy": None             # no distributed strategy for local runs
        }
    },
    "evaluate_config": {
        "eval_input_fp": "./data/dev.csv",
        "eval_batch_size": 8
    },
    "predict_config": {
        # Checkpoint with the best dev-set metric, used for prediction.
        "predict_checkpoint_path": "model_dir/model.ckpt-834",
        "predict_input_fp": "./data/dev.csv",
        "predict_output_fp": "./data/predict.csv"
    }
}
# + [markdown] uuid="785e9792-dd80-477b-92c0-0b54e3c94213"
# ## (三)定义分类应用
#
# ### 导入ez_transfer库文件
# - base_model: 所有应用都需要继承的父类
# - Config:用来解析配置文件的父类
# - layers:基础组件。比如Embedding,Attention等
# - model_zoo: 管理预训练模型的组件库,通过get_pretrained_model方法可调用bert模型
# - preprocessors:管理各种应用的预处理逻辑
# - CSVReader:csv格式的数据读取器
# - softmax_cross_entropy:用于分类任务的损失函数
# - classification_eval_metrics:用于分类任务的评估指标,比如Accuracy
# + uuid="10ccec35-4235-4f4a-96a9-85cced4a1eb5"
import tensorflow as tf
from easytransfer import base_model, Config
from easytransfer import layers
from easytransfer import model_zoo
from easytransfer import preprocessors
from easytransfer.datasets import CSVReader,CSVWriter
from easytransfer.losses import softmax_cross_entropy
from easytransfer.evaluators import classification_eval_metrics
# + [markdown] uuid="2db018a2-8824-42e8-b357-91b88e148b0f"
# ## 构图
# 完整的训练/评估/预测/链路,由四个函数构成
# - build_logits: 构图
# - build_loss:定义损失函数
# - build_eval_metrics:定义评估指标
# - build_predictions:定义预测输出
# + uuid="91c6db6b-425d-4047-9ebe-f0917c1a7aad"
class TextClassification(base_model):
    """Text-classification application: BERT-style backbone + dense softmax head."""

    def __init__(self, **kwargs):
        super(TextClassification, self).__init__(**kwargs)
        self.user_defined_config = kwargs["user_defined_config"]

    def build_logits(self, features, mode=None):
        """Convert raw features into classification logits and label ids."""
        # Preprocessor turns raw text into input_ids / input_mask / segment_ids.
        preprocessor = preprocessors.get_preprocessor(
            self.pretrain_model_name_or_path,
            user_defined_config=self.user_defined_config)
        # Pretrained backbone network.
        backbone = model_zoo.get_pretrained_model(self.pretrain_model_name_or_path)
        classifier_head = layers.Dense(
            self.num_labels,
            kernel_initializer=layers.get_initializer(0.02),
            name='dense')
        input_ids, input_mask, segment_ids, label_ids = preprocessor(features)
        _, pooled_output = backbone([input_ids, input_mask, segment_ids], mode=mode)
        return classifier_head(pooled_output), label_ids

    def build_loss(self, logits, labels):
        """Softmax cross-entropy over the label classes."""
        return softmax_cross_entropy(labels, self.num_labels, logits)

    def build_eval_metrics(self, logits, labels):
        """Standard classification evaluation metrics (e.g. accuracy)."""
        return classification_eval_metrics(logits, labels, self.num_labels)

    def build_predictions(self, output):
        """Map logits to the argmax class index for the prediction output."""
        logits, _ = output
        return {"predictions": tf.argmax(logits, axis=-1, output_type=tf.int32)}
# + [markdown] uuid="c31301bb-0dc0-4506-9c94-73d76942c782"
# # (四)启动训练
# + uuid="32cfce03-786a-434f-9166-7b3649e4f84c"
# Parse the config dict and instantiate the application.
config = Config(mode="train_and_evaluate_on_the_fly", config_json=config_json)
# + uuid="ec5ad940-9410-4d95-8a17-788c826f18eb"
app = TextClassification(user_defined_config=config)
# + uuid="d2578057-a24c-4056-a928-832b0e2f3024"
# CSV readers for the train (shuffled) and dev (unshuffled) splits.
train_reader = CSVReader(input_glob=app.train_input_fp,
                         is_training=True,
                         input_schema=app.input_schema,
                         batch_size=app.train_batch_size)
eval_reader = CSVReader(input_glob=app.eval_input_fp,
                        is_training=False,
                        input_schema=app.input_schema,
                        batch_size=app.eval_batch_size)
# + uuid="84bb64e6-5461-4685-b105-872c6ad01c27"
# Launch the combined train/evaluate loop.
app.run_train_and_evaluate(train_reader=train_reader, eval_reader=eval_reader)
# + uuid="12ce0aaf-4864-42c8-a6ee-76d871d9dc2e"
| examples/easytransfer-quick_start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook Basics
# ---
# This lesson assumes that the user has Jupyter [installed](https://jupyter.readthedocs.io/en/latest/install.html) and that the notebook server can be started by running:
#
# jupyter notebook
#
# For more details on how to run the notebook server, see [Running the Notebook Server](Running the Notebook Server.ipynb).
# ## The Dashboard
# ---
# When the notebook server is first started, a browser will be opened to the notebook dashboard. The dashboard serves as a home page for the notebook. Its main purpose is to display the portion of the filesystem accessible by the user, and to provide an overview of the running kernels, terminals, and parallel clusters.
# ### Files Tab
#
# The files tab provides an interactive view of the portion of the filesystem which is accessible by the user. This is typically rooted by the directory in which the notebook server was started.
#
# The top of the files list displays clickable breadcrumbs of the current directory. It is possible to navigate the filesystem by clicking on these breadcrumbs or on the directories displayed in the notebook list.
#
# A new notebook can be created by clicking on the **`New`** dropdown button at the top of the list, and selecting the desired language kernel.
#
# Notebooks can also be uploaded to the current directory by dragging a notebook file onto the list or by clicking the **`Upload`** button at the top of the list.
#
# <img src="images/dashboard_notebooks_tab_5_0.png" />
# ### Running Tab
#
# The running tab displays the currently running notebooks which are known to the server. This view provides a convenient way to track notebooks that have been started during a long running notebook server session.
#
# Each running notebook will have an orange **`Shutdown`** button which can be used to shutdown its associated kernel. Closing the notebook's page is not sufficient to shutdown a kernel.
#
# Running terminals are also listed, provided that the notebook server is running on an operating system which supports PTY.
#
# <img src="images/dashboard_running_tab_4_0.png" />
# ### Clusters Tab
#
# The clusters tab provides a summary view of [IPython Parallel](https://ipyparallel.readthedocs.io/en/latest/) clusters. The IPython Parallel extension must be [installed](https://github.com/ipython/ipyparallel) in order to use this feature.
#
# <img src="images/dashboard_clusters_tab_4_0.png" />
# ## The Notebook
# ---
# When a notebook is opened, a new browser tab will be created which presents the notebook user interface (UI). This UI allows for interactively editing and running the notebook document.
#
# A new notebook can be created from the dashboard by clicking on the **`Files`** tab, followed by the **`New`** dropdown button, and then selecting the language of choice for the notebook.
#
# An interactive tour of the notebook UI can be started by selecting **`Help -> User Interface Tour`** from the notebook menu bar.
# ### Header
#
# At the top of the notebook document is a header which contains the notebook title, a menubar, and toolbar. This header remains fixed at the top of the screen, even as the body of the notebook is scrolled. The title can be edited in-place (which renames the notebook file), and the menubar and toolbar contain a variety of actions which control notebook navigation and document structure.
#
# <img src="images/notebook_header_4_0.png" />
# ### Body
#
# The body of a notebook is composed of cells. Each cell contains either markdown, code input, code output, or raw text. Cells can be included in any order and edited at-will, allowing for a large amount of flexibility for constructing a narrative.
#
# - **Markdown cells** - These are used to build a nicely formatted narrative around the code in the document. The majority of this lesson is composed of markdown cells.
#
# - **Code cells** - These are used to define the computational code in the document. They come in two forms: the *input cell* where the user types the code to be executed, and the *output cell* which is the representation of the executed code. Depending on the code, this representation may be a simple scalar value, or something more complex like a plot or an interactive widget.
#
# - **Raw cells** - These are used when text needs to be included in raw form, without execution or transformation.
#
# <img src="images/notebook_body_4_0.png" />
# #### Modality
#
# The notebook user interface is *modal*. This means that the keyboard behaves differently depending upon the current mode of the notebook. A notebook has two modes: **edit** and **command**.
#
# **Edit mode** is indicated by a green cell border and a prompt showing in the editor area. When a cell is in edit mode, you can type into the cell, like a normal text editor.
#
# <img src="images/edit_mode.png">
#
# **Command mode** is indicated by a grey cell border. When in command mode, the structure of the notebook can be modified as a whole, but the text in individual cells cannot be changed. Most importantly, the keyboard is mapped to a set of shortcuts for efficiently performing notebook and cell actions. For example, pressing **`c`** when in command mode, will copy the current cell; no modifier is needed.
#
# <img src="images/command_mode.png">
#
# <br>
# <div class="alert alert-success">
# Enter edit mode by pressing `Enter` or using the mouse to click on a cell's editor area.
# </div>
# <div class="alert alert-success">
# Enter command mode by pressing `Esc` or using the mouse to click *outside* a cell's editor area.
# </div>
# <div class="alert alert-warning">
# Do not attempt to type into a cell when in command mode; unexpected things will happen!
# </div>
# #### Mouse navigation
#
# The first concept to understand in mouse-based navigation is that **cells can be selected by clicking on them.** The currently selected cell is indicated with a grey or green border depending on whether the notebook is in edit or command mode. Clicking inside a cell's editor area will enter edit mode. Clicking on the prompt or the output area of a cell will enter command mode.
#
# The second concept to understand in mouse-based navigation is that **cell actions usually apply to the currently selected cell**. For example, to run the code in a cell, select it and then click the <button class='btn btn-default btn-xs'><i class="fa fa-play icon-play"></i></button> button in the toolbar or the **`Cell -> Run`** menu item. Similarly, to copy a cell, select it and then click the <button class='btn btn-default btn-xs'><i class="fa fa-copy icon-copy"></i></button> button in the toolbar or the **`Edit -> Copy`** menu item. With this simple pattern, it should be possible to perform nearly every action with the mouse.
#
# Markdown cells have one other state which can be modified with the mouse. These cells can either be rendered or unrendered. When they are rendered, a nice formatted representation of the cell's contents will be presented. When they are unrendered, the raw text source of the cell will be presented. To render the selected cell with the mouse, click the <button class='btn btn-default btn-xs'><i class="fa fa-play icon-play"></i></button> button in the toolbar or the **`Cell -> Run`** menu item. To unrender the selected cell, double click on the cell.
# #### Keyboard Navigation
#
# The modal user interface of the IPython Notebook has been optimized for efficient keyboard usage. This is made possible by having two different sets of keyboard shortcuts: one set that is active in edit mode and another in command mode.
#
# The most important keyboard shortcuts are **`Enter`**, which enters edit mode, and **`Esc`**, which enters command mode.
#
# In edit mode, most of the keyboard is dedicated to typing into the cell's editor. Thus, in edit mode there are relatively few shortcuts. In command mode, the entire keyboard is available for shortcuts, so there are many more possibilities.
#
# The following images give an overview of the available keyboard shortcuts. These can viewed in the notebook at any time via the **`Help -> Keyboard Shortcuts`** menu item.
#
# <img src="images/notebook_shortcuts_4_0.png">
#
# The following shortcuts have been found to be the most useful in day-to-day tasks:
#
# - Basic navigation: **`enter`**, **`shift-enter`**, **`up/k`**, **`down/j`**
# - Saving the notebook: **`s`**
# - Cell types: **`y`**, **`m`**, **`1-6`**, **`r`**
# - Cell creation: **`a`**, **`b`**
# - Cell editing: **`x`**, **`c`**, **`v`**, **`d`**, **`z`**, **`ctrl+shift+-`**
# - Kernel operations: **`i`**, **`.`**
# ## The Text Editor
# ---
# The notebook application has the ability to edit more than just notebook files and code cells. Any plain text file can be edited using the built-in text editor.
#
# The text editor will be opened in a new browser tab whenever a non-notebook text file is accessed from the dashboard. A new text file can also be created from the dashboard by clicking on the **`Files`** tab, followed by the **`New`** dropdown button, and then selecting **`Text File`**.
#
# The text editor has a header which is similar to that of the notebook's, and includes the document title and a menubar. The syntax highlighting for the text file is determined automatically by the file extension. It can also be set manually via the **`Language`** option in the menubar.
#
# <img src="images/text_editor_4_0.png">
# ## The Terminal
# ---
# If the notebook server is run on an operating system which supports [PTY](https://en.wikipedia.org/wiki/Pseudoterminal) (Linux/Mac), then the notebook application will be able to spawn interactive terminal instances. If the operating system does not support PTY (Windows), the terminal feature will not be enabled.
#
# A new terminal can be spawned from the dashboard by clicking on the **`Files`** tab, followed by the **`New`** dropdown button, and then selecting **`Terminal`**.
#
# The terminal supports all applications which would otherwise run in a PTY, this includes classical terminal applications like Vim, Nano, and Bash.
#
# <img src="images/terminal_4_0.png">
| examples/Notebook/Notebook Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import sys
import matplotlib.pyplot as plt
# %matplotlib qt5
#
# extend path by location of the dvr package
#
# Location of the local Jolanta/DVR package; added to sys.path so the
# project-local modules below can be imported.
jolanta_root="/home/thomas/Current_Work/Jolanta-by-dvr/"
sys.path.append(jolanta_root+'Python_libs')
import dvr
import jolanta
# Unit-conversion constants.
amu_to_au=1822.888486192   # atomic mass units -> electron masses (a.u.)
au2cm=219474.63068         # Hartree -> wavenumbers (cm^-1)
au2eV=27.211386027         # Hartree -> electron volts
Angs2Bohr=1.8897259886     # Angstrom -> Bohr
# +
#
# compute DVR of T and V
# then show the density of states
# in a potential + energy-levels plot
# the standard 3D-Jolanta is used (resonance at 1.75 -0.2i eV)
#
rmin=0
rmax=25 # grid from 0 to rmax
thresh = 8 # maximum energy for plot
ppB = 10 # grid points per Bohr
nGrid=int((rmax-rmin)*ppB)
# Build the DVR grid, potential, and kinetic-energy matrix, then diagonalize
# the Hamiltonian to get eigenenergies and wavefunctions.
rs = dvr.DVRGrid(rmin, rmax, nGrid)
Vs = jolanta.Jolanta_3D(rs)
Ts = dvr.KineticEnergy(1, rmin, rmax, nGrid)
[energy, wf] = dvr.DVRDiag2(nGrid, Ts, Vs)
# Print eigenvalues up to `thresh` eV and count how many to plot below.
n_ene=0
for i in range(nGrid):
    print("%3d %12.8f au = %12.5f eV" % (i+1, energy[i], energy[i]*au2eV))
    n_ene += 1
    if energy[i]*au2eV > thresh:
        break
# "DVR normalization", sum(wf[:,0]**2)
# this is correct for plotting
# Plot the potential (black) with each state's density drawn at a vertical
# offset equal to its energy, alternating the two colors in `c`.
c=["orange", "blue"]
#h=float(xmax) / (nGrid+1.0)
scale=3*au2eV   # visual amplification of |psi|^2 for readability
plt.cla()
plt.plot(rs,Vs*au2eV, '-', color="black")
for i in range(n_ene):
    plt.plot(rs, scale*wf[:,i]**2+energy[i]*au2eV, '-', color=c[i%len(c)])
plt.ylim(-5, 1.5*thresh)
plt.xlabel('$r$ [Bohr]')
plt.ylabel('$E$ [eV]')
plt.show()
# +
#
# RAC data by change of the parameter b in the potential
#
# Show how increasing the parameter b deepens the Jolanta potential:
# reference curve at b=1.2 (black) plus three deeper variants (blue).
plt.cla()
plt.plot(rs, jolanta.Jolanta_3D(rs, b=1.2)*au2eV, '-', color="black")
for b in [1.3, 1.4, 1.5]:
    plt.plot(rs, jolanta.Jolanta_3D(rs, b=b)*au2eV, color="blue")
plt.xlim(0,15)
plt.ylim(-20, 10)
plt.show()
# +
#
# RAC soft-box loop; scaling parameter lambda
#
# Scan the scaling parameter lambda, re-diagonalizing the Hamiltonian at each
# value and collecting the lowest nEs_keep eigenenergies.
ld_min=0
ld_max=1.0
nEs_keep=4 # how many energies are kept
n_ld=101 # ~ (max - min)/stepsize + 1 with stepsize = 0.1 eV
lds=np.linspace(ld_min, ld_max, num=n_ld, endpoint=True)
run_data = np.zeros((n_ld, nEs_keep)) # array used to collect all eta-run data
for l in range(n_ld):
    # The potential depth parameter is b = 1.2 + lambda.
    Vs = jolanta.Jolanta_3D(rs, b=1.2+lds[l])
    [energy, wf] = dvr.DVRDiag2(nGrid, Ts, Vs)
    run_data[l,:] = energy[0:nEs_keep]
    print(l+1, end=" ")   # simple progress indicator, 10 per line
    if (l+1)%10==0:
        print()
run_data *= au2eV   # convert collected energies from Hartree to eV
# -
#
# 3D: all states contribute
#
# Plot every retained eigenenergy as a function of the scaling parameter.
plt.cla()
for i in range(0, nEs_keep):
    plt.plot(lds,run_data[:,i], '-', color='blue')
plt.show()
# +
#
#
dlim=" "
#dlim="," # use for creating .csv files
#
# Assemble lambda values (column 0) and the retained energies (columns 1..n)
# into one table and write it to a delimited text file.
data = np.zeros((n_ld, nEs_keep+1))
data[:,0]=lds
data[:,1:]=run_data
fname="rac_b-stab.dat"
# Header row: "l E1 E2 ... En" using the delimiter chosen above.
header = 'l'
for i in range(0, nEs_keep):
    header = header + dlim + 'E' + str(i+1)
np.savetxt(fname, data, fmt='%15.12f', delimiter=dlim, header=header)
# -
| notebooks/.ipynb_checkpoints/J3D_make_RAC_data-b-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Manipulation
#
# Author: <NAME>
# +
import pandas as pd
import feature_engine as fe
import numpy as np
import random
import time
from itertools import compress
# dsutils can be installed from github using:
# python -m pip install git+https://github.com/Strabes/dsutils.git
import dsutils as ds
from dsutils.utils import histograms as hg
from dsutils.transformers import *
from dsutils.transformers._experimental import OneHotComboSparse
from dsutils.transformers._variable_selector import MakeTransformer, ColumnSelector
from numpy import matlib
import matplotlib.pyplot as plt
from feature_engine.imputation import (
CategoricalImputer,
AddMissingIndicator,
MeanMedianImputer)
from feature_engine.selection import DropConstantFeatures, DropFeatures
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import (
GridSearchCV,
PredefinedSplit,
train_test_split)
from sklearn.feature_selection import RFECV
from scipy import sparse
import shap
import copy
from column_types import ColumnTypes
from utils import datetime_preprocess, str_cleaner_df
pd.set_option('display.max_columns', None)
# %matplotlib inline
# -
# ## Read data into pandas
# Load the Lending Club loan dataset.
df = pd.read_csv("../../../data/lending_club_loan_two.csv")
# ### Always set aside test data as soon as possible
# Hold out 20% as a test set before any exploration or fitting, so the
# preprocessing pipeline never sees test data.
p_test = 0.2
df_train, df_test = train_test_split(
    df,
    test_size = p_test,
    random_state = 1)
df_train.head(5)
# ## Verify data types
# ### Fix all date/time columns
#
# It's usually best to convert dates and timestamps to datetime objects immediately
## Fix date columns:
# Convert date/timestamp columns to proper datetime dtype in both splits.
df_train = datetime_preprocess(df_train)
df_test = datetime_preprocess(df_test)
# ### Make sure all columns are of the correct data type
# import utils
# from importlib import reload
# reload(utils)
# Detect per-column data types (numeric / object / datetime) and summarize.
ct = ColumnTypes(df_train)
ct.report()
# ## Apply NULL/null/NA/NaN consistently
#
# Different datesources may result in different formats for null/missing values. It's typically a good idea to apply a consistent format. I'll do this by replacing 'NULL', 'null' and '' in character columns with `np.nan`.
# +
target = 'loan_status'
# All object (categorical) columns except the target itself.
object_cols_x_target = [c for c in ct.object_cols if c != target]
# Preprocessing pipeline, in order:
#   1. normalize string values (NULL/null/'' -> NaN, etc.)
#   2. add missing-value indicator columns for numerics
#   3. fill categorical NaNs with the literal 'MISSING'
#   4. bin rare categorical levels (<1% frequency)
#   5. cap each categorical at 15 levels
#   6. median-impute remaining numeric NaNs
#   7. drop raw datetime columns
#   8. drop (quasi-)constant features (>=97% one value)
#   9. one-hot encode categoricals and pack everything into a sparse matrix
p = Pipeline([
    ("str_cleaner",TransformWrapper(str_cleaner_df,variables = ct.object_cols)),
    ("num_nan_ind",AddMissingIndicator(
        variables = ct.numeric_cols, missing_only = True)),
    ("fill_cat_nas",CategoricalImputer(
        variables = object_cols_x_target, fill_value = 'MISSING')),
    ("pcb",PercentThresholdBinner(x=object_cols_x_target,percent_threshold = 0.01)),
    ("max_level_bin",MaxLevelBinner(x=object_cols_x_target,max_levels=15)),
    ("rmmean",MeanMedianImputer(variables=ct.numeric_cols)),
    ("drop_date",DropFeatures(features_to_drop=ct.datetime_cols)),
    ("drop_quasi_constant",DropConstantFeatures(
        tol=0.97,variables = ct.numeric_cols + object_cols_x_target)),
    ("one_hot_sparse", MakeTransformer(
        OneHotComboSparse,
        cols_to_enc = ColumnSelector(dtype_include = 'object', excl_pattern = target),
        num_cols = ColumnSelector(dtype_include = 'numeric')))])
# -
# Fit the pipeline on training data and produce the sparse feature matrix.
train_X = p.fit_transform(df_train)
train_X
# Sanity check: total output width = sum of one-hot category counts
# plus the number of passthrough numeric columns.
sum([len(c) for c in p.steps[-1][1].transformer.enc.categories_]) + \
    len(p.steps[-1][1].transformer.num_cols)
# Encode the target labels as integers.
le = LabelEncoder()
train_y = le.fit_transform(df_train[target])
le.classes_
train_X.shape
# Build a single predefined train/validation split: rows marked -1 are
# always in training; rows marked 0 form the validation fold.
train_idx = np.array(
    random.sample(range(train_X.shape[0]),
    int(0.8*train_X.shape[0])))
test_fold = np.zeros((train_X.shape[0],))
test_fold[train_idx] = -1
# +
# Hyper-parameter grid search for XGBoost over a single predefined
# train/validation split, scored by ROC AUC on the GPU.
ps_start_time = time.time()
param_test = {
    'max_depth':[2,4,6],
    'n_estimators' : [100,200,300],
    'learning_rate' : [0.05,0.1],
    'subsample' : [0.5,0.75],
    'colsample_bytree' : [0.5,0.75]
}
ps = PredefinedSplit(test_fold)
gsearch2 = GridSearchCV(
    estimator = XGBClassifier(
        min_child_weight=1,
        gamma=0,
        #subsample=0.8,
        #colsample_bytree=0.8,
        objective= 'binary:logistic',
        nthread=4,
        scale_pos_weight=1,
        seed=1,
        gpu_id=0,
        tree_method = 'gpu_hist'),
    param_grid = param_test,
    scoring='roc_auc',
    n_jobs=4,
    cv=ps,
    #cv=5,
    verbose=1
)
gsearch2.fit(train_X,train_y)
ps_end_time = time.time()
# -
# Show the top-5 hyper-parameter combinations by validation rank.
pd.DataFrame(gsearch2.cv_results_).sort_values('rank_test_score').head(5)
# FIX: the elapsed time is divided by 60, so the value is in minutes, not
# seconds; the original format argument was named "sec", which was misleading.
"Run time: {mins:.2f} minutes".format(mins=(ps_end_time - ps_start_time)/60)
# ## Feature Selection
# +
# Start from the best grid-search parameters and add the fixed XGBoost
# settings that were not part of the search space.
rfe_params = copy.deepcopy(gsearch2.best_params_)
add_params = {
    "min_child_weight":1,
    "gamma":0,
    "objective":'binary:logistic',
    "nthread":4,
    "scale_pos_weight":1,
    "seed":1}
rfe_params = {**rfe_params,**add_params}
# +
# Recursive feature elimination with cross-validation: remove 5 features per
# step, scoring by ROC AUC on the same predefined split.
xgbc = XGBClassifier(**rfe_params,
                     gpu_id=0,
                     tree_method = 'gpu_hist')
selector = RFECV(
    estimator = xgbc,
    step = 5,
    cv = ps,
    scoring = 'roc_auc',
    min_features_to_select = 1,
    verbose = 1)
selector.fit(train_X,train_y)
# +
print("Optimal number of features : %d" % selector.n_features_)
# Reconstruct the feature-count grid RFECV walked: from all features down in
# steps of 5, plus the final single feature.
# NOTE(review): verify len(n_features) == len(selector.grid_scores_) — the
# set/sort reconstruction can be off by one when n_features_in_ % 5 == 1, and
# grid_scores_ is deprecated in newer sklearn in favor of cv_results_.
n_features = list(range(selector.n_features_in_,1,-5)) + [1]
n_features = list(set(n_features))
n_features.sort(reverse=False)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(n_features,
         selector.grid_scores_)
plt.show()
# -
# Names of the features RFECV kept.
# NOTE(review): `all_preds` must be defined in an earlier cell (full feature-name list).
final_preds = list(compress(all_preds,selector.get_support().tolist()))
xgbc_gpu = XGBClassifier(**rfe_params,gpu_id=0,tree_method = 'gpu_hist')
# Refit on the RFECV-selected columns only.
xgbc_gpu.fit(train_X.toarray()[:,selector.get_support()],train_y);
# FIX: the model was fit on the selected-feature subset, so prediction input
# must be sliced with the same support mask; the original passed the full,
# unselected feature row (a shape mismatch).
xgbc_gpu.predict_proba(train_X.toarray()[0:1, selector.get_support()])
feature_importances = pd.DataFrame({
    'Feature' : final_preds,
    'Importance' : xgbc_gpu.feature_importances_})
feature_importances.sort_values('Importance',ascending=False)
import lime
import lime.lime_tabular
# LIME explainer built over the selected-feature training matrix.
explainer = lime.lime_tabular.LimeTabularExplainer(
    train_X.toarray()[:,selector.get_support()],
    mode='classification',
    training_labels=train_y,
    feature_names=final_preds)
# Explain the first five training rows.
exp = []
for i in range(5):
    exp.append(
        explainer.explain_instance(
            # FIX: slice each row with the same feature mask the model was
            # trained on; the original passed the full, unselected row,
            # which does not match the model's expected input width.
            train_X.toarray()[i, selector.get_support()],
            xgbc_gpu.predict_proba,
            num_features=5))
for i in exp:
    i.as_pyplot_figure();
start_tm = time.time()
import shap
# FIX: compute SHAP values on the same selected-feature matrix the model was
# trained on; the original passed the full train_X (shape mismatch with the
# refit model) and re-materialized the dense array three times.
X_selected = train_X.toarray()[:, selector.get_support()]
shap_values = shap.TreeExplainer(xgbc_gpu).shap_values(X_selected)
end_tm = time.time()
print("Shapley run time: {} seconds".format(str(round(end_tm-start_tm))))
# Mean-|SHAP| bar chart of global feature importance.
shap.summary_plot(
    shap_values,
    X_selected,
    feature_names=final_preds,
    plot_type="bar")
shap.initjs()
# Beeswarm plot: per-row SHAP values by feature.
shap.summary_plot(
    shap_values,
    X_selected,
    feature_names=final_preds)
| xgboost/XGBoost_with_LendingClub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from keras.datasets import mnist
from keras.layers import *
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
from sklearn import preprocessing
# ## Data Preprocessing
# + [markdown] heading_collapsed=true
# ### Load Data
# + hidden=true
# Load the OHLC price history and the precomputed per-bar trading signal.
df = pd.read_pickle("database.pkl")
data = df["Close"].values
signal = np.load("trading_signal.npy")
# + hidden=true
signal[:30]
# + [markdown] heading_collapsed=true
# ### One-hot Encoding
# + hidden=true
# One-hot encode the signal into 13 classes.
y_train = to_categorical(signal, 13)
# + [markdown] heading_collapsed=true
# ### Scaling
# + hidden=true
# Standardize the close prices to zero mean / unit variance.
standardize = preprocessing.scale(data)
# Then rescale into [-1, 1] as a column vector.
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
x_scaled = scaler.fit_transform(standardize.reshape(standardize.shape[0], 1))
# -
# ### Timestep Splitting
# Slice the scaled series (and labels) into overlapping sliding windows of
# `time_step` consecutive bars.
time_step = 30
# FIX/consistency: use `time_step` instead of the hard-coded 30 so the window
# length can be changed in one place (value is unchanged: time_step == 30).
d = np.zeros((x_scaled.shape[0] - time_step + 1, time_step, 1))
y_step = np.zeros((y_train.shape[0] - time_step + 1, time_step, 13))
for i in range(d.shape[0]):
    d[i] = x_scaled[i:i + time_step]
    if i < y_step.shape[0]:
        y_step[i] = y_train[i:i + time_step]
# ### Train/Test Splitting
# Split train/test: the first signal.shape[0] windows have labels; the rest
# are held out as the unlabeled test span.
x_train = d[:signal.shape[0]]
x_test = d[signal.shape[0]:]
# ### Return
# Calculate per-bar return values (close minus open).
return_x = df["Close"].values - df["Open"].values
# Standardize
# return_x = preprocessing.scale(return_x)
return_x = return_x.astype("float32") - np.mean(return_x)
# Scaling into [-1, 1]
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
# FIX: the original scaled `standardize` (the close-price series) here,
# silently discarding the mean-centered returns computed above; scale the
# returns themselves.
return_x = scaler.fit_transform(return_x.reshape(return_x.shape[0], 1))
# Timestep Splitting: overlapping windows of `time_step` returns.
tmp = np.zeros((return_x.shape[0] - time_step + 1, 30, 1))
for i in range(d.shape[0]):
    tmp[i] = return_x[i:i + time_step]
# Train/Test Splitting, mirroring the price-window split above.
return_x_train = tmp[:signal.shape[0]]
return_x_test = tmp[signal.shape[0]:]
# ## DNN
# ### Build Model
# +
# Fully-connected classifier head over a single scalar input.
# NOTE(review): input_shape=(1,) means one feature per sample; the commented
# Flatten suggests a windowed (30,1) input was also tried — confirm intent.
model0 = Sequential()
# model0.add(Flatten(input_shape=(30,1), name="input"))
model0.add(Dense(128, activation="tanh", input_shape=(1,), name="fc1"))
model0.add(Dense(64, activation="tanh", name="fc2"))
model0.add(Dense(32, activation="tanh", name="fc3"))
model0.add(Dense(16, activation="tanh", name="fc4"))
model0.add(Dense(13, activation="softmax", name="output"))
model0.summary()
# -
# ### Train Model
model0.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy", "mse"])
# NOTE(review): this feeds the raw, unscaled close prices (`data`), not the
# `x_scaled` values prepared above — confirm whether scaling was intended here.
history0 = model0.fit(data[:270000], y_train, batch_size=128, epochs=500, validation_split=0.1, callbacks=[EarlyStopping(patience=10)])
# ## LSTM
# Hyperparameters
latent_dim = 16   # LSTM hidden-state size
batch_size = 300  # fixed batch size (baked into batch_input_shape below)
# ### Build Model
# +
# Stateless LSTM over 30-step return windows, flattened into a dense
# classifier that outputs 13 softmax classes.
model1 = Sequential()
model1.add(LSTM(latent_dim, return_sequences=True, batch_input_shape=(batch_size, time_step, 1), name="lstm1"))
model1.add(Flatten(name="flatten1"))
model1.add(Dense(64, activation="tanh", name="fc1"))
model1.add(Dense(32, activation="tanh", name="fc2"))
model1.add(Dense(16, activation="tanh", name="fc3"))
model1.add(Dense(13, activation="softmax", name="output"))
model1.summary()
# -
# ### Train Model
model1.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# shuffle=False keeps the time ordering; batch_size must equal the
# batch_input_shape batch dimension declared above.
history1 = model1.fit(return_x_train, y_train, batch_size=300, epochs=300, validation_split=0.1, shuffle=False, callbacks=[EarlyStopping(patience=10)])
# + code_folding=[]
# Manual training loop with random batches; the last 1000 windows are held
# out as a validation set. Metrics are collected per epoch in `h`.
h = {}
h["loss"] = []
h["acc"] = []
h["val_loss"] = []
h["val_acc"] = []
for e in range(100):
    # FIX: the original wrote `return_x_train.shape[0]-1000 // batch_size`,
    # which parses as `shape[0] - (1000 // batch_size)` because `//` binds
    # tighter than `-`; the intended batch count is (rows - 1000) / batch_size.
    for b in range((return_x_train.shape[0] - 1000) // batch_size):
        # Randomly select a batch from the training portion (excluding the
        # last 1000 rows reserved for validation).
        idx = np.random.randint(0, return_x_train.shape[0]-1000, batch_size)
        loss, acc = model1.train_on_batch(return_x_train[idx], y_train[idx])
    loss, acc = model1.evaluate(return_x_train, y_train, batch_size=batch_size)
    val_loss, val_acc = model1.evaluate(return_x_train[return_x_train.shape[0]-1000:], y_train[return_x_train.shape[0]-1000:], batch_size=batch_size)
    print("Epoch %d/100\tloss: %.4f - acc: %.4f - val_loss: %.4f - val_acc: %.4f" % (e+1, loss, acc, val_loss, val_acc))
    h["loss"].append(loss)
    h["acc"].append(acc)
    h["val_loss"].append(val_loss)
    h["val_acc"].append(val_acc)
# -
# ### Predict
# Predict class probabilities on the first 3000 test windows, then take the
# argmax to get hard class labels.
preds1 = model1.predict(x_test[:3000], batch_size=batch_size)
preds1 = np.argmax(preds1, axis=1)
# ## 偉嘉Ver.
# ### Data Preprocessing
# +
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 14:31:02 2018
@author: chia
"""
from keras.utils import np_utils
from keras import backend as K
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from itertools import product
import warnings
warnings.filterwarnings("ignore")
#train_data, test_data = data[:270000], data[270000:]
target = np.array(pd.get_dummies(signal.flatten()))
y_target = signal+6
y_target = y_target[30:]
Kseconds = 30
epochs = 300
batch = 2048
def normalization_for_class(x):
    """Standardize a 1-D array to zero mean / unit variance.

    Parameters: x — 1-D numpy array of values.
    Returns: (scaled, scaler) where `scaled` is the standardized column
    vector of shape (len(x), 1) and `scaler` is the fitted StandardScaler
    (kept so the transform can be inverted later).

    The unused MinMaxScaler local (and the commented-out min-max variant)
    from the original was removed.
    """
    ss = StandardScaler()
    x = ss.fit_transform(x.reshape(x.shape[0], 1))
    return x, ss
def weighted_categorical_crossentropy(weights):
    """Build a class-weighted categorical cross-entropy loss.

    Args:
        weights: numpy array of shape (C,) — one multiplier per class,
            e.g. np.array([0.5, 2, 10]) halves class 0, doubles class 1
            and scales class 2 by 10.

    Returns:
        A Keras-compatible loss callable, usable as
        model.compile(loss=weighted_categorical_crossentropy(w), optimizer='adam').
    """
    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # Renormalize predictions so each sample's class probabilities sum
        # to one, then clip away exact 0/1 to keep log() finite.
        probs = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        probs = K.clip(probs, K.epsilon(), 1 - K.epsilon())
        # Weighted log-likelihood, summed over the class axis.
        weighted_ll = y_true * K.log(probs) * weights
        return -K.sum(weighted_ll, -1)

    return loss
def cal_return_old(data):
    """Return the one-step returns (price[t+1] - price[t]) of a price series.

    Parameters: data — sequence of prices (list or 1-D array), length n.
    Returns: numpy array of length n-1 with consecutive differences.

    The original padded the series with zeros at both ends, subtracted the
    shifted copies and trimmed the first/last element — which is exactly
    np.diff, so the hand-rolled version is replaced by the builtin.
    """
    return np.diff(np.asarray(data))
# Convert the raw price series to 1-step returns, then standardize them.
return_ = cal_return_old(data)
#data, ss = normalization_for_class(data)
#data = data.flatten()
return_, ss = normalization_for_class(return_)
return_ = return_.flatten()
"""#price"""
# Sliding windows of Kseconds consecutive prices (one row per start second).
x_train_price = np.zeros((data.shape[0] - Kseconds + 1, Kseconds))
for i in range(x_train_price.shape[0]):
    x_train_price[i] = data[i: i + Kseconds]
x_train_price, x_test_price = x_train_price[:270000], x_train_price[270000:]
# Center each training window by subtracting its own mean.
a = np.mean(x_train_price, axis=1)
a = np.repeat(a, Kseconds, axis=0)
a = a.reshape(int(a.shape[0]/Kseconds), Kseconds)
data_sampling = x_train_price - a
# Same per-window centering for the test split.
b = np.mean(x_test_price, axis=1)
b = np.repeat(b, Kseconds, axis=0)
b = b.reshape(int(b.shape[0]/Kseconds), Kseconds)
data_sampling_test = x_test_price - b
"""#return"""
# Sliding windows over the standardized returns.
x_train = np.zeros((return_.shape[0] - Kseconds + 1, Kseconds))
for i in range(x_train.shape[0]):
    x_train[i] = return_[i: i + Kseconds]
x_train, x_test = x_train[:270000-Kseconds], x_train[270000-Kseconds:]
y_train = target
nb_class = 13
# Inverse-frequency class weights: weight_c = N / (count_c * n_classes).
unique, counts = np.unique(signal, return_counts=True)
sum_dict = dict(zip(unique+6, counts))
c = np.zeros(13)
for i in range(13):
    c[i] = 270000 / (sum_dict[i] * nb_class)
weights = dict(zip(unique+6, c))
#from sklearn.utils import class_weight
#sample_weights = class_weight.compute_sample_weight('balanced', signal[29:])
# -
# ### Build Model
# +
def cnn(window_size, nb_input_series, output_dim=1):
    """Build the 1-D CNN classifier: Conv1D -> PReLU -> three L2-regularized
    dense layers with PReLU activations -> softmax output.

    Args:
        window_size: number of timesteps per input window.
        nb_input_series: number of input features per timestep.
        output_dim: number of output classes (softmax units).

    Returns: the (uncompiled) Sequential model; also prints its summary.
    """
    # NOTE: layer name "faltten" (sic) is kept verbatim for compatibility
    # with any previously saved weights keyed by layer name.
    stack = [
        Conv1D(filters=36, kernel_size=6,
               input_shape=(window_size, nb_input_series), name="conv1d"),
        PReLU(name="prelu_1"),
        Flatten(name="faltten"),
        Dense(64, kernel_regularizer="l2", name="fc1"),
        PReLU(name="prelu_2"),
        Dense(32, kernel_regularizer="l2", name="fc2"),
        PReLU(name="prelu_3"),
        Dense(16, kernel_regularizer="l2", name="fc3"),
        PReLU(name="prelu_4"),
        Dense(output_dim, activation="softmax", name="output"),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.summary()
    return model
def cnn_train(model, x, y, epochs=100):
    """Fit `model` on windows `x` (reshaped to 3-D) against labels `y`.

    Uses the module-level `batch` size, a 10% validation split and early
    stopping with patience 20. Returns (model, fit history).
    """
    samples, steps = x.shape[0], x.shape[1]
    x = x.reshape(samples, steps, 1)  # (samples, timesteps, 1 feature)
    history = model.fit(x=x, y=y, validation_split=0.1,
                        epochs=epochs, batch_size=batch, callbacks=[EarlyStopping(patience=20)])
    return model, history
# Own solution (correct, can overfit; centers each price window by
# subtracting the per-window mean) — kept commented for reference:
#model, history = cnn_train(cnn(data_sampling[29:].shape[1], 1, output_dim = 13, loss = "categorical_crossentropy"), data_sampling[29:], y_train[29:], epochs = 100)
#print("Predicting cnn...")
#predict_test = model.predict(data_sampling_test.reshape(data_sampling_test.shape[0],data_sampling_test.shape[1],1))
#predict_train = model.predict(data_sampling.reshape(data_sampling.shape[0],x_train.shape[1],1))
#
#loss_test = model.evaluate(data_sampling_test.reshape(data_sampling_test.shape[0],data_sampling_test.shape[1],1), predict_test)
#
#test = np.argmax(predict_test, axis = 1) - 6
#train = np.argmax(predict_train, axis = 1) - 6
# Senior colleague's solution: integer labels + sparse categorical loss.
model = cnn(x_train.shape[1], 1, output_dim=13)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy", "mae"])
# -
# ### Train
# +
# Class-balanced resampling: per round, draw w[c] random window indices from
# each of the 13 classes (10 rounds), then fit on the re-balanced index set.
iidx = np.arange(270000-Kseconds).reshape(270000-Kseconds, 1)
w = np.array([100]*13)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
for ii in range(1):
    mat_idx = np.array([])
    for t in range(10):
        for c in range(13):
            leng = y_target[y_target == c].shape[0]
            temp = iidx[y_target == c]
            idx = np.random.randint(0, high=leng, size=w[c])
            mat_idx = np.append(mat_idx, temp[idx]).astype('int32')
    # NOTE(review): `y_target[mat_idx, :]` requires y_target to be 2-D
    # (e.g. shape (n, 1)); verify against how `signal` is defined upstream.
    history = model.fit(x_train[mat_idx], y_target[mat_idx, :],
                        epochs=500, batch_size=256, validation_split=0.1, callbacks=[EarlyStopping(patience=20)])
# -
# ### Predict
predict_test = model.predict(x_test.reshape(
    x_test.shape[0], x_test.shape[1], 1))
predict_train = model.predict(x_train.reshape(
    x_train.shape[0], x_train.shape[1], 1))
# Predicted probabilities -> class labels, shifted back to the [-6, 6] range.
pred_test = np.argmax(predict_test, axis=1) - 6
pred_train = np.argmax(predict_train, axis=1) - 6
np.save("pred7231", pred_test)
| 0709_group_contest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TGalliz/FinancePortfolio/blob/main/MLforInvest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="mPK6VFbFuGeM"
#Description: This Program uses an artificial recurrent Neural Netoword called LSTM. to predict
# the closing stock price of a corporation (DSG.TO) using the past 60 days stock price.
#Import Libraries
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# + colab={"base_uri": "https://localhost:8080/", "height": 450} id="KAF_e85MvBDX" outputId="e874ab64-882c-4ad1-87e8-4128c34a31bd"
#Get the stock quote
df = web.DataReader('DSG.TO', data_source='yahoo',start='2012-01-01', end='2020-12-31')
#Show the data
df
# + colab={"base_uri": "https://localhost:8080/"} id="3ZcfH-bVvbP5" outputId="d924da3c-63ab-44cc-e4b9-bbe914c78166"
#Get the number of rows in the date set
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 442} id="e8kkUohovhfp" outputId="c0e1f886-6b1c-41c3-f2cf-a4a989b777fc"
#Visualize the closing price
plt.figure(figsize=(16,8))
plt.title('Close Price History')
plt.plot(df['Close'])
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price CAD $',fontsize =18)
# + colab={"base_uri": "https://localhost:8080/"} id="RhllQyRQvvWM" outputId="a1f0a8a4-062c-47d4-dedb-1f8692fc5eb9"
#Create new dataframe with only 'Close column'
data = df.filter(['Close'])
#Convert dataframe to a numpy array
dataset = data.values
#Get the number of rows to train the model on
training_data_len = math.ceil(len(dataset)*0.8)
training_data_len
# + colab={"base_uri": "https://localhost:8080/"} id="iIiYb2wcwxPZ" outputId="ae6849ad-7718-475b-91f9-db868a6b111b"
#Scale the data
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
# + colab={"base_uri": "https://localhost:8080/"} id="e9eojLyjxG22" outputId="4d807aad-c7b8-4d32-8a6e-023babede525"
#Create the training dataset
#Create the scaled training data set
train_data = scaled_data[0:training_data_len,:]
#Split the data into x_train and y_train datasets
#Windowing data on 60 days
#takes the past 60 days to previous the 60th day
x_train = []
y_train = []
for i in range(60, len(train_data)):
x_train.append(train_data[i-60:i,0])
y_train.append(train_data[i,0])
if i<=61:
print(x_train)
print(y_train)
print()
# + id="r-Q-N8OUw_Ix"
#Convert the x_train and y_train to numpy arrays
x_train, y_train = np.array(x_train),np.array(y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="rmrsJuebyqrh" outputId="5679cdc8-21c3-4259-84fe-19488f8e0f53"
#Reshape the data to give the data a 3D appearance
#a LSTM expects the input number of samples, number of timesteps number of features
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
x_train.shape
# + id="49bB4oxJzLPT"
#Build the LSTM model
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape = (x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences= False))
model.add(Dense(25))
model.add(Dense(1))
# + id="J_dKJG17zoHQ"
#Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# + colab={"base_uri": "https://localhost:8080/"} id="Kabo32cwz8Bb" outputId="cc96e2b4-f3dc-42ca-b4f1-800cae2103cc"
#Train the model
model.fit(x_train,y_train, batch_size=1, epochs=1)
# + id="LG6wF6PZ0GmQ"
#Create the testing dataset
#Create a new array containing scaled values from index 1808 to 2259
test_data = scaled_data[training_data_len-60: ,:]
#Create teh data set x_test and y_test
x_test = []
y_test = dataset[training_data_len: ,:]
for i in range(60, len(test_data)):
x_test.append(test_data[i-60:i, 0])
# + id="8EqvvyyY0hFR"
#convert the data to a numpy array
x_test = np.array(x_test)
# + id="G0Axj-R41FVQ"
#Reshape data to 3D
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# + id="nkH5FuA01Xdg"
#Get the models predicted price values
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
# + colab={"base_uri": "https://localhost:8080/"} id="J6A2CyD012Go" outputId="6c8fa1ea-f9b9-412c-9fa6-50b5482e197e"
#Get the root mean squared error (RMSE)
rmse=np.sqrt(np.mean(((predictions- y_test)**2)))
rmse
# + colab={"base_uri": "https://localhost:8080/", "height": 547} id="25hePjIG2RBg" outputId="6251bea7-4659-43b1-85bb-aa93a0adb837"
#Plot the data
train = data[:training_data_len]
valid = data[training_data_len:]
valid['Predictions'] = predictions
#Visualize the data
plt.figure(figsize=(16,8))
plt.title('Model')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price CAD $', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
plt.legend(['Train','Val','Predictions'],loc='lower right')
plt.show()
# + id="2a4KjS4pbC6Q"
#Get the quote
| MLforInvest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import json
import keras
from keras import layers
import random
import sys
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# Load the Amazon "Office Products" 5-core review dump (one JSON object per line).
reviews = pd.read_json('./data/Office_Products_5.json',lines=True)
reviews.columns
reviews.head()
# Parse the review date string into a proper datetime column.
reviews['reviewTimeAsDate'] = pd.to_datetime(reviews['reviewTime'])
#reviews['reviewTime'] = reviews['reviewTime'].astype('datetime64[ns]', format='%y')
reviews.head()
# Basic count breakdowns (bare expressions render as output in the notebook).
reviews.groupby('overall').count()
reviews.groupby('verified').count()
reviews.groupby(['overall','verified']).count()
reviews.groupby('asin').count().reset_index().sort_values('overall',ascending=False)
reviews.groupby('reviewerID').count().reset_index().sort_values('overall',ascending=False)
# Review-text length statistics.
reviews['reviewText'].str.len().mean()
reviews['reviewText'].str.len().max()
reviews['reviewText'].str.len().min()
reviews['reviewText'].str.len()
# NOTE(review): review_lengths is a slice of `reviews`; the assignment below
# may emit SettingWithCopyWarning.
review_lengths = reviews[['overall','reviewText']]
review_lengths['review_length'] = review_lengths['reviewText'].str.len()
review_lengths
# Mean review length per star rating.
review_len_by_rating = review_lengths.groupby('overall')['review_length'].mean().reset_index()
review_len_by_rating
# Bar chart of mean review length per star rating.
# Fixes: "fix" -> "fig" (typo for the figure handle) and the misspelled
# "Reivew" in the user-visible plot title.
fig, ax = plt.subplots(figsize=(13,8))
sns.set_theme(style="whitegrid")
sns.despine(bottom=True, left=True)
ax = sns.barplot(x="overall", y="review_length", data=review_len_by_rating)
ax.set_title('Average Review Length by Rating')
ax.set_ylabel('Review Length')
ax.set_xlabel('Rating')
# Review counts broken down by rating and verified-purchase flag.
count_by_overall_verified = reviews.groupby(['overall','verified']).count()[['reviewTime']].reset_index().rename(columns={'reviewTime':'count'})
count_by_overall_verified
fix, ax = plt.subplots(figsize=(15,9))
#sns.set_theme(style="whitegrid")
sns.despine(bottom=True, left=True)
ax = sns.barplot(x="overall", y="count", hue="verified", data=count_by_overall_verified)
ax.set_title('Reviews by Rating')
ax.set_xlabel('Rating')
ax.set_ylabel('Number of Reviews')
# Bucket products by how often they are reviewed.
num_reviews_by_products = reviews.groupby('asin').count().reset_index()[['asin','overall']].rename(columns={'overall':'count'})
num_reviews_by_products['review_level'] = pd.cut(num_reviews_by_products['count'],
                                                 bins=[0,10,100,500,2500],
                                                 labels=['rare','sometimes','often','frequent'])
num_reviews_by_products
num_reviews_by_products.groupby('review_level')[['count']].count().reset_index()
# +
# Yearly review volume (year re-parsed into a datetime for the line plot).
reviews_by_year = reviews.groupby(reviews['reviewTimeAsDate'].dt.year)[['overall']].count().reset_index().rename(columns={'overall':'count',
                                                                                                                          'reviewTimeAsDate':'year'})
reviews_by_year['year'] = pd.to_datetime(reviews_by_year['year'], format='%Y')
# -
reviews_by_year
fix, ax = plt.subplots(figsize=(15,9))
sns.set_theme(style="whitegrid")
sns.despine(bottom=True, left=True)
ax = sns.lineplot(data=reviews_by_year, x="year", y="count")
ax.set_title('Reviews By Year')
ax.set_xlabel('Year')
ax.set_ylabel('Number of Reviews')
max(reviews['reviewTimeAsDate'])
# df.groupby([df['birthdate'].dt.year.rename('year'), df['birthdate'].dt.month.rename('month')]).agg({'count'})
# df['birthdate'].groupby(df.birthdate.dt.to_period("M")).agg('count')
# Monthly review volume (year + month recombined into a date column).
reviews_by_mon_year = reviews.groupby([reviews['reviewTimeAsDate'].dt.year.rename('year'),
                                       reviews['reviewTimeAsDate'].dt.month.rename('month')]).count().reset_index()[['year','month','overall']]
reviews_by_mon_year['date'] = pd.to_datetime(reviews_by_mon_year[['year', 'month']].assign(DAY=1))
reviews_by_mon_year
reviews_by_mon_year[(reviews_by_mon_year['year']>2014) & (reviews_by_mon_year['date'] < '2018-09-01')]
fix, ax = plt.subplots(figsize=(11,8))
sns.set_theme(style="whitegrid")
sns.despine(bottom=True, left=True)
ax = sns.lineplot(data=reviews_by_mon_year[(reviews_by_mon_year['year']>2013) & (reviews_by_mon_year['date'] < '2018-09-01')],
                  x="date", y="overall")
reviews_by_year.dtypes
reviews_by_year[(reviews_by_year['year']>'2013') & (reviews_by_year['year'] < '2018')]
reviews_by_mon_year[(reviews_by_mon_year['year']==2018)]
| generate-product-reviews/.ipynb_checkpoints/product-review-eda-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Trying out Bayesian inference with PyMC3 on covid data
#
# _Disclaimer: this is in no way intended to be relied on!_
# _this was done purely for me to learn something_
#
# It doesn't respect reactions of the countries, it doesn't respect the testing capabilities / numbers in the countries, it doesn't respect real biological models and past research in the field of virology and pandemics.
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
import seaborn as sns
import pandas as pd
import theano
# %matplotlib inline
import warnings
from scipy.stats import halfnorm
warnings.filterwarnings('ignore')
# ### Data based on a dump from a wiki page offering country specific infections.
#
# Data is a snapshot from Kaggle taken around mid April 2020 and wasn't updated since!
#
# To make the data more representative, days before 2000 infections were reached were removed, since there might have been just single hotspots that were under control. Also, only those time series were looked at that had in their current state more than 10,000 infections counted (the threshold used in the loading code below).
# Furthermore the data was restricted to series of at least 10 days.
# These restrictions allow to look at a smaller set.
# +
# Parse the CSV dump into per-country time series.
# From usage: line[1]/line[2] identify region/country, line[4] is the
# cumulative infection count, line[5] the death count — TODO confirm schema.
# A country's series is kept only if it has more than MIN_DATES days after
# crossing 2000 infections AND its last buffered value exceeds 10000.
infections = []
countries = {}
MIN_DATES = 10
with open('untitled1.txt', 'r') as csv:
    intermediate = []
    counter = 0
    for line in csv:
        line = line.strip().split(',')
        country = line[2]+'-'+line[1]
        infection = int(float(line[4]))
        deaths = int(float(line[5]))
        # print(line)
        # Skip days before the 2000-infection threshold was reached.
        if infection < 2000:
            continue
        if not country in countries:
            # New country: flush the previous country's buffered series if
            # it met the length/size criteria, then restart the buffer.
            countries[country] = 0
            counter = 0
            if len(intermediate) > MIN_DATES and intermediate[-1][2] > 10000:
                for i in intermediate:
                    infections.append(i)
            intermediate = []
        counter += 1
        intermediate.append([country, counter, infection, deaths])
    # Flush the final country's buffer.
    # NOTE(review): unlike the in-loop flush, this check omits the
    # 10000-infection threshold — confirm whether that is intentional.
    if len(intermediate) > MIN_DATES:
        for i in intermediate:
            infections.append(i)
full_df = None
full_df = pd.DataFrame(infections, columns=['country', 'day', 'infections', 'deaths'])
full_df = full_df.astype({'day': 'int32', 'infections': 'int32', 'deaths': 'int32'})
#filters = full_df.country.apply(lambda x: x in [
#    'China', 'Germany', 'Japan', 'South Korea', 'France', 'Netherlands'])
#full_df=full_df[filters]
# Map each country to a dense integer index for the hierarchical model.
countries = full_df.country.values
uniq_countries = full_df.country.unique()
n_countries = len(uniq_countries)
full_df['country_idx'] = [list(uniq_countries).index(x) for x in countries]
#print(full_df.country_idx)
#print(full_df)
print(list(enumerate(uniq_countries)))
# -
# ### here is the modeling part
#
# the base idea is to fit a sigmoid like function to model the number of total infections. This assumption alone is probably already enough reason to not trust any output of this model. So _please don't trust_ the model.
#
# Instead of using the regular sigmoid, I chose the _Gompertz Function_:
#
# \begin{equation}
# \large{
# f(x) = a \cdot e^{b \cdot e^{c \cdot x} }
# }
# \end{equation}
#
# The reason for using the Gompertz function is it's assymmetrie, allowing to adjust for the exponential increase ans slow down phases.
# with $b, c < 0$ the value of $a$ determines the upper limit and therefore in our investigation the upper limit of infections.
# $b$ and $c$ determine the speeed and acceleration.
#
# To have some benefit from all the past countries, I tried to model $b$ and $c$ hierarchical, having a "mean value" across all time series, and the individual time series deviates from this according to a small normal distribution. The idea is, to have estimates for how things will develop even when very little hints are in the data.
#
# +
from theano import shared
# Wrap the predictors in theano shared variables so they can be swapped out
# later for prediction without rebuilding the model.
predictors = full_df.day.values.copy()
predictors_shared = shared(predictors)
country_id = full_df.country_idx.values.copy()
country_idx = shared(country_id)
# NOTE(review): the three lines below duplicate the import/shared setup
# above (a notebook copy-paste leftover); harmless but redundant.
from theano import shared
predictors = full_df.day.values.copy()
predictors_shared = shared(predictors)
import scipy
# Hierarchical Gompertz model a*exp(b*exp(c*day)): per-country asymptote a,
# with growth parameters b and c partially pooled around b_base / c_base.
with pm.Model() as model:
    a = pm.Uniform('a', lower=1000, upper=2000000, shape=n_countries)
    b_base = pm.Normal('b_base', mu=-4.5, sigma=0.5)
    b = pm.Normal('b', mu=b_base, sigma=0.5, shape=n_countries)
    c_base = pm.Normal('c_base', mu=-0.075, sigma=0.03)
    c = pm.Normal('c', mu=c_base, sigma=0.03, shape=n_countries)
    y = (a[country_idx] * pm.math.exp(b[country_idx] * pm.math.exp(c[country_idx] * (predictors_shared))))
    obs = pm.Normal('obs', mu=y, sigma=15000, observed=full_df.infections.values)
    trace = pm.sample(40000, cores=2)
# -
# ### Now plotting the results of the fittings
#
# The fittings did not work out very well, we will see why when we look at the traces.
#
# We can see some pretty wide confidence intervals, so like the output suggested it didn't work out too well.
# Interestingly this is especially then the case, when the counts haven't turned into the slow down phase where the infections are under control. This also makes sense, because the model has to guess which kind of behavior it will see when the infections get under control, without having any hints on it.
# But here is the hierarchical model at least helping a bit, interpolating from overal behavior of the infections to the individual case.
#
# +
from pymc3 import forestplot
# Posterior summaries per parameter, plus full trace diagnostics.
plt.figure(figsize=(20,20))
forestplot(trace, var_names=['a'])
forestplot(trace, var_names=['b'])
forestplot(trace, var_names=['c'])
pm.traceplot(trace)
print(list(enumerate(uniq_countries)))
# -
# ### now predicting the future...
#
# the traceplots above show what we already assumed: there were some issues — especially the base values of c and b didn't fully converge to a single distribution. Normally you would do a reparametrization and probably increase tuning steps to fix this.
# But still, let us try to use the found model parameters to simulate how it's going to continue.
#
# +
#ppc = pm.sample_posterior_predictive(trace, samples=500, model=model)
# Swap the shared variables to a 100-day grid for every country, then draw
# posterior predictive samples on that grid.
x = np.tile(np.linspace(1, 100, 100).astype('int32'), n_countries)
print(len(x))
predictors_shared.set_value(x)
y = np.repeat(np.linspace(0,n_countries-1,n_countries).astype('int32'), 100)
print(len(y))
country_idx.set_value(y)
with model:
    post_pred = pm.sample_posterior_predictive(trace, samples=10000)
# -
# ### looking at fittings and predictions
# What we can actually see is that the model fitted the given points quite ok, but the predictions have quite a lot uncertainty. Especially in those cases, where there is little hint as to how much the region was able to slow down.
# So again don't rely on this model for anything.
# This was done purely as an educational exercise.
# +
# Plot posterior predictive mean ± std over the 100-day grid for each
# country, overlaid with the observed infection counts.
means = post_pred['obs'].mean(axis=0, keepdims=False).copy()
stds = post_pred['obs'].std(axis=0)
for i in range(n_countries):
    choice = y==i
    old_choice = full_df.country_idx==i
    plt.figure(figsize=(10,10))
    plt.errorbar(np.linspace(1,100,100),
                 means[choice],
                 stds[choice],
                 linestyle='None',
                 marker='.')
    plt.plot(np.linspace(1,len(full_df[old_choice]), len(full_df[old_choice])),
             full_df.infections[old_choice],
             marker='o')
    plt.title(uniq_countries[i])
    plt.show()
# -
| BayesianCovid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1jrlCqCEdkdN"
# <table align="center">
#
# </table>
#
#
# <table align="center">
# <thead>
# <tr>
# <td align="center"><a target="_blank" href="https://cae.au.dk/en/research/key-areas-in-research-and-development/design-and-construction/construction-automation-and-information-technologies/people/">
# <img src="https://mbg.au.dk/fileadmin/site_files/mb/Logoer/au/aulogo.jpg" style="padding-bottom:5px;" height="70px"/></a></td>
# <td align="center"><a target="_blank" href="https://colab.research.google.com/github/kakke14/TA_Content/blob/master/advanced_technologies_in_construction/lecture_3/RTLS_Lecture.ipynb">
# <img src="https://i.ibb.co/2P3SLwK/colab.png" style="padding-bottom:5px;"height="70px" /></a></td>
# <td align="center"><a target="_blank" href="https://github.com/kakke14/TA_Content">
# <img src="https://i.ibb.co/xfJbPmL/github.png" height="70px" style="padding-bottom:5px;" /></a></td>
#
# </tr>
# </thead>
# <!-- <tbody> -->
# <tr>
# <td>Visit AU research Group</td>
# <td>Run in Google Colab</td>
# <td>View Source on GitHub</td>
# </tr>
# </tbody>
# </table>
# + id="K4mqk9OkeZen"
# Copyright 2022 AU Digital Construction. All Rights Reserved.
#
# Use or modification of this code outside of the course should reference:
# <NAME>, <NAME>, and <NAME>
# Lecture Notes Digital Construction 2022
# + [markdown] id="b7b3p_nVe-fg"
# # Lecture 3 - Real Time Location Systems
# In this lecture you will get exposed to Python programming and location data. You will learn how to process the raw data output into tangible knowledge that can be used to analyze trajectories of construction assets.
# You will encounter several codeblocks with ***TO DO*** written in them. This is where you will have to follow the instructions and fill out the missing code before running the cell.
# + [markdown] id="VjY5qfhGfHF4"
# ## Install dependencies
# For all colab exercises, we will be needing several python libraries. Some of them will be repeating dependencies for all exercises, while other might only be used in certain examples. For this lecture, we will install the following
#
# These libraries are part of the Kernel running i Colab and are therefore redy to be imported with the following lines
#
# + id="uLgXvAkUjHQe"
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
from tqdm import tqdm
# + [markdown] id="PL5cDX0FSliO"
# But how about a library that is not preinstalled ?? Unfortunately Traja is not....
#
#
#
#
#
# + id="7Uo0uPdBVWYS"
# import traja
# + [markdown] id="xAF0bKOcV5eE"
# How do we get around missing libraries this ??
# Normally it would just mean that we had to install the library with the python package manager called pip, using the command 'pip install [package name]'
#
# If that is not possible, here is [Hint](https://colab.research.google.com/notebooks/snippets/importing_libraries.ipynb)
# + colab={"base_uri": "https://localhost:8080/"} id="yTk7kxTbUgGG" outputId="6d386be1-37d6-4d83-99b8-4467f773f2e1"
#### TODO ####
# Install Traja lib on the colab Kernel
#### Sollution ####
# # !pip install Traja
# + [markdown] id="W93oxtHvUhR3"
# Hopefully you managed to install Traja and will be able to run the "import traja" line below.
#
# Most of the packages in python are well documented and an example is this library, which documentation is available in the following link.
#
# [Traja Docs](https://traja.readthedocs.io/en/latest/index.html)
#
# We are not going to use Traja just yet but now we knwo how to install libraries if these are not pre-installed
# + id="q1BU84UWSqyr"
import traja
# + [markdown] id="dVXFywR9XFfA"
# ### Checkpoint 1
# Note the the following into a .txt file#
#
# 1. What command is used to install libraries in the python package manager?
# 2. What is needed in order to run the same command in colab?
#
#
# + [markdown] id="ea668Bi1Zog9"
# ## Install the remaining libraries
# + id="TgicpKafYJhw"
#### TODO ####
#install :
# lib1
# lib 2
# -
# ## Load the data
# For this part we are going to use pandas, which is a library for working with dataframes
# Pandas documentation: [Read function](https://pandas.pydata.org/docs/user_guide/io.html#io)
#
# The data that will be the subject for this lecture is RTLS data, more specifically UWB data recorded with the system that we used on the Demo day.
# The data is written as comma seperated data and the collomns correspond to:
# <Data Header>,<tag ID>,<X>,<Y>,<Z>,<battery>,<timestamp>,<unit>,<DQI>,<GDOP>,<Locate-Details>,<LF>
#
df = pd.read_csv("A.txt", names=["data_header","tag_id","x","y","z","battery","time","unit","dqi","gdop","locate_details"], converters = {'tag_ID': str})
df.head()
# That is a lot of information...
# For our analysis we only need id, position(x,y), and time , therefore we drop the rest for now. but it is good to know that more data exists.
#
df = df[['tag_id','x','y','time']]
# That was reading the data; it may be interesting to figure out how many tags are present in the data.
# Are there any smart ways to do so?
# It would be interesting to find the number of unique values in the "tag_id" column.
unique_tags = df.tag_id.unique()
number_of_unique_tags = len(unique_tags)
print(f"There exists {number_of_unique_tags} in the tag_ID column and their IDs are: \n {unique_tags}")
# Now, what is the total duration of the data collected?
# Could we use the max and min value of a column?
# Does the time format seem weird? [epoch time](https://www.epochconverter.com/)
# Convert the raw epoch-seconds column into pandas datetimes.
df.time = pd.to_datetime(df.time.values.astype(float),unit='s')
df
# That format seems more normal
# Now we just need to find the min and max value [min and max](https://www.kite.com/python/answers/how-to-find-the-max-value-of-a-pandas-dataframe-column-in-python)
# Total recording span of the whole dataset.
# Renamed the locals: the original bound `min`/`max`, shadowing the Python
# builtins of the same name.
start_time = df.time.min()
end_time = df.time.max()
duration = end_time - start_time
print(duration)
# Now we know how many tags that are in the data, and the duration
#
# what if we did this calculation for all tags, to see if they were active for the full duration
# +
# Per-tag activity span. Avoids shadowing the builtins `min`/`max` and
# queries each tag's rows only once instead of twice.
for tag in unique_tags:
    tag_times = df.loc[df.tag_id==tag].time
    first_seen = tag_times.min()
    last_seen = tag_times.max()
    duration = last_seen - first_seen
    print(f"the data duration for tag {tag} is {duration}")
# -
# ## Checkpoint 2
#
# What is the percentage duration of each tag relative to the tag that has the longest duration?
#
# Extend the above code to calculate this
# +
# Checkpoint 2: duration of each tag relative to the longest-running tag.
# Computes every tag's span once (the original scanned the frame twice per
# tag) and avoids shadowing the builtins `min`/`max`.
tag_durations = {}
for tag in unique_tags:
    tag_times = df.loc[df.tag_id==tag].time
    tag_durations[tag] = tag_times.max() - tag_times.min()
durations = list(tag_durations.values())  # keep the original `durations` binding
max_duration = np.max(durations)
print(max_duration)
for tag in unique_tags:
    duration = tag_durations[tag]
    pct_duration = round((duration/max_duration)*100,2)
    print(f"the data duration for tag {tag} is {duration} ~ {pct_duration}%")
# -
# ## Plotting the trajectory data
# In python there are several libraries for plotting data, and Matplitlib is one of those. Matplotlib offers a great amout of features (if you are used to matlab, matplotlib can do the same, but maybe in a different way)
#
# [Matplotlib tutorial](https://matplotlib.org/stable/tutorials/introductory/pyplot.html)
#
# The tutorial contains this example, which we will take inspiration from (but slightly change):
#
# ```
# import matplotlib.pyplot as plt
# names = ['group_a', 'group_b', 'group_c']
# values = [1, 10, 100]
#
# figure = plt.figure(figsize=(12, 4))
#
# ax1 = figure.add_subplot(131)
# ax1.bar(names, values)
# ax2 = figure.add_subplot(132)
# ax2.scatter(names, values)
# ax3 = figure.add_subplot(133)
# ax3.plot(names, values)
# figure.suptitle('Categorical Plotting')
# ```
#
# NB. also part of slides!
# +
# Try running the code to get an overview of the different plotting functions
# (bar, scatter and line) side by side in one figure.
names = ['group_a', 'group_b', 'group_c']
values = [1, 10, 100]
figure = plt.figure(figsize=(12, 4))
ax1 = figure.add_subplot(131)
ax1.bar(names, values)
ax2 = figure.add_subplot(132)
ax2.scatter(names, values)
ax3 = figure.add_subplot(133)
ax3.plot(names, values)
figure.suptitle('Categorical Plotting')
figure.show()
# -
# %matplotlib qt
figure = plt.figure(figsize=(10,10))
axs = figure.add_subplot(221)
current_tag = None
# NOTE: the `break` below means only the FIRST tag is plotted; remove it to
# draw every tag's trajectory.
for tag in unique_tags:
    current_tag = df.loc[df.tag_id==tag]
    # you can access the x and y values through the "dot operator", e.g., current_tag.x
    # what kind of plot makes sense when plotting trajectories?
    # Instead of using the default plt object we use the plot object called "axs", that is added to the figure called "figure"
    #### Solution ###
    axs.plot(current_tag.x, current_tag.y,'-', label=tag,linewidth=1)
    break
# now set the title of the plot to something meaningful, e.g., incoming data or unfiltered data
# Also please add a label for the x- and y-axis, e.g., X [m] and Y [m]
# and make sure that the legends are shown
#### Solution ###
axs.set_title("Original data")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
# ## Checkpoint 3
# Does you plot look similar to this ? (**not neccesarily the colors)
#
# <img src="Images/CP3.png" style="padding-bottom:5px;" height="300px"/>
#
# ## Moving on to the next step of handling data in this programming approach
# the data we just plotted seems to be rather messy and to contain quite some noise
#
# Any filters that we could apply ? how would a moving average filter effect the data?
# +
# Smooth the first tag's trajectory with a 10-sample moving average.
# (The first window-1 samples become NaN and are simply not drawn.)
axs = figure.add_subplot(222)
MM_current_tag = current_tag.copy()
MM_current_tag.x = current_tag.x.rolling(10).mean()
MM_current_tag.y = current_tag.y.rolling(10).mean()
axs.plot(MM_current_tag.x, MM_current_tag.y,'-', label=tag,linewidth=1)
axs.set_title("Moving Avg. 10")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# +
# Same smoothing with a 20-sample window.
axs = figure.add_subplot(223)
MM_current_tag = current_tag.copy()
MM_current_tag.x = current_tag.x.rolling(20).mean()
MM_current_tag.y = current_tag.y.rolling(20).mean()
axs.plot(MM_current_tag.x, MM_current_tag.y,'-', label=tag,linewidth=1)
axs.set_title("Moving Avg. 20")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# +
# Same smoothing with a 30-sample window.
axs = figure.add_subplot(224)
MM_current_tag = current_tag.copy()
MM_current_tag.x = current_tag.x.rolling(30).mean()
MM_current_tag.y = current_tag.y.rolling(30).mean()
axs.plot(MM_current_tag.x, MM_current_tag.y,'-', label=tag,linewidth=1)
axs.set_title("Moving Avg. 30")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# -
# Describe with words, how a simple moving average effect the data, and its validity
#
# ## Now lets try build our own filtering algoritm
# could we use some domain knowledge about where the tags were placed
#
# what about filtering based on speed ?
# what about filtering based on displacement ?
#
# We refer to this type of filtering as domain-knowledge-based filtering, as we can, e.g., derive maximum velocities if we know who/what carried the tag.
#
# An easy way to extract velocity and displacement information from a trajectory is to use [get_derivatives()](https://traja.readthedocs.io/en/latest/calculations.html#derivatives), which is part of the Traja package that we installed earlier.
#
# How do we "mask" data ?
# A mask is a series of indexes and boolean values.
# ```
# 0. True
# 1. False
# .. ....
# 30. False
# 31. etc....
# ```
# In order to create such a mask we "ask" a question imagine this dataframe
# ```
# dataframe =
#
# idx height gender
# 0. 180 male
# 1. 200 female
# 2. 160 male
# 3. 170 non-binary
# ```
# Then we could as for the entries, where gender is male, and height is more than 170cm
#
# ```
# mask = (dataframe.gender==male) & (dataframe.height>=170)
#
# ```
# resuling in:
# ```
# mask =
# 0. True
# 1. False
# 2. False
# 3. False
# ```
#
# Now we can use this mask to get the dataframe that fulfills our query:
#
# ```
# queried_data = datafram.loc[mask]
# queried_data =
#
# idx height gender
# 0. 180 male
# ```
# Another way to do this is to create the mask in the query
#
# ```
# queried_data = datafram.loc[(dataframe.gender==male) & (dataframe.height>=170)]
# queried_data =
#
# idx height gender
# 0. 180 male
# ```
# here is some further information on [Masks](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.mask.html) in pandas
#
# ## Domain knowledge
#
# okay now that we know how to mask and query data we are ready to get find out, what we want to query.
#
# We know:
# 1. only human workers carried the tags
# 2. humans do not walk faster than 7.2 km/h as they should not run on a construction site
#
#
#
#
#
# +
# create a copy of the incoming data
df_copy = df.copy()
# change the time back to seconds (datetime64[ns] -> float ns, then *1e-9)
df_copy.time = (df_copy.time.values.astype(float))*10**-9
# create a figure for the plots
figure = plt.figure(figsize=(10,10))
# add a plot to the figure
axs = figure.add_subplot(221)
tag_under_investigation = unique_tags[0] # get the id of the first available tag
# Query the data for the tag under investigation
df_tag = df_copy.loc[df_copy.tag_id == tag_under_investigation]
# plot the original/incoming/unfiltered data for this tag
# NOTE(review): label=tag reuses the leftover `tag` loop variable from an
# earlier cell, not tag_under_investigation — confirm they coincide.
axs.plot(df_tag.x, df_tag.y,'-', label=tag,linewidth=1)
# configure the title, axis labels, and activate legends
axs.set_title("Original Data")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
# use traja to calculate the derivatives of the dataframe
derivatives = df_tag.traja.get_derivatives()
# print the head of the dataframe, which will print the first 5 rows and column names
print(derivatives.head())
# create a mask that is true for speeds below 2 m/s and above -2 m/s
# NOTE(review): traja speeds are presumably non-negative, which would make
# the lower bound vacuous — verify against the traja docs.
mask1 = derivatives.speed<2
mask2 = derivatives.speed>-2
maskspeed = mask1 & mask2
# query that portion of data
df_tag_speed = df_tag.loc[maskspeed]
# add a plot to the figure from before
axs = figure.add_subplot(222)
# plot the queried data
axs.plot(df_tag_speed.x, df_tag_speed.y,'-', label=tag,linewidth=1)
# set title, labels and activate legends
axs.set_title("filter based on speed 2m/s")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# now do the same for displacement, where (-1 < displacement < 1)
# NOTE(review): maskdisplace is built from `derivatives` (indexed like
# df_tag) but applied to df_tag_speed, whose index is a strict subset;
# pandas boolean .loc may warn or raise on the unaligned labels — confirm.
mask1 = derivatives.displacement<1
mask2 = derivatives.displacement>-1
maskdisplace = mask1 & mask2
df_disp = df_tag_speed.loc[maskdisplace]
axs = figure.add_subplot(223)
axs.plot(df_disp.x, df_disp.y,'-', label=tag,linewidth=1)
axs.set_title("Displacement 1m")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# -
# ## Trying to add another off-the-shelf filtering algorithm
# The second filtering algorithm is a more sophisticated one. It is called [Savitzky–Golay](https://www.wikiwand.com/en/Savitzky%E2%80%93Golay_filter).
# It can be compared to fitting the data to a higher degree polynomial.
#
# [traja.smooth_sg(data, window)](https://traja.readthedocs.io/en/latest/calculations.html)
# add another plot in the last position (224) of the figure from before
axs = figure.add_subplot(224)
# apply the Savitzky-Golay filter from the traja library (window size 7)
sg_filtered = traja.smooth_sg(df_tag_speed,w=7)
# plot the data
# bug fix: label with the tag under investigation, not the stale variable `tag`
axs.plot(sg_filtered.x, sg_filtered.y,'-', label=tag_under_investigation,linewidth=1)
# set the title, axis-labels and show legends
axs.set_title("Savitzky-Golay filtering")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# ## Playing around with the filter
# Try to change the polynomial degree of the filter, and see the difference.
#
# What effect does the polynomial degree have?
# # Let's create some functions to handle this
# Here is some info on [creating functions](https://www.w3schools.com/python/python_functions.asp)
def filter_dataframe(dataframe_to_filter, sg_window=7):
    """Filter every tag's trajectory by speed, displacement and SG smoothing.

    dataframe_to_filter: DataFrame with columns tag_id, time, x, y
        (time in nanoseconds; converted to seconds internally).
    sg_window: odd window size passed to traja.smooth_sg; tags with fewer
        remaining samples than this are skipped entirely.
    returns: a new filtered DataFrame (the input is not modified).
    """
    data_copy_in = dataframe_to_filter.copy()
    # convert time from nanoseconds to seconds
    data_copy_in.time = (data_copy_in.time.values.astype(float))*10**-9
    filtered_parts = []
    for tag in data_copy_in.tag_id.unique():
        df_tag = data_copy_in.loc[data_copy_in.tag_id == tag]
        derivatives = df_tag.traja.get_derivatives()
        # speed filter: keep |speed| < 2 m/s
        maskspeed = (derivatives.speed < 2) & (derivatives.speed > -2)
        df_tag = df_tag.loc[maskspeed]
        # displacement filter: keep |displacement| < 1 m
        # NOTE(review): mask computed on unfiltered derivatives but applied
        # to the speed-filtered frame; relies on index alignment — confirm
        maskdisplace = (derivatives.displacement < 1) & (derivatives.displacement > -1)
        df_tag = df_tag.loc[maskdisplace]
        # Savitzky-Golay smoothing needs at least sg_window samples
        if len(df_tag) < sg_window:
            continue
        df_tag = traja.smooth_sg(df_tag, w=sg_window)
        filtered_parts.append(df_tag)
    # bug fix: DataFrame.append was removed in pandas 2.0; use pd.concat
    if not filtered_parts:
        return pd.DataFrame(columns=data_copy_in.columns)
    return pd.concat(filtered_parts)
# ## Are there others?
#
# Do you happen to know other location data filtering algorithms?
#
# What is the most used one for trajectory data?
#
# Are there any python implementations available ?
# # In-area analysis
# What if we wanted to investigate how much time a tag (person) spent in a specific area? E.g., if an area is off limits due to safety restrictions, progress analysis, or something else.
# Here we have at least two options:
# 1. Use the mask approach that we just used - this is fine as long as we want to check a square bounding box
# 2. Use a polygon, and check whether a point is inside or not - handy if we want to check more sophisticated areas
#
# We will start with the first approach for one tag only
# +
# exclude data from other tags than 0025CB03 from the SG-filtered data
mask_id = sg_filtered.tag_id == "0025CB03"
data_for_tag = sg_filtered.loc[mask_id]
# axis-aligned bounding box of the area of interest, in metres
area = {
    "x_min":11,
    "y_min":11,
    "x_max":13,
    "y_max":13
}
# boolean mask that is True for samples inside the bounding box
mask_x = ((sg_filtered.x >= area["x_min"]) & (sg_filtered.x<= area["x_max"]))
mask_y = ((sg_filtered.y >= area["y_min"]) & (sg_filtered.y <= area["y_max"]))
mask_inside_area = (mask_x & mask_y)
data_inside_area = sg_filtered.loc[mask_inside_area]
# group consecutive samples (gap < 1 s) into continuous visits
list_of_continues_visits=[]
current_visit=[]
for idx, row in enumerate(data_inside_area.iterrows()):
    time_for_row = row[1]["time"]
    if not current_visit: # first sample of a new visit
        current_visit.append(time_for_row)
    elif time_for_row-current_visit[-1]<1: # less than one second: same visit
        current_visit.append(time_for_row)
    else: # a gap of one second or more ends the current visit
        if len(current_visit)>1:
            list_of_continues_visits.append(current_visit)
        # bug fix: start the next visit with this sample instead of dropping it
        current_visit = [time_for_row]
# bug fix: flush the final visit, which the loop never closes
if len(current_visit)>1:
    list_of_continues_visits.append(current_visit)
for idx, visit in enumerate(list_of_continues_visits):
    # bug fix: duration is last minus FIRST timestamp (visit[0], not visit[1])
    duration = visit[-1]-visit[0]
    print(f"Visit #{idx} had duration of {round(duration,2)} s")
# -
# # How about a proximity analysis
# There can be several reasons to be interested in a person's proximity to objects, heavy machinery such as a crane hook or excavator, or even other persons (e.g., Covid-19 regulations)
#
# +
# filter the raw data and single out one tag to compare against all others
filtered_df = filter_dataframe(df)
tag_under_investigation = "0025CB03"
mask_id = filtered_df.tag_id == tag_under_investigation
data_for_tag = filtered_df.loc[mask_id]
remaining_data = filtered_df.loc[~mask_id]
# collect rows of OTHER tags that come within 1 m and 1 s of the tag under
# investigation; DataFrame.append was removed in pandas 2.0, so gather the
# rows in a list and build the frame once at the end
incident_rows = []
for row in tqdm(data_for_tag.iterrows()):
    time = row[1]["time"]
    x = row[1]["x"]
    y = row[1]["y"]
    for row_2 in remaining_data.iterrows():
        time_2 = row_2[1]["time"]
        x_2 = row_2[1]["x"]
        y_2 = row_2[1]["y"]
        if abs(time-time_2)<1: # samples less than one second apart
            # NOTE(review): `sqrt` is assumed imported earlier (math.sqrt) — confirm
            distance = sqrt((x-x_2)**2 + (y-y_2)**2)
            if distance <= 1: # closer than one metre -> proximity incident
                incident_rows.append(row_2[1])
incidents = pd.DataFrame(incident_rows, columns=remaining_data.columns)
figure = plt.figure(figsize=(10,10))
axs = figure.add_subplot(111)
# bug fix: label with the tag under investigation, not the stale variable `tag`
axs.plot(data_for_tag.x, data_for_tag.y,'-', label=tag_under_investigation,linewidth=1)
axs.plot(incidents.x, incidents.y,'r*', label="Incidents",linewidth=1)
axs.set_title("incidents")
axs.set_xlabel("X [m]")
axs.set_ylabel("Y [m]")
axs.legend()
figure.show()
# -
# # Incident report analysis
# Can we say something more detailed about the incidents?
#
# 1. How many incidents were made by each individual tag?
# 2. Turn the above into a function, run it in a for loop, and make the analysis for all tags
# 3. Print a nice report using the print(f"text {variable}") way
# 4. Create a confusion matrix-plot for easier overview
# 5. Calculate the number of incidents in [20cm, 40cm, 60cm, 80cm, 100cm] and make a bar-plot
#
# +
# 1
# count proximity incidents per tag by summing the boolean tag-id mask
for tag in unique_tags:
    if tag == tag_under_investigation:
        continue
    mask_id = (incidents.tag_id == tag)
    print(f"tag_id {tag} had {mask_id.sum()} proximity incidents with {tag_under_investigation}")
# 2
def proximity_analysis(df):
    """Run the 1 m / 1 s proximity check for every tag in *df*.

    df: filtered DataFrame with columns tag_id, time, x, y.
    returns: list of dicts {"analyzed_id": tag, "data_frame": incidents},
        one per tag, where incidents holds the rows of OTHER tags that came
        within 1 m and 1 s of the analyzed tag.
    """
    result = []
    for tag in unique_tags:
        tag_under_investigation = tag
        # bug fix: operate on the df argument, not the global filtered_df
        mask_id = df.tag_id == tag_under_investigation
        data_for_tag = df.loc[mask_id]
        remaining_data = df.loc[~mask_id]
        incident_rows = []
        for row in tqdm(data_for_tag.iterrows()):
            time = row[1]["time"]
            x = row[1]["x"]
            y = row[1]["y"]
            for row_2 in remaining_data.iterrows():
                time_2 = row_2[1]["time"]
                x_2 = row_2[1]["x"]
                y_2 = row_2[1]["y"]
                if abs(time-time_2)<1:
                    distance = sqrt((x-x_2)**2 + (y-y_2)**2)
                    if distance <= 1:
                        incident_rows.append(row_2[1])
        # bug fix: DataFrame.append was removed in pandas 2.0; build once
        incidents = pd.DataFrame(incident_rows, columns=remaining_data.columns)
        dictionary = {
            "analyzed_id":tag,
            "data_frame":incidents
        }
        result.append(dictionary)
    return result
result_list = proximity_analysis(filtered_df)
# -
#3
def report_incidents(incidents_result):
    """Print, per analyzed tag, the total and per-partner incident counts."""
    for entry in incidents_result:
        analyzed_tag = entry["analyzed_id"]
        incident_frame = entry["data_frame"]
        print(f"tag_id {analyzed_tag} had {len(incident_frame)} incidents in total")
        for other_tag in unique_tags:
            if other_tag == analyzed_tag:
                continue
            partner_count = (incident_frame.tag_id == other_tag).sum()
            print(f"\t {partner_count} proximity incidents with {other_tag}")
report_incidents(result_list)
#4
import seaborn as sn
def generate_confusion_matrix (incidents_result):
    """Build a square matrix of incident counts between every pair of tags.

    incidents_result: list of {"analyzed_id", "data_frame"} dicts as
        returned by proximity_analysis.
    returns: nested list; row i gives, for the i-th analyzed tag, the
        incident count against each tag (rows/columns ordered as unique_tags).
    """
    matrix = []
    for dictionary in incidents_result:
        tag_under_investigation = dictionary["analyzed_id"]
        incidents = dictionary["data_frame"]
        print(f"tag_id {tag_under_investigation} had {len(incidents)} incidents in total")
        row = []
        # the diagonal (tag vs itself) is kept so the matrix stays square
        for tag in unique_tags:
            # if tag == tag_under_investigation:
            #     continue
            mask_id = (incidents.tag_id == tag)
            row.append(mask_id.sum())
            print(f"\t {mask_id.sum()} proximity incidents with {tag}")
        matrix.append(row)
    return matrix
confusion_matrix = generate_confusion_matrix(result_list)
print(confusion_matrix)
# render the matrix as an annotated heatmap, labelled by tag id
df_cm = pd.DataFrame(confusion_matrix, index = [i for i in unique_tags],
                  columns = [i for i in unique_tags])
plt.figure(figsize = (10,7))
sn.heatmap(df_cm, annot=True)
plt.show()
#5
def proximity_analysis(df):
    """Proximity check as before, but also record the distance per incident.

    df: filtered DataFrame with columns tag_id, time, x, y.
    returns: list of {"analyzed_id": tag, "data_frame": incidents} dicts,
        where incidents carries an extra "distances" column (metres).
    """
    result = []
    for tag in unique_tags:
        tag_under_investigation = tag
        # bug fix: operate on the df argument, not the global filtered_df
        mask_id = df.tag_id == tag_under_investigation
        data_for_tag = df.loc[mask_id]
        remaining_data = df.loc[~mask_id]
        incident_rows = []
        distances = []
        for row in tqdm(data_for_tag.iterrows()):
            time = row[1]["time"]
            x = row[1]["x"]
            y = row[1]["y"]
            for row_2 in remaining_data.iterrows():
                time_2 = row_2[1]["time"]
                x_2 = row_2[1]["x"]
                y_2 = row_2[1]["y"]
                if abs(time-time_2)<1:
                    distance = sqrt((x-x_2)**2 + (y-y_2)**2)
                    if distance <= 1:
                        distances.append(distance)
                        incident_rows.append(row_2[1])
        # bug fix: DataFrame.append was removed in pandas 2.0; build once
        incidents = pd.DataFrame(incident_rows, columns=remaining_data.columns)
        incidents["distances"] = distances
        dictionary = {
            "analyzed_id":tag,
            "data_frame":incidents
        }
        result.append(dictionary)
    return result
result_list = proximity_analysis(filtered_df)
# +
#5 continued
# histogram of incident distances in five contiguous 20 cm bins
result_dictionary = {
    "0-20cm":0,
    "21-40cm":0,
    "41-60cm":0,
    "61-80cm":0,
    "81-100cm":0
}
for dictionary in result_list:
    incidents = dictionary["data_frame"]
    # bug fix: the original bins left gaps (e.g. a distance of 0.205 m
    # matched no bin); use contiguous half-open intervals (lower, upper]
    mask_20 = ((incidents.distances>=.0)&(incidents.distances<=.20))
    mask_40 = ((incidents.distances>.20)&(incidents.distances<=.40))
    mask_60 = ((incidents.distances>.40)&(incidents.distances<=.60))
    mask_80 = ((incidents.distances>.60)&(incidents.distances<=.80))
    mask_100 = ((incidents.distances>.80)&(incidents.distances<=1))
    result_dictionary["0-20cm"] += mask_20.sum()
    result_dictionary["21-40cm"] += mask_40.sum()
    result_dictionary["41-60cm"] += mask_60.sum()
    result_dictionary["61-80cm"] += mask_80.sum()
    result_dictionary["81-100cm"] += mask_100.sum()
plt.figure(figsize = (10,7))
plt.bar(result_dictionary.keys(),result_dictionary.values())
plt.show()
| advanced_technologies_in_construction/lecture_3/RTLS_Lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.preprocessing

from dnn_app_utils_v3 import *
from dnn_utils_v2 import sigmoid_backward, relu_backward, sigmoid, relu
from testCases_v4 import *
np.random.seed(1)
plt.rcParams['figure.figsize'] = (5.0,4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap']='gray'
def initializeParameters(layer_dims):
    """He-initialize weights and zero biases for each layer in layer_dims."""
    np.random.seed(3)
    parameters = {}
    num_layers = len(layer_dims) - 1
    for layer in range(1, num_layers + 1):
        fan_in = layer_dims[layer - 1]
        parameters['W' + str(layer)] = np.random.randn(layer_dims[layer], fan_in) * np.sqrt(2 / fan_in)
        parameters['b' + str(layer)] = np.zeros((layer_dims[layer], 1))
    return parameters
# sanity check: initialize a tiny 3-2-1 network and inspect the parameters
parameters = initializeParameters([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
def linear_forward(A, W, b):
    """Compute the linear part Z = W @ A + b and cache the inputs for backprop."""
    Z = W @ A + b
    assert(Z.shape == (W.shape[0], A.shape[1]))
    return Z, (A, W, b)
# +
# sanity check linear_forward against the provided test case
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
# -
def linear_activation_forward(A_prev, W, b, activation):
    """One forward step: linear transform followed by the chosen activation.

    activation: "sigmoid" or "relu".
    returns: (A, cache) where cache = (linear_cache, activation_cache).
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    return A, (linear_cache, activation_cache)
# +
# sanity check both activation paths against the provided test case
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
# -
def L_model_forward(X, parameters):
    """Full forward pass: (L-1) ReLU layers followed by one sigmoid layer.

    X: input data, shape (n_x, m).
    parameters: dict of W1..WL / b1..bL as built by initializeParameters.
    returns: (AL, caches) with AL the sigmoid output and one cache per layer.
    """
    caches = []
    num_layers = len(parameters) // 2
    activations = X
    # hidden layers 1..L-1 use ReLU
    for layer in range(1, num_layers):
        activations, layer_cache = linear_activation_forward(
            activations,
            parameters["W" + str(layer)],
            parameters["b" + str(layer)],
            activation='relu')
        caches.append(layer_cache)
    # the output layer uses sigmoid
    AL, layer_cache = linear_activation_forward(
        activations,
        parameters["W" + str(num_layers)],
        parameters["b" + str(num_layers)],
        activation='sigmoid')
    caches.append(layer_cache)
    assert(AL.shape == (1, X.shape[1]))
    return AL, caches
# sanity check the full forward pass on a 2-hidden-layer test case
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
def compute_cost(AL, Y):
    """Cross-entropy cost of predictions AL against labels Y, both shape (1, m)."""
    m = Y.shape[1]
    log_likelihood = (1 - Y) * np.log(1 - AL) + Y * np.log(AL)
    cost = (-1 / m) * np.sum(log_likelihood)
    cost = np.squeeze(cost)
    assert(cost.shape == ())
    return cost
# +
# sanity check the cost function against the provided test case
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
# -
def linear_backward(dZ, cache):
    """Backprop through the linear part: given dZ, return (dA_prev, dW, db)."""
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = (1/m) * (dZ @ A_prev.T)
    db = (1/m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = W.T @ dZ
    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
    return dA_prev, dW, db
# +
# Set up some test inputs
# sanity check linear_backward against the provided test case
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# -
def linear_activation_backwards(dA, cache, activation):
    """Backprop through the activation ("relu" or "sigmoid"), then the linear part."""
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)
# +
# sanity check both backward activation paths on the provided test case
dAL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backwards(dAL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backwards(dAL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# -
def L_model_backward(AL,Y,caches):
    """Full backward pass for the [ReLU]*(L-1) -> sigmoid network.

    AL: probability vector from the forward pass, shape (1, m).
    Y: true labels, reshaped to match AL.
    caches: list of per-layer caches from L_model_forward.
    returns: dict of gradients dA0..dA(L-1), dW1..dWL, db1..dbL.
    """
    grads={}
    L=len(caches)
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    # derivative of the cross-entropy cost with respect to AL
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # output (sigmoid) layer
    current_cache = caches[-1]
    grads["dA"+str(L-1)], grads["dW"+str(L)],grads["db"+str(L)] = linear_activation_backwards(dAL,current_cache,activation='sigmoid')
    # hidden (ReLU) layers, from L-1 down to 1
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        grads["dA"+str(l)], grads["dW"+str(l+1)],grads["db"+str(l+1)] = linear_activation_backwards(grads["dA"+str(l+1)],current_cache,activation='relu')
    return grads
# sanity check the full backward pass against the provided test case
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
def update_parameters(parameters, grads, learning_rate):
    """One gradient-descent step: p <- p - learning_rate * grad for every W and b."""
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]
    return parameters
# +
# sanity check the parameter update against the provided test case
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
# -
# -
X = sklearn.datasets.load_iris().data[0:36][:]
X = np.concatenate((X,sklearn.datasets.load_iris().data[50:86][:]),axis=0)
X = X.reshape(4,72)
X = sklearn.preprocessing.normalize(X, norm='l2', axis=1, copy=False, return_norm=False)
X_test = sklearn.datasets.load_iris().data[36:50][:]
X_test = np.concatenate((X_test,sklearn.datasets.load_iris().data[86:100][:]),axis=0)
X_test = X_test.reshape(4,28)
Y = sklearn.datasets.load_iris().target[0:36][:]
Y = np.concatenate((Y,sklearn.datasets.load_iris().target[50:86][:]),axis=0)
Y = Y.reshape(1,72)
Y_test = sklearn.datasets.load_iris().target[36:50][:]
Y_test = np.concatenate((Y_test,sklearn.datasets.load_iris().target[86:100][:]),axis=0)
Y_test = Y_test.reshape(1,28)
#
# X = Examples_shuffled[0:4][:]
# Y = (Examples_shuffled[4][:]).reshape(108,1)
layer_dims = [4,20,5,1]
def L_layer_model(X,Y,layer_dims,learning_rate=0.0075,num_iteration=3000,print_cost=True):
    """Train an L-layer network with full-batch gradient descent.

    X: inputs, shape (n_x, m).
    Y: labels, shape (1, m).
    layer_dims: layer sizes, e.g. [4, 20, 5, 1].
    learning_rate: gradient-descent step size.
    num_iteration: number of full-batch iterations.
    print_cost: if True, print and record the cost every 100 iterations.
    returns: the trained parameters dict.
    """
    costs=[]
    parameters = initializeParameters(layer_dims)
    for i in range(0,num_iteration):
        # forward pass -> cost -> backward pass -> parameter update
        AL,caches = L_model_forward(X,parameters)
        cost = compute_cost(AL,Y)
        grads = L_model_backward(AL,Y,caches)
        parameters = update_parameters(parameters,grads,learning_rate)
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)
    # plot the cost curve (one point per 100 iterations)
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    return parameters
# train on iris, then evaluate accuracy on the train and test splits
parameters = L_layer_model(X,Y,layer_dims,learning_rate=0.009,num_iteration=15000,print_cost=True)
predictions_train = predict(X, Y, parameters)
print(predictions_train)
pred_test = predict(X_test, Y_test, parameters)
print(pred_test)
| Neural+net+Iris (4).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Generate XOR data
rng = np.random.RandomState(0)
X = 2.3 * rng.randn(400, 2) # scale the variance to make it more similar to XOR data in Tensorflow
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# turn the boolean XOR labels into a (400, 1) float column vector
y= np.array([[1.0] if y == True else [0] for y in Y])
plt.scatter(X[:,0],X[:,1],c=Y)
# +
# Define activate function
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied element-wise."""
    return 1.0 / (np.exp(-x) + 1.0)
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT value x."""
    return x * (1.0 - x)
# -
class NeuralNetwork:
    """Three-weight-layer MLP (2 -> 5 -> 3 -> 1) with sigmoid activations
    and no bias terms, trained by full-batch gradient descent on the
    squared error sum((y - output)^2)."""
    def __init__(self, features, label):
        # features: (n_samples, 2) inputs; label: (n_samples, 1) targets
        self.input = features
        self.weights1 = np.random.rand(self.input.shape[1],5)
        self.weights2 = np.random.rand(5,3)
        self.weights3 = np.random.rand(3,1)
        self.y = label
        self.learn_rate = 0.005
        self.output = np.zeros(label.shape)
    def forward_feed(self):
        # push the whole batch through all three layers, caching activations
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.layer2 = sigmoid(np.dot(self.layer1, self.weights2))
        self.output = sigmoid(np.dot(self.layer2, self.weights3))
    def back_prop(self):
        # chain rule applied layer by layer; sigmoid_derivative takes the
        # cached layer OUTPUT (not the pre-activation)
        weights3_change = np.dot(self.layer2.T, (2*(self.y - self.output) \
                                * sigmoid_derivative(self.output)))
        weights2_change = np.dot(self.layer1.T, (np.dot(2*(self.y - self.output) \
                                * sigmoid_derivative(self.output), self.weights3.T)\
                                * sigmoid_derivative(self.layer2)))
        weights1_change = np.dot(self.input.T, np.dot((np.dot(2*(self.y - self.output) \
                                * sigmoid_derivative(self.output), self.weights3.T)\
                                * sigmoid_derivative(self.layer2)), self.weights2.T)\
                                * sigmoid_derivative(self.layer1))
        # (y - output) already points in the descent direction, hence "+="
        self.weights1 += self.learn_rate * weights1_change
        self.weights2 += self.learn_rate * weights2_change
        self.weights3 += self.learn_rate * weights3_change
# Train the neural network with 1024 full-batch epochs
nn = NeuralNetwork(X,y)
for epoch in range(1024):
    nn.forward_feed()
    nn.back_prop()
# threshold the network output at 0.5 to get hard class predictions
pred = [1 if item[0] > 0.5 else 0 for item in nn.output]
acc = sum([a == b for a,b in zip(Y,pred)])/len(pred)
print('The prediction accuracy of my Neural Network(sigmoid) is {}'.format(acc))
plt.scatter(X[:,0],X[:,1],c=pred)
| Model Building From Scratch/multilayer perceptron implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lamnguyen2187/algorithms_collection/blob/colab/Stack.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="tahJIWUdp6bU" colab_type="text"
# Create the max number of `k` digits from an array of `n` digits where `k` <= `n`, while preserving the relative order of the digits
#
#
# + id="PvKsiJ2orqj-" colab_type="code" outputId="bce0c125-4d9b-47c2-c20b-e985477053db" colab={"base_uri": "https://localhost:8080/", "height": 68}
# naive sorting
# wrong solution as this does not preserve the order
def maxK(arr, k):
    """Return the k largest digits in descending order (ignores relative order)."""
    ordered = sorted(arr, reverse=True)
    return ordered[:k]
print(maxK([2,4,2,1], 2))
print(maxK([2,6,7,8], 2))
print(maxK([2,6,7,8], 4))
# + id="JWGzUTuK1oeS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="bbfed988-164b-4d63-fc28-5df05e5cfd72"
# heap
import heapq
# worst case complexity O(log(N)*N**2)
# when the array is sorted in ascending order and k == n
# the reason is because at each iteration, the code pops and discards k-1 elements from the heap
def maxK(arr, k):
    """Build the max k-digit subsequence using a max-heap of (value, index).

    Each round: set aside candidates whose index is too late to leave room
    for the remaining picks, take the best remaining candidate if it comes
    after the previous pick, then restore the set-aside candidates.
    """
    n = len(arr)
    # negate values so Python's min-heap behaves as a max-heap
    pq = [(-v, idx) for idx, v in enumerate(arr)]
    heapq.heapify(pq)
    ans = []
    last = -1  # index of the most recently chosen digit
    while k:
        tmp = []
        # temporarily remove digits with too few elements after them
        while pq and n-pq[0][1] < k:
            tmp += heapq.heappop(pq),
        if pq:
            v, idx = heapq.heappop(pq)
            if idx > last:  # must preserve the original relative order
                ans += -v,
                last = idx
            # NOTE(review): k is decremented even when the candidate is
            # skipped (idx <= last) — confirm this is intended
            k -= 1
        while tmp:
            heapq.heappush(pq, tmp.pop())
    return ans
print(maxK([2,4,2,1], 1))
print(maxK([2,4,2,1], 2))
print(maxK([2,4,2,1], 3))
print(maxK([2,4,2,1], 4))
print(maxK([2,6,7,8], 1))
print(maxK([2,6,7,8], 2))
print(maxK([2,6,7,8], 3))
print(maxK([2,6,7,8], 4))
print(maxK([1,2,3,4,5], 5))
# + id="Hux4USM0t1gM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="246c7791-e973-4668-ee5b-f550e786b6d7"
# stack
# O(N)
def maxK(arr, k):
    """Greedy monotonic stack: keep the largest k-digit subsequence in order."""
    chosen = []
    total = len(arr)
    for pos, digit in enumerate(arr):
        # pop smaller digits while enough digits remain to still reach length k
        while chosen and chosen[-1] < digit and total - pos + len(chosen) > k:
            chosen.pop()
        if len(chosen) < k:
            chosen.append(digit)
    return chosen
print(maxK([2,4,2,1], 1))
print(maxK([2,4,2,1], 2))
print(maxK([2,4,2,1], 3))
print(maxK([2,4,2,1], 4))
print(maxK([2,6,7,8], 1))
print(maxK([2,6,7,8], 2))
print(maxK([2,6,7,8], 3))
print(maxK([2,6,7,8], 4))
print(maxK([1,2,3,4,5], 5))
| Stack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim library
from modsim import *
import matplotlib.pyplot as plt
# -
# ## HIV Model
def make_system(gamma, mu, tau, beta, alpha, sigma, delta, pi):
    """Make a system object for the HIV within-host model.

    gamma: rate of new activated, uninfected CD4 lymphocytes
    mu: HIV-independent death rate
    tau: proportion of lymphocytes that are activated
    beta: infection rate of activated cells by free virions
    alpha: rate at which latently infected cells become actively infected
    sigma: rate of removal of virions
    delta: rate of removal of infected cells
    pi: rate at which free virions are produced
    returns: System object whose initial State holds
        R = activated uninfected cells, L = latently infected cells,
        E = actively infected cells, V = free virions.
    """
    init = State(R=200, L=0, E=0, V=.00000004)
    t0 = 0
    t_end = 12000
    dt = 0.01
    return System(init=init,beta=beta, gamma=gamma, tau=tau, mu=mu, alpha=alpha,
                  delta=delta, sigma=sigma, pi=pi, t0=t0, t_end= t_end, dt = dt)
def update_func(state, t, system):
    """Advance the HIV model state by one Euler step of length system.dt.

    state: State with fields R (activated uninfected), L (latently
        infected), E (actively infected), V (free virions).
    t: current time (not used by the rate equations).
    system: System carrying the model rate parameters.
    returns: new State after one dt step.
    """
    R, L, E, V = state
    # rate equations; the 0.1 / (1 - 0.1) split sends 10% of new infections
    # to the latent pool and 90% to the actively infected pool
    dRdt = (system.tau*system.gamma) - (system.mu*R) - (system.beta*R*V)
    dLdt = (0.1*system.beta*R*V)-(system.mu*L) - (system.alpha*L)
    dEdt = ((1-0.1)*(system.beta*R*V)) + (system.alpha*L) - (system.delta*E)
    dVdt = (system.pi*E)-(system.sigma*V)
    # forward-Euler update of each compartment
    R += dRdt*system.dt
    L += dLdt*system.dt
    E += dEdt*system.dt
    V += dVdt*system.dt
    return State(R=R, L=L, E=E, V=V)
def run_simulation(system, update_func):
    """Runs a simulation of the system.

    system: System object
    update_func: function that updates state
    returns: TimeFrame with one row per time step
    """
    unpack(system)
    frame = TimeFrame(columns=init.index)
    frame.row[t0] = init
    # NOTE(review): the loop steps t by 1 via linrange(t0, t_end) even though
    # system.dt = 0.01 — confirm the intended step size
    for t in linrange(t0, t_end):
        frame.row[t+1] = update_func(frame.row[t], t, system)
    return frame
def plot_results(R, L, E, V):
    """Plot the four populations of the HIV model over time.

    R: TimeSeries of activated, uninfected cells
    L: TimeSeries of latently infected cells
    E: TimeSeries of actively infected cells
    V: TimeSeries of free virions
    """
    plot(L, '--', label='L')
    plot(R, '-', label='R')
    plot(E, '-', label='E')
    plot(V, '-', label='V')
    decorate(xlabel='Time (days)',
             ylabel='Population')
# run the model and plot all four populations, then re-plot R, E, V with a
# logarithmic x-axis to show the early dynamics
system = make_system(1.36, .00136, .2, .00027, .036, 2, 0.33, 100)
results = run_simulation(system, update_func)
plot_results(results.R, results.L, results.E, results.V)
plt.semilogx(results.R, '-', label='R')
plt.semilogx(results.E, '-', label='E')
plt.semilogx(results.V, '-', label='V')
decorate(xlabel='Time (days) (on a log scale)',
         ylabel='Population')
#
| code/hiv_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3]
# language: python
# name: conda-env-py3-py
# ---
# # How to use `autoreload`
#
# I have been confused on how to use [`autoreload`](http://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html) IPython extension for a long time. The documentation simply wasn't clear to me. Or, rather, it seemed clear, but then I was surprised by the behavior.
#
#
# After submitting a bug request w/ [<NAME>](https://github.com/dmargala) (we were confused together), we got a [response](https://github.com/ipython/ipython/issues/10395#issuecomment-286467407) that helped to clear things up.
#
# I've updated this with my new understanding. Hopefully this helps anyone else who is confused.
# + deletable=true editable=true
import os
import sys
import time
sys.path.append("..")
# %reload_ext autoreload
# + [markdown] deletable=true editable=true
# Create a simple package with a few simple modules that we will update.
# -
directory = "../examplepackage/"
# exist_ok makes creation idempotent and avoids the exists()/makedirs() race
os.makedirs(directory, exist_ok=True)
# + deletable=true editable=true
# %%writefile ../examplepackage/neato.py
def torpedo():
print('First module modification 0!')
# +
# %%writefile ../examplepackage/neato2.py
def torpedo2():
print('Second module modification 0!')
# + deletable=true editable=true
# %%writefile ../examplepackage/neato3.py
def torpedo3():
print('Third module modification 0!')
# + deletable=true editable=true
# when hitting 'run all' this needs a short delay (probable race condition).
time.sleep(1.5)
# + [markdown] deletable=true editable=true
# # `%autoreload 1`
#
# The docs say:
#
# ```
# # # %autoreload 1
#
# Reload all modules imported with %aimport every time before executing the Python code typed.
# ```
# -
import examplepackage.neato
import examplepackage.neato2
import examplepackage.neato3
# + deletable=true editable=true
# %autoreload 1
# %aimport examplepackage
# -
# You might think that importing `examplepackage` would result in that package being auto-reloaded if you updated code inside of it. You'd be wrong. Follow along!
examplepackage.neato.torpedo()
# + deletable=true editable=true
examplepackage.neato2.torpedo2()
# + deletable=true editable=true
examplepackage.neato3.torpedo3()
# + deletable=true editable=true
# %%writefile ../examplepackage/neato.py
def torpedo():
print('First module modification 1')
# + deletable=true editable=true
# %%writefile ../examplepackage/neato2.py
def torpedo2():
print('Second module modification 1')
# + deletable=true editable=true
# %%writefile ../examplepackage/neato3.py
def torpedo3():
print('Third module modification 1!')
# + deletable=true editable=true
# when hitting 'run all' this needs a short delay (probable race condition).
time.sleep(1.5)
# + deletable=true editable=true
examplepackage.neato.torpedo()
# + deletable=true editable=true
examplepackage.neato2.torpedo2()
# + deletable=true editable=true
examplepackage.neato3.torpedo3()
# -
# Nothing is updated. You have to import the module explicitly like:
# + deletable=true editable=true
# %autoreload 1
# %aimport examplepackage.neato
# + deletable=true editable=true
examplepackage.neato.torpedo()
# + deletable=true editable=true
examplepackage.neato2.torpedo2()
# + deletable=true editable=true
examplepackage.neato3.torpedo3()
# -
# # `%autoreload 2`
#
# The docs say:
#
# ```
# # %autoreload 2
#
# Reload all modules (except those excluded by %aimport) every time before executing the Python code typed.
# ```
#
# I read this as "if you set `%autoreload 2`, then it will reload all modules except whatever you `%aimport examplepackage.module`". This is not how it works. When using `%aimport` you also have to flag it with a `-`. See below.
# %autoreload 2
# %aimport examplepackage.neato
# %aimport -examplepackage.neato2
examplepackage.neato.torpedo()
examplepackage.neato2.torpedo2()
# + deletable=true editable=true
examplepackage.neato3.torpedo3()
# +
# %%writefile ../examplepackage/neato.py
def torpedo():
print('First module modification 2!')
# +
# %%writefile ../examplepackage/neato2.py
def torpedo2():
print('Second module modification 2!')
# + deletable=true editable=true
# %%writefile ../examplepackage/neato3.py
def torpedo3():
print('Third module modification 2!')
# -
# when hitting 'run all' this needs a short delay (race condition).
time.sleep(1.5)
examplepackage.neato.torpedo()
examplepackage.neato2.torpedo2()
# + deletable=true editable=true
examplepackage.neato3.torpedo3()
# -
| notebooks/autoreload-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generation of Synthetic Sitelle Data
#
# The problem: How do we get the velocity component of the emission from the Sitelle spectra? This is needed to apply an ORCS fit...
#
# What are we going to do?
# - Generate Synthetic Sitelle Data for SN3 and the Signals Observation Parameters. We will vary the theta value (offset of pixel on chip), velocity, and broadening components for our five primary emission lines.
# - Apply a CNN to the data to try and learn the values
import orb.fit
import pylab as pl
import numpy as np
from orb.core import Lines
import random
from astropy.io import fits
import datetime
from tqdm import tqdm_notebook as tqdm
import pymysql
import pandas as pd
import matplotlib.pyplot as plt
# Set Input Parameters
output_dir = 'Data/'  # destination for the generated FITS spectra
# Set observation parameters (SN3 filter configuration -- TODO confirm against SIGNALS specs)
step = 2943
order = 8
resolution = 5000
vel_num = 5000 # Number of Velocity Values Sampled
broad_num = 1000 # Number of Broadening Values Sampled
theta_num = 1 # Number of Theta Values Sampled
num_syn = 10000 # Number of Synthetic Spectra
# Sample theta parameter (currently pinned to 11.96 deg; the wider range is commented out)
thetas_ = np.random.uniform(11.96,11.96,theta_num)#11.8,19.6,theta_num)
# Sample velocity
vel_ = np.random.uniform(-200,500,vel_num)
# Sample broadening
broad_ = np.random.uniform(10,200,broad_num)
# Sample resolution (200 draws just below the nominal resolution)
res_ = np.random.uniform(resolution-200, resolution, 200)
# Now we need to get our emission-line rest wavenumbers (cm^-1)
halpha_cm1 = Lines().get_line_cm1('Halpha')
NII6548_cm1 = Lines().get_line_cm1('[NII]6548')
NII6583_cm1 = Lines().get_line_cm1('[NII]6583')
SII6716_cm1 = Lines().get_line_cm1('[SII]6716')
SII6731_cm1 = Lines().get_line_cm1('[SII]6731')
# +
# We must also get our flux values from 3mdb
# First we load in the parameters needed to login to the sql database
MdB_HOST='3mdb.astro.unam.mx'
MdB_USER='OVN_user'
MdB_PASSWD='<PASSWORD>'  # NOTE(review): placeholder -- keep the real credential out of version control
MdB_PORT='3306'
MdB_DBs='3MdBs'
MdB_DBp='3MdB'
MdB_DB_17='3MdB_17'
# Now we connect to the database
co = pymysql.connect(host=MdB_HOST, db=MdB_DB_17, user=MdB_USER, passwd=MdB_PASSWD)
# Now we get the line amplitudes (plus ionization parameter U, gas fraction, abundance)
ampls = pd.read_sql("select H__1_656281A as h1, N__2_654805A as n1, N__2_658345A as n2, \
S__2_673082A as s1, S__2_671644A as s2, \
com1 as U, com2 as gf, com4 as ab \
from tab_17 \
where ref = 'BOND'"
                    , con=co)
# sims = pd.read_sql("SELECT ref FROM tab", con=co)
# sims['ref'].unique()
# -
# Sanity check: build and plot one Halpha-only model spectrum.
sim_vals = ampls.iloc[1]
axis_corr = 1 / np.cos(np.deg2rad(11.96))
spectrum = orb.fit.create_cm1_lines_model([halpha_cm1], [sim_vals['h1']/sim_vals['h1']],
                                          step, order, resolution, 11.96, fmodel='sincgauss',
                                          sigma=1, vel=100)
spectrum_axis = orb.utils.spectrum.create_cm1_axis(np.size(spectrum), step, order, corr=axis_corr)
plt.plot(spectrum_axis, spectrum)
plt.xlim(15200, 15300)
len(ampls)
# We now can model the lines. For the moment, we will assume all lines have the same velocity and broadening
# Do this for randomized combinations of vel_ and broad_
# Generate synthetic spectra: draw (velocity, broadening, resolution), pick a
# BOND simulation with detectable [NII]/[SII] lines, sum the five line models,
# add noise, crop, normalize, and write each spectrum to a FITS file.
for spec_ct in range(1):
    if spec_ct%1000 == 0:
        print("We are on spectrum number %i"%spec_ct)
    pick_new = True
    # Randomly select velocity and broadening parameter and theta
    velocity = random.choice(vel_)
    broadening = random.choice(broad_)
    resolution = random.choice(res_)
    theta = 11.96#random.choice(thetas_)
    axis_corr = 1 / np.cos(np.deg2rad(theta))
    # Randomly Select a M3db simulation
    #while pick_new:
    # Only pick simulation if the n1 and s1 lines are detectable
    while pick_new:
        sim_num = random.randint(0,len(ampls)-1)
        #print(sim_num)
        sim_vals = ampls.iloc[sim_num]
        #print(sim_vals['h1']/sim_vals['n1'], sim_vals['h1']/sim_vals['s1'])
        # Keep only simulations where Halpha does not dwarf [NII]6548 / [SII]6716.
        if ((sim_vals['h1']/sim_vals['n1'] < 5) and (sim_vals['h1']/sim_vals['s1'] < 8)):
            pick_new = False
    # Next we are going to normalize so that the max value is 1
    max_val = sim_vals[['h1','n1','n2','s1','s2']].max()
    #min_val = sim_vals[['h1','n1','n2','s1','s2']].min()
    # Now add all of the lines (amplitudes expressed relative to Halpha)
    spectrum = orb.fit.create_cm1_lines_model([halpha_cm1], [sim_vals['h1']/sim_vals['h1']],
                                              step, order, resolution, theta, fmodel='sinc',
                                              sigma=broadening, vel=velocity)
    spectrum += orb.fit.create_cm1_lines_model([NII6548_cm1], [sim_vals['n1']/sim_vals['h1']],
                                               step, order, resolution, theta, fmodel='sinc',
                                               sigma=broadening, vel=velocity)
    spectrum += orb.fit.create_cm1_lines_model([NII6583_cm1], [sim_vals['n2']/sim_vals['h1']],
                                               step, order, resolution, theta, fmodel='sinc',
                                               sigma=broadening, vel=velocity)
    spectrum += orb.fit.create_cm1_lines_model([SII6716_cm1], [sim_vals['s1']/sim_vals['h1']],
                                               step, order, resolution, theta, fmodel='sinc',
                                               sigma=broadening, vel=velocity)
    spectrum += orb.fit.create_cm1_lines_model([SII6731_cm1], [sim_vals['s2']/sim_vals['h1']],
                                               step, order, resolution, theta, fmodel='sinc',
                                               sigma=broadening, vel=velocity)
    # We now add gaussian noise at a random SNR in [25, 30]
    SNR = np.random.uniform(25,30)
    spectrum += np.random.normal(0.0,1.0/SNR,spectrum.shape)
    spectrum_axis = orb.utils.spectrum.create_cm1_axis(np.size(spectrum), step, order, corr=axis_corr)
    # Crop to the 14400-15700 cm^-1 window containing the five lines.
    min_ = np.argmin(np.abs(np.array(spectrum_axis)-14400))
    max_ = np.argmin(np.abs(np.array(spectrum_axis)-15700))
    spectrum = spectrum[min_:max_] ## at R = 5000 -> spectrum[214:558]
    spectrum_axis = spectrum_axis[min_:max_]
    # Normalize Spectrum Values so the peak is 1
    spec_max = np.max(spectrum)
    spectrum = [spec_/spec_max for spec_ in spectrum]
    pl.clf()
    pl.plot(spectrum_axis, spectrum)
    #pl.savefig('Plots/spec_%i.png'%spec_ct)
    # Gather information to make Fits file
    col1 = fits.Column(name='Wavenumber', format='E', array=spectrum_axis)
    col2 = fits.Column(name='Flux', format='E', array=spectrum)
    cols = fits.ColDefs([col1, col2])
    hdu = fits.BinTableHDU.from_columns(cols)
    # Header info: record the ground-truth generation parameters for training labels.
    hdr = fits.Header()
    hdr['OBSERVER'] = '<NAME>'
    hdr['COMMENT'] = "Synthetic Spectrum Number: %i"%spec_ct
    hdr['TIME'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    hdr['VELOCITY'] = velocity
    hdr['BROADEN'] = broadening
    hdr['THETA'] = theta
    hdr['RES'] = resolution
    hdr['SIM'] = 'BOND'
    hdr['SNR'] = SNR
    hdr['SIM_NUM'] = sim_num
    empty_primary = fits.PrimaryHDU(header=hdr)
    hdul = fits.HDUList([empty_primary, hdu])
    hdul.writeto(output_dir+'Spectrum_%i.fits'%spec_ct, overwrite=True)
# ## Reference Spectrum
| Notebooks/.ipynb_checkpoints/Sitelle-Generation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import os, math
import numpy as np, pandas as pd
import matplotlib.pyplot as plt, seaborn as sns
from pandas_summary import DataFrameSummary
from tqdm import tqdm, tqdm_notebook
from pathlib import Path
pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 400)
sns.set()
os.chdir('../..')
from src import utils
# -
# Project data layout (raw CSVs in, engineered feather files out).
DATA = Path('data')
RAW = DATA/'raw'
INTERIM = DATA/'interim'
PROCESSED = DATA/'processed'
challenge = pd.read_csv(RAW/'Challenge_20180423.csv', low_memory=False)
customer = pd.read_csv(RAW/'Customer.csv', low_memory=False)
isin = pd.read_csv(RAW/'Isin.csv', low_memory=False)
submission = pd.read_csv(RAW/'sample_submission.csv', low_memory=False)
trade = pd.read_csv(RAW/'Trade.csv', low_memory=False)
val_friday = pd.read_feather(PROCESSED/'val_180420.feather')
from src.utils import make_val_set
# Build a Thursday validation set analogous to the precomputed Friday one.
val_thursday = make_val_set(trade[trade.TradeDateKey==20180419], challenge)
val_friday.shape, val_thursday.shape
# Restrict training data to 2018 trades strictly before each validation day,
# keeping only rows where the customer actually showed interest.
trades_2018 = trade[trade.TradeDateKey>20180000]
trades_2018.shape
train_friday = trades_2018[trades_2018.TradeDateKey<20180420]
train_thursday = trades_2018[trades_2018.TradeDateKey<20180419]
train_friday = train_friday[train_friday.CustomerInterest == 1]
train_thursday = train_thursday[train_thursday.CustomerInterest == 1]
train_friday.head()
train_friday.shape, train_thursday.shape
# Most recent trade date per (customer, bond, side): sort descending, keep first.
last_friday = pd.Series(train_friday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx', 'IsinIdx', 'BuySell'], keep='first')
    .set_index(['CustomerIdx', 'IsinIdx', 'BuySell'])['TradeDateKey']) \
    .to_dict()
last_thursday = pd.Series(train_thursday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx', 'IsinIdx', 'BuySell'], keep='first')
    .set_index(['CustomerIdx', 'IsinIdx', 'BuySell'])['TradeDateKey']) \
    .to_dict()
from datetime import date
def date_diff(d1, d2):
    """Return d1 - d2 in days, where both dates are integers in YYYYMMDD form."""
    def _as_date(key):
        # Peel year / month / day out of the packed integer.
        return date(key // 10000, (key // 100) % 100, key % 100)
    return (_as_date(d1) - _as_date(d2)).days
# Feature 1: days since the same (customer, bond, side) last traded.
# Missing keys fall back to 20170701 (the start of the observation window).
val_friday['DaysSinceBuySell'] = val_friday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_friday.get((r['CustomerIdx'], r['IsinIdx'], r['BuySell']),
        20170701)), axis=1)
val_thursday['DaysSinceBuySell'] = val_thursday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_thursday.get((r['CustomerIdx'], r['IsinIdx'], r['BuySell']),
        20170701)), axis=1)
val_friday.head()
val_friday[(val_friday.CustomerInterest==0) & (val_friday.DaysSinceBuySell < 293)]['DaysSinceBuySell'].describe()
val_friday[(val_friday.CustomerInterest==1) & (val_friday.DaysSinceBuySell < 293)]['DaysSinceBuySell'].describe()
# %%time
# Feature 2: days since the (customer, bond) pair last traded in either direction.
last_friday = pd.Series(train_friday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx', 'IsinIdx'], keep='first')
    .set_index(['CustomerIdx', 'IsinIdx'])['TradeDateKey']) \
    .to_dict()
last_thursday = pd.Series(train_thursday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx', 'IsinIdx'], keep='first')
    .set_index(['CustomerIdx', 'IsinIdx'])['TradeDateKey']) \
    .to_dict()
val_friday['DaysSinceTransaction'] = val_friday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_friday.get((r['CustomerIdx'], r['IsinIdx']),
        20170701)), axis=1)
val_thursday['DaysSinceTransaction'] = val_thursday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_thursday.get((r['CustomerIdx'], r['IsinIdx']),
        20170701)), axis=1)
# %%time
# Feature 3: days since the customer traded anything at all.
last_friday = pd.Series(train_friday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx'], keep='first')
    .set_index(['CustomerIdx'])['TradeDateKey']) \
    .to_dict()
last_thursday = pd.Series(train_thursday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['CustomerIdx'], keep='first')
    .set_index(['CustomerIdx'])['TradeDateKey']) \
    .to_dict()
val_friday['DaysSinceCustomerActivity'] = val_friday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_friday.get((r['CustomerIdx']),
        20170701)), axis=1)
val_thursday['DaysSinceCustomerActivity'] = val_thursday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_thursday.get((r['CustomerIdx']),
        20170701)), axis=1)
# %%time
# Feature 4: days since the bond itself last traded with anyone.
last_friday = pd.Series(train_friday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['IsinIdx'], keep='first')
    .set_index(['IsinIdx'])['TradeDateKey']) \
    .to_dict()
last_thursday = pd.Series(train_thursday.sort_values('TradeDateKey', ascending=False) \
    .drop_duplicates(['IsinIdx'], keep='first')
    .set_index(['IsinIdx'])['TradeDateKey']) \
    .to_dict()
val_friday['DaysSinceBondActivity'] = val_friday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_friday.get((r['IsinIdx']),
        20170701)), axis=1)
val_thursday['DaysSinceBondActivity'] = val_thursday.apply(lambda r: date_diff(r['TradeDateKey'],
    last_thursday.get((r['IsinIdx']),
        20170701)), axis=1)
# maybe log for smoothness
pd.concat([val_friday[(val_friday.CustomerInterest==1) & (val_friday.DaysSinceBuySell < 293)]['DaysSinceBuySell'].describe(),
val_friday[(val_friday.CustomerInterest==1) & (val_friday.DaysSinceTransaction < 293)]['DaysSinceTransaction'].describe(),
val_friday[(val_friday.CustomerInterest==1) & (val_friday.DaysSinceCustomerActivity < 293)]['DaysSinceCustomerActivity'].describe(),
val_friday[(val_friday.CustomerInterest==1) & (val_friday.DaysSinceBondActivity < 293)]['DaysSinceBondActivity'].describe()], axis=1)
# Persist engineered validation sets, then rebuild the same features for the test set.
val_friday.to_feather(PROCESSED/'friday_datediffs.feather')
val_thursday.to_feather(PROCESSED/'thursday_datediffs.feather')
from src.utils import add_datediffs
# %%time
df_test = make_val_set(trade[trade.TradeDateKey==20180420], challenge)
# %%time
add_datediffs(df_test, trade[trade.TradeDateKey>20180000])
val_friday.head()
test = challenge.copy()
test['TradeDateKey'] = test['DateKey']
# %%time
add_datediffs(test, trade[trade.TradeDateKey>20180000])
test.head()
test.to_feather(PROCESSED/'test_datediffs.feather')
| notebooks/robert/RA-04-Make-Friday-Val-Set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python385jvsc74a57bd0916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1
# ---
# Goal of this notebook:
# - Implement a system to represent the properties of an element with adjacent vertices storing the properties
# - Each "property vertex" stores timestamp of property, property #, and whether the corresponding element was active at the time
# - Show how, given a time, a vertex can be queried to determine whether it was active during that time
# +
# Jupyter notebook needs this or else it will crash (gremlin-python runs its
# own asyncio loop inside the notebook's already-running loop).
import nest_asyncio
nest_asyncio.apply()
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
# Instantiate a Gremlin Graph
graph = Graph()
# Connect to the server, instantiate traversal of graph.
g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin','g'))
# Get the vertices of the graph as a list, and print them.
print(g.V().toList())
# -
# Drop all vertices of the graph (start from a clean slate).
g.V().drop().iterate()
# +
# Create the single element vertex used throughout this demo.
g.addV().property('name', 'ANT000').next()
print(g.V().valueMap().toList())
traversal = g.V().has('name', 'ANT000')
print(traversal.outE().count().next())
# +
def add_property(traversal: any, time: float, active: bool) -> None:
    """
    Add a property vertex to the vertex contained within traversal.next(), and connect it by an edge labelled 'had_property' from traversal.next() to the property vertex.

    <traversal> should be a traversal such that traversal.next() is a Vertex.

    The property vertex records:
      - 'prop_ind': running index = count of existing 'had_property' edges,
      - 'time': timestamp of the property,
      - 'active': whether the element was active at that time.
    Relies on the module-level traversal source `g`.
    """
    # Vertex we are looking at.
    v = traversal.clone().next()
    # Number of properties that v has so far.
    props_so_far = traversal.clone().outE('had_property').count().next()
    p = g.addV() # Instantiate a new traversal to add a vertex to the graph. NOTE THAT THIS DOES NOT ACTUALLY ADD THE VERTEX TO THE GRAPH.
    p.property('prop_ind', props_so_far) # Property index
    p.property('time', time) # Set the timestamp of the property
    p.property('active', active) # Set the active boolean of the property
    g.V(v).addE('had_property').to(p.next()).next() # p.next() terminates the traversal and adds the vertex, and addE(...).to(...) adds an edge to the new vertex from the element vertex
# -
# Attach a small activity timeline to ANT000: active until t=4, inactive
# through t=6, active again from t=8 onward.
add_property(traversal=g.V().has('name', 'ANT000'), time=2, active=True)
add_property(traversal=g.V().has('name', 'ANT000'), time=4, active=False)
add_property(traversal=g.V().has('name', 'ANT000'), time=6, active=False)
add_property(traversal=g.V().has('name', 'ANT000'), time=8, active=True)
add_property(traversal=g.V().has('name', 'ANT000'), time=10, active=True)
add_property(traversal=g.V().has('name', 'ANT000'), time=12, active=True)
# +
# .order().by('time') sorts the vertices by their 'time' property.
print(g.V().has('name', 'ANT000').out('had_property').order().by('time').valueMap().toList())
print(g.V().has('name', 'ANT000').out('had_property').order().by('time').values('time').toList())
# +
def get_next_smallest_index(val, lst) -> int:
    """
    Given a sorted list lst in increasing order and val where val is of the same type as all elements in lst,
    return the index of the largest element <= val, or -1 if every element
    exceeds val (i.e. the lower bound on the index when val is absent).

    Delegates to the standard library's bisect module instead of a hand-rolled
    binary search; same O(log n) behavior, C-accelerated and battle-tested.
    """
    from bisect import bisect_right
    return bisect_right(lst, val) - 1
# Demo of the lower-bound lookup. NOTE(review): for i < lst[0] the function
# returns -1, so lst[-1] (the LAST element) is printed -- Python's negative
# indexing wraps around; callers must check for -1 before indexing.
lst = [1, 3, 5, 7]
for i in range(10):
    print(i, lst[get_next_smallest_index(i, lst)])
# +
# Here's how checking whether a vertex was active or not at a time COULD work.
def check_active_at_time_1(name: str, time: float) -> bool:
    """
    Report whether the vertex named <name> was active at time <time>.

    Queries the graph twice: first for the sorted property timestamps,
    then again for the 'active' flag of the matching property vertex.
    """
    times = g.V().has('name', name).out('had_property').order().by('time').values('time').toList()
    idx = get_next_smallest_index(time, times)
    if idx == -1:
        return False
    return g.V().has('name', name).out('had_property').has('prop_ind', idx).values('active').next()
def check_active_at_time_2(name: str, time: float) -> bool:
    """
    Report whether the vertex named <name> was active at time <time>.

    Hits the graph exactly ONCE, pulling every adjacent property's value map
    (sorted by 'time'), then resolves the lookup purely in Python.
    """
    # Value maps of all 'had_property' neighbors, ordered by timestamp.
    props = g.V().has('name', name).out('had_property').order().by('time').valueMap().toList()
    # Gremlin value maps wrap each value in a list, hence the [0].
    times = [entry['time'][0] for entry in props]
    idx = get_next_smallest_index(time, times)
    return False if idx == -1 else props[idx]['active'][0]
def dummy() -> bool:
    """
    Mirror the lookup logic of the check_active_at_time_* functions on a
    hard-coded value-map list, so graph-query overhead can be isolated
    when benchmarking.
    """
    props = [
        {'time': [2], 'active': [True]}, {'time': [3], 'active': [True]},
        {'time': [4], 'active': [False]}, {'time': [5], 'active': [True]},
        {'time': [6], 'active': [False]}, {'time': [7], 'active': [True]},
        {'time': [8], 'active': [False]}, {'time': [9], 'active': [True]},]
    # Same unwrap-then-search dance as the real query functions.
    times = [entry['time'][0] for entry in props]
    idx = get_next_smallest_index(6, times)
    return False if idx == -1 else props[idx]['active'][0]
# + tags=[]
# Benchmark the three lookup strategies; each call is repeated 100 times.
from timeit import Timer
name = 'ANT000'
t = Timer('check_active_at_time_1("ANT000", 2)','from __main__ import check_active_at_time_1')
print("Querying graph several times per call:", t.timeit(number=100))
t = Timer('check_active_at_time_2("ANT000", 2)','from __main__ import check_active_at_time_2')
print("Querying graph once per call:", t.timeit(number=100))
t = Timer('dummy()','from __main__ import dummy')
print("Performing the same operations but not querying graph:", t.timeit(number=100))
# +
# Timing a single minimal round-trip to the graph, for baseline comparison.
from timeit import Timer
t = Timer("g.V().has('name', 'ANT000').count().next()",'from __main__ import g')
print("Querying the graph:", t.timeit(number=100))
# -
# ## Conclusion: querying the graph from Python TAKES A LONG TIME!!!
#
# Another note: ideally, you wouldn't query the graph for every vertex, but rather get a 'path of vertices' from JanusGraph along with their value map, then do it in Python.
| project/property_vertices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import arviz as az
import pymc3 as pm
# import arviz.labels as azl
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import pickle
import matplotlib as mpl
import warnings
import functions as funcs
import numpy as np
warnings.simplefilter(action='ignore', category=FutureWarning)
# -
def R(x):
    """Return the reciprocal of x; passed as `transform` to plot_forest."""
    reciprocal = 1 / x
    return reciprocal
# +
features = ['dihedrals', 'distances_linear', 'distances_logistic']
feature_names = ['Dihedrals', 'Distances', 'logistic(Distances)']
inp_dir = 'sensitivities_exp_log_outcome'
# +
relevance_vars = ["l_tica__lag_s","l_tica__dim_s", "l_cluster__k_s", "l_distances__centre_s", "l_distances__steepness_s"]
info_share_vars =["l_distances__scheme[closest-heavy]_s", "l_distances__scheme[ca]_s"]
# +
def make_labeller(labels):
    """
    Build a tick-label formatter that maps raw sensitivity-variable names
    onto their LaTeX symbols.

    `labels` is the list of tick Text objects currently on the axis; the
    returned callable keeps the prefix before the first ':' and replaces the
    variable name after the last ':' with its symbol. Labels without '__'
    pass through unchanged.
    """
    symbol_for = {
        "l_cluster__k_s": r"$n$",
        "l_tica__dim_s": r"$m$",
        "l_tica__lag_s": r"$\tau_{tICA}$",
        "l_distances__scheme[closest-heavy]_s": "$X-X$",
        "l_distances__scheme[ca]_s": r"$C\alpha-C\alpha$",
        "l_distances__centre_s": r"$cent.$",
        "l_distances__steepness_s": r"$steep.$",
    }

    def labeller(pos, x):
        original = labels[x].get_text()
        if '__' not in original:
            return original
        raw_var = original.split(':')[-1].strip()
        prefix = original.split(':')[0]
        return f"{prefix} : {symbol_for[raw_var]}"

    return labeller
def get_model_names(paths):
    """
    Derive display names from pickled-model file paths.

    Each path's stem contains an outcome token: 'timescale' (with the
    implied-timescale index two tokens later) becomes '$t_{k}$', while
    'vamp' becomes 'VAMP'.
    """
    names = []
    for path in paths:
        tokens = str(path.stem).split('_')
        pos = next(i for i, tok in enumerate(tokens) if tok in ('timescale', 'vamp'))
        label = tokens[pos]
        if label == 'timescale':
            label = f'$t_{{{tokens[pos + 2]}}}$'
        elif label == 'vamp':
            label = label.upper()
        names.append(label)
    return names
def get_traces(paths):
    """
    Load pickled model dictionaries and return their posterior datasets.

    Parameters
    ----------
    paths : iterable of pathlib.Path
        Pickle files, each holding a dict with a 'trace' entry
        (a pymc3 trace convertible via arviz).

    Returns
    -------
    list
        The 'posterior' group of each converted trace.
    """
    posteriors = []
    for path in paths:
        # Use a context manager so each file handle is closed promptly;
        # the previous version opened the files and never closed them.
        with path.open('rb') as fh:
            model = pickle.load(fh)
        posteriors.append(az.from_pymc3(trace=model['trace'])['posterior'])
    return posteriors
def add_null_variables(trace, var_names):
    """
    Return `trace` with a zero-valued data variable (shaped like sigma_n)
    assigned for every name in `var_names`.
    """
    for missing in var_names:
        trace = trace.assign({missing: lambda ds: ds.sigma_n * 0})
    return trace
def add_variables_to_traces(traces, all_var_names):
    """
    Ensure every trace exposes all of `all_var_names`, padding any absent
    variables with zeros so the traces can be plotted side by side.
    """
    padded = []
    for trace in traces:
        present = set(trace.data_vars.keys())
        absent = list(set(all_var_names) - present)
        padded.append(add_null_variables(trace, absent))
    return padded
# -
# +
# Per-protein sensitivity figure: a 2x3 grid with relevance forests (top row,
# one column per feature set) and information-sharing forests (bottom row).
for protein in ['uvf']: #funcs.PROTEIN_DIRS[:8]:
    with sns.plotting_context('paper', font_scale=1):
        annotate_kws = dict(xycoords='axes fraction', va='top', ha='left',
            bbox={'facecolor': 'w', 'alpha': 0.5, 'edgecolor': 'w'})
        fig = plt.figure(constrained_layout=True, figsize=(6, 8))
        widths = [1, 1, 1]
        heights = [4, 2]
        spec = fig.add_gridspec(ncols=len(widths), nrows=len(heights), width_ratios=widths,
            height_ratios=heights)
        axes = np.empty((len(heights), len(widths)), dtype=object)
        # Column 0 owns the axes; later columns share its x/y and hide tick labels.
        for i in range(len(heights)):
            for j in range(len(widths)):
                if j > 0:
                    axes[i, j] = fig.add_subplot(spec[i, j], sharey=axes[i, 0], sharex=axes[i, 0])
                    plt.setp(axes[i, j].get_yticklabels(), visible=False)
                if j == 0:
                    axes[i,j] = fig.add_subplot(spec[i, j])
        LETTERS = ['a', 'b', 'c', '', 'd', 'e', 'f']
        let_count = 0
        for feat_ix, feature in enumerate(features):
            # Get the paths
            timescale_paths = list(Path(inp_dir).glob(f"{protein}_{feature}_timescale_its*"))
            timescale_paths.sort()
            vamp_path = list(Path(inp_dir).glob(f"{protein}_{feature}_vamp_sensitivity*.pkl"))[0]
            all_model_paths = [vamp_path]+timescale_paths
            if len(timescale_paths) > 0:
                # print(protein, feature)
                # Get model names
                model_names = get_model_names(all_model_paths)
                all_traces = get_traces(all_model_paths)
                # Pad missing variables so every trace has the same var set.
                vars_to_add = relevance_vars+info_share_vars
                all_traces = add_variables_to_traces(all_traces, vars_to_add)
                # Plot relevances (transform=R converts inverse-relevance back):
                ax = axes[0, feat_ix]
                pm.plot_forest(all_traces, model_names=model_names,
                    combined=True, linewidth=2,
                    var_names=relevance_vars, transform=R, ax=ax, hdi_prob=0.95)
                ax.grid()
                ax.set_title(None)
                ax.set_xlabel("Relevance")
                ax.annotate(text=f"({LETTERS[feat_ix]}) {feature_names[feat_ix]}",xy=(0.01, 0.98), **annotate_kws)
                let_count += 1
                ax.set_xscale('log')
                # Adjust axes labels (only once, after the shared axis is populated)
                if feat_ix == 2:
                    labels = axes[0, 0].yaxis.get_majorticklabels()
                    labeller = make_labeller(labels)
                    ax.yaxis.set_major_formatter(labeller)
                # Plot info sharing:
                ax = axes[1, feat_ix]
                pm.plot_forest(all_traces, model_names=model_names,
                    combined=True, linewidth=2,
                    var_names=info_share_vars, ax=ax, hdi_prob=0.95)
                ax.grid()
                ax.set_xlabel("Information sharing")
                if feature_names[feat_ix] != 'Dihedrals':
                    ax.annotate(text=f"({LETTERS[feat_ix+len(features)]}) {feature_names[feat_ix]}",xy=(0.01, 0.97), **annotate_kws)
                ax.set_title(None)
                ax.set_xscale('log')
                # Adjust axes labels
                if feat_ix == 2:
                    labels = axes[1, 0].yaxis.get_majorticklabels()
                    labeller = make_labeller(labels)
                    ax.yaxis.set_major_formatter(labeller)
        # plt.tight_layout()
        plt.savefig(funcs.FIG_DIR.joinpath(f'sensitivities/{protein}_sensitivity.pdf'), bbox_inches='tight')
# -
# Leftover inspection cells from the last loop iteration.
all_model_paths
all_traces[0]
| results/sensitivity_charts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install python-docx
from docx import Document
# ls Downloads/*.docx
# Load the Word document and print every non-empty paragraph with its index.
document=Document('Downloads/bios.docx')
type(document)
document.paragraphs[0].text
document.paragraphs[2].text
indx=0
for para in document.paragraphs:
    # NOTE(review): indx is incremented for every paragraph, including empty
    # ones, so printed numbers are 1-based positions in the full document.
    indx+=1
    if (len(para.text)>0):
        print("\n paragraph",indx,"is")
        print(para.text)
| FinalProject_docxfile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
########################################################################################################################
# Filename: Transformer_Models_Training.ipynb
#
# Purpose: Multi-label Text-categorization via Transformer networks -- Train networks/generate results
# Author(s): Bobby (Robert) Lumpkin
#
# Library Dependencies: numpy, pandas, scikit-learn, skmultilearn, joblib, os, sys, threshold_learning, matplotlib
########################################################################################################################
| Deep Learning Models/Transformer Models/.ipynb_checkpoints/Transformer_Models_Training-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py27_pyro)
# language: python
# name: py27
# ---
# +
# %load_ext autoreload
# %autoreload 2
import pydrake
import torch
import pyro
from pyro import poutine
import time
import matplotlib.pyplot as plt
import scene_generation.data.dataset_utils as dataset_utils
from scene_generation.models.planar_multi_object_multi_class_2 import MultiObjectMultiClassModel
# +
import numpy as np
# NOTE: this notebook is Python 2 (statement-form print).
DATA_DIR_TRAIN = "/home/gizatt/projects/scene_generation/data/planar_bin/planar_bin_static_scenes_geometric_train/"
DATA_DIR_TEST = "/home/gizatt/projects/scene_generation/data/planar_bin/planar_bin_static_scenes_geometric_test/"
#DATA_FILE = "/home/gizatt/projects/scene_generation/data/planar_bin/planar_bin_static_scenes.yaml"
scenes_dataset_yaml = dataset_utils.ScenesDataset(DATA_DIR_TRAIN)
# Group every object's pose+params vector by its class name, then print
# per-class empirical means and standard deviations.
params_by_object_class = {}
for env_i in range(len(scenes_dataset_yaml)):
    env = scenes_dataset_yaml[env_i]
    for obj_i in range(env["n_objects"]):
        obj_yaml = env["obj_%04d" % obj_i]
        class_name = obj_yaml["class"]
        if class_name not in params_by_object_class.keys():
            params_by_object_class[class_name] = []
        params_by_object_class[class_name].append(obj_yaml["pose"] + obj_yaml["params"])
for object_name in params_by_object_class.keys():
    print object_name, ": "
    params = np.stack(params_by_object_class[object_name])
    print params.shape
    print "means: ", np.mean(params, axis=0)
    print "vars: ", np.std(params, axis=0)
# +
# Vectorized datasets for training/evaluating the generative model.
scenes_dataset = dataset_utils.ScenesDatasetVectorized(DATA_DIR_TRAIN, max_num_objects=20)
data = scenes_dataset.get_full_dataset()
scenes_dataset_test = dataset_utils.ScenesDatasetVectorized(DATA_DIR_TEST, max_num_objects=20)
data_test = scenes_dataset_test.get_full_dataset()
print "Train # batches: ", data.batch_size
print "Test # batches: ", data_test.batch_size
# +
# Rig for SVI, running with AutoDelta, which converges fairly reliably but
# confuses the variances
from collections import defaultdict
from torch.distributions import constraints
from pyro.infer import Trace_ELBO, SVI
from pyro.contrib.autoguide import AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal, AutoGuideList
import datetime
from tensorboardX import SummaryWriter
pyro.enable_validation(True)
# Timestamped tensorboard run directory.
# NOTE(review): "%Y-%m-%d-%H-%m-%s" repeats the month directive and '%s' is
# non-standard -- likely meant "%H-%M-%S"; confirm before relying on the names.
log_dir = "/home/gizatt/projects/scene_generation/models/runs/pmomc2/" + datetime.datetime.now().strftime(
    "%Y-%m-%d-%H-%m-%s")
writer = SummaryWriter(log_dir)
def write_np_array(writer, name, x, i):
    """Log each entry of array x as its own tensorboard scalar under name/<index>."""
    for idx, value in enumerate(x):
        writer.add_scalar("%s/%d" % (name, idx), value, i)
print "All params: ", pyro.get_param_store().get_all_param_names()
# Parameters logged to tensorboard every iteration.
interesting_params = ["keep_going_weights",
    "new_class_weights",
    "params_means_0", "params_means_1",
    "params_vars_0", "params_vars_1"]
model = MultiObjectMultiClassModel(scenes_dataset)
pyro.clear_param_store()
# Snapshot the untrained parameter store for later comparison.
pyro.get_param_store().save("planar_multi_object_multi_class_2_simple_untrained.pyro")
guide = model.guide
optim = pyro.optim.Adam({'lr': 0.1, 'betas': [0.8, 0.99]})
elbo = Trace_ELBO(max_plate_nesting=1)
svi = SVI(model.model, guide, optim, loss=elbo)
losses = []
losses_test = []
snapshots = {}
for i in range(101):
    # Guesses on important things:
    # Big subsamples appear really important -- I had major loss of
    # convergence when using smaller subsample sizes (like ~50).
    # Also important: prior on the variance must be REALLY low.
    # Otherwise long_box_mean diverges to negative... :(
    # I think there's a fundamental problem with variance estimation
    # under this guide / with this system -- see the single-box-dataset
    # estimates that don't capture the x vs y variance.
    loss = svi.step(data, subsample_size=250) / data.batch_size
    losses.append(loss)
    writer.add_scalar('loss', loss, i)
    # Held-out ELBO on the test set (no gradient step).
    loss_test = svi.evaluate_loss(data_test, subsample_size=25) / data_test.batch_size
    losses_test.append(loss_test)
    writer.add_scalar('loss_test', loss_test, i)
    # Record a per-iteration snapshot of every parameter.
    for p in pyro.get_param_store().keys():
        if p not in snapshots.keys():
            snapshots[p] = []
        snapshots[p].append(pyro.param(p).cpu().detach().numpy().copy())
    for p in interesting_params:
        write_np_array(writer, p, snapshots[p][-1], i)
    if (i % 10 == 0):
        print ".",
    if (i % 50 == 0):
        print "\n"
        for p in interesting_params:
            print p, ": ", pyro.param(p).detach().numpy()
print "Done"
# -
# Train/test ELBO curves on a log scale.
plt.figure().set_size_inches(12, 3)
plt.plot(losses, label="train")
plt.plot(losses_test, label="test")
plt.semilogy()
plt.xlabel("epoch")
plt.ylabel("loss (ELBO)")
plt.title("Loses: final [%f train][%f test]" % (np.mean(losses[-20:]), np.mean(losses_test[-20:])))
# Save model
pyro.get_param_store().save("planar_multi_object_multi_class_2_simple_unstructured.pyro")
# Load model
pyro.clear_param_store()
model = MultiObjectMultiClassModel(scenes_dataset)
pyro.get_param_store().load("planar_multi_object_multi_class_2_simple_unstructured.pyro")
# Convert that data back to a YAML environment, which is easier to
# handle. Sample scenes and draw the one with the most objects.
for i in range(1):
    scene_with_most_objects = None
    for k in range(1):
        generated_data, generated_encodings, generated_contexts = model.model()
        scene_yaml = scenes_dataset.convert_vectorized_environment_to_yaml(
            generated_data)
        if scene_with_most_objects is None or scene_yaml[0]["n_objects"] > scene_with_most_objects["n_objects"]:
            scene_with_most_objects = scene_yaml[0]
    print scene_with_most_objects
    dataset_utils.DrawYamlEnvironment(scene_with_most_objects, "planar_bin")
    time.sleep(1.0)
# Convert that data back to a YAML environment, which is easier to
# handle. Histogram the object count over 200 sampled scenes.
generated_n_objs = []
for i in range(200):
    generated_data, generated_encodings, generated_contexts = model.model()
    scene_yaml = scenes_dataset.convert_vectorized_environment_to_yaml(
        generated_data)
    generated_n_objs.append(scene_yaml[0]["n_objects"])
print generated_n_objs
plt.hist(generated_n_objs, bins=range(20))
# +
# Compare learned vs empirical class/parameter distributions.
import matplotlib.pyplot as plt
from scipy.stats import norm
class_weight_vals = pyro.param("new_class_weights").detach().numpy().copy()
# Use params_by_object_class from when we loaded dataset.
plt.figure().set_size_inches(6, 3)
plt.subplot(1, 2, 1)
plt.title("Learned distribution")
plt.bar([0, 1], class_weight_vals)
plt.xticks([0, 1], scenes_dataset.class_id_to_name)
plt.ylabel("Prob")
plt.subplot(1, 2, 2)
plt.title("Empirical distribution")
plt.bar([0, 1], [len(params_by_object_class[object_name]) for object_name in scenes_dataset.class_id_to_name])
plt.xticks([0, 1], scenes_dataset.class_id_to_name)
plt.ylabel("Count")
plt.tight_layout()
# Per-class, per-parameter: empirical histogram vs learned normal pdf.
for object_name in params_by_object_class.keys():
    object_id = scenes_dataset.class_name_to_id[object_name]
    params_means = pyro.param("params_means_%d" % object_id).detach().numpy()
    params_vars = pyro.param("params_vars_%d" % object_id).detach().numpy()
    plt.figure().set_size_inches(18, 3)
    params = np.stack(params_by_object_class[object_name]).copy()
    #params[2, :] = (params[2, :] + 2 * np.pi) % (2 * np.pi)
    for k in range(params.shape[1]):
        # First three columns are the pose (x, z, theta); the rest are
        # class-specific shape parameters.
        if k >= 3:
            param_name = scenes_dataset.params_names_by_class[object_id][k - 3]
        else:
            param_name = ["x", "z", "theta"][k]
        print(object_name + "::" + param_name + ": %2.2f +/- %2.2f" % (params_means[k], params_vars[k]))
        plt.subplot(1, params.shape[1], k+1)
        plt.title(object_name + "::" + param_name)
        plt.ylabel("density")
        bins = None
        if k == 0:
            bins = np.linspace(-1., 1., 100)
        elif k == 1:
            bins = np.linspace(0., 1., 100)
        elif k == 2:
            bins = np.linspace(0, 2*np.pi, 200)
        x_bins = plt.hist(params[:, k], label="Empirical", density=True, bins=bins)[1]
        x_range = np.linspace(x_bins[0], x_bins[-1], 100)
        plt.plot(x_range, norm.pdf(x_range, params_means[k], params_vars[k]), label="Learned")
    plt.tight_layout()
# -
import scipy as sp
import scipy.stats
# Draw the keep_going CDF, which is the product of all previous keep_going vals.
keep_going_vals = pyro.param("keep_going_weights").detach().numpy().copy()
keep_going_pdf = np.zeros(keep_going_vals.shape[0])
for k in range(0, keep_going_pdf.shape[0]):
    keep_going_pdf[k] = np.prod(keep_going_vals[0:k])*(1. - keep_going_vals[k])
plt.figure().set_size_inches(12, 3)
plt.title("Blue: Empirical dist of # objects, Orange: Fit dist of # of objects (geometric)")
plt.xlabel("# of objects")
plt.ylabel("Density")
xbins = plt.hist([p["n_objects"] for p in scenes_dataset_yaml], bins=range(0, 21), density=True)[1]
plt.plot(xbins, sp.stats.geom.pmf(xbins, 1.-keep_going_vals[0]))
plt.xticks(range(20))
plt.tight_layout()
# +
# Assume projection has been done for this trained model and saved to these files...
def generate_params_by_object_class(scenes_dataset_yaml):
    """Group each object's pose+params vector by its class name.

    Walks every environment in the dataset, concatenates each object's
    "pose" and "params" lists, and returns {class_name: [vectors...]}.
    """
    grouped = {}
    for env in scenes_dataset_yaml:
        for obj_i in range(env["n_objects"]):
            obj_yaml = env["obj_%04d" % obj_i]
            grouped.setdefault(obj_yaml["class"], []).append(
                obj_yaml["pose"] + obj_yaml["params"])
    return grouped
# Load two datasets generated from the trained model and post-processed
# ("projected") in different ways, so their per-class parameter distributions
# can be compared against the training data.
scenes_dataset_yaml_static = dataset_utils.ScenesDataset(
    "/home/gizatt/projects/scene_generation/models/generated_planar_bin_static_scenes_geometric_static.yaml")
params_by_object_class_static = generate_params_by_object_class(scenes_dataset_yaml_static)
scenes_dataset_yaml_nonpen = dataset_utils.ScenesDataset(
    "/home/gizatt/projects/scene_generation/models/generated_planar_bin_static_scenes_geometric_nonpen.yaml")
params_by_object_class_nonpen = generate_params_by_object_class(scenes_dataset_yaml_nonpen)
# -
# Overlay, per object class and per parameter, the empirical training
# distribution against the "static"-projected samples.
for object_name in params_by_object_class.keys():
    object_id = scenes_dataset.class_name_to_id[object_name]
    plt.figure().set_size_inches(18, 3)
    param_sets_base = [
        params_by_object_class[object_name],
        #params_by_object_class_nonpen[object_name],
        params_by_object_class_static[object_name]
    ]
    #params_sets_names = ["empirical", "nonpenetrating", "static"]
    params_sets_names = ["empirical", "static"]
    param_sets = []
    for params in param_sets_base:
        params_stacked = np.stack(params).copy()
        #params_stacked[2, :] = (params_stacked[2, :] + 2 * np.pi) % (2 * np.pi)
        param_sets.append(params_stacked)
    for k in range(param_sets[0].shape[1]):
        # Columns 0-2 are pose (x, z, theta); later columns are class-specific.
        if k >= 3:
            param_name = scenes_dataset.params_names_by_class[object_id][k - 3]
        else:
            param_name = ["x", "z", "theta"][k]
        plt.subplot(1, param_sets[0].shape[1], k+1)
        plt.title(object_name + "::" + param_name)
        plt.ylabel("density")
        bins = None
        if k == 0:
            bins = np.linspace(-1., 1., 100)
        elif k == 1:
            bins = np.linspace(0., 1., 100)
        elif k == 2:
            bins = np.linspace(0, 2*np.pi, 200)
        for i, params in enumerate(param_sets):
            x_bins = plt.hist(params[:, k], label=params_sets_names[i], density=True, bins=bins, alpha=0.5)[1]
            x_range = np.linspace(x_bins[0], x_bins[-1], 100)
        plt.legend()
    plt.tight_layout()
| notebooks/planar_bin/20190228_MBT_Context_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example 04: Causal discovery within a range of features
#
# -------------------------------------------
#
# ## Overview
#
#
# - AitiaExplorer allows you to set a higher and lower number of features for the causal discovery process.
# - In the example below, we set a number range and AitiaExplorer will start at the lower number of features and run the causal discovery analysis for each number in the range. The best performing results will then be returned.
# - This example is designed to show how AitiaExplorer can help narrow the choices for causal discovery.
# ### Imports
# +
import os
import sys
import math
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pycausal.pycausal import pycausal
from IPython.display import display, HTML
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
sys.path.append(module_path)
from aitia_explorer.app import App
# stop the warning clutter
import warnings
warnings.filterwarnings('ignore')
# -
# ### Set up for causal discovery
#
# - First create an AitiaExplorer instance and using it to load some data into a pandas dataframe.
# - We will be using the [HEPAR II](https://www.bnlearn.com/bnrepository/#hepar2) dataset.
# Create the AitiaExplorer app and load the HEPAR II sample dataset.
aitia = App()
df = aitia.data.hepar2_10k_data()
df.head()
# ### Set up for causal discovery
#
# - Next we will select the causal and feature selection algorithms and start the py-causal Java VM.
feature_selection_list = []
feature_selection_list.append(aitia.feature_selection.LINEAR_REGRESSION)
feature_selection_list.append(aitia.feature_selection.RANDOM_FOREST)
algorithm_list = []
algorithm_list.append(aitia.algo_runner.PC)
algorithm_list.append(aitia.algo_runner.RFCI_continuous)
# py-causal needs a running Java VM for the Tetrad algorithms.
pc = pycausal()
pc.start_vm()
# - Now we will run the causal discovery with the number of feature set to a range between 10 and 20.
results, summary, target_graph, all_results = aitia.run_analysis_with_high_low(
    df,
    target_graph_str=None,  # no target graph, one will be created
    feature_high=20, feature_low=10,
    feature_selection_list=feature_selection_list,
    algorithm_list=algorithm_list,
    pc=pc)
# ### Display the results
#
# - The summary dataframe will contain the analysis results with the lowest SHD.
display(HTML(summary.to_html()))
# - We can look at the causal graph for the best result (index 2 or 3).
results[3].causal_graph.draw()
# - We can take a look at all the results.
display(HTML(all_results.to_html()))
# - We can now plot the SHD and AUPRC for the run.
# +
# For each requested feature count, keep the best (lowest) SHD and the best
# (highest) AUPRC across all algorithm/feature-selection combinations.
shd_min = []
for s in range(11, 21):
    shd = all_results.loc[all_results['No. of Features Req.'] == s]['SHD'].min()
    shd_min.append(shd)
auprc_max = []
for a in range(11, 21):
    auprc = all_results.loc[all_results['No. of Features Req.'] == a]['AUPRC'].max()
    auprc_max.append(auprc)
# +
plt.rcParams["figure.figsize"] = (10, 5)
plt.plot(np.arange(11,21), shd_min, label='SHD')
plt.title('Structural Hamming Distance')
plt.xlabel('Analysis Index')
plt.ylabel('Value')
plt.legend()
plt.show()
# +
plt.rcParams["figure.figsize"] = (10, 5)
plt.plot(np.arange(11,21), auprc_max, label='AUPRC')
plt.title('AUPRC')
plt.xlabel('Analysis Index')
plt.ylabel('Value')
plt.legend()
plt.show()
# -
# From the graphs and the results, it would be worth doing a narrower causal discovery run:
# - From the `PC` and `RFCI-continuous` causal discovery algorithms.
# - Using the features supplied by the random forest classifier.
#
# This is brief indication of what AitiaExplorer can provide - a way of testing multiple methods of producing a causal graph from a dataset to see which looks the most promising.
| notebooks/.ipynb_checkpoints/Example04-Causal_Discovery_Within_A_Range_of_Features-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library('readr')
library('magrittr')
library('dplyr')
library('tidyr')
library('ggplot2')
# +
# Factor levels for the bead:cell ratio, in plotting order.
ratio_levels=c('0:1', '1:1', '1:2', '1:4', '1:8')

# Load the flow-cytometry export and alias the verbose gate columns.
cd48_grouped <-
    read_csv(
        'flow.csv',
        col_types=cols(
            `Sample`=col_character(),
            `Bead:Cell ratio`=col_factor(levels=ratio_levels),
            .default=col_double()
        )
    ) %>%
    mutate(
        `CD4+ CD8-`=`Live cells/CD4+ CD8- | Freq. of Parent (%)`,
        `CD4- CD8+`=`Live cells/CD4- CD8+ | Freq. of Parent (%)`,
        `CD4+ CD8- CD69 MFI`=`Live cells/CD4+ CD8- | Mean (Comp-PE-A)`,
        `CD4- CD8+ CD69 MFI`=`Live cells/CD4- CD8+ | Mean (Comp-PE-A)`,
    ) %>% group_by(`Bead:Cell ratio`)

# Per-ratio mean/sd of population frequency and CD69 MFI, stacked for the
# two T-cell populations.
# NOTE(review): the CD96_* columns hold CD69 MFI statistics -- the "CD96"
# name looks like a leftover from another panel; confirm before renaming.
cd48_summary <-
    rbind(
        cd48_grouped %>%
            summarize(
                `Freq_mean`=mean(`CD4+ CD8-`),
                `Freq_sd`=sd(`CD4+ CD8-`),
                `CD96_mean`=mean(`CD4+ CD8- CD69 MFI`),
                `CD96_sd`=sd(`CD4+ CD8- CD69 MFI`)
            ) %>%
            mutate(`Population`="CD4+ CD8-"),
        cd48_grouped %>%
            summarize(
                `Freq_mean`=mean(`CD4- CD8+`),
                `Freq_sd`=sd(`CD4- CD8+`),
                `CD96_mean`=mean(`CD4- CD8+ CD69 MFI`),
                `CD96_sd`=sd(`CD4- CD8+ CD69 MFI`)
            ) %>%
            mutate(`Population`="CD4- CD8+")
    )
# -
# Bar chart of population frequency per bead:cell ratio, faceted by
# population, with mean +/- sd error bars (n=3).
cd48_summary %>%
    ggplot(aes(x=`Bead:Cell ratio`, y=`Freq_mean`, fill=`Bead:Cell ratio`)) +
    geom_col() +
    geom_errorbar(
        aes(ymin=`Freq_mean`-`Freq_sd`, ymax=`Freq_mean`+`Freq_sd`),
        width=0.2
    ) +
    facet_wrap(~`Population`) +
    ylim(0, 100) +
    ylab('Percent population (%)\n(n=3)') +
    xlab('Activation bead (anti-CD3 and anti-CD28) to cell ratio')
# Same layout for the CD69 MFI (stored in the CD96_* columns).
cd48_summary %>%
    ggplot(aes(x=`Bead:Cell ratio`, y=`CD96_mean`, fill=`Bead:Cell ratio`)) +
    geom_col() +
    geom_errorbar(
        aes(ymin=`CD96_mean`-`CD96_sd`, ymax=`CD96_mean`+`CD96_sd`),
        width=0.2
    ) +
    facet_wrap(~`Population`) +
    ylab('CD69 Mean Fluorescence Intensity\n(n=3)') +
    xlab('Activation bead (anti-CD3 and anti-CD28) to cell ratio')
# Proliferation assay: per-donor mean normalized absorbance per dilution,
# with error bars and a cubic trend line.
read_tsv(
    'proliferation_assay.tsv',
    col_types=cols(
        `Row`=col_factor(levels=LETTERS[1:8]),
        `Donor`=col_factor(levels=c("D8", "D9")),
        .default=col_double()
    )
) %>%
    mutate(
        `Donor`=factor(`Donor`, labels=c("Donor 8", "Donor 9"))
    ) %>%
    gather(
        key='Dilution',
        value='A570-A595',
        `1:1`:`1:2048`,
        factor_key=TRUE
    ) %>%
    group_by(`Donor`, `Dilution`) %>%
    summarize(
        `Norm_abs_mean`=mean(`A570-A595`),
        `Norm_abs_sd`=sd(`A570-A595`),
    ) %>%
    ggplot(aes(x=`Dilution`, y=`Norm_abs_mean`, group=`Donor`)) +
    geom_point() +
    geom_line() +
    geom_errorbar(
        aes(ymin=`Norm_abs_mean`-`Norm_abs_sd`, ymax=`Norm_abs_mean`+`Norm_abs_sd`),
        width=0.2
    ) +
    geom_smooth(method="lm", colour="red", formula=y~I(x^3)+I(x^2)+x) +
    facet_wrap(~`Donor`, ncol=1) +
    ylim(0.1, .3) +
    xlab('Activation bead (anti-CD3 and anti-CD28) to cell ratio') +
    ylab('Normalized absorbance (A570nm-A595nm)\n(n=8)')
# Same proliferation data, pooled across both donors.
read_tsv(
    'proliferation_assay.tsv',
    col_types=cols(
        # Consistency fix: use LETTERS[1:8] like the per-donor cell above
        # instead of spelling out c('A', ..., 'H').
        `Row`=col_factor(levels=LETTERS[1:8]),
        `Donor`=col_factor(levels=c("D8", "D9")),
        .default=col_double()
    )
) %>%
    gather(
        key='Dilution',
        value='A570-A595',
        `1:1`:`1:2048`,
        factor_key=TRUE
    ) %>%
    group_by(`Dilution`) %>%
    summarize(
        `Norm_abs_mean`=mean(`A570-A595`),
        `Norm_abs_sd`=sd(`A570-A595`),
    ) %>%
    ggplot(aes(x=`Dilution`, y=`Norm_abs_mean`, group=1)) +
    geom_point() +
    geom_errorbar(
        aes(ymin=`Norm_abs_mean`-`Norm_abs_sd`, ymax=`Norm_abs_mean`+`Norm_abs_sd`),
        width=0.2
    ) +
    geom_smooth(method="lm", colour="red", formula=y~I(x^3)+I(x^2)+x) +
    geom_line() +
    ylim(.1, .3)
# Leftover interactive help lookup; harmless outside an interactive session.
?gather
| analyses/activation-bead-titration/Activation bead titration results via flow and count.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # gerekli kütüphaneler
# +
# Silence warnings for cleaner notebook output.
import warnings
warnings.filterwarnings("ignore")
# Data handling
import pandas as pd
import numpy as np
# Statistics
import scipy as sc
import hypothetical
import pingouin
import statsmodels as sm
# Modelling
import mlxtend
import lifetimes
# Visualisation
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
from IPython.display import HTML, display
# Pandas display / plotting settings
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
pd.set_option('mode.chained_assignment', None)
sns.set_palette("husl")
sns.set(rc={'image.cmap': 'coolwarm'})
# -
# # verinin çalışma ortamına alınması
# 2009-2011 dönemi verileri **veri** isimli değişken olarak çalışma ortamına alınır:
# Load the 2009-2011 online-retail transactions into `veri` ("data").
veri = pd.read_csv("dataset/online_retail_2.csv")
print(veri.shape)
veri.head()
veri.info()
# # veri hazırlığı
# +
# Country groups used to derive a coarse continent label per transaction.
avrupa_ülkeleri = ['Austria', 'Belgium', 'Cyprus', 'Czech Republic', 'Denmark',
                   'EIRE', 'European Community', 'Finland', 'France', 'Germany',
                   'Greece', 'Iceland', 'Italy', 'Lithuania', 'Malta', 'Netherlands',
                   'Norway', 'Poland', 'Portugal', 'Spain', 'Sweden', 'Switzerland',
                   'United Kingdom', 'Channel Islands']
amerika_ülkeleri = ['Canada', 'USA', 'Brazil', 'Bermuda']
asya_ülkeleri = ['Bahrain', 'Hong Kong', 'Japan', 'Saudi Arabia', 'Singapore', 'Thailand', 'United Arab Emirates']


def ülke_grubu(row):
    """Return the continent group for a row: "Europe", "America", "Asia" or "Other".

    `row` is any mapping with a 'Country' key (e.g. a DataFrame row passed in
    by ``DataFrame.apply(..., axis=1)``).
    """
    # Note: the original declared `global` for the three lists, which is
    # unnecessary -- reading a module-level name never requires `global`.
    country = row['Country']
    if country in avrupa_ülkeleri:
        return "Europe"
    if country in amerika_ülkeleri:
        return "America"
    if country in asya_ülkeleri:
        return "Asia"
    return "Other"
# veri önişleme fonksiyonu
def veri_önişleme(veri):
    """Clean the raw retail frame and return it with derived columns.

    Drops incomplete/duplicate rows, converts dtypes, removes cancellation
    invoices (those prefixed with "C"), and adds TotalPrice / CountryGroup.
    Note: the dropna/drop_duplicates steps mutate the caller's frame in place.
    """
    # Rows without a product description or customer id are unusable.
    veri.dropna(axis=0, subset=['Description', 'Customer ID'], inplace=True)
    # Remove exact duplicate records.
    veri.drop_duplicates(inplace=True)
    # Type conversions.
    veri['InvoiceDate'] = pd.to_datetime(veri['InvoiceDate'])
    veri['Customer ID'] = veri['Customer ID'].astype(np.int64)
    # Invoices starting with "C" are cancellations; keep everything else.
    cancelled = veri['Invoice'].str.startswith("C", na=False)
    veri = veri[cancelled == False]
    veri['Invoice'] = veri['Invoice'].astype(np.int64)
    # Derived columns: line total and continent group.
    veri['TotalPrice'] = veri['Price'] * veri['Quantity']
    veri = veri.assign(CountryGroup=veri.apply(ülke_grubu, axis=1))
    return veri
# -
# Run the preprocessing pipeline defined above.
veri = veri_önişleme(veri)
veri.head()
veri.info()
# # müşteri yaşamboyu değeri (customer lifetime value-clv) modeli
# model için terminoloji:
#
# * Frekans(frequency), müşterinin yaptığı tekrarlanan satın alma işlemlerinin sayısını temsil eder. Bu, toplam alışveriş sayısından bir az olduğu anlamına gelir.
# * T, seçilen zaman birimlerinde müşterinin yaşını temsil eder. Bu, bir müşterinin ilk satın alma işlemi ile çalışılan dönemin sonu arasındaki süreye eşittir.
# * Yakınlık (recency), müşterinin en son alışverişini gerçekleştirdiği zamanki yaşını temsil eder. Bu, bir müşterinin ilk satın alma işlemi ile en son satın alma arasındaki süreye eşittir. (Dolayısıyla, yalnızca 1 alışveriş yapmışlarsa, yakınlık 0 olur.)
# model çıktıları:
#
# * $P(X(t) = x | λ, p)$ : t uzunluğundaki bir zaman diliminde x işlemi gözlemleme olasılığı
# * $E(X(t) | λ, p)$ : t uzunluğundaki bir dönemde beklenen alışveriş sayısı
# * $P(τ>t)$ : bir müşterinin τ periyodunda aktif olmama olasılığı
print(f"minimum alışveriş tarihi: {veri['InvoiceDate'].min()}")
# Fix: this line prints the *maximum* date but was labelled "minimum"
# (copy-paste error); same fix applied to the last-year slice below.
print(f"maksimum alışveriş tarihi: {veri['InvoiceDate'].max()}")
# Keep only the last year of transactions.
veri_son_yıl = veri[veri['InvoiceDate']>="2010-12-01"]
print(f"minimum alışveriş tarihi: {veri_son_yıl['InvoiceDate'].min()}")
print(f"maksimum alışveriş tarihi: {veri_son_yıl['InvoiceDate'].max()}")
# günlük zaman değişkeni: (daily-resolution date, time component stripped)
veri_son_yıl['DailyDate'] = pd.to_datetime(veri_son_yıl['InvoiceDate']).dt.date
veri_son_yıl.head()
ilgilendiğimiz_değişkenler = ["Customer ID", "DailyDate", "TotalPrice"]
myd_veri = veri_son_yıl[ilgilendiğimiz_değişkenler]
# Build the per-customer RFM (recency/frequency/monetary) summary table.
rfm_verisi = lifetimes.utils.summary_data_from_transaction_data(myd_veri,
                                                                customer_id_col='Customer ID',
                                                                datetime_col='DailyDate',
                                                                monetary_value_col='TotalPrice'
                                                                )
# RFM verisi:
print(f"tekil müşteri sayısı: {rfm_verisi.shape[0]}")
rfm_verisi.head()
# **gözlemler:**
#
# * analizimizde 4339 müşteri var.
# * 12346 ID'li müşteri yalnızca 1 satın alma işlemi gerçekleştirmiş (tekrar yok), bu nedenle sıklığı ve yakınlığı 0 ve yaşı 325 gün (ör. İlk satın alma ile analizdeki sürenin sonu arasındaki süre).
print(f"müşteri frekansı özeti:\n {rfm_verisi['frequency'].describe()}")
print("\n")
print(f"frekansı 0 olan müşteri oranı: {sum(rfm_verisi['frequency'] == 0)/float(len(rfm_verisi))}")
rfm_verisi['frequency'].plot(kind='hist', bins=50);
# * verimizde tüm müşterilerin yaklaşık %35'inden fazlası yalnızca bir kez alışveriş yapmış.
# * BG/NBD modeli:
# * modelin ayrıntısı için: http://www.brucehardie.com/papers/bgnbd_2004-04-20.pdf
# * modelin formel hali:
#
# $$\begin{array}{l}
# E\left(Y(t) \mid X=x, t_{x}, T, r, \alpha, a, b\right)= \\
# \qquad \begin{array}{c}
# \frac{a+b+x-1}{a-1}\left[1-\left(\frac{\alpha+T}{\alpha+T+t}\right)^{r+x}{2} F_{1}\left(r+x, b+x ; a+b+x-1 ; \frac{t}{\alpha+T+t}\right)\right] \\
# 1+\delta_{x>0} \frac{a}{b+x-1}\left(\frac{\alpha+T}{\alpha+t_{x}}\right)^{r+x}
# \end{array}
# \end{array}$$
# Fit the BG/NBD purchase model on the RFM summary (no regularization).
bgf = lifetimes.BetaGeoFitter(penalizer_coef=0.0)
bgf_modeli = bgf.fit(rfm_verisi['frequency'], rfm_verisi['recency'], rfm_verisi['T'])
# modelin özet çıktısı ve parametre tahminleri:
bgf_modeli.summary
# frekans/yakınlık matrisi:
# +
from lifetimes.plotting import plot_frequency_recency_matrix
fig = plt.figure(figsize=(12,8))
plot_frequency_recency_matrix(bgf_modeli);
# -
# **gözlemler:**
#
# * bir müşteri 120 alışveriş işlemi gerçekleştirdiyse ve en son alışverişi yaklaşık 350 günlükken gerçekleştiyse (ör. yakınlık: ilk alışverişi ile son alışverişi arasındaki süre 350 günse), o zaman en iyi müşterimizdir (sağ alttaki bölge).
# * yakın zamanda ve çok alışveriş yapan müşteriler muhtemelen gelecekte en iyi müşterilerimiz olacaktır. bu müşteriler en iyi müşterilerimizdir.
# * çok alışveriş yapmış, ancak yakın zamanda (sağ üst köşe) alışveriş yapmamış müşteriler muhtemelen pasifleşmiştir.
# * seyrek olarak satın alan müşteriyi temsil eden (40, 300) civarında başka bir müşteri türü de var ve onu son zamanlarda görmediğimiz için tekrar alışveriş yapma ihtimali de var. ancak, pasifleşmiş mi yoksa sadece alışveriş yapanlar arasında mı emin değiliz.
# hangi müşterilerin kesinlikle aktif olduğunu tahmin edebiliriz:
# +
from lifetimes.plotting import plot_probability_alive_matrix
fig = plt.figure(figsize=(12,8));
plot_probability_alive_matrix(bgf_modeli);
# -
# **gözlemler:**
#
# * yakın zamanda alışveriş yapan müşteriler neredeyse kesinlikle "aktif".
# * yakın zamanda değil, çok şey satın alan müşteriler muhtemelen pasifleşmiştir. ve geçmişte ne kadar çok şey satın alırlarsa, pasifleşme olasılıkları da o kadar yüksektir. sağ üstte temsil edilirler.
# müşterileri “önümüzdeki dönemde beklenen en yüksek alışverişlerden” en düşük seviyeye doğru sıralayabiliriz. modelimiz, müşterilerin alışveriş geçmişlerini kullanarak, bir müşterinin gelecek dönemde beklenen alışverişlerini tahmin edecek bir yöntem de içerir:
# +
# Expected number of purchases per customer over the next `t` days, given
# each customer's (frequency, recency, T).
t = 1
rfm_verisi['predicted_purchases'] = bgf_modeli.conditional_expected_number_of_purchases_up_to_time(t,
                                                                                                   rfm_verisi['frequency'],
                                                                                                   rfm_verisi['recency'],
                                                                                                   rfm_verisi['T'])
rfm_verisi.sort_values(by='predicted_purchases').tail(5)
# -
# modelimiz ne kadar iyi uymuş?
# +
# Compare observed vs model-predicted repeat-transaction counts.
from lifetimes.plotting import plot_period_transactions
plot_period_transactions(bgf_modeli)
# -
# modelin kalibrasyon ve test dönemi için uygunluğunun testi:
# +
# Split the last year into a calibration and a holdout window.
from lifetimes.utils import calibration_and_holdout_data
test_verisi = calibration_and_holdout_data(
    veri_son_yıl,
    'Customer ID',
    'DailyDate',
    calibration_period_end='2011-06-08',
    observation_period_end='2011-12-09' )
# -
test_verisi.head()
# +
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases
# Refit on the calibration window only (with a small penalizer) and compare
# against the holdout purchases.
bgf = lifetimes.BetaGeoFitter(penalizer_coef=0.1)
bgf_test = bgf.fit(test_verisi['frequency_cal'],
                   test_verisi['recency_cal'],
                   test_verisi['T_cal'])
plot_calibration_purchases_vs_holdout_purchases(bgf_test, test_verisi);
# -
# bir müşterinin gelecek davranışlarının tahmini:
# +
# Predicted number of purchases for a single customer in the next 10 days.
t = 10
müşteri = rfm_verisi.loc[12348]
bgf_modeli.predict(t,
                   müşteri['frequency'],
                   müşteri['recency'],
                   müşteri['T'])
# -
# **gözlemler:**
#
# * modelimiz, 12348 id'li müşterinin gelecek alışverişin 10 gün içinde 0,08 olasılıkla gerçekleşeceğini tahmin eder.
# en iyi müşterimiz için aktif olma olasılığı:
# +
from lifetimes.plotting import plot_history_alive
# Trace P(alive) over the last year for our best customer.
müşteri_id = 14911
başlangıçtan_bu_yana_geçen_süre = 365
müşteri_verisi = veri_son_yıl.loc[veri_son_yıl['Customer ID'] == müşteri_id]
fig = plt.figure(figsize=(12,8));
plot_history_alive(bgf_modeli,
                   başlangıçtan_bu_yana_geçen_süre,
                   müşteri_verisi,
                   'DailyDate');
# -
# **gözlemler:**
#
# * en iyi müşterimiz kesinlikle aktif, ancak 2011'in başlarında birkaç kez pasifleşmiş.
# son olarak parasal değeri de hesaba katarak müşteri yaşamboyu değerini hesaplayalım.
#
# * Gamma-Gamma modeli ile bunu gerçekleştireceğiz. kaynak: http://www.brucehardie.com/notes/025/gamma_gamma.pdf
# * bizden yalnızca en az bir kez tekrarlı alışveriş yapmış müşterileri tahmin edelim.
# +
# The Gamma-Gamma model only applies to customers with at least one repeat
# purchase (frequency > 0).
geri_dönen_müşteriler = rfm_verisi[rfm_verisi['frequency']>0]
print(f"en az bir kez tekrarlı alışveriş yapan müşteri sayısı : {geri_dönen_müşteriler.shape[0]}")
geri_dönen_müşteriler.head()
# -
# Gamma-Gamma modeli:
# +
ggf = lifetimes.GammaGammaFitter(penalizer_coef=0.0)
ggf_modeli = ggf.fit(
    geri_dönen_müşteriler['frequency'],
    geri_dönen_müşteriler['monetary_value']
)
# -
ggf_modeli.summary
# şimdi modeli uygulayarak, her bir müşterinin yaşamboyu değerini hesaplayabiliriz:
geri_dönen_müşteriler['customer_lifetime_value'] = ggf_modeli.conditional_expected_average_profit(
    geri_dönen_müşteriler['frequency'],
    geri_dönen_müşteriler['monetary_value']
)
geri_dönen_müşteriler.head()
# en değerli müşteriler:
geri_dönen_müşteriler.sort_values(by='customer_lifetime_value').tail()
| uygulamalar/03_modelleme.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Imports
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# -
PROCESSED_DATA_PATH = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/final_data.csv'
PROCESSED_DATA_PATH_PICKLE = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/final_data.pickle'
data_frame = pd.read_csv(PROCESSED_DATA_PATH)
data_frame.head()
# The CSV's unnamed index column becomes a serial-number ("SN") column.
data_frame = data_frame.rename(columns={"Unnamed: 0": "SN"})
data_frame.columns
# +
# NOTE: this is an alias, not a copy -- mutating data_frame_dummy mutates
# data_frame as well.
data_frame_dummy = data_frame
# Unix timestamp derived from the ISO date string, for numeric correlation.
data_frame_dummy['Timestamp'] = data_frame_dummy['Date'].map(lambda data: datetime.strptime(data, "%Y-%m-%d").timestamp())
data_frame_dummy
# -
data_frame = data_frame_dummy
# +
# Quick look at one symbol's close-price history.
data_frame_symbol = data_frame[data_frame['Symbol'] == '20MICRONS']
plt.plot(data_frame_symbol['Timestamp'].to_list(), data_frame_symbol['Close'].to_list())
# -
data_frame.columns
data_frame_description = data_frame.describe()
data_frame_description
data_frame_description.columns
# Correlation across the numeric columns of the single symbol.
data_correlation = data_frame_symbol.iloc[:,1:]
data_correlation.corr()
data_frame_symbol
# +
# Needed Columns
data_frame_columns = [
    'Date',
    'Symbol',
    'Open',
    'Close',
    'VWAP',
    'Volume',
    'Turnover',
    'Trades',
    'Deliverable Volume',
    'Timestamp'
]
updated_data_frame = data_frame_symbol[data_frame_columns]
# -
updated_data_frame
updated_data_frame.corr()
# +
# Same column subset, but for all symbols; drop rows missing Date or Symbol.
data_frame_columns = [
    'Date',
    'Symbol',
    'Open',
    'Close',
    'VWAP',
    'Volume',
    'Turnover',
    'Trades',
    'Deliverable Volume',
    'Timestamp'
]
final_data_frame = data_frame[data_frame_columns]
final_data_frame = final_data_frame[final_data_frame['Date'].notna()]
final_data_frame = final_data_frame[final_data_frame['Symbol'].notna()]
final_data_frame
# +
# [
#     'Date',
#     'Symbol',
#     'Close',
#     'VWAP',
#     'Volume',
#     'Turnover',
#     'Trades',
#     'Deliverable Volume',
#     'Timestamp'
# ]
# Fill remaining gaps: empty string for identifiers, 0 for numeric columns.
final_data_frame['Date'] = final_data_frame['Date'].fillna('')
final_data_frame['Symbol'] = final_data_frame['Symbol'].fillna('')
final_data_frame['Open'] = final_data_frame['Open'].fillna(0)
final_data_frame['Close'] = final_data_frame['Close'].fillna(0)
final_data_frame['VWAP'] = final_data_frame['VWAP'].fillna(0)
final_data_frame['Volume'] = final_data_frame['Volume'].fillna(0)
final_data_frame['Trades'] = final_data_frame['Trades'].fillna(0)
final_data_frame['Turnover'] = final_data_frame['Turnover'].fillna(0)
final_data_frame['Deliverable Volume'] = final_data_frame['Deliverable Volume'].fillna(0)
final_data_frame['Timestamp'] = final_data_frame['Timestamp'].fillna(0)
final_data_frame
# -
final_data_frame
FINAL_DATA_PATH = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/processed_final_data.csv'
FINAL_DATA_PATH_PICKLE = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/processed_final_data.pickle'
# +
final_data_frame.to_csv(FINAL_DATA_PATH)
with open(FINAL_DATA_PATH_PICKLE, 'wb') as file:
    pickle.dump(final_data_frame, file)
# +
# Per-day close deltas and percent changes for one symbol, as a sanity check.
temp_data_frame = final_data_frame[final_data_frame['Symbol'] == '20MICRONS']
a_df = temp_data_frame['Close'].round(decimals = 1).to_list()
# np.diff(a_df)
final_arr = []
change_per = []
for i, x in enumerate(a_df):
    # Fix: the original condition was `i != 0 or i == len(a_df) - 1`; the
    # second clause is dead except for a 1-element list, where the delta is 0
    # anyway. The first element has no previous day, so its delta is 0.
    if i != 0:
        final_arr.append(round(x - a_df[i-1], 1))
    else:
        final_arr.append(0)
for x, y in zip(a_df, final_arr):
    # Guard against division by zero for days with a 0.0 close.
    if x != 0:
        change_per.append(round((y/x)*100, 1))
    else:
        change_per.append(0)
# print(len(a_df), len(final_arr))
print(a_df[:10], final_arr[:10], change_per[:10])
# +
# Correlation array of stocks for closing rate and timestamp.
# Per-symbol summary: correlation of close/trades against time, average and
# latest prices, and rate-of-change statistics.
correlation = pd.DataFrame()
index = 1
rows = []
for symbol in final_data_frame['Symbol'].unique():
    temp_data_frame = final_data_frame[final_data_frame['Symbol'] == symbol]
    temp_cost = temp_data_frame['Close']
    temp_open = temp_data_frame['Open']
    temp_time = temp_data_frame['Timestamp']
    temp_trades = temp_data_frame['Trades']
    temp_change = []
    temp_change_percentage = []
    # Fix: the original computed deltas against `a_df` -- the close list of
    # *20MICRONS* from the previous cell -- so every other symbol's
    # RateOfChange/MaxChange was polluted by another stock's prices (with a
    # bare `except` hiding the resulting index errors). Use this symbol's own
    # rounded closes, mirroring the single-symbol cell above.
    rounded_cost = temp_cost.round(decimals = 1).to_list()
    for i, x in enumerate(rounded_cost):
        if i != 0:
            temp_change.append(round(x - rounded_cost[i - 1], 1))
        else:
            temp_change.append(0)
    for x, y in zip(temp_cost, temp_change):
        if x != 0:
            temp_change_percentage.append(round((y/x)*100, 1))
        else:
            temp_change_percentage.append(0)
    rows.append(
        pd.DataFrame(
            {
                'Symbol': symbol,
                'CostCorrelation': temp_time.corr(temp_cost),
                'Average': np.mean(temp_cost.to_numpy()),
                'Current': temp_cost.to_list()[-1],
                'Open': temp_open.to_list()[-1],
                'RateOfChange': np.mean(temp_change),
                'RateOfChangePercentage': np.mean(temp_change_percentage),
                'MaxChange': max(temp_change),
                'MaxChangePercentage': max(temp_change_percentage),
                'TradeCorrelation': temp_time.corr(temp_trades)
            },
            index=[index]
        )
    )
    print ('\r symbol count ', index, end='')
    index += 1
# DataFrame.append is deprecated (removed in pandas 2.0); build the frame with
# a single concat instead of growing it row by row.
if rows:
    correlation = pd.concat(rows)
correlation
FINAL_CORRELATION_PATH = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/processed_final_correlation.csv'
FINAL_CORRELATION_PATH_PICKLE = '/mnt/e/HOME/AI/model_stock_predictions/data/processed/processed_final_correlation.pickle'
# +
# correlation.drop('Unnamed: 0', inplace=True, axis=1)
# correlation = correlation.rename(columns={"correlation": "Correlation"})
# +
# Persist the per-symbol correlation table as both CSV and pickle.
correlation.to_csv(FINAL_CORRELATION_PATH)
with open(FINAL_CORRELATION_PATH_PICKLE, 'wb') as file:
    pickle.dump(correlation, file)
# -
correlation
# +
# Add open into the matrix
# The prediction has to happen using a loop. Creating new open for every next day by predicting close
| notebooks/data-cleaning-and-processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # TTS Inference Prosody Control
#
# This notebook is intended to teach users how to control duration and pitch with the FastPitch model.
# # License
#
# > Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# >
# > Licensed under the Apache License, Version 2.0 (the "License");
# > you may not use this file except in compliance with the License.
# > You may obtain a copy of the License at
# >
# > http://www.apache.org/licenses/LICENSE-2.0
# >
# > Unless required by applicable law or agreed to in writing, software
# > distributed under the License is distributed on an "AS IS" BASIS,
# > WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# > See the License for the specific language governing permissions and
# > limitations under the License.
# + tags=[]
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# NeMo branch to install from when running on Colab.
BRANCH = 'main'
# # If you're using Google Colab and not running locally, uncomment and run this cell.
# # !apt-get install sox libsndfile1 ffmpeg
# # !pip install wget unidecode
# # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
# -
# ## Setup
#
# Please run the below cell to import libraries used in this notebook. This cell will load the fastpitch model and hifigan models used in the rest of the notebook. Lastly, two helper functions are defined. One is used for inference while the other is used to plot the inference results.
# +
# Import all libraries
import IPython.display as ipd
import librosa
import librosa.display
import numpy as np
import torch
from matplotlib import pyplot as plt
# %matplotlib inline
# Reduce logging messages for this notebook
from nemo.utils import logging
logging.setLevel(logging.ERROR)
from nemo.collections.tts.models import FastPitchModel
from nemo.collections.tts.models import HifiGanModel
from nemo.collections.tts.helpers.helpers import regulate_len
# Load the models from NGC: FastPitch (spectrogram generator) and HiFi-GAN
# (vocoder), both in eval mode on the GPU for inference only.
fastpitch = FastPitchModel.from_pretrained("tts_en_fastpitch").eval().cuda()
hifigan = HifiGanModel.from_pretrained("tts_hifigan").eval().cuda()
# Sample rate used by both pretrained models.
sr = 22050
# Define some helper functions
# Define a helper function to go from string to audio
def str_to_audio(inp, pace=1.0, durs=None, pitch=None):
    """Synthesize speech for the text `inp` with the global FastPitch/HiFi-GAN
    models, optionally overriding pace, durations, and pitch.

    Returns (spectrogram, audio ndarray, predicted durations, predicted pitch).
    """
    with torch.no_grad():
        parsed = fastpitch.parse(inp)
        # FastPitch returns (spec, _, durs_pred, _, pitch_pred, ...).
        outputs = fastpitch(text=parsed, durs=durs, pitch=pitch, speaker=None, pace=pace)
        spec, durs_pred, pitch_pred = outputs[0], outputs[2], outputs[4]
        audio = hifigan.convert_spectrogram_to_audio(spec=spec).to('cpu').numpy()
    return spec, audio, durs_pred, pitch_pred
# Define a helper function to plot spectrograms with pitch and display the audio
def display_pitch(audio, pitch, sr=22050, durs=None):
    """Plot a log spectrogram of `audio` with the pitch contour overlaid, and
    show an inline audio player.

    audio: array shaped (1, n_samples), as returned by str_to_audio.
    pitch: pitch tensor; may be normalized and/or per-token (see checks below).
    durs:  optional per-token durations used to expand a per-token pitch.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    spec = np.abs(librosa.stft(audio[0], n_fft=1024))
    # Check to see if pitch has been unnormalized
    if torch.abs(torch.mean(pitch)) <= 1.0:
        # Unnormalize the pitch with LJSpeech's mean and std
        pitch = pitch * 65.72037058703644 + 214.72202032404294
    # Check to see if pitch has been expanded to the spec length yet
    # NOTE(review): len(pitch) is the batch dimension while spec.shape[0] is
    # the frequency-bin count, so this comparison looks like it was meant for
    # the time axes -- confirm against regulate_len's expected input.
    if len(pitch) != spec.shape[0] and durs is not None:
        pitch = regulate_len(durs, pitch.unsqueeze(-1))[0].squeeze(-1)
    # Plot and display audio, spectrogram, and pitch
    ax.plot(pitch.cpu().numpy()[0], color='cyan', linewidth=1)
    librosa.display.specshow(np.log(spec+1e-12), y_axis='log')
    ipd.display(ipd.Audio(audio, rate=sr))
    plt.show()
# -
# ## Duration Control
#
# This section is applicable to models that use a duration predictor module. This module is called the Length Regulator and was introduced in FastSpeech [1]. A list of NeMo models that support duration predictors are as follows:
#
# - [FastPitch](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastpitch)
# - [FastPitch_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastpitchhifigan)
# - [FastSpeech2](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastspeech_2)
# - [FastSpeech2_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastspeech2hifigan)
# - [TalkNet](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_talknet)
# - [Glow-TTS](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_glowtts)
#
# While each model has their own implementation of this duration predictor, all of them follow a simple convolutional architecture. The input is the encoded tokens, and the output of the module is a value that represents how many frames in the decoder correspond to each token. It is essentially a hard attention mechanism.
#
# Since each model outputs a duration value per token, it is simple to slow down or increase the speech rate by increasing or decreasing these values. Consider the following:
#
# ```python
# def regulate_len(durations, pace=1.0):
# durations = durations.float() / pace
# # The output from the duration predictor module is still a float
# # If we want the speech to be faster, we can increase the pace and make each token duration shorter
# # Alternatively we can slow down the pace by decreasing the pace parameter
# return durations.long() # Lastly, we need to make the durations integers for subsequent processes
# ```
#
# Let's try this out with FastPitch
# +
# Demonstrate duration control: synthesize the same sentence at three paces.
# NOTE(review): `str_to_audio`, `ipd` and `sr` are defined earlier in the notebook.
#Define what we want the model to say
input_string = "Hey, I am speaking at different paces!" # Feel free to change it and experiment
# Let's run fastpitch normally
_, audio, *_ = str_to_audio(input_string)
print(f"This is fastpitch speaking at the regular pace of 1.0. This example is {len(audio[0])/sr:.3f} seconds long.")
ipd.display(ipd.Audio(audio, rate=sr))
# We can speed up the speech by adjusting the pace
# pace > 1.0 shortens every predicted token duration, so the output is faster.
_, audio, *_ = str_to_audio(input_string, pace=1.2)
print(f"This is fastpitch speaking at the faster pace of 1.2. This example is {len(audio[0])/sr:.3f} seconds long.")
ipd.display(ipd.Audio(audio, rate=sr))
# We can slow down the speech by adjusting the pace
# pace < 1.0 lengthens the durations, so the output is slower.
_, audio, *_ = str_to_audio(input_string, pace=0.75)
print(f"This is fastpitch speaking at the slower pace of 0.75. This example is {len(audio[0])/sr:.3f} seconds long.")
ipd.display(ipd.Audio(audio, rate=sr))
# -
# ## Pitch Control
#
# The newer spectrogram generator models predict the pitch for certain words. Since these models predict pitch, we can adjust the predicted pitch in a similar manner to the predicted durations like in the previous section. A list of NeMo models that support pitch control are as follows:
#
# - [FastPitch](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastpitch)
# - [FastPitch_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastpitchhifigan)
# - [FastSpeech2](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_fastspeech_2)
# - [FastSpeech2_HifiGan_E2E](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_e2e_fastspeech2hifigan)
# - [TalkNet](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_talknet)
#
# ### FastPitch
#
# As with the previous tutorial, we will focus on FastPitch. FastPitch differs from some other models as it predicts a pitch difference to a normalized (mean 0, std 1) speaker pitch. Other models will just predict the unnormalized pitch. Looking at a simplified version of the FastPitch model, we see
#
# ```python
# # Predict pitch
# pitch_predicted = self.pitch_predictor(enc_out, enc_mask) # Predicts a pitch that is normalized with speaker statistics
# pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1)) # A simple 1D convolution to map the float pitch to a pitch embedding
#
# enc_out = enc_out + pitch_emb.transpose(1, 2) # We add the pitch to the encoder output
# spec, *_ = self.decoder(input=len_regulated, seq_lens=dec_lens) # We send the sum to the decoder to get the spectrogram
# ```
#
# Let's see the `pitch_predicted` for a sample text. You can run the below cell. You should get an image that looks like the following for the input `Hey, what is my pitch?`:
#
# <img src="https://raw.githubusercontent.com/NVIDIA/NeMo/main/tutorials/tts/fastpitch-pitch.png">
#
# Notice that the last word `pitch` has an increase in pitch to stress that it is a question.
# +
import librosa
import librosa.display
from matplotlib import pyplot as plt
import numpy as np
from nemo.collections.tts.helpers.helpers import regulate_len
# %matplotlib inline
#Define what we want the model to say
input_string = "Hey, what is my pitch?" # Feel free to change it and experiment
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec, audio, durs_pred, pitch_pred = str_to_audio(input_string)
    # FastPitch predicts one pitch value per token. To plot it, we have to expand the token length to the spectrogram length
    pitch_pred, _ = regulate_len(durs_pred, pitch_pred.unsqueeze(-1))
    pitch_pred = pitch_pred.squeeze(-1)
    # Note we have to unnormalize the pitch with LJSpeech's mean and std
    # (std = 65.72..., mean = 214.72... Hz, per the `x * std + mean` form below)
    pitch_pred = pitch_pred * 65.72037058703644 + 214.72202032404294
    # Let's plot the predicted pitch and how it affects the predicted audio
    fig, ax = plt.subplots(figsize=(12, 6))
    # Magnitude spectrogram of the synthesized audio, used as the plot background.
    spec = np.abs(librosa.stft(audio[0], n_fft=1024))
    ax.plot(pitch_pred.cpu().numpy()[0], color='cyan', linewidth=1)
    # 1e-12 avoids log(0) on silent spectrogram bins.
    librosa.display.specshow(np.log(spec+1e-12), y_axis='log')
    ipd.display(ipd.Audio(audio, rate=sr))
# -
# ## Pitch Manipulation
#
# Now that we see how the pitch affects the predicted spectrogram, we can now adjust it to add some effects. We will explore 4 different manipulations:
#
# 1) Pitch shift
#
# 2) Pitch flatten
#
# 3) Pitch inversion
#
# 4) Pitch amplification
# ### Pitch Shift
# First, let's handle pitch shifting. To shift the pitch up or down by some Hz, we can just add or subtract as needed. Let's shift the pitch down by 50 Hz and compare it to the previous example.
# +
# Pitch shift demo: move the whole contour down by 50 Hz and compare.
#Define what we want the model to say
input_string = "Hey, what is my pitch?" # Feel free to change it and experiment
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Note we have to unnormalize the pitch with LJSpeech's mean and std
    pitch_shift = pitch_norm_pred * 65.72037058703644 + 214.72202032404294
    # Now let's pitch shift down by 50Hz
    pitch_shift = pitch_shift - 50
    # Now we have to renormalize it to be mean 0, std 1
    pitch_shift = (pitch_shift - 214.72202032404294) / 65.72037058703644
    # Now we can pass it to the model
    spec_shift, audio_shift, durs_shift_pred, _ = str_to_audio(input_string, pitch=pitch_shift)
    # NOTE: We do not plot the pitch returned from str_to_audio.
    # When we override the pitch, we want to plot the pitch that we overrode the model with.
    # In this case, it is `pitch_shift`.
    # Let's see both results (`display_pitch` is defined earlier in the notebook)
    print("The first unshifted sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    print("The second shifted sample. This sample is much deeper than the previous.")
    display_pitch(audio_shift, pitch_shift, durs=durs_shift_pred)
# -
# ### Pitch Flattening
# Second, let's look at pitch flattening. To flatten the pitch, we just set it to 0. Let's run it and compare the results.
# +
# Pitch flattening demo: zero out the normalized pitch for monotone speech.
#Define what we want the model to say
input_string = "Hey, what is my pitch?" # Feel free to change it and experiment
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Now let's set the pitch to 0
    # (FastPitch pitch is normalized to mean 0, so 0 means "speaker average pitch")
    pitch_flat = pitch_norm_pred * 0
    # Now we can pass it to the model
    spec_flat, audio_flat, durs_flat_pred, _ = str_to_audio(input_string, pitch=pitch_flat)
    # Let's see both results
    print("The first unaltered sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    print("The second flattened sample. This sample is more monotone than the previous.")
    display_pitch(audio_flat, pitch_flat, durs=durs_flat_pred)
# -
# ### Pitch Inversion
# Third, let's look at pitch inversion. To invert the pitch, we just multiply it by -1. Let's run it and compare the results.
# +
# Pitch inversion demo: negate the normalized pitch contour.
#Define what we want the model to say
input_string = "Hey, what is my pitch?" # Feel free to change it and experiment
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Now let's invert the pitch
    # (multiplying the mean-0 normalized pitch by -1 mirrors rises into falls)
    pitch_inv = pitch_norm_pred * -1
    # Now we can pass it to the model
    spec_inv, audio_inv, durs_inv_pred, _ = str_to_audio(input_string, pitch=pitch_inv)
    # Let's see both results
    print("The first unaltered sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    print("The second inverted sample. This sample sounds less like a question and more like a statement.")
    display_pitch(audio_inv, pitch_inv, durs=durs_inv_pred)
# -
# ### Pitch Amplify
# Lastly, let's look at pitch amplifying. To amplify the pitch, we just multiply it by a positive constant. Let's run it and compare the results.
# +
# Pitch amplification demo: scale the normalized pitch by a constant > 1.
#Define what we want the model to say
input_string = "Hey, what is my pitch?" # Feel free to change it and experiment
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Now let's amplify the pitch
    # (scaling the mean-0 normalized pitch exaggerates rises and falls)
    pitch_amp = pitch_norm_pred * 1.5
    # Now we can pass it to the model
    spec_amp, audio_amp, durs_amp_pred, _ = str_to_audio(input_string, pitch=pitch_amp)
    # Let's see both results
    print("The first unaltered sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    print("The second amplified sample.")
    display_pitch(audio_amp, pitch_amp, durs=durs_amp_pred)
# -
# ## Putting it all together
#
# Now that we understand how to control the duration and pitch of TTS models, we can show how to adjust the voice to sound more solemn (slower speed + lower pitch), or more excited (higher speed + higher pitch).
# +
# Combine the controls: lower pitch + slower pace for a solemn delivery.
#Define what we want the model to say
input_string = "I want to pass on my condolences for your loss."
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Let's try to make the speech more solemn
    # Let's deamplify the pitch and shift the pitch down by 75% of 1 standard deviation
    pitch_sol = (pitch_norm_pred)*0.75-0.75
    # Fastpitch tends to raise the pitch before "loss" which sounds inappropriate.
    # NOTE(review): the original comment said "remove that pitch raise", but the
    # code *adds* 0.2 to the fifth-from-last token -- confirm the intended sign.
    pitch_sol[0][-5] += 0.2
    # Now let's pass our new pitch to fastpitch with a 90% pacing to slow it down
    spec_sol, audio_sol, durs_sol_pred, _ = str_to_audio(input_string, pitch=pitch_sol, pace=0.9)
    # Let's see both results
    print("The first unaltered sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    # Fixed typo in the user-facing message: "solumn" -> "solemn".
    print("The second solemn sample")
    display_pitch(audio_sol, pitch_sol, durs=durs_sol_pred)
# +
# Combine the controls: higher pitch + faster pace for an excited delivery.
#Define what we want the model to say
input_string = "Congratulations on your promotion."
# Run inference to get spectrogram and pitch
with torch.no_grad():
    spec_norm, audio_norm, durs_norm_pred, pitch_norm_pred = str_to_audio(input_string)
    # Let's amplify the pitch to make it sound more animated
    # We also pitch shift up by 50% of 1 standard deviation
    pitch_excite = (pitch_norm_pred)*1.7+0.5
    # Now let's pass our new pitch to fastpitch with a 110% pacing to speed it up
    spec_excite, audio_excite, durs_excite_pred, _ = str_to_audio(input_string, pitch=pitch_excite, pace=1.1)
    # Let's see both results
    print("The first unaltered sample")
    display_pitch(audio_norm, pitch_norm_pred, durs=durs_norm_pred)
    print("The second excited sample")
    display_pitch(audio_excite, pitch_excite, durs=durs_excite_pred)
# -
# ## Other Models
#
# This notebook lists other models that allow for control of speech rate and pitch. However, please note that not all models accept a `pace`, nor a `pitch` parameter as part of their forward/generate_spectrogram functions. Users who are interested in adding this functionality can use this notebook as a guide on how to do so.
#
# ### Duration Control
#
# Adding duration control is the simpler of the two and one simply needs to add the `regulate_len` function to the appropriate model for duration control.
#
# ### Pitch Control
#
# Pitch control is more complicated. There are numerous design decisions that differ between models: 1) Whether to normalize the pitch, 2) Whether to predict pitch per spectrogram frame or per token, and more. While the basic transformations presented here (shift, flatten, invert, and amplify) can be done with all pitch predicting models, where to add this pitch transformation will differ depending on the model.
# ## References
#
# [1] https://arxiv.org/abs/1905.09263
| tutorials/tts/Inference_DurationPitchControl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
# A UK based e-commerce company wants to divide their customers into segments and develop marketing strategies according to the segmentation results.
# Data Set Story:
#
# https://archive.ics.uci.edu/ml/datasets/Online+Retail+II
#
# This Online Retail II data set contains all the transactions occurring for a UK-based and registered, non-store online retail between 01/12/2009 and 09/12/2011.
#
# Items listed are souvenirs and gift items.
#
# Majority of the customers are wholesale traders.
#
# Attribute Information:
#
# • InvoiceNo: Invoice number. Nominal. A 6-digit integral number uniquely assigned to each transaction. If this code starts with the letter 'c', it indicates a cancellation.
# • StockCode: Product (item) code. Nominal. A 5-digit integral number uniquely assigned to each product.
# • Description: Item name and description. Nominal.
# • Quantity: The quantities of each item per transaction. Numeric.
# • InvoiceDate: Invoice date and time. Numeric.
# • UnitPrice: Unit price. Numeric. Product price per unit (£).
# • CustomerID: Customer number. Nominal. A 5-digit number uniquely assigned to each customer.
# • Country: Country name. Nominal. The name of the country where a customer resides.
# In order to import Excel files, xlrd and openpyxl should be installed. You can install them with pip install:
# !pip install xlrd
# !pip install openpyxl
# First step is importing the needed libraries: numpy, pandas and datetime
#
import numpy as np
import pandas as pd
import datetime as dt
# Then loading the dataset.
# +
# Load the "Year 2010-2011" sheet of the UCI Online Retail II workbook.
df_ = pd.read_excel("../input/uci-online-retail-ii-data-set/online_retail_II.xlsx", sheet_name="Year 2010-2011" )
# Work on a copy so the slow Excel read never has to be repeated after the
# destructive cleaning steps below.
df = df_.copy()
df.head()
# -
# After loading data set, we are trying to get general information about our dataset.
# Quick exploratory look at the raw data: dtypes, missing values, item counts.
df.info()
# Total Number of missing values
df.isnull().sum()
# Total number of unique items
df["StockCode"].nunique()
# Number of transaction lines per item (value_counts counts rows, not summed quantity)
df["StockCode"].value_counts().head()
# Items ordered most
# Fixed: `.head` was missing its call parentheses, so this cell displayed the
# bound method object instead of the top rows.
df.groupby("StockCode").agg({"Quantity": "sum"}).sort_values("Quantity", ascending=False).head()
# Total number of invoices
df["Invoice"].nunique()
# # Data Preprocessing
# After getting familiar with the dataset, we are preparing the data for analysis. For this case we are going to drop null values and clear out the returned items. For this analysis outliers are neglected, so we are not going to do any operation on outliers.
# Dropping out null values
df.dropna(inplace=True)
df.isnull().sum()
# Invoices starting with "C" are either cancelled or returned goods, so we drop those rows.
df=df[~df["Invoice"].str.contains("C", na=False)]
# We also clear any items Quantity less than 1
df= df[(df["Quantity"]>0)]
df.describe().T
# # RFM
# We are starting by adding a TotalPrice column
# Revenue per line item: quantity times unit price.
df["TotalPrice"]=df["Quantity"]*df["Price"]
df.head()
# ## Recency
#
# To calculate the Recency value, we need the last transaction date.
# We fix the analysis date 2 days after the last transaction date to get reasonable recency values.
# Last transaction date
df["InvoiceDate"].max()
# Analysis date fixed two days after the last transaction (see note above).
today_date = dt.datetime(2011, 12, 11)
# We are forming a DataFrame per Customer ID with the variables InvoiceDate (becomes
# Recency in the next step), Invoice number (becomes Frequency) and total sum of
# transactions (becomes Monetary).
rfm = df.groupby('Customer ID').agg({'InvoiceDate': lambda date: (today_date - date.max()).days,
                                     'Invoice': lambda num: num.nunique(),
                                     'TotalPrice': lambda TotalPrice: TotalPrice.sum()})
# We reassign the column names from the previous cell.
# Recency: We subtracted the last transaction date from today's date to calculate recency.
# Frequency: We counted invoices per customer. Since each invoice refers to one transaction, we can use the count as frequency.
# Monetary: We sum up all transactions per customer as their monetary value.
rfm.columns = ['Recency', 'Frequency', 'Monetary']
# To avoid miscalculation we run a check query to eliminate 0 values in Monetary and Frequency.
# Fixed operator precedence: `&` binds tighter than `>` in Python, so the original
# `(rfm["Monetary"]) > 0 & (rfm["Frequency"] > 0)` evaluated `0 & (...)` first and
# silently dropped the Frequency condition. Each comparison must be parenthesized.
rfm = rfm[(rfm["Monetary"] > 0) & (rfm["Frequency"] > 0)]
rfm
# We divide all the scores into 5 categories with the qcut function, where 5 is the best and 1 is the worst.
# For Recency, the most recent transaction is the best.
# Recency: smaller is better, so the most recent quintile gets score 5.
rfm["RecencyScore"] = pd.qcut(rfm['Recency'], 5, labels=[5, 4, 3, 2, 1])
# rank(method="first") breaks ties so qcut can form 5 equal-sized bins.
rfm["FrequencyScore"] = pd.qcut(rfm['Frequency'].rank(method="first"), 5, labels=[1, 2, 3, 4, 5])
rfm["MonetaryScore"] = pd.qcut(rfm['Monetary'], 5, labels=[1, 2, 3, 4, 5])
# After calculating RFM scores, we combine scores as a new column
rfm["RFM_SCORE"] = (rfm['RecencyScore'].astype(str) +
                    rfm['FrequencyScore'].astype(str) +
                    rfm['MonetaryScore'].astype(str))
rfm
# We create segment table.
# Map two-character Recency+Frequency score patterns to named segments.
seg_map = {
    r'[1-2][1-2]': 'Hibernating',
    r'[1-2][3-4]': 'At_Risk',
    r'[1-2]5': 'Cant_Loose',
    r'3[1-2]': 'About_to_Sleep',
    r'33': 'Need_Attention',
    r'[3-4][4-5]': 'Loyal_Customers',
    r'41': 'Promising',
    r'51': 'New_Customers',
    r'[4-5][2-3]': 'Potential_Loyalists',
    r'5[4-5]': 'Champions'
}
# We add a new column to rfm dataframe as "Segment".
# Note: the segment is derived from Recency and Frequency scores only; Monetary is not used here.
rfm['Segment'] = rfm['RecencyScore'].astype(str) + rfm['FrequencyScore'].astype(str)
rfm['Segment'] = rfm['Segment'].replace(seg_map, regex=True)
rfm
# General statistics for each segment
rfm[["Segment", "Recency", "Frequency", "Monetary"]].groupby("Segment").agg(["mean", "count"])
rfm[["Segment", "Recency", "Frequency", "Monetary"]].groupby("Segment").agg(["mean", "count",
                                                                             "min", "median", "max"])
# We can export desired segments and customer numbers as csv with the following code
# +
#new_df = pd.DataFrame()
# +
#new_df["Loyal_Customers"] = rfm[rfm["Segment"] == "Loyal_Customers"].index
# + jupyter={"outputs_hidden": true}
#new_df["Loyal_Customers"] = rfm[rfm["Loyal_customers"].astype(int)
# +
#new_df.to_excel("Loyal_Customers.xlsx")
| notebooks/interviews/JL/projects/rfm-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# # MoMA Online Archive Scrape
#
# 
#
# This project uses `urllib` and `BeautifulSoup` libraries to scrape data from the Museum of Modern Art's [online collection](https://www.moma.org/collection/works?&with_images=1&on_view=1).
# ## Structure of Response Data
#
# The relevant HTML section we want to extract data has the following structure:
#
# ```
# <li> - class "grid-item--work"
# └── <a> tag - link to artwork page
# ├── <div>
# │ └── <picture> tag
# │ └── <img> tag
# └── <div> with artwork data
# └── <h3>
# ├── <span> - artist name
# ├── <span> - artwork name
# └── <span> - artwork date
# ```
#
# The goal is to pull the artwork data inside the `h3` tag.
# Each artist's archive has an associated ID.
# Create a dictionary that contains a mapping between the artist name and their ID
# NOTE(review): the "<NAME>" placeholders look like anonymized artist names;
# the keys below must match the strings used in the lookup cells (e.g.
# ARTISTS['le corbusier']) -- confirm the real names/keys before running.
ARTISTS = {
    "le corbusier": {
        "name": "<NAME>",
        "id": 3426},  # MoMA artist-page ID used to build the query URL
    "<NAME>": {
        "name": "<NAME>",
        "id": 7166
    },
    "<NAME>": {
        "name": "<NAME>",
        "id": 2096
    },
}
# +
import urllib
import re
from bs4 import BeautifulSoup
import numpy as np
class ExtraSoup(BeautifulSoup):
    """Child class of BeautifulSoup with additional methods for scraping a
    single artist's page from the MoMA online collection.

    Typical workflow:
        soup = ExtraSoup()
        soup.get_artist_info(ARTISTS['le corbusier'])
        soup.make_soup()
        soup.get_artwork_data()
    """

    # --- HTML response retrieval helpers ---------------------------------
    # (the section markers below were previously bare no-op string statements;
    # they are now comments)

    def get_artist_info(self, artist_info):
        """Initialize artist_name and artist_id attributes from a mapping
        with 'name' and 'id' keys (see the ARTISTS dictionary)."""
        self.artist_name = artist_info['name']
        self.artist_id = artist_info['id']

    def build_url(self, query, page=1):
        """Build a url to query the MoMA website.

        `page` generalizes the previously hard-coded first results page;
        the default value keeps the original behavior.
        """
        url = f'https://www.moma.org/artists/{query}?locale=en&page={page}'
        return url

    def get_url_response(self, url):
        """Get the raw HTML response bytes from url."""
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req) as response:
            return response.read()

    def make_soup(self):
        """Fetch the artist page and parse it into self.html."""
        self.html = BeautifulSoup(
            self.get_url_response(self.build_url(self.artist_id)),
            'html.parser'
        )

    # --- HTML parsing helpers --------------------------------------------

    def get_artwork_year(self, string):
        """Extract the year from an artwork date string, or None.

        Year data may come in various forms like "1932-34", "1923-1924",
        "c.1934". Not a perfect solution, but in this case get the first
        instance of a valid 4-digit year (i.e. "1923-1924" extracts 1923).

        Fixed: returns None when the string contains no 4-digit year
        (e.g. "n.d."); the original crashed with AttributeError because it
        called .group(0) on the result of re.search without checking it.

        Reference: https://developers.google.com/edu/python/regular-expressions
        """
        match = re.search(r'\d{4}', string)
        if match is None:
            return None
        return int(match.group(0))

    def validate_artist(self, metadata):
        """Return True if the artwork was made by the artist we are querying
        for, since search results may return a variety of artists."""
        if metadata:
            if self.artist_name.lower() in metadata.lower():
                return True
        return False

    def validate_string(self, metadata):
        """Return the cleaned text of a metadata <span>, or None."""
        # Some titles are wrapped in an <em> tag inside the span.
        if metadata.find("em"):
            return metadata.find("em").string.strip()
        elif metadata.string is not None:
            # Span tag contents are listed with lots of spaces and newlines.
            return metadata.string.strip()
        return None

    def get_artwork_data(self):
        """Parse the artist page and collect artwork dicts into self.artworks."""
        artist_works = self.html.find_all('li', 'grid-item--work')
        self.artworks = []
        for i, artwork in enumerate(artist_works):
            artwork_dict = {}
            # Get span tags that contain artwork data.
            # NOTE(review): assumes each work tile has at least three <span>
            # tags (artist, title, date) -- confirm against the page markup.
            artwork_metadata = artwork.find_all("span")
            artist = self.validate_string(artwork_metadata[0])
            if self.validate_artist(artist):
                artwork_dict['index'] = i
                artwork_dict['artist'] = artist
                artwork_dict['title'] = self.validate_string(artwork_metadata[1])
                artwork_dict['year'] = self.get_artwork_year(
                    self.validate_string(artwork_metadata[2])
                )
                self.artworks.append(artwork_dict)

    def get_artwork_urls(self):
        """Extract the URL of each artwork from the artist page."""
        artist_works = self.html.find_all('li', 'grid-item--work')
        self.artwork_urls = []
        for artwork in artist_works:
            # NOTE(review): assumes every tile has an <a class="grid-item__link">;
            # find() would return None otherwise and url['href'] would raise.
            url = artwork.find('a', 'grid-item__link', href=True)
            self.artwork_urls.append(url['href'])

    # --- Functions for analysis -------------------------------------------

    def get_artwork_year_median(self):
        """Get the median year of all the artworks.

        Artworks whose year could not be parsed (year is None) are skipped so
        numpy's median does not fail on mixed types.
        """
        years = [work['year'] for work in self.artworks if work['year'] is not None]
        return int(np.median(years))

    def get_artwork_count(self):
        """Get a count of how many artworks were found in the online collection."""
        return len(self.artworks)

    # Todo: Add export functions (to JSON, CSV)
# -
# ## Le Corbusier
#
# <img src="img/cb_savoye.jpg" width="500"/>
# Scrape Le Corbusier's artist page and extract artwork metadata.
le_corbusier = ExtraSoup()
le_corbusier.get_artist_info(ARTISTS['le corbusier'])
le_corbusier.make_soup()        # fetch + parse the artist page
le_corbusier.get_artwork_data() # populate .artworks
le_corbusier.artworks
# ## <NAME>
#
# <img src="img/mies_barcelona.jpg" width="500"/>
mies_van_der_rohe = ExtraSoup()
# NOTE(review): the lookup key 'm<NAME>' must exist in the ARTISTS dictionary;
# the dictionary above only shows anonymized '<NAME>' keys -- confirm the key.
mies_van_der_rohe.get_artist_info(ARTISTS['m<NAME>'])
mies_van_der_rohe.make_soup()
mies_van_der_rohe.get_artwork_data()
mies_van_der_rohe.artworks
# ## <NAME>
#
# <img src="img/gaudi_sagrada.jpg" width="500"/>
# Scrape Antoni Gaudi's artist page and extract artwork metadata.
antoni_gaudi = ExtraSoup()
antoni_gaudi.get_artist_info(ARTISTS['<NAME>'])
antoni_gaudi.make_soup()
antoni_gaudi.get_artwork_data()
antoni_gaudi.artworks
# ## Summary
def print_artwork_stats(artist):
    """Print a two-line summary for a scraped artist.

    `artist` is any object exposing `artist_name`, `get_artwork_count()`
    and `get_artwork_year_median()` (e.g. an ExtraSoup instance).
    """
    summary = (
        f"{artist.artist_name}: \n"
        f"1. Total artworks found in the collection: {artist.get_artwork_count()} \n"
        f"2. Median year of the artist's artworks in the collection: {artist.get_artwork_year_median()}"
    )
    print(summary)
# Print the summary stats for every scraped artist.
artists = [le_corbusier, mies_van_der_rohe, antoni_gaudi]
for artist in artists:
    print_artwork_stats(artist)
# ## Week 10 - Additional Work: Scrape Individual Artwork Pages
#
# The relevant HTML section we want to extract data has the following structure:
#
# ```
# <section> - class "grid-item--work"
# └── <dl> tag - class "work__caption"
# ├── <dt> - class "work__caption__term"
# │ └── <span> class "work__caption__term__text"
# ├── <dd> - class "work__caption__description"
# │ └── <span> - class "work__caption__description__text"
# │
# │ ... <dt><dd> pair continues for each description
# | (e.g dimension, publisher, printer, edition, etc)
# ```
#
# __The goal is to pull the artwork data inside the `dt` and `dd` tag as dictionary key/value pairs.__
class ArtworkSoup(BeautifulSoup):
    """Scraper for individual MoMA artwork pages.

    Fetches one page per artwork URL and extracts the <dt>/<dd> caption
    pairs into one dictionary per artwork (stored on self.artwork_data).
    """

    def build_url(self, query):
        """Build a url to query MoMA website"""
        return f'https://www.moma.org/{query}'

    def get_url_response(self, url):
        """Get HTML response from url"""
        request = urllib.request.Request(url)
        with urllib.request.urlopen(request) as response:
            return response.read()

    def make_soup(self, queries):
        """Create one BeautifulSoup object per artwork URL, in order."""
        self.htmls = [
            BeautifulSoup(
                self.get_url_response(self.build_url(query)),
                'html.parser'
            )
            for query in queries
        ]

    def create_artwork_dict(self, index, terms, descriptions):
        # Pair each caption term with its description, then tag the record
        # with its position so it can be joined back to the artist data later.
        record = dict(zip(terms, descriptions))
        record['index'] = index
        return record

    def get_artwork_data(self):
        """Parse through HTML tags, extract artwork data and build a list of dictionaries."""
        collected = []
        for position, page in enumerate(self.htmls):
            term_texts = map(
                self.clean_html,
                page.find_all('span', 'work__caption__term__text')
            )
            description_texts = map(
                self.clean_html,
                page.find_all('span', 'work__caption__description__text')
            )
            collected.append(
                self.create_artwork_dict(position, term_texts, description_texts)
            )
        self.artwork_data = collected

    def clean_html(self, html):
        """Strip tag and newline characters"""
        # Some descriptions have <em> tags within the <span> tag.
        em = html.find('em')
        if em:
            return em.string.strip()
        return html.string.strip()
# ## Le Corbusier
# +
# Get list of artwork page urls from artist page
le_corbusier.get_artwork_urls()
# Create BeautifulSoup child class and scrape data
c_artworks = ArtworkSoup()
c_artworks.make_soup(le_corbusier.artwork_urls)  # one HTTP fetch per artwork URL
c_artworks.get_artwork_data()
# Check a sample from the scraped data
c_artworks.artwork_data[0]
# -
# ## <NAME>
# +
# Get list of artwork page urls from artist page
mies_van_der_rohe.get_artwork_urls()
# Create BeautifulSoup child class and scrape data
m_artworks = ArtworkSoup()
m_artworks.make_soup(mies_van_der_rohe.artwork_urls)  # one HTTP fetch per artwork URL
m_artworks.get_artwork_data()
# Check a sample from the scraped data
m_artworks.artwork_data[0]
# -
# ## Gaudi
antoni_gaudi.artwork_urls
# +
# Get list of artwork page urls from artist page
antoni_gaudi.get_artwork_urls()
# Create BeautifulSoup child class and scrape data
gaudi_artworks = ArtworkSoup()
gaudi_artworks.make_soup(antoni_gaudi.artwork_urls)  # one HTTP fetch per artwork URL
gaudi_artworks.get_artwork_data()
# Check a sample from the scraped data
gaudi_artworks.artwork_data[0]
# -
# ### Merging two dictionaries
#
# Now we have an `ExtraSoup()` object with the artist's artist name / artwork title / year, and `ArtworkSoup()` with extra artwork data.
#
# We can merge them together with Pandas.
# +
import pandas as pd
le_courbusier_df = pd.DataFrame(le_corbusier.artworks)
le_corbusier_artwork_df = pd.DataFrame(c_artworks.artwork_data)
le_corbusier_merged = le_courbusier_df.merge(le_corbusier_artwork_df, left_on='index', right_on='index')
le_corbusier_merged.drop('index', axis=1)
# -
mies_van_der_rohe_df = pd.DataFrame(mies_van_der_rohe.artworks)
mies_van_der_rohe_artwork_df = pd.DataFrame(m_artworks.artwork_data)
mies_merged = mies_van_der_rohe_df.merge(mies_van_der_rohe_artwork_df, left_on='index', right_on='index')
mies_merged.drop('index', axis=1)
gaudi_df = pd.DataFrame(antoni_gaudi.artworks)
gaudi_artwork_df = pd.DataFrame(gaudi_artworks.artwork_data)
gaudi_merged = gaudi_df.merge(gaudi_artwork_df, left_on='index', right_on='index')
gaudi_merged.drop('index', axis=1)
# ## Export Dataframes
le_corbusier_merged.to_csv('le_corbusier.tsv', index=False, sep='\t')
mies_merged.to_csv('mies_van_der_rohe.tsv', index=False, sep='\t')
gaudi_merged.to_csv('antoni_gaudi.tsv', index=False, sep='\t')
| wk_8/moma_scrape/moma_scrape.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # <center>Block 14: Empirical matching: The gravity equation</center>
# ### <center>Alfred Galichon (NYU)</center>
# ## <center>`math+econ+code' masterclass on matching models, optimal transport and applications</center>
# <center>© 2018-2019 by <NAME>. Support from NSF grant DMS-1716489 is acknowledged. <NAME> contributed.</center>
# ### Learning objectives
#
# * Regularized optimal transport
#
# * The gravity equation
#
# * Generalized linear models
#
# * Pseudo-Poisson maximum likelihood estimation
# ### References
#
# * Anderson and van Wincoop (2003). "Gravity with Gravitas: A Solution to the Border Puzzle". *American Economic Review*.
#
# * Head and Mayer (2014). "Gravity Equations: Workhorse, Toolkit and Cookbook". *Handbook of International Economics*.
#
# * Gourieroux, Monfort and Trognon (1984). "Pseudo Maximum Likelihood Methods: Theory". *Econometrica*.
#
# * McCullagh and Nelder (1989). *Generalized Linear Models*. Chapman and Hall/CRC.
#
# * Santos Silva and Tenreyro (2006). "The Log of Gravity". *Review of Economics and Statistics*.
#
# * Yotov et al. (2011). *An advanced guide to trade policy analysis*. WTO.
#
# * Guimares and Portugal (2012). "Real Wages and the Business Cycle: Accounting for Worker, Firm, and Job Title Heterogeneity". *AEJ: Macro*.
#
# * Dupuy and G (2014), "Personality traits and the marriage market". *Journal of Political Economy*.
#
# * Dupuy, G and Sun (2016), "Estimating matching affinity matrix under low-rank constraints". arxiv 1612.09585.
# ### Motivation
#
# The gravity equation is a very useful tool for explaining trade flows by various measures of proximity between countries.
#
# A number of regressors have been proposed. They include: geographic distance, common official language, common colonial past, share of common religions, etc.
#
# The dependent variable is the volume of exports from country $i$ to country $n$, for each pair of country $\left( i,n\right)$.
#
# Today, we shall see a close connection between gravity models of international trade and separable matching models.
# ### Regularized optimal transport
#
# Consider the optimal transport duality
#
# \begin{align*}
# \max_{\pi\in\mathcal{M}\left( P,Q\right) }\sum_{xy}\pi_{xy}\Phi_{xy}=\min_{u_{x}+v_{y}\geq\Phi_{xy}}\sum_{x\in\mathcal{X}}p_{x}u_{x}+\sum_{y\in\mathcal{Y}}q_{y}v_{y}
# \end{align*}
#
# Now let's assume that we are adding an entropy to the primal objective function. For any $\sigma>0$, we get
#
# \begin{align*}
# & \max_{\pi\in\mathcal{M}\left( P,Q\right) }\sum_{xy}\pi_{xy}\Phi_{xy}-\sigma\sum_{xy}\pi_{xy}\ln\pi_{xy}\\
# & =\min_{u,v}\sum_{x\in\mathcal{X}}p_{x}u_{x}+\sum_{y\in\mathcal{Y}}q_{y}v_{y}+\sigma\sum_{xy}\exp\left( \frac{\Phi_{xy}-u_{x}-v_{y}-\sigma}{\sigma}\right)
# \end{align*}
#
# The latter problem is an unconstrained convex optimization problem. But the most efficient numerical computation technique is often coordinate descent, i.e. alternate between minimization in $u$ and minimization in $v$.
# ### Iterated fitting
#
# Maximize wrt to $u$ yields
#
# \begin{align*}
# e^{-u_{x}/\sigma}=\frac{p_{x}}{\sum_{y}\exp\left( \frac{\Phi_{xy}-v_{y}-\sigma}{\sigma}\right) }
# \end{align*}
#
# and wrt $v$ yields
#
# \begin{align*}
# e^{-v_{y}/\sigma}=\frac{q_{y}}{\sum_{x}\exp\left( \frac{\Phi_{xy}-u_{x}-\sigma}{\sigma}\right) }
# \end{align*}
#
# It is called the "iterated projection fitting procedure" (ipfp), aka "matrix scaling", "RAS algorithm", "Sinkhorn-Knopp algorithm", "Kruithof's method", "Furness procedure", "biproportional fitting procedure", "Bregman's procedure". See survey in Idel (2016).
#
# Maybe the most often reinvented algorithm in applied mathematics. Recently rediscovered in a machine learning context.
# ### Econometrics of matching
#
# The goal is to estimate the matching surplus $\Phi_{xy}$. For this, take a linear parameterization
#
# \begin{align*}
# \Phi_{xy}^{\beta}=\sum_{k=1}^{K}\beta_{k}\phi_{xy}^{k}.
# \end{align*}
#
# Following Choo and Siow (2006), Galichon and Salanie (2017) introduce logit heterogeneity in individual preferences and show that the equilibrium now maximizes the *regularized Monge-Kantorovich problem*
#
# \begin{align*}
# W\left( \beta\right) =\max_{\pi\in\mathcal{M}\left( P,Q\right) }\sum_{xy}\pi_{xy}\Phi_{xy}^{\beta}-\sigma\sum_{xy}\pi_{xy}\ln\pi_{xy}
# \end{align*}
#
# By duality, $W\left( \beta\right) $ can be expressed
#
# \begin{align*}
# W\left( \beta\right) =\min_{u,v}\sum_{x}p_{x}u_{x}+\sum_{y}q_{y}v_{y}+\sigma\sum_{xy}\exp\left( \frac{\Phi_{xy}^{\beta}-u_{x}-v_{y}-\sigma}{\sigma}\right)
# \end{align*}
#
# and w.l.o.g. can set $\sigma=1$ and drop the additive constant $-\sigma$ in the $\exp$.
# ### Estimation
#
# We observe the actual matching $\hat{\pi}_{xy}$. Note that $\partial W/ \partial\beta^{k}=\sum_{xy}\pi_{xy}\phi_{xy}^{k}$, hence $\beta$ is estimated by running
#
# <a name='objFun'></a>
# \begin{align*}
# \min_{u,v,\beta}\sum_{x}p_{x}u_{x}+\sum_{y}q_{y}v_{y}+\sum_{xy}\exp\left(\Phi_{xy}^{\beta}-u_{x}-v_{y}\right) -\sum_{xy,k}\hat{\pi}_{xy}\beta_{k}\phi_{xy}^{k}
# \end{align*}
#
# which is still a convex optimization problem.
#
# This is actually the objective function of the log-likelihood in a Poisson regression with $x$ and $y$ fixed effects, where we assume
#
# \begin{align*}
# \pi_{xy}|xy\sim Poisson\left( \exp\left( \sum_{k=1}^{K}\beta_{k}\phi
# _{xy}^{k}-u_{x}-v_{y}\right) \right) .
# \end{align*}
# ### Poisson regression with fixed effects
#
# Let $\theta=\left( \beta,u,v\right) $ and $Z=\left( \phi,D^{x},D^{y}\right) $ where $D_{x^{\prime}y^{\prime}}^{x}=1\left\{ x=x^{\prime}\right\} $ and $D_{x^{\prime}y^{\prime}}^{y}=1\left\{ y=y^{\prime}\right\}$ are $x$-and $y$-dummies. Let $m_{xy}\left( Z;\theta\right) =\exp\left(\theta^{\intercal}Z_{xy}\right) $ be the parameter of the Poisson distribution.
#
# The conditional likelihood of $\hat{\pi}_{xy}$ given $Z_{xy}$ is
#
# \begin{align*}
# l_{xy}\left( \hat{\pi}_{xy};\theta\right) & =\hat{\pi}_{xy}\log m_{xy}\left( Z;\theta\right) -m_{xy}\left( Z;\theta\right) \\
# & =\hat{\pi}_{xy}\left( \theta^{\intercal}Z_{xy}\right) -\exp\left(\theta^{\intercal}Z_{xy}\right) \\
# & =\hat{\pi}_{xy}\left( \sum_{k=1}^{K}\beta_{k}\phi_{xy}^{k}-u_{x}-v_{y}\right) -\exp\left( \sum_{k=1}^{K}\beta_{k}\phi_{xy}^{k}-u_{x}-v_{y}\right)
# \end{align*}
#
# Summing over $x$ and $y$, the sample log-likelihood is
#
# \begin{align*}
# \sum_{xy}\hat{\pi}_{xy}\sum_{k=1}^{K}\beta_{k}\phi_{xy}^{k}-\sum_{x}p_{x}u_{x}-\sum_{y}q_{y}v_{y}-\sum_{xy}\exp\left( \sum_{k=1}^{K}\beta_{k}\phi_{xy}^{k}-u_{x}-v_{y}\right)
# \end{align*}
#
# hence we recover the [objective function](#objFun).
# ### From Poisson to pseudo-Poisson
#
# If $\pi_{xy}|xy$ is Poisson, then $\mathbb{E}\left[\pi_{xy}\right]=m_{xy}\left( Z_{xy};\theta\right) =\mathbb{V}ar\left( \pi_{xy}\right) $. While it makes sense to assume the former equality, the latter is a rather strong assumption.
#
# For estimation purposes, $\hat{\theta}$ is obtained by
#
# \begin{align*}
# \max_{\theta}\sum_{xy}l\left( \hat{\pi}_{xy};\theta\right) =\sum_{xy}\left(\hat{\pi}_{xy}\left( \theta^{\intercal}Z_{xy}\right) -\exp\left(\theta^{\intercal}Z_{xy}\right) \right)
# \end{align*}
#
# however, for inference purposes, one shall not assume the Poisson distribution. Instead
#
# \begin{align*}
# \sqrt{N}\left( \hat{\theta}-\theta\right) \Longrightarrow\mathcal{N}\left( 0,\left( A_{0}\right) ^{-1}B_{0}\left( A_{0}\right) ^{-1}\right)
# \end{align*}
#
# where $N=\left\vert \mathcal{X}\right\vert \times\left\vert \mathcal{Y}\right\vert $ and $A_{0}$ and $B_{0}$ are estimated by
#
# \begin{align*}
# \hat{A}_{0} & =N^{-1}\sum_{xy}D_{\theta\theta}^{2}l\left( \hat{\pi}_{xy};\hat{\theta}\right) =N^{-1}\sum_{xy}\exp\left( \hat{\theta}^{\intercal}Z_{xy}\right) Z_{xy}Z_{xy}^{\intercal}\\
# \hat{B}_{0} & =N^{-1}\sum_{xy}\left( \hat{\pi}_{xy}-\exp\left( \hat{\theta}^{\intercal}Z_{xy}\right) \right) ^{2}Z_{xy}Z_{xy}^{\intercal}.
# \end{align*}
# ### Application: estimation of affinity matrix
#
# Dupuy and G (2014) focus on cross-dimensional interactions
#
# \begin{align*}
# \phi_{xy}^{A}=\sum_{p,q}A_{pq}\xi_{x}^{p}\xi_{y}^{q}
# \end{align*}
#
# and estimate "affinity matrix" $A$ on a dataset of married individuals where the "big 5" personality traits are measured.
#
# $A$ is estimated by
#
# \begin{align*}
# \min_{s_{i},m_{n}}\min_{A}\left\{
# \begin{array}
# [c]{c}%
# \sum_{x}p_{x}u_{x}+\sum_{y}q_{y}v_{y}\\
# +\sum_{xy}\exp\left( \sum_{p,q}A_{pq}\xi_{x}^{p}\xi_{y}^{q}-u_{x}%
# -v_{y}\right) \\
# -\sum_{x,y,p,q}\hat{\pi}_{xy}A_{pq}\xi_{x}^{p}\xi_{y}^{q}%
# \end{array}
# \right\} .
# \end{align*}
#
# Dupuy, Galichon and Sun (2016) consider the case when the space of characteristics is high-dimensional. More on this this afternoon.
# ### Estimation of affinity matrix: results
#
# | Husbands \ Wives | Education | Height | BMI | Health | Consc. | Extra. | Agree | Emotio | Auto. | Risk |
# |-------------------|-----------|--------|-------|--------|--------|--------|-------|--------|-------|-------|
# | Education | 0.46 | 0 | -0.06 | 0.01 | -0.02 | 0.03 | -0.01 | -0.03 | 0.04 | 0.01 |
# | Height | 0.04 | 0.21 | 0.04 | 0.03 | -0.06 | 0.03 | 0.02 | 0 | -0.01 | 0.02 |
# | BMI | -0.03 | 0.03 | 0.21 | 0.01 | 0.03 | 0 | -0.05 | 0.02 | 0.01 | -0.02 |
# | Health | -0.02 | 0.02 | -0.04 | 0.17 | -0.04 | 0.02 | -0.01 | 0.01 | 0 | 0.03 |
# | Conscienciousness | -0.07 | -0.01 | 0.07 | 0 | 0.16 | 0.05 | 0.04 | 0.06 | 0.01 | 0.01 |
# | Extraversion | 0 | -0.01 | 0 | 0.01 | -0.06 | 0.08 | -0.04 | -0.01 | 0.02 | -0.06 |
# | Agreeableness | 0.01 | 0.01 | -0.06 | 0.02 | 0.1 | -0.11 | 0 | 0.07 | -0.07 | -0.05 |
# | Emotional | 0.03 | -0.01 | 0.04 | 0.06 | 0.19 | 0.04 | 0.01 | -0.04 | 0.08 | 0.05 |
# | Autonomy | 0.03 | 0.02 | 0.01 | 0.02 | -0.09 | 0.09 | -0.04 | 0.02 | -0.1 | 0.03 |
# | Risk | 0.03 | -0.01 | -0.03 | -0.01 | 0 | -0.02 | -0.03 | -0.03 | 0.08 | 0.14 |
#
# Affinity matrix. Source: Dupuy and G (2014). Note: Bold coefficients are significant at the 5 percent level.
# ## The gravity equation
#
# "Structural gravity equation" (Anderson and van Wincoop, 2003) as reviewed in the Head and Mayer (2014)
# handbook chapter:
#
# \begin{align*}
# X_{ni}=\underset{S_{i}}{\underbrace{\frac{Y_{i}}{\Omega_{i}}}}\underset{M_{n}}{\underbrace{\frac{X_{n}}{\Psi_{n}}}}\Phi_{ni}%
# \end{align*}
#
# where $n$=importer, $i$=exporter, $X_{ni}$=trade flow from $i$ to $n$, $Y_{i}=\sum_{n}X_{ni}$ is value of production, $X_{n}=\sum_{i}X_{ni}$ is importers' expenditures, and $\Phi_{ni}$=bilateral accessibility of $n$ to $i$.
#
# $\Omega_{i}$ and $\Psi_{n}$ are "multilateral resistances", satisfying the set of implicit equations
#
# \begin{align*}
# \Psi_{n}=\sum_{i}\frac{\Phi_{ni}Y_{i}}{\Omega_{i}}\text{ and }\Omega_{i}%
# =\sum_{n}\frac{\Phi_{ni}X_{n}}{\Psi_{n}}%
# \end{align*}
#
# These are exactly the same equations as those of the regularized OT.
# ### Explaining trade
#
# Parameterize $\Phi_{ni}=\exp\left( \sum_{k=1}^{K}\beta_{k}D_{ni}^{k}\right) $, where the $D_{ni}^{k}$ are $K$ pairwise measures of distance between $n$ and $i$. We have
#
# \begin{align*}
# X_{ni}=\exp\left( \sum_{k=1}^{K}\beta_{k}D_{ni}^{k}-s_{i}-m_{n}\right)
# \end{align*}
#
# where fixed effects $s_{i}=-\ln S_{i}$ and $m_{n}=-\ln M_{n}$ are adjusted by
#
# \begin{align*}
# \sum_{i}X_{ni}=Y_{i}\text{ and }\sum_{n}X_{ni}=X_{n}.
# \end{align*}
#
# Standard choices of $D_{ni}^{k}$'s:
#
# * Logarithm of bilateral distance between $n$ and $i$
#
# * Indicator of contiguous borders; of common official language; of
# colonial ties
#
# * Trade policy variables: presence of a regional trade agreement; tariffs
#
# * Could include many other measures of proximity, e.g. measure of genetic/cultural distance, intensity of communications, etc.
# ## Application
# Our data comes from *An Advanced Guide to Trade Policy Analysis: The Structural Gravity Model*. We will estimate the gravity model using optimal transport as well as using Poisson regression.
# tictoc provides the tic()/toc() timers used to benchmark each estimator below.
library(tictoc)
# Path to the WTO gravity dataset shipped with the course material.
thePath = paste0(getwd(),"/../data_mec_optim/gravity_wtodata")
tradedata = read.csv(paste0(thePath,"/1_TraditionalGravity_from_WTO_book.csv"))
head(tradedata)
# Let's prepare the data so that we can use it. We want to construct
# * $D_{ni,t}^k$ which is the $k$th pairwise distance between importer $n$ and exporter $i$ at time $t$
#
# * $X_{n,t}$ total value of expenditure of importer $n$ at time $t$
#
# * $Y_{i,t}$ total value of production of exporter $i$ at time $t$
# +
# Unique list of importers
countrylist = sort(unique(tradedata$importer))
# Unique list of exporters
exportercountrylist = sort(unique(tradedata$exporter))
# The model below assumes a square flow matrix, so both country lists must match.
if (!identical(countrylist, exportercountrylist)) {
    stop("exporter and importer country lists do not coincide")
}
# regressorsIndices = 4:13
# Pairwise regressors: log distance, contiguity, common language, colonial ties.
regressorsIndices = c("ln_DIST", "CNTG", "LANG", "CLNY")
yearslist = c(1986, 1990, 1994, 1998, 2002, 2006)
regressors_raw = tradedata[regressorsIndices]
regressorsNames = names(regressors_raw)
flow_raw = tradedata$trade
nbt = length(yearslist) # number of years
nbk = dim(regressors_raw)[2] # number of regressors
nbi = length(countrylist) # number of countries
yearsIndices = 1:nbt
Dnikt = array(0, dim = c(nbi, nbi, nbk, nbt)) # basis functions
Xhatnit = array(0, dim = c(nbi, nbi, nbt)) # trade flows from n to i
missingObs = array(0, dim = c(0, 2, nbt)) # (origin, destination) pairs with no data row
# Fill the regressor and flow arrays one (year, destination, origin) cell at a time.
# Diagonal cells (orig == dest) stay at zero: self-trade is excluded.
for (year in 1:nbt) {
    theYear = yearslist[year]
    # print(theYear)
    for (dest in 1:nbi) {
        theDest = as.character(countrylist[dest])
        # print(theDest)
        for (orig in 1:nbi) {
            if (orig != dest) {
                theOrig = as.character(countrylist[orig])
                extract = (tradedata$exporter == theOrig) & (tradedata$importer ==
                    theDest) & (tradedata$year == theYear)
                line = regressors_raw[extract, ]
                if (dim(line)[1] == 0) {
                    missingObs = rbind(missingObs, c(theOrig, theDest))
                }
                if (dim(line)[1] > 1) {
                    stop("Several lines with year, exporter and importer.")
                }
                if (dim(line)[1] == 1) {
                    Dnikt[orig, dest, , year] = as.numeric(line)
                    Xhatnit[orig, dest, year] = flow_raw[extract]
                }
            }
        }
    }
}
if (length(missingObs) > 0) {
    stop("Missing observations")
}
# Margins of the flow array, one column per year: sums over the second index
# (MARGIN c(1, 3)) and over the first index (MARGIN c(2, 3)) respectively.
Xnt = apply(X = Xhatnit, MARGIN = c(1, 3), FUN = sum)
Yit = apply(X = Xhatnit, MARGIN = c(2, 3), FUN = sum)
# -
# We will solve this model by fixing a $\beta$ and solving the matching problem using IPFP. Then in an outer loop we will solve for the $\beta$ which minimizes the distance between model and empirical moments.
# +
sigma = 1 # sigma for IPFP
maxiterIpfp = 1000 # max numbers of iterations
tolIpfp = 1e-12 # tolerance for IPFP
tolDescent = 1e-12 # tolerance for gradient descent
totmass_t = rep(sum(Xnt)/nbt, nbt) # total mass
p_nt = t(t(Xnt)/totmass_t) # proportion of importer expenditure
q_nt = t(t(Yit)/totmass_t) # proportion of exporter productions
IX = rep(1, nbi)
tIY = matrix(rep(1, nbi), nrow = 1)
f_nit = array(0, dim = c(nbi, nbi, nbt)) # per-year outer product p %*% 1'
g_nit = array(0, dim = c(nbi, nbi, nbt)) # per-year outer product 1 %*% q'
pihat_nit = array(0, dim = c(nbi, nbi, nbt)) # observed matching (normalized flows)
sdD_k = rep(1, nbk)
meanD_k = rep(0, nbk)
for (t in 1:nbt) {
    f_nit[, , t] = p_nt[, t] %*% tIY
    g_nit[, , t] = IX %*% t(q_nt[, t])
    pihat_nit[, , t] = Xhatnit[, , t]/totmass_t[t]
}
# Standardize each regressor; beta_k is rescaled back after convergence.
for (k in 1:nbk) {
    meanD_k[k] = mean(Dnikt[, , k, ])
    sdD_k[k] = sd(Dnikt[, , k, ])
    Dnikt[, , k, ] = (Dnikt[, , k, ] - meanD_k[k])/sdD_k[k]
}
v_it = matrix(rep(0, nbi * nbt), nbi, nbt)
beta_k = rep(0, nbk)
t_s = 0.03 # step size for the prox grad algorithm (or grad descent when lambda=0)
iterCount = 0
tic()
# Outer loop: gradient descent on beta. Inner loop (per year): IPFP solves the
# regularized matching problem at the current beta.
while (1) {
    thegrad = rep(0, nbk)
    pi_nit = array(0, dim = c(nbi, nbi, nbt))
    for (t in 1:nbt) {
        D_ij_k = matrix(Dnikt[, , , t], ncol = nbk)
        Phi = matrix(D_ij_k %*% matrix(beta_k, ncol = 1), nrow = nbi)
        contIpfp = TRUE
        iterIpfp = 0
        v = v_it[, t]
        f = f_nit[, , t]
        g = g_nit[, , t]
        K = exp(Phi/sigma)
        diag(K) = 0  # no self-trade: zero out the kernel's diagonal
        gK = g * K
        fK = f * K
        # IPFP / Sinkhorn iterations: alternate updates of u and v until the
        # row-sum constraint is met to within tolIpfp.
        while (contIpfp) {
            iterIpfp = iterIpfp + 1
            u = sigma * log(apply(gK * exp((-IX %*% t(v))/sigma), 1, sum))
            vnext = sigma * log(apply(fK * exp((-u %*% tIY)/sigma), 2, sum))
            error = max(abs(apply(gK * exp((-IX %*% t(vnext) - u %*% tIY)/sigma),
                1, sum) - 1))
            if ((error < tolIpfp) | (iterIpfp >= maxiterIpfp)) {
                contIpfp = FALSE
            }
            v = vnext
        }
        v_it[, t] = v  # warm-start the next outer iteration
        pi_nit[, , t] = f * gK * exp((-IX %*% t(v) - u %*% tIY)/sigma)
        if (iterIpfp >= maxiterIpfp) {
            stop("maximum number of iterations reached")
        }
        # Gradient w.r.t. beta: model moments minus observed moments.
        thegrad = thegrad + c(c(pi_nit[, , t] - pihat_nit[, , t]) %*% D_ij_k)
    }
    # take one gradient step
    beta_k = beta_k - t_s * thegrad
    theval = sum(thegrad * beta_k) - sigma * sum(pi_nit[pi_nit > 0] * log(pi_nit[pi_nit >
        0]))
    iterCount = iterCount + 1
    if (iterCount > 1 && abs(theval - theval_old) < tolDescent) {
        break
    }
    theval_old = theval
}
beta_k = beta_k/sdD_k  # undo the regressor standardization
toc()
print(beta_k)
# -
# ### Comparison
# We can compare the results and speed of our computation to that of Poisson regression packages. As a warning, these give the same results, but at the cost of a much longer run time, so use at your own risk. We can solve instead using the Poisson regression from the glm package.
# +
tic()
# Quasi-Poisson GLM: trade on the four gravity regressors plus the fixed-effect
# dummy columns whose names contain "PORTER_TIME_FE"; self-trade rows are dropped.
glm_pois = glm(as.formula(
    paste("trade ~ ",
        paste(grep("PORTER_TIME_FE", names(tradedata), value=TRUE), collapse=" + "),
        " + ln_DIST + CNTG + LANG + CLNY")),
    family = quasipoisson,
    data=subset(tradedata, exporter!=importer) )
toc()
glm_pois$coefficients[regressorsIndices]
# -
# Which gives the same results but is much slower. We can also use the `ppml` function from the `gravity` package.
# +
#install.packages("gravity")
library(gravity)
tic()
# PPML estimator from the gravity package; the fixed-effect dummies enter as
# additional regressors alongside contiguity, language and colony indicators.
grav_pois = ppml('trade', 'DIST', c(grep("PORTER_TIME_FE", names(tradedata), value=TRUE), 'CNTG', 'LANG', 'CLNY'),
    vce_robust = FALSE, data = subset(tradedata, exporter!=importer))
toc()
grav_pois$coefficients[c("dist_log", "CNTG", "LANG", "CLNY"), 1]
# -
# Again which gives the same results but it is much slower!
| slides_ipynb_mec_optim/B15a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''gpu'': conda)'
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
import glob
import os, gc
# FIX: was `import numpy as numpy` — the rest of this notebook refers to numpy
# as `np` (np.log, np.exp, np.sqrt, ...), so the original alias broke those calls.
import numpy as np
import pandas as pd
import scipy as sp
import datatable as dt
from collections import defaultdict
from tqdm.notebook import tqdm
from sklearn.utils import shuffle
from sklearn.metrics import r2_score
from numba import njit
from utils import *
from IPython.display import clear_output
from sklearn.preprocessing import MinMaxScaler
from pytorch_tabnet.metrics import Metric
from pytorch_tabnet.tab_model import TabNetRegressor
import torch
from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingWarmRestarts
# +
N_FOLD = 5  # number of cross-validation folds
N_MINS = 5
MIN_SIZE = 600 // N_MINS
SOL_NAME = '601-TabNet'  # tag used in model checkpoint and result paths
DATA_NAME = '601'  # tag of the prebuilt feature CSV loaded below
mkdir(f'./models/{SOL_NAME}/')
# -
# CONSTANT
# MEAN/STD standardize log(target) (see transform_target); EPS keeps log() finite at zero.
MEAN = -5.762330803300896
STD = 0.6339307835941186
EPS = 1e-9
# get ids
list_stock_id = get_stock_id()
list_time_id = get_time_id()
# # Functions
# +
def transform_target(target):
    """Standardize *target* in log space using the precomputed MEAN/STD."""
    log_target = np.log(target + EPS)
    return (log_target - MEAN) / STD
def inverse_target(target):
    """Invert transform_target: map a standardized value back to raw scale."""
    log_target = MEAN + STD * target
    return np.exp(log_target) - EPS
def np_rmspe(y_true, y_pred):
    """Root mean squared percentage error after mapping back from log space."""
    true_vals = inverse_target(y_true)
    pred_vals = inverse_target(y_pred)
    rel_err = (true_vals - pred_vals) / true_vals
    return np.sqrt(np.mean(rel_err ** 2))
class RMSPE(Metric):
    """pytorch-tabnet eval metric: RMSPE computed in the original target space."""
    def __init__(self):
        self._name = "rmspe"
        self._maximize = False  # lower is better
    def __call__(self, y_true, y_pred):
        true_vals = inverse_target(y_true)
        pred_vals = inverse_target(y_pred)
        rel_err = (true_vals - pred_vals) / true_vals
        return np.sqrt(np.mean(rel_err ** 2))
def RMSPELoss(y_pred, y_true):
    """Differentiable RMSPE loss on targets mapped back to the raw scale."""
    true_vals = torch.exp(MEAN + STD * y_true) - EPS
    pred_vals = torch.exp(MEAN + STD * y_pred) - EPS
    pct_err = (true_vals - pred_vals) / true_vals
    return torch.sqrt(torch.mean(pct_err ** 2)).clone()
# -
# # Loading data
# +
# train
df_train = dt.fread(f'./dataset/train_{DATA_NAME}_NN.csv').to_pandas()
# Every column except identifiers/targets is used as a model feature.
fea_cols = [f for f in df_train if f not in ['time_id', 'target', 'pred_NN', 'stock_id', 'row_id']]
# result
df_result = dt.fread('./dataset/train.csv').to_pandas()
df_result = gen_row_id(df_result)
# -
# Standardize the regression target in log space (see transform_target above).
df_train['target'] = transform_target(df_train['target'])
df_train = gen_row_id(df_train)
# # Evaluation
# +
# TabNet hyperparameters; column 0 of the feature matrix is stock_id, treated
# as a categorical with 127 levels embedded into 1 dimension.
tabnet_params = dict(
    cat_idxs=[0],
    cat_dims=[127],
    cat_emb_dim=1,
    n_d = 16,
    n_a = 16,
    n_steps = 2,
    gamma = 2,
    n_independent = 2,
    n_shared = 2,
    lambda_sparse = 0,  # sparsity regularization disabled
    optimizer_fn = Adam,
    optimizer_params = dict(lr = (2e-2)),
    mask_type = "entmax",
    scheduler_params = dict(T_0=200, T_mult=1, eta_min=1e-4, last_epoch=-1, verbose=False),
    scheduler_fn = CosineAnnealingWarmRestarts,
    seed = 42,
    verbose = 10,
    # device_name='cpu'
)
# Each seed produces a different fold assignment; per-seed predictions are averaged at the end.
list_seeds = [0, 11, 42]
# -
# Nested CV: for each seed, re-fold the data and train one TabNet per fold.
list_rmspe = []
for i_seed, seed in enumerate(list_seeds):
    df_train = add_time_fold(df_train, N_FOLD, seed=seed)
    list_rmspe += [[]]
    for i_fold in range(N_FOLD):
        gc.collect()
        df_tr = df_train.loc[df_train.fold!=i_fold]
        df_te = df_train.loc[df_train.fold==i_fold]
        X_train = df_tr[['stock_id']+fea_cols].values
        y_train = df_tr[['target']].values
        X_test = df_te[['stock_id']+fea_cols].values
        y_test = df_te[['target']].values
        idx_test = df_train.loc[df_train.fold==i_fold].index
        print(f'Fold {i_seed+1}/{len(list_seeds)} | {i_fold+1}/{N_FOLD}', X_train.shape, X_test.shape)
        # Scale all columns except the stock_id categorical (column 0) to [-1, 1];
        # the fitted scaler is persisted so inference can reproduce the transform.
        scaler = MinMaxScaler(feature_range=(-1, 1))
        X_train[:, 1:] = scaler.fit_transform(X_train[:, 1:])
        X_test[:, 1:] = scaler.transform(X_test[:, 1:])
        save_pickle(scaler, f'./models/{SOL_NAME}/minmax_scaler_{i_seed}_{i_fold}.pkl')
        # Callbacks
        ckp_path = f'./models/{SOL_NAME}/model_{i_seed}_{i_fold}'
        model = TabNetRegressor(**tabnet_params)
        # Early stopping on the held-out fold's RMSPE (patience of 50 epochs).
        model.fit(X_train, y_train,
                  eval_set=[(X_test, y_test)],
                  max_epochs=10000,
                  patience=50,
                  batch_size=1024*20,
                  virtual_batch_size=128*20,
                  num_workers=8,
                  drop_last=False,
                  eval_metric=[RMSPE],
                  loss_fn=RMSPELoss
                  )
        y_pred = model.predict(X_test)
        curr_rmspe = np_rmspe(y_test, y_pred)
        list_rmspe[-1] += [curr_rmspe]
        model.save_model(ckp_path)
        # generate and save preds
        df_result.loc[idx_test, f'pred_{i_seed}'] = inverse_target(y_pred)
        clear_output()
print(list_rmspe)
df_result.to_csv(f'./results/{SOL_NAME}.csv', index=False)
# Per-seed out-of-fold score, then the score of the seed-averaged prediction.
for i in range(len(list_seeds)):
    print(i, rmspe(df_result['target'], df_result[f'pred_{i}']))
print('All: ', rmspe(df_result['target'], df_result[[f'pred_{i}' for i in range(len(list_seeds))]].mean(axis=1)))
| 601-EVAL-TabNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.9 64-bit (''azureml_py36'': conda)'
# name: python3
# ---
# # Imports
# +
import azureml.core
from azureml.core import Workspace, Dataset
import pandas as pd
# Load the workspace from the local config.json created by the Azure ML SDK.
ws = Workspace.from_config()
print(f"Azure ML version: {azureml.core.VERSION}")
print(f"Workspace: {ws.name}")
# -
# # Datastores
# +
# get default datastore
default_ds = ws.get_default_datastore()
# list all datastores associated with workspace
for datastore in ws.datastores:
    print(f"{datastore}: - Is Default = {datastore == default_ds.name}")
# -
# Upload the two local CSVs into the workspace's default datastore.
default_ds.upload_files(
    files=["../../data/diabetes.csv","../../data/diabetes2.csv"], # local dir
    target_path="diabetes-data", # AML datastore path
    overwrite=True,
    show_progress=True,
)
# # Datasets
# +
default_ds = ws.get_default_datastore()
# create tabular dataset - NOTE: this stacks both dataframes on top of each other into a single 15,000 row dataset
tabular_dataset = Dataset.Tabular.from_delimited_files(path=(default_ds, "diabetes-data/*.csv"))
# -
# Preview the first 20 rows as a pandas dataframe.
tabular_dataset.take(20).to_pandas_dataframe()
# +
# File dataset: references the raw files themselves rather than parsing them.
file_dataset = Dataset.File.from_files(path=(default_ds, "diabetes-data/*.csv"))
for file in file_dataset.to_path():
    print(file)
# +
# register tabular dataset
# Registration is wrapped in try/except so a service error is printed rather
# than aborting the notebook; create_new_version bumps the version if the name exists.
try:
    tabular_dataset = tabular_dataset.register(
        workspace=ws,
        name="diabetes dataset",
        description="diabetes dataset",
        tags={
            "format": "CSV"
        },
        create_new_version=True
    )
except Exception as e:
    print(e)
# register file dataset
try:
    file_dataset = file_dataset.register(
        workspace=ws,
        name="diabetes file dataset",
        description="diabetes files",
        tags={
            "format": "CSV"
        },
        create_new_version=True
    )
except Exception as e:
    print(e)
# -
# List every registered dataset with its latest version number.
for dataset in list(ws.datasets.keys()):
    dataset = Dataset.get_by_name(ws, dataset)
    print(dataset.name, " ", dataset.version)
# # Train
# get dataset
# Fetch the registered tabular dataset by name and materialize it as a dataframe.
diabetes_ds = ws.datasets.get("diabetes dataset")
diabetes_ds.to_pandas_dataframe()
| custom/work-with-data-in-azure-machine-learning/scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="S9tsOMw8AhJ6" colab_type="code" colab={}
from __future__ import division
from torchvision import models
from torchvision import transforms
from PIL import Image
import argparse
import torch
import torchvision
import torch.nn as nn
import numpy as np
# Device configuration
# Use the GPU when available, otherwise CPU; all tensors/models below are moved to `device`.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_image(image_path, transform=None, max_size=None, shape=None):
    """Load an image, optionally resize it, and convert it to a torch tensor.

    Args:
        image_path: path of the image file to open.
        transform: optional torchvision transform applied to the PIL image;
            when given, the result gains a leading batch dimension.
        max_size: if set, scale the image so its longer side equals max_size,
            preserving aspect ratio.
        shape: if set, resize to this exact (width, height).

    Returns:
        The image on the module-level `device`, as a PIL image or a (1,C,H,W)
        tensor depending on whether `transform` was supplied.
    """
    image = Image.open(image_path)
    if max_size:
        scale = max_size / max(image.size)
        size = np.array(image.size) * scale
        # FIX: Image.ANTIALIAS was deprecated and removed in Pillow 10; LANCZOS
        # is the identical filter (and is what the `shape` branch already used).
        image = image.resize(size.astype(int), Image.LANCZOS)
    if shape:
        image = image.resize(shape, Image.LANCZOS)
    if transform:
        image = transform(image).unsqueeze(0)
    return image.to(device)
class VGGNet(nn.Module):
    """VGG-19 feature extractor returning activations from five conv layers."""
    def __init__(self):
        """Select conv1_1 ~ conv5_1 activation maps."""
        super(VGGNet, self).__init__()
        # Indices of conv1_1, conv2_1, conv3_1, conv4_1, conv5_1 in vgg19.features.
        self.select = ['0', '5', '10', '19', '28']
        self.vgg = models.vgg19(pretrained=True).features
    def forward(self, x):
        """Run *x* through VGG sequentially, collecting the selected feature maps."""
        selected = []
        for name, layer in self.vgg.named_children():
            x = layer(x)
            if name in self.select:
                selected.append(x)
        return selected
def main(config):
    """Run neural style transfer: optimize a target image so its VGG features
    match the content image and its Gram matrices match the style image."""
    # Image preprocessing
    # VGGNet was trained on ImageNet where images are normalized by mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225].
    # We use the same normalization statistics here.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225))])
    # Load content and style images
    # Make the style image same size as the content image
    content = load_image(config.content, transform, max_size=config.max_size)
    style = load_image(config.style, transform, shape=[content.size(2), content.size(3)])
    # Initialize a target image with the content image
    target = content.clone().requires_grad_(True)
    # The pixels of `target` are the only parameters optimized; VGG stays frozen.
    optimizer = torch.optim.Adam([target], lr=config.lr, betas=[0.5, 0.999])
    vgg = VGGNet().to(device).eval()
    for step in range(config.total_step):
        # Extract multiple(5) conv feature vectors
        target_features = vgg(target)
        content_features = vgg(content)
        style_features = vgg(style)
        style_loss = 0
        content_loss = 0
        for f1, f2, f3 in zip(target_features, content_features, style_features):
            # Compute content loss with target and content images
            content_loss += torch.mean((f1 - f2)**2)
            # Reshape convolutional feature maps
            _, c, h, w = f1.size()
            f1 = f1.view(c, h * w)
            f3 = f3.view(c, h * w)
            # Compute gram matrix
            f1 = torch.mm(f1, f1.t())
            f3 = torch.mm(f3, f3.t())
            # Compute style loss with target and style images
            style_loss += torch.mean((f1 - f3)**2) / (c * h * w)
        # Compute total loss, backprop and optimize
        loss = content_loss + config.style_weight * style_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (step+1) % config.log_step == 0:
            print ('Step [{}/{}], Content Loss: {:.4f}, Style Loss: {:.4f}'
                   .format(step+1, config.total_step, content_loss.item(), style_loss.item()))
        if (step+1) % config.sample_step == 0:
            # Save the generated image
            # Inverse of the ImageNet normalization above, with mean/std folded together.
            denorm = transforms.Normalize((-2.12, -2.04, -1.80), (4.37, 4.46, 4.44))
            img = target.clone().squeeze()
            img = denorm(img).clamp_(0, 1)
            torchvision.utils.save_image(img, 'output-{}.png'.format(step+1))
if __name__ == "__main__":
    # Command-line configuration; defaults reproduce the reference experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--content', type=str, default='png/content.png')
    parser.add_argument('--style', type=str, default='png/style.png')
    parser.add_argument('--max_size', type=int, default=400)
    parser.add_argument('--total_step', type=int, default=2000)
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_step', type=int, default=500)
    parser.add_argument('--style_weight', type=float, default=100)
    parser.add_argument('--lr', type=float, default=0.003)
    config = parser.parse_args()
    print(config)
    main(config)
| style_transfer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import warnings
import statsmodels.formula.api as smf
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
# # Loading datasets.
# IBM Telco customer-churn dataset; blank-space cells become NaN.
telco = pd.read_csv('https://community.watsonanalytics.com/wp-content/uploads/2015/03/WA_Fn-UseC_-Telco-Customer-Churn.csv', sep=',', na_values=' ')
telco.head()
telco.columns
# Mall customers dataset. `error_bad_lines=False` was removed in pandas 2.0;
# on_bad_lines='skip' is the equivalent (silently drop malformed rows).
mall = pd.read_csv('https://tufts.box.com/shared/static/w2xf5yv5wczx7rfmdmbjpfd1u604hhvo.csv', on_bad_lines='skip', sep=',', na_values=' ')
mall.head()
# # Cleaning Data
# Inspect column dtypes. (Was `telco_data.dtypes`; `telco_data` is never
# defined anywhere in this notebook -- the loaded frame is `telco`.)
telco.dtypes
# ### Replacing column values 'Male to 1' , 'Female to 2' and 'Yes to 1' and 'No to 2'
# +
# Map categorical labels to numeric codes in place.
# Assign via telco.loc[mask, col] on the frame itself: the original chained
# form (telco.<col>.loc[...] = ...) triggers SettingWithCopyWarning and is
# not guaranteed to write back into `telco`.
recode = {
    'gender': {'Male': 1, 'Female': 2},
    'Partner': {'Yes': 1, 'No': 2},
    'Dependents': {'Yes': 1, 'No': 2},
    'PhoneService': {'Yes': 1, 'No': 2},
    'PaperlessBilling': {'Yes': 1, 'No': 2},
}
for col, mapping in recode.items():
    for old, new in mapping.items():
        telco.loc[telco[col] == old, col] = new
telco.head()
# +
# Collapse "No phone service" / "No internet service" into plain "No" so each
# add-on column is a clean Yes/No. Direct .loc assignment avoids the chained
# indexing (telco.<col>.loc[...] = ...) SettingWithCopyWarning.
telco.loc[telco['MultipleLines'] == 'No phone service', 'MultipleLines'] = 'No'
internet_addons = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
                   'TechSupport', 'StreamingTV', 'StreamingMovies']
for col in internet_addons:
    telco.loc[telco[col] == 'No internet service', col] = 'No'
telco.head()
# -
# Changed PaymentMethod into dummy variables and dropped one value, since we only need n−1 out of the 4 categories to represent the data.
# One-hot encode PaymentMethod; drop_first keeps n-1 of the n categories.
payment_dummies = pd.get_dummies(telco.PaymentMethod, drop_first=True)
telco = pd.concat([telco, payment_dummies], axis=1)
telco.head()
# The TotalCharges column contains null values. The entries with missing values will be omitted in the analysis.
# Fraction of missing values per column. (Was `telco_data`; that name is
# never defined -- the loaded frame is `telco`.)
(telco.isnull().sum()/len(telco)).plot(kind='bar')
# # Analyzing Datasets
# ### Majority of the clients spend less than $40 per month on telecommunication services.
telco.MonthlyCharges.hist()
# Explicit category orderings for the two ordinal-ish columns.
PaymentMethod_ranking = ['Electronic check', 'Mailed check', 'Bank transfer (automatic)', 'Credit card (automatic)']
print(PaymentMethod_ranking)
Contract_ranking = ['Month-to-month', 'One year', 'Two year']
print(Contract_ranking)
# +
columns_n_orders = {'PaymentMethod': PaymentMethod_ranking, 'Contract': Contract_ranking}
for column, ordering in columns_n_orders.items():
    # astype('category', ordered=..., categories=...) was deprecated and
    # removed from pandas; pass an explicit CategoricalDtype instead.
    telco.loc[:, column] = telco.loc[:, column].astype(
        pd.CategoricalDtype(categories=ordering, ordered=True))
# -
# ### A big number of customers prefer month-to month mode of payment
telco['Contract'].value_counts().plot(kind='bar')
plt.title('Popular Contract Modes')
plt.ylabel('')
# Total charges broken down by payment method and contract type. The groupby
# keys already index the result, so no `level` argument is needed
# (GroupBy.sum() does not accept one in current pandas).
telco.groupby(['PaymentMethod', 'Contract'])['TotalCharges'].sum()
# ## Gender balance is averagely equally spread out among our customers with males spending slightly more than females on all telco services.
telco.groupby(['gender'])['TotalCharges'].sum().reset_index()
# ## Among the payment methods available, the use of electronic check is the most popular. However, there is also a significant and steady number of clients using the other payment channels available.
telco.groupby("PaymentMethod")["Churn"].count().plot.bar(rot=0, figsize=(14, 5))
# ### Majority of customers churn within the first couple of months (0-10 months) of subscribing to the service. This is valuable data to the telco provider because they can allocate more resources towards incentivizing clients to stay longer than just a few months. Thereafter, their tenure rate is fairly stable. There is also a high number of customers that remain loyal to their telco provider (70 months).
telco["tenure"].plot.hist(bins=40, edgecolor="k")
plt.xlabel('tenure')
# # Merging Datasets
# Join on the row index. Pass the frame directly rather than in a list:
# DataFrame.join with a *list* of frames does not accept how='inner'.
# NOTE(review): the two datasets share no obvious key, so an index join is
# positional -- confirm this is the intended merge.
mall.join(telco, how='inner')
| Kodhek/telco_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (cs7643-a2)
# language: python
# name: cs7643-a2
# ---
# ## 0. Download Data
# # cd data <br />
# wget https://os.zhdk.cloud.switch.ch/swift/v1/crowdai-public/spotify-sequential-skip-prediction-challenge/split-files/training_set_0.tar.gz <br />
# wget https://os.zhdk.cloud.switch.ch/swift/v1/crowdai-public/spotify-sequential-skip-prediction-challenge/split-files/training_set_9.tar.gz <br />
# tar -xf training_set_0.tar.gz && mv training_set training_set0 <br />
# tar -xf training_set_9.tar.gz && mv training_set training_set9 <br />
#
import pandas as pd
import glob
import math
import numpy as np
import matplotlib.pyplot as plt
# +
data_path = 'data/'  # TODO: point this to your data folder
# submission_path = data_path + 'submissions/'
training_path = data_path + 'training_set0/'
testing_path = data_path + 'training_set9/'
# TODO: widen the glob patterns if more than one training tar.gz is used.
train_input_logs = sorted(glob.glob(training_path + "log_0*.csv"))
test_input_logs = sorted(glob.glob(testing_path + "log_9*.csv"))
# -
train_input_logs[:3]
test_input_logs[:3]
# ## 1. Visualize data
# Sample a single log file for inspection. (The original read `input_logs[0]`,
# but `input_logs` is never defined -- the training list is `train_input_logs`.)
df = pd.read_csv(train_input_logs[0])
print(df.shape)
print(df.columns)
df.head(3)
# ## 2. Exploratory analysis
# ### 2.1 Skip probability by feature
df.skip_2.value_counts()
import matplotlib.pyplot as plt

# Session/track features to profile against the skip_2 label.
PROFILE_FEATURES = ['session_position', 'session_length', 'no_pause_before_play',
                    'short_pause_before_play', 'long_pause_before_play',
                    'hist_user_behavior_n_seekfwd', 'hist_user_behavior_n_seekback',
                    'hist_user_behavior_is_shuffle', 'hour_of_day', 'premium',
                    'context_type', 'hist_user_behavior_reason_start',
                    'hist_user_behavior_reason_end']

def plot_skip2_by_feature(frame, agg, ylabel, color=None):
    """Bar-plot skip_2 aggregated per feature value on a 3x5 grid.

    agg: name of the SeriesGroupBy aggregation to apply ('mean', 'count', ...).
    """
    fig, axes = plt.subplots(figsize=(20, 16), ncols=5, nrows=3)
    for i, var in enumerate(PROFILE_FEATURES):
        grouped = getattr(frame.groupby([var])['skip_2'], agg)()
        grouped.plot.bar(title=var, ylabel=ylabel, ax=axes[i // 5, i % 5], color=color)

# Skip probability (mean of skip_2) per feature value.
plot_skip2_by_feature(df, 'mean', 'skip rate')
# ### 2.2 Data frequency by feature
# Row counts per feature value, to gauge the support behind each skip-rate bar.
plot_skip2_by_feature(df, 'count', 'data frequency', color='orange')
# Let's focus on using the sequence only (namely the columns 'session_id', 'session_position', 'session_length') as input first, before incorporating the session descriptive variables.
# Different sequence models have different ways of incorporating session variables, so let's first get a clean comparison of which model learns a better representation of the sequence. Besides, the data is already very large.
# ## 3. Data preprocessing
# +
def get_input(x):
    """Return the first (floor) half of sequence *x*: the visible prefix."""
    half = len(x) // 2
    return x[:half]

def get_output(x):
    """Return the second half of sequence *x*: the prediction target."""
    half = len(x) // 2
    return x[half:]
def long_to_wide(input_logs, max_file=100, is_test=False):
    """
    Aggregate session-track level data into session level.

    input_logs: list of csv file names (one row per session-track event)
    max_file: (optional) cap on the number of files read, to bound memory
    is_test: if True, additionally split track ids and skip_2 labels into a
        prediction input (first half of the session) and groundtruth
        (second half) via get_input/get_output

    Returns a DataFrame with one row per session; track ids and skip labels
    are collected into python lists.
    """
    # Combine csvs from multiple files.
    dfs = []
    for i, f in enumerate(input_logs[:max_file]):
        print("reading %s th file %s" % (i, f))
        dfi = pd.read_csv(f)
        # BUGFIX: 'track_id_clean' must be kept here -- the wide transform
        # below consumes it, and the original selection dropped it, which
        # raised a KeyError.
        dfi = dfi[['session_id', 'skip_2', 'session_position',
                   'session_length', 'track_id_clean']]
        dfs.append(dfi)
    df = pd.concat(dfs, axis=0, ignore_index=True)
    # Transform from long to wide: one row per session, list-valued columns.
    df['skip_2_bin'] = 1 * df['skip_2']  # bool -> {0, 1}
    df_wide = df[['session_id', 'skip_2_bin', 'session_length', 'track_id_clean']]\
        .groupby(['session_id', 'session_length'])[['track_id_clean', 'skip_2_bin']]\
        .agg({'track_id_clean': lambda x: list(x),
              'skip_2_bin': lambda x: list(x),
              })\
        .reset_index()
    # For the test set, split the wide data into prediction input and groundtruth.
    if is_test:
        df_wide['test_track_id_input'] = df_wide['track_id_clean'].apply(lambda x: get_input(x))
        df_wide['test_track_id_groundtruth'] = df_wide['track_id_clean'].apply(lambda x: get_output(x))
        df_wide['test_skip_2_input'] = df_wide['skip_2_bin'].apply(lambda x: get_input(x))
        df_wide['test_skip_2_groundtruth'] = df_wide['skip_2_bin'].apply(lambda x: get_output(x))
    return df_wide
# -
# Build the session-level frames from the first two log files of each split;
# only the test split gets the input/groundtruth half-session columns.
df_wide_train = long_to_wide(train_input_logs, max_file=2, is_test=False)
df_wide_test = long_to_wide(test_input_logs, max_file=2, is_test=True)
df_wide_train.head(3)
df_wide_test.head(3)
# ## 4. Mock data preparation and evaluation
# Evaluate on random prediction
# ### 4.1 skip rate mean accuracy
# +
def session_acc(prediction, groundtruth, top=20):
    """
    Mean elementwise accuracy over the first `top` positions of one session.

    For skip prediction, the mean of this metric over sessions matches the
    leaderboard metric:
    https://www.aicrowd.com/challenges/spotify-sequential-skip-prediction-challenge/leaderboards

    Usage:
        df_wide is session level data (as opposed to df_long, which is
        session_track level)
        per_session = df_wide.apply(lambda x: session_acc(x['prediction'], x['groundtruth']))
        AA = np.mean(per_session)

    prediction: array of binary {0, 1} skip_2 predictions within the session
    groundtruth: array of binary {0, 1} skip_2 groundtruth within the session
    """
    # Guard clauses: reject empty or mismatched inputs up front.
    if len(prediction) == 0:
        raise Exception('prediction length should be greater than 0')
    if len(prediction) != len(groundtruth):
        raise Exception('prediction and groundtruth should have the same length')
    matches = np.array(prediction[:top]) == np.array(groundtruth[:top])
    return np.mean(1.0 * matches)

print('session acc', session_acc([1,1,0], [1,0,0]),
      'session acc@1', session_acc([1,1,0], [1,0,0], top=1))
# -
# ### 4.2 Sequential prediction accuracy
# Divide sequence in half (take floor if odd length), use first half as input, second half as prediction
# +
# @TODO define session_nDCG
# -
| DRL/model/exploratory_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 Spark - local
# language: python
# name: spark-3-python
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# # Spark Collaborative Filtering (ALS) Deep Dive
# Spark MLlib provides a collaborative filtering algorithm that can be used for training a matrix factorization model, which predicts explicit or implicit ratings of users on items for recommendations.
#
# This notebook presents a deep dive into the Spark collaborative filtering algorithm.
# ## 1 Matrix factorization algorithm
#
# ### 1.1 Matrix factorization for collaborative filtering problem
#
# Matrix factorization is a common technique used in recommendation tasks. Basically, a matrix factorization algorithm tries to find latent factors that represent intrinsic user and item attributes in a lower dimension. That is,
#
# $$\hat r_{u,i} = q_{i}^{T}p_{u}$$
#
# where $\hat r_{u,i}$ is the predicted ratings for user $u$ and item $i$, and $q_{i}^{T}$ and $p_{u}$ are latent factors for item and user, respectively. The challenge to the matrix factorization problem is to find $q_{i}^{T}$ and $p_{u}$. This is achieved by methods such as matrix decomposition. A learning approach is therefore developed to converge the decomposition results close to the observed ratings as much as possible. Furthermore, to avoid overfitting issue, the learning process is regularized. For example, a basic form of such matrix factorization algorithm is represented as below.
#
# $$\min\sum(r_{u,i} - q_{i}^{T}p_{u})^2 + \lambda(||q_{i}||^2 + ||p_{u}||^2)$$
#
# where $\lambda$ is a the regularization parameter.
#
# In case explict ratings are not available, implicit ratings which are usually derived from users' historical interactions with the items (e.g., clicks, views, purchases, etc.). To account for such implicit ratings, the original matrix factorization algorithm can be formulated as
#
# $$\min\sum c_{u,i}(p_{u,i} - q_{i}^{T}p_{u})^2 + \lambda(||q_{i}||^2 + ||p_{u}||^2)$$
#
# where $c_{u,i}=1+\alpha r_{u,i}$ and $p_{u,i}=1$ if $r_{u,i}>0$ and $p_{u,i}=0$ if $r_{u,i}=0$. $r_{u,i}$ is a numerical representation of users' preferences (e.g., number of clicks, etc.).
#
# ### 1.2 Alternating Least Square (ALS)
#
# Owing to the term of $q_{i}^{T}p_{u}$ the loss function is non-convex. Gradient descent method can be applied but this will incur expensive computations. An Alternating Least Square (ALS) algorithm was therefore developed to overcome this issue.
#
# The basic idea of ALS is to learn one of $q$ and $p$ at a time for optimization while keeping the other as constant. This makes the objective at each iteration convex and solvable. The alternating between $q$ and $p$ stops when there is convergence to the optimal. It is worth noting that this iterative computation can be parallelised and/or distributed, which makes the algorithm desirable for use cases where the dataset is large and thus the user-item rating matrix is super sparse (as is typical in recommendation scenarios). A comprehensive discussion of ALS and its distributed computation can be found [here](http://stanford.edu/~rezab/classes/cme323/S15/notes/lec14.pdf).
# ## 2 Spark Mllib implementation
#
# The matrix factorization algorithm is available as `ALS` module in [Spark `ml`](https://spark.apache.org/docs/latest/ml-collaborative-filtering.html) for DataFrame or [Spark `mllib`](https://spark.apache.org/docs/latest/mllib-collaborative-filtering.html) for RDD.
#
# * The uniqueness of ALS implementation is that it distributes the matrix factorization model training by using "Alternating Least Square" method.
# * In the training method, there are parameters that can be selected to control the model performance.
# * Both explicit and implicit ratings are supported by Spark ALS model.
# ## 3 Spark ALS based MovieLens recommender
#
# In the following code, the MovieLens-100K dataset is used to illustrate the ALS algorithm in Spark.
# **Note**: This notebook requires a PySpark environment to run properly. Please follow the steps in [SETUP.md](https://github.com/Microsoft/Recommenders/blob/master/SETUP.md#dependencies-setup) to install the PySpark environment.
# +
# set the environment path to find Recommenders
import sys
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import sys
import pandas as pd
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
import pyspark.sql.functions as F
from pyspark.sql.functions import col
from pyspark.ml.tuning import CrossValidator
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import FloatType, IntegerType, LongType
from recommenders.datasets import movielens
from recommenders.utils.spark_utils import start_or_get_spark
from recommenders.evaluation.spark_evaluation import SparkRankingEvaluation, SparkRatingEvaluation
from recommenders.tuning.parameter_sweep import generate_param_grid
from recommenders.datasets.spark_splitters import spark_random_split
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("PySpark version: {}".format(pyspark.__version__))
# -
# Data column names
# +
# Column names shared by the MovieLens loader, the ALS model, and the evaluators.
COL_USER = "UserId"
COL_ITEM = "MovieId"
COL_RATING = "Rating"
COL_PREDICTION = "prediction"  # default output column name of Spark ALS
COL_TIMESTAMP = "Timestamp"
# Explicit schema for the MovieLens ratings data (user, item, rating, timestamp).
schema = StructType(
    (
        StructField(COL_USER, IntegerType()),
        StructField(COL_ITEM, IntegerType()),
        StructField(COL_RATING, FloatType()),
        StructField(COL_TIMESTAMP, LongType()),
    )
)
# -
# Model hyper parameters - these parameters are selected with reference to the benchmarking results [here](http://mymedialite.net/examples/datasets.html).
RANK = 10  # number of latent factors
MAX_ITER = 15  # ALS iterations
REG_PARAM = 0.05  # regularization strength
# Number of recommended items
K = 10
# Initialize a Spark session.
spark = start_or_get_spark("ALS Deep Dive", memory="16g")
# ### 3.1 Load and prepare data
# Data is read from csv into a Spark DataFrame.
dfs = movielens.load_spark_df(spark=spark, size="100k", schema=schema)
dfs.show(5)
# Data is then randomly split 75-25 for training and testing (`ratio` is the
# fraction assigned to the training split; seed fixed for reproducibility).
dfs_train, dfs_test = spark_random_split(dfs, ratio=0.75, seed=42)
# ### 3.2 Train a movielens model
# It is worth noting that Spark ALS model allows dropping cold users to favor a robust evaluation with the testing data. In case there are cold users, Spark ALS implementation allows users to drop cold users in order to make sure evaluations on the prediction results are sound.
# +
# Fit ALS on the training split with the hyperparameters defined above.
als = ALS(
    maxIter=MAX_ITER,
    rank=RANK,
    regParam=REG_PARAM,
    userCol=COL_USER,
    itemCol=COL_ITEM,
    ratingCol=COL_RATING,
    coldStartStrategy="drop"  # drop NaN predictions for users/items unseen in training
)
model = als.fit(dfs_train)
# -
# ### 3.3 Prediction with the model
#
# The trained model can be used to predict ratings with a given test data.
# Score the held-out pairs; drop the true rating so only the prediction remains.
dfs_pred = model.transform(dfs_test).drop(COL_RATING)
# With the prediction results, the model performance can be evaluated.
# +
# Rating-prediction metrics: compare dfs_pred against the held-out dfs_test.
evaluations = SparkRatingEvaluation(
    dfs_test,
    dfs_pred,
    col_user=COL_USER,
    col_item=COL_ITEM,
    col_rating=COL_RATING,
    col_prediction=COL_PREDICTION
)
print(
    "RMSE score = {}".format(evaluations.rmse()),
    "MAE score = {}".format(evaluations.mae()),
    "R2 score = {}".format(evaluations.rsquared()),
    "Explained variance score = {}".format(evaluations.exp_var()),
    sep="\n"
)
# -
# Oftentimes ranking metrics are also of interest to data scientists. Note usually ranking metrics apply to the scenario of recommending a list of items. In our case, the recommended items should be different from those that have been rated by the users.
# +
# Get the cross join of all user-item pairs and score them.
users = dfs_train.select('UserId').distinct()
items = dfs_train.select('MovieId').distinct()
user_item = users.crossJoin(items)
dfs_pred = model.transform(user_item)
# Remove seen items: the outer join plus the isNull filter below implements an
# anti-join -- only pairs with no matching training row (train.Rating is null,
# i.e. never rated in training) survive.
dfs_pred_exclude_train = dfs_pred.alias("pred").join(
    dfs_train.alias("train"),
    (dfs_pred['UserId'] == dfs_train['UserId']) & (dfs_pred['MovieId'] == dfs_train['MovieId']),
    how='outer'
)
dfs_pred_final = dfs_pred_exclude_train.filter(dfs_pred_exclude_train["train.Rating"].isNull()) \
    .select('pred.' + 'UserId', 'pred.' + 'MovieId', 'pred.' + "prediction")
dfs_pred_final.show()
# +
# Ranking metrics at cutoff K over the unseen-pair predictions.
evaluations = SparkRankingEvaluation(
    dfs_test,
    dfs_pred_final,
    col_user=COL_USER,
    col_item=COL_ITEM,
    col_rating=COL_RATING,
    col_prediction=COL_PREDICTION,
    k=K
)
print(
    "Precision@k = {}".format(evaluations.precision_at_k()),
    "Recall@k = {}".format(evaluations.recall_at_k()),
    "NDCG@k = {}".format(evaluations.ndcg_at_k()),
    "Mean average precision = {}".format(evaluations.map_at_k()),
    sep="\n"
)
# -
# ### 3.4 Fine tune the model
#
# Prediction performance of a Spark ALS model is often affected by the parameters
#
# |Parameter|Description|Default value|Notes|
# |-------------|-----------------|------------------|-----------------|
# |`rank`|Number of latent factors|10|The larger the more intrinsic factors considered in the factorization modeling.|
# |`regParam`|Regularization parameter|1.0|The value needs to be selected empirically to avoid overfitting.|
# |`maxIters`|Maximum number of iterations|10|The more iterations the better the model converges to the optimal point.|
#
# It is always a good practice to start model building with default parameter values and then sweep the parameter in a range to find the optimal combination of parameters. The following parameter set is used for training ALS models for comparison study purposes.
# Values swept for each hyperparameter (full cross product is trained below).
param_dict = {
    "rank": [10, 15, 20],
    "regParam": [0.001, 0.1, 1.0]
}
# Generate a dictionary for each parameter combination which can then be fed into model training.
param_grid = generate_param_grid(param_dict)
# Train models with parameters specified in the parameter grid. Evaluate the model with, for example, the RMSE metric, and then record the metrics for visualization.
# +
rmse_score = []
for g in param_grid:
    # One model per parameter combination; g supplies rank and regParam.
    als = ALS(
        userCol=COL_USER,
        itemCol=COL_ITEM,
        ratingCol=COL_RATING,
        coldStartStrategy="drop",
        **g
    )
    model = als.fit(dfs_train)
    dfs_pred = model.transform(dfs_test).drop(COL_RATING)
    evaluations = SparkRatingEvaluation(
        dfs_test,
        dfs_pred,
        col_user=COL_USER,
        col_item=COL_ITEM,
        col_rating=COL_RATING,
        col_prediction=COL_PREDICTION
    )
    rmse_score.append(evaluations.rmse())
# Round to 4 decimals for display only.
rmse_score = [float('%.4f' % x) for x in rmse_score]
# rows = rank values, columns = regParam values.
rmse_score_array = np.reshape(rmse_score, (len(param_dict["rank"]), len(param_dict["regParam"])))
# -
rmse_df = pd.DataFrame(data=rmse_score_array, index=pd.Index(param_dict["rank"], name="rank"),
                       columns=pd.Index(param_dict["regParam"], name="reg. parameter"))
fig, ax = plt.subplots()
sns.heatmap(rmse_df, cbar=False, annot=True, fmt=".4g")
# The calculated RMSE scores can be visualized to comparatively study how model performance is affected by different parameters.
# It can be seen from this visualization that RMSE first decreases and then increases as rank increases, due to overfitting. When the rank equals 20 and the regularization parameter equals 0.1, the model achieves the lowest RMSE score.
# ### 3.5 Top K recommendation
# #### 3.5.1 Top k for all users (items)
# Top-10 recommendations for every user in the training set.
dfs_rec = model.recommendForAllUsers(10)
dfs_rec.show(10)
# #### 3.5.2 Top k for a selected set of users (items)
# +
# Same, but restricted to a 3-user subset.
users = dfs_train.select(als.getUserCol()).distinct().limit(3)
dfs_rec_subset = model.recommendForUserSubset(users, 10)
# -
dfs_rec_subset.show(10)
# #### 3.5.3 Run-time considerations for top-k recommendations
#
# It is worth noting that usually computing the top-k recommendations for all users is the bottleneck of the whole pipeline (model training and scoring) of an ALS based recommendation system. This is because
# * Getting the top k from all user-item pairs requires a cross join which is usually very computationally expensive.
# * Inner products of user-item pairs are calculated individually instead of leveraging matrix block multiplication features which are available in certain contemporary computing acceleration libraries (e.g., BLAS).
#
# More details about possible optimizations of the top k recommendations in Spark can be found [here](https://engineeringblog.yelp.com/2018/05/scaling-collaborative-filtering-with-pyspark.html).
# Stop the Spark session to release its resources.
spark.stop()
# ## References
# 1. Yehuda Koren, Robert Bell, and Chris Volinsky, "Matrix Factorization Techniques for Recommender Systems", IEEE Computer, Vol. 42, Issue 8, pp 30-37, Aug., 2009.
# 2. Yifan Hu, Yehuda Koren, and Chris Volinsky, "Collaborative Filtering for Implicit Feedback Datasets", Proc. IEEE ICDM, 2008, Dec, Pisa, Italy.
# 3. Apache Spark. url: https://spark.apache.org/docs/latest/ml-collaborative-filtering.html
# 4. Seaborn. url: https://seaborn.pydata.org/
# 5. Scaling collaborative filtering with PySpark. url: https://engineeringblog.yelp.com/2018/05/scaling-collaborative-filtering-with-pyspark.html
# 6. Matrix Completion via Alternating Least Square (ALS). url: http://stanford.edu/~rezab/classes/cme323/S15/notes/lec14.pdf
| examples/02_model_collaborative_filtering/als_deep_dive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Analysis of two oyster samples where Lotterhos did methylRAD
# The M2 and M3 samples are here:
#
# http://owl.fish.washington.edu/nightingales/C_gigas/9_GATCAG_L001_R1_001.fastq.gz
# http://owl.fish.washington.edu/nightingales/C_gigas/10_TAGCTT_L001_R1_001.fastq.gz
#
#
# Path to the local BSMAP 2.74 installation (used by the alignment cells below).
bsmaploc="/Applications/bioinfo/BSMAP/bsmap-2.74/"
# ## Genome version
# !curl \
# ftp://ftp.ensemblgenomes.org/pub/release-32/metazoa/fasta/crassostrea_gigas/dna/Crassostrea_gigas.GCA_000297895.1.dna_sm.toplevel.fa.gz \
# > /Volumes/caviar/wd/data/Crassostrea_gigas.GCAz_000297895.1.dna_sm.toplevel.fa.gz
# !curl ftp://ftp.ensemblgenomes.org/pub/release-32/metazoa/fasta/crassostrea_gigas/dna/CHECKSUMS
# !ls /Volumes/caviar/wd/data/
# !md5 /Volumes/caviar/wd/data/Crassostrea_gigas.GCAz_000297895.1.dna_sm.toplevel.fa.gz
# cd /Volumes/caviar/wd/
# mkdir $(date +%F)
# ls
# ls /Volumes/web/nightingales/C
# !curl \
# http://owl.fish.washington.edu/nightingales/C_gigas/9_GATCAG_L001_R1_001.fastq.gz \
# > /Volumes/caviar/wd/2016-10-11/9_GATCAG_L001_R1_001.fastq.gz
# !curl \
# http://owl.fish.washington.edu/nightingales/C_gigas/10_TAGCTT_L001_R1_001.fastq.gz \
# > /Volumes/caviar/wd/2016-10-11/10_TAGCTT_L001_R1_001.fastq.gz
# cd 2016-10-11/
# !cp 9_GATCAG_L001_R1_001.fastq.gz M2.fastq.gz
# !cp 10_TAGCTT_L001_R1_001.fastq.gz M3.fastq.gz
# Align each sample's reads to the genome with BSMAP, 6 threads per run
# (the loop body is an IPython shell magic, hence the commented-out lines).
for i in ("M2","M3"):
    # !{bsmaploc}bsmap \
    # -a {i}.fastq.gz \
    # -d ../data/Crassostrea_gigas.GCAz_000297895.1.dna_sm.toplevel.fa \
    # -o bsmap_out_{i}.sam \
    # -p 6
# Extract per-position methylation ratios from each sample's alignment.
for i in ("M2","M3"):
    # !python {bsmaploc}methratio.py \
    # -d ../data/Crassostrea_gigas.GCAz_000297895.1.dna_sm.toplevel.fa \
    # -u -z -g \
    # -o methratio_out_{i}.txt \
    # -s {bsmaploc}samtools \
    # bsmap_out_{i}.sam \
# +
# !curl https://raw.githubusercontent.com/che625/olson-ms-nb/master/scripts/mr3x.awk \
# > /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr3x.awk
# !curl https://raw.githubusercontent.com/che625/olson-ms-nb/master/scripts/mr_gg.awk.sh \
# > /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr_gg.awk.sh
# -
#first methratio files are converted to filter for CG context, 3x coverage (mr3x.awk), and reformatting (mr_gg.awk.sh).
#due to issue passing variable to awk, simple scripts were used (included in repository)
# Filter each methratio output for CG context, then apply the 3x-coverage
# (mr3x.awk) and reformatting (mr_gg.awk.sh) scripts. Case-sensitive grep here.
for i in ("M2","M3"):
    # !echo {i}
    # !grep "[A-Z][A-Z]CG[A-Z]" <methratio_out_{i}.txt> methratio_out_{i}CG.txt
    # !awk -f /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr3x.awk methratio_out_{i}CG.txt \
    # > mr3x.{i}.txt
    # !awk -f /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr_gg.awk.sh \
    # mr3x.{i}.txt > mkfmt_{i}.txt
#first methratio files are converted to filter for CG context, 3x coverage (mr3x.awk), and reformatting (mr_gg.awk.sh).
#due to issue passing variable to awk, simple scripts were used (included in repository)
# Same pipeline with case-insensitive grep (-i), to check whether case matters
# in the context filter (compared via md5 further below).
for i in ("M2","M3"):
    # !echo {i}
    # !grep -i "[A-Z][A-Z]CG[A-Z]" <methratio_out_{i}.txt> methratio_out_{i}CGi.txt
    # !awk -f /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr3x.awk methratio_out_{i}CGi.txt \
    # > mr3xi.{i}.txt
    # !awk -f /Users/sr320/git-repos/sr320.github.io/jupyter/scripts/mr_gg.awk.sh \
    # mr3xi.{i}.txt > mkfmti_{i}.txt
# +
#maybe we need to ignore case
# -
# !md5 mkfmt_M2.txt mkfmti_M2.txt | head
# +
#nope
# -
# !head -100 mkfmt_M2.txt
# # Products
# cd /Users/sr320/git-repos/sr320.github.io/jupyter
# mkdir analyses
# mkdir analyses/$(date +%F)
# Copy the final formatted methylation tables into today's analyses folder.
for i in ("M2","M3"):
    # !cp /Volumes/caviar/wd/2016-10-11/mkfmt_{i}.txt analyses/$(date +%F)/mkfmt_{i}.txt
# !head analyses/$(date +%F)/*
# urls
#
# ```
# https://raw.githubusercontent.com/sr320/sr320.github.io/master/jupyter/analyses/2016-10-11/mkfmt_M2.txt
#
# https://raw.githubusercontent.com/sr320/sr320.github.io/master/jupyter/analyses/2016-10-11/mkfmt_M3.txt
# ```
| jupyter/Cgigas/.ipynb_checkpoints/Lotterhos BS samples-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Clara Train SDK Hyper Parameter Tuning Using AutoML
#
# By the end of this notebook you should be able to:
# 1. Run AutoML and use it as a scheduler for hyperparameter tuning to search:
# 1. Network architecture.
# 2. loss functions
# 3. Optimizers
# 4. Transformations
# 2. Run AutoML with reinforcement learning to search float parameters such as:
# 1. Learning rate
# 2. Transformation probabilities and ranges
#
# + [markdown] pycharm={"metadata": false}
# ## Prerequisites
# - Familiar with Clara train main concepts. See [Getting Started Notebook](../GettingStarted/GettingStarted.ipynb)
# - Nvidia GPU with 8Gb of memory
# - Have multiple GPUs is preferred
#
# + [markdown] pycharm={"metadata": false}
# ### Resources
# It might be helpful to watch the GTC Digital 2020 presentation on Clara Train SDK
# - [S22563](https://developer.nvidia.com/gtc/2020/video/S22563)
# Clara train Getting started: cover basics, BYOC, AIAA, AutoML
#
# + [markdown] pycharm={"metadata": false}
# ## Dataset
# This notebook uses a sample dataset (ie. a single image volume of the spleen dataset) provided in the package to train a small neural network for a few epochs.
# This single file is duplicated 32 times for the training set and 9 times for validation to mimic the full spleen data set.
#
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# # Lets get started
# It might be helpful to check the available NVIDIA GPU resources in the docker by running the cell below
# + pycharm={"metadata": false, "name": "#%%\n"}
# following command should show all gpus available
# !nvidia-smi
# + [markdown] pycharm={"metadata": false}
# This cell we define the root path for AutoML
# + pycharm={"metadata": false, "name": "#%%\n"}
# Root folder of the AutoML MMAR inside the Clara Train container.
MMAR_ROOT="/claraDevDay/AutoML/"
print ("setting MMAR_ROOT=",MMAR_ROOT)
# %ls $MMAR_ROOT
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# This cell defines some helper functions that we will use throughout the notebook
# + pycharm={"metadata": false, "name": "#%%\n"}
# !chmod 777 $MMAR_ROOT/commands/*
def runAutoML(configPath):
# %cd $MMAR_ROOT/commands
# ! ./automl.sh $configPath
def printFile(filePath,lnSt,lnOffset):
    """Print lnOffset lines of filePath starting at line lnSt.

    Uses an IPython shell escape (head | tail), so this only works inside a
    notebook/IPython session. NOTE: lnOffset is reused to hold the absolute
    end line passed to `head -n`.
    """
    print ("showing ",str(lnOffset)," lines from file ",filePath, "starting at line",str(lnSt))
    # convert the relative offset into the absolute end-line for head
    lnOffset=lnSt+lnOffset
    !< $filePath head -n "$lnOffset" | tail -n +"$lnSt"
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# # AutoML Main Components
# - [automl.sh](./commands/automl.sh) is where we configure the number of workers and which gpus to use.
# In this notebook we will use very small neural networks that will consume <2GB of GPU memory.
# We are setting the number of workers to 8 and they will all use GPU 0 by specifying
# `workers=0:0:0:0:0:0:0:0`.
# If you have multiple GPU and would like to have more workers you could change this to:
# - `workers=0:0:0:0:1:1:1:1` this will let workers 0-3 use GPU 0 while workers 4-7 will use GPU 1
# - `workers=0:1:2:3` this will let 4 workers each use a GPU
# - `workers=0,1:2,3` this will let 2 workers each use a 2 GPUs
#
# lets check out the contents of automl.sh
# + pycharm={"metadata": false, "name": "#%%\n"}
configFile=MMAR_ROOT+"/commands/automl.sh"
printFile(configFile,0,30)
# + [markdown] pycharm={"metadata": false}
# - [config_automl.json](./config/config_automl.json) is where any custom controls would be defined
# along with maximum number of MMARs to search and the number of MMARs to keep
# + pycharm={"metadata": false, "name": "#%%\n"}
configFile=MMAR_ROOT+"/config/config_automl.json"
printFile(configFile,0,30)
# + [markdown] pycharm={"metadata": false}
# - [automl_train_round.sh](./commands/automl_train_round.sh) is the script which will be triggered for each job
#
# + pycharm={"metadata": false, "name": "#%%\n"}
configFile=MMAR_ROOT+"/commands/automl_train_round.sh"
printFile(configFile,30,30)
# + [markdown] pycharm={"metadata": false}
# # Using Auto ML for Hyper Parameter Enum Search
#
# You can use AutoML as a scheduler to run multiple configurations using the Enum options.
#
# + [markdown] pycharm={"metadata": false}
# ### Example 1: Network Parameter Search
# In this example we are using the Enum option to search different network
# architecture arguments for the SegResnet neural network using [trn_autoML_Enum.json](./config/trn_autoML_Enum.json):
# - Blocks_down, blocks_up
# - Init_filters
# - Final_activation
#
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets see how to define this in the config
configFile=MMAR_ROOT+"/config/trn_autoML_Enum.json"
printFile(configFile,46,28)
# we also search different lr policies
printFile(configFile,32,13)
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# let us run it and see the generated configurations
#
# + pycharm={"metadata": false, "name": "#%% \n"}
runAutoML("trn_autoML_Enum")
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# AutoML now has created a sub-folder under automl/ and created 8 folders for all combinations of our experiments.
# Let us run the cell below and see the folders created
# + pycharm={"metadata": false, "name": "#%%\n"}
# ! ls -la $MMAR_ROOT/automl/trn_autoML_Enum
# + [markdown] pycharm={"metadata": false}
# Let us now examine the configs generated for a couple of experiments
# + pycharm={"metadata": false, "name": "#%%\n"}
exp_name="trn_autoML_Enum"
for i in range(1,3):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,32,5)
for i in range(1,6,2):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,38,12)
# + [markdown] pycharm={"metadata": false}
# ### Example 2: Loss and optimizer Search
# In this Example we are using the Enum option to search different losses
# using [trn_autoML_Loss_Optimizer.json](./config/trn_autoML_Loss_Optimizer.json).
# In order to do this we
# - use first level search to create an binary Enum tag set to `[true, false]`
# - apply the first tag to loss
# - apply the second tag to optimizer
#
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets see how to define this in the config
configFile=MMAR_ROOT+"/config/trn_autoML_Loss_Optimizer.json"
# top level binary Enum
printFile(configFile,8,14)
# apply to loss
printFile(configFile,25,5)
# apply to optimizer
printFile(configFile,32,5)
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets run it and see the configurations generated
runAutoML("trn_autoML_Loss_Optimizer")
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# Lets run cell below and see the folders created
# + pycharm={"metadata": false, "name": "#%%\n"}
# ! ls -la $MMAR_ROOT/automl/trn_autoML_Loss_Optimizer
# + [markdown] pycharm={"metadata": false}
# Lets now examine the configs generated for a couple of experiments
# + pycharm={"metadata": false, "name": "#%%\n"}
exp_name="trn_autoML_Loss_Optimizer"
for i in [1,2]:
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,9,16)
for i in [1,3]:
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,25,12)
# + [markdown] pycharm={"metadata": false}
# # Using Auto ML for float parameter using reinforcement learning (RL)
# So far we have used only the Enum option of AutoML. But what if we have a float value?
# We now will set a range in the search space and use reinforcement learning (RL).
#
# + [markdown] pycharm={"metadata": false}
# ### Example 3: Learning Rate and Learning Policy (RL) Search
# In this Example we will use [trn_autoML_LR_LP.json](./config/trn_autoML_LR_LP.json).
# In order to do this we
# - Use first level search learning rate
# - Search for learning policy
#
# Note that this will only use 1 worker since we will run a model wait for result then trigger another.
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets see how to define this in the config
configFile=MMAR_ROOT+"/config/trn_autoML_LR_LP.json"
# top level search for learning rate
printFile(configFile,8,10)
# search for learning policy
printFile(configFile,40,10)
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets run it and see the configurations generated
runAutoML("trn_autoML_LR_LP")
# + [markdown] pycharm={"metadata": false}
# Lets run cell below and see the folders created
# + pycharm={"metadata": false, "name": "#%%\n"}
# ! ls -la $MMAR_ROOT/automl/trn_autoML_LR_LP
# + [markdown] pycharm={"metadata": false}
# Lets now examine the configs generated for a couple of experiments
# + pycharm={"metadata": false, "name": "#%%\n"}
exp_name="trn_autoML_LR_LP"
for i in range(2,5):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,5,2)
for i in range(2,6,1):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,32,4)
# + [markdown] pycharm={"metadata": false}
#
#
# ### Example 4: Transformation Search
# In this Example we are using Enum option to search different transforms
# using [trn_autoML_Transform.json](./config/trn_autoML_Transform.json).
# Here, we will use Enum to enable/disable `AddGaussianNoise` transform and also use top level Enum
# to link 2 transforms to be enabled/disabled together.
# In order to do this we
# - Use first level search to create an binary Enum tag set to `[true, false]` and
# tag it with args `myTransformDisable`
# - Apply this tag `myTransformDisable` to transforms `RandomSpatialFlip`
# - Apply this tag `myTransformDisable` to transforms `ScaleShiftIntensity`
# - Add `search` section to `AddGaussianNoise` with a `"args": ["@disabled"]` and `"targets": [[true],[false]]`
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets see how to define this in the config
configFile=MMAR_ROOT+"/config/trn_autoML_Transform.json"
# top level binary Enum
printFile(configFile,8,14)
# apply to 1st transform
printFile(configFile,115,10)
# apply to 2nd transform
printFile(configFile,126,10)
# Add separate search for AddGaussianNoise transform
printFile(configFile,140,12)
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets run it and see the configurations generated
runAutoML("trn_autoML_Transform")
# + [markdown] pycharm={"metadata": false}
# Let us run cell below and see the folders created
# + pycharm={"metadata": false, "name": "#%%\n"}
# ! ls -la $MMAR_ROOT/automl/trn_autoML_Transform
# + [markdown] pycharm={"metadata": false}
# Let us now examine the configs generated for a couple of experiments
# + pycharm={"metadata": false, "name": "#%%\n"}
exp_name="trn_autoML_Transform"
for i in range(1,5):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,125,16)
for i in range(1,4,2):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,153,7)
# + [markdown] pycharm={"metadata": false}
# ### Example 5: Transform Reinforcement Learning (RL) Search with linking
# In this Example we are using RL option to search float parameter as well as linking these parameters between 2 transforms
# using [trn_autoML_TransformProb.json](./config/trn_autoML_TransformProb.json).
# In order to do this we
# - Set a search in the first transformation `ScaleIntensityOscillation`
# - Give this transform `ScaleIntensityOscillation` an alias like `myProb`
# - Apply this alias to the second transform `AddGaussianNoise` using the `apply` field
#
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets see how to define this in the config
configFile=MMAR_ROOT+"/config/trn_autoML_TransformProb.json"
# Search first transformation ScaleIntensityOscillation and give a tag
printFile(configFile,120,15)
# apply to AddGaussianNoise transformation
printFile(configFile,134,9)
# + pycharm={"metadata": false, "name": "#%%\n"}
# lets run it and see the configurations generated
runAutoML("trn_autoML_TransformProb")
# + [markdown] pycharm={"metadata": false}
# Let us run cell below and see the folders created
# + pycharm={"metadata": false, "name": "#%%\n"}
# ! ls -la $MMAR_ROOT/automl/trn_autoML_TransformProb
# + [markdown] pycharm={"metadata": false}
# Let us now examine the configs generated for a couple of experiments
# + pycharm={"metadata": false, "name": "#%%\n"}
exp_name="trn_autoML_TransformProb"
for i in range(2,7):
configFile=MMAR_ROOT+"/automl/"+exp_name+"/W"+str(i)+"_1_J"+str(i)+"/config/config_train.json"
printFile(configFile,144,16)
# + [markdown] pycharm={"metadata": false}
# # BYO AutoML logic
# Just as you can bring your own components (BYOC) for regular items in the train configuration,
# you can also bring your own AutoML logic.
# To do this, you simply have to follow the programmers guide and implement the APIs.
# As an example we are providing a simple controller [myAutoMLController.py](./BYOC/myAutoMLController.py).
# You should then point to it in the [config_automl.json](./config/config_automl.json).
# Unfortunately you can't overwrite that name so we have provided it in [config_automl_.json](./config/config_automl_.json).
#
# + [markdown] pycharm={"metadata": false}
# # Exercise
# 1. You can use BYO `ScaleIntensityRange` transformation created in Example 1 in [BYOC notebook](../GettingStarted/BYOC.ipynb)
# to do a search on BYO Transform. _Hint: you must use ref in the validation transforms to use the same values as in the training transforms_
# 2. You should change different parameters of automl and do more search
# 3. You can now change / rename the config_automl.json to point to the custom controller and try using it.
#
| NoteBooks/AutoML/AutoML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment
# ## Stability (deterministic evolution)
# - Code for the hopfield model <br>
# find a formula for the patterns in the regime $1 \ll p \ll N$;
# - Choose the coupling with patterns;
# - Test the stability given the deterministic dynamics;
#
# The key idea is to show the stability of the dynamics: starting from a $\xi_i$ you end up in the same $\xi_i$
# ## Corruption (deterministic evolution)
# Starting from a black/white image (a pattern) add some random noise: flip a pixel with probability $q$.
# - What happens when we increase $q$? What is the limit up to which the pattern is still recognisable?
# - How much time does the reconstruction take? How does it scale with $q$?
# ## Random evolution
# With the metropolis algorithm perform Montecarlo simulations of the system. <br>
# Repeat all the previous steps.<br>
# Remember the Metropolis update rule: the probability of accepting a spin flip is:
# $$
# p = min(1, e^{-\beta \Delta\mathcal{H}})
# $$
# where $\Delta\mathcal{H}$ is the difference between the new and the old hamiltonian
# ## Mnist dataset
# If all goes well try to implement handwriting recognition. <br>
# Good luck.
| Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='FF6F1F'>Case Gol</font>
#
# ## <font color='FF6F1F'>Respostas às perguntas de interesse</font>
# # Tabela de conteúdo
#
# [1. Informação básica](#1.-Informação-básica)
#
# [2. Bibliotecas](#2.-Bibliotecas)
#
# [3. Dados](#3.-Dados)
#
# [4. Perguntas](#4.-Perguntas)
#
# * [1. Faça um ranking para o número total de PAX por dia da semana.](#1.-Faça-um-ranking-para-o-número-total-de-PAX-por-dia-da-semana.)
#
# * [2. Qual a correlação de sábado e domingo somados com o total de RPK?](#2.-Qual-a-correlação-de-sábado-e-domingo-somados-com-o-total-de-RPK?)
#
# * [3. Qual a média de ‘Monetário’ por mês por Canal? E a mediana?](#3.-Qual-a-média-de-‘Monetário’-por-mês-por-Canal?-E-a-mediana?)
#
# * [4. Crie um forecast de PAX por ‘Local de Venda’ para os próximos 15 dias a contar da última data de venda.](#4.-Crie-um-forecast-de-PAX-por-‘Local-de-Venda’-para-os-próximos-15-dias-a-contar-da-última-data-de-venda.)
#
# * [5. Supondo que você precisa gerar um estudo para a área responsável, com base em qualquer modelo ou premissa, qual ‘Local de Venda’ você considera mais crítico. Por quê?](#5.-Supondo-que-você-precisa-gerar-um-estudo-para-a-área-responsável,-com-base-em-qualquer-modelo-ou-premissa,-qual-‘Local-de-Venda’-você-considera-mais-crítico.-Por-quê?)
#
# * [6. Criar modelo relacionando o comportamento de venda com variáveis não apresentadas nos dados (Ex: PIB, Dolar, etc.)](#6.-Criar-modelo-relacionando-o-comportamento-de-venda-com-variáveis-não-apresentadas-nos-dados-(Ex:-PIB,-Dolar,-etc.))
#
# ## 1. Informação básica
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# **Fonte:** Gol
#
# **Período em análise:** de 2016-11-01 a 2017-04-01.
#
# **Descrição das colunas:**
#
# | Variável | Descrição | Tipo |
# |:---|:---|:---|
# | Data Venda | Data (AAAA-MM-DD) | Data |
# | Canal de Venda | Canais de venda. | Nominal |
# | Local de Venda | Locais de venda. | Nominal |
# | PAX | Total de passageiros. | Discreto |
# | Monetário Vendido | Valores da venda. | Contínuo |
# | RPK | Indicador diretamente relacionado com o número de PAX. | Discreto |
# ## 2. Bibliotecas
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
import pandas as pd # dataframe
import numpy as np # numeric
import seaborn as sns # graph
from matplotlib import pyplot as plt # plot
from pylab import rcParams # figsize
# ## 3. Dados
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# suprime a notação científica
pd.set_option('display.float_format', lambda x: '%.4f' % x)
df = pd.read_excel('case_analytics-ds.xlsx', sheet_name='Dados')
df.sample(5)
df.info()
print(f'O dataframe possui {df.shape[0]} linhas e {df.shape[1]} colunas.')
print(f"Período em análise: de {df['Data Venda'].min()} a {df['Data Venda'].max()}.")
# Dados faltantes
total = df.isnull().sum().sort_values(ascending=False)
porcentagem = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
faltante = pd.concat([total, porcentagem], axis=1, keys=['Total', 'Porcentagem'])
faltante.head(45)
# * Não há dados faltantes.
# lista dos canais de venda
df['Canal de Venda'].unique().tolist()
# lista dos locais de venda
df['Local de Venda'].unique().tolist()
df.describe()
# ## 4. Perguntas
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# ### 1. Faça um ranking para o número total de PAX por dia da semana.
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# work on a copy so the original dataframe stays untouched
df_ = df.copy()
# weekday name (English, as produced by pandas day_name())
df_['dia_semana'] = df_['Data Venda'].dt.day_name()
# en --> pt: translate all seven weekday names with a single map
_dias_pt = {
    'Monday': 'segunda-feira',
    'Tuesday': 'terça-feira',
    'Wednesday': 'quarta-feira',
    'Thursday': 'quinta-feira',
    'Friday': 'sexta-feira',
    'Saturday': 'sábado',
    'Sunday': 'domingo',
}
df_['dia_semana'] = df_['dia_semana'].map(_dias_pt)
df_.sample(5)
df_.groupby(by='dia_semana')['PAX'].sum().sort_values(ascending=False)
# | Dia da semana | Total de passageiros |
# |:--|:--:|
# | quarta-feira | 293025 |
# | quinta-feira | 258488 |
# | sexta-feira | 255625 |
# | sábado | 253467 |
# | domingo | 241620 |
# | segunda-feira| 236316 |
# | terça-feira | 219802 |
# * O dia com maior número de passageiros, em todo período, é quarta-feira.
# ### 2. Qual a correlação de sábado e domingo somados com o total de RPK?
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# dia da semana: segunda-feira == 0; domingo == 6
df_['dia_semana_num'] = df_['Data Venda'].dt.dayofweek
df_.sample(3)
df_.groupby('dia_semana_num').agg({'RPK': ['sum']})
sabado_RPK = df_[['dia_semana_num','RPK']].query('`dia_semana_num` == 5').sum()
sabado_RPK
domingo_RPK = df_[['dia_semana_num','RPK']].query('`dia_semana_num` == 6').sum()
domingo_RPK
total_RPK = df_['RPK'].sum()
total_RPK
RPK_sab_dom = (sabado_RPK[1] + domingo_RPK[1]) / total_RPK
# +
labels = df_['dia_semana'].value_counts(sort = True).index
sizes = df_['dia_semana'].value_counts(sort = True)
colors = ["navajowhite", "orange", "papayawhip", "darkorange", "yellow", "gold", "coral"]
rcParams['figure.figsize'] = 5, 5
# Plot
plt.pie(sizes, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=270,)
plt.title('Porcentagem de cada dia da semana')
plt.show()
# +
fig_dims = (8, 4)
fig, ax = plt.subplots(figsize=fig_dims)
sns.barplot(x='dia_semana', y='RPK', data=df_)
# -
print(f'Sábado e domingo representam 28.3% dos dias registrados e {round(RPK_sab_dom * 100, 2)}% do total RPK.')
# ### 3. Qual a média de ‘Monetário’ por mês por Canal? E a mediana?
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# coluna 'mês'
df_['mês'] = df['Data Venda'].dt.month
df_.sample(5)
df_.groupby(['Canal de Venda', 'mês']).agg({'Monetário Vendido': ['mean', 'median']})
# ### 4. Crie um forecast de PAX por ‘Local de Venda’ para os próximos 15 dias a contar da última data de venda.
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# seleção das colunas
df_ts = df_[['Data Venda', 'PAX']].copy()
df_ts.sample(5)
df_ts.dtypes
# define data como index
ts = df_ts.set_index('Data Venda')
ts.head()
ts.index
# adiciona colunas
ts['ano'] = ts.index.year
ts['mes'] = ts.index.month
ts['dia_semana'] = ts.index.weekday
# Display a random sampling of 5 rows
ts.sample(5, random_state=0)
ts.loc['2016-11']
# +
#sns.set(rc={'figure.figsize':(18, 8)})
# -
# gráfico de linha da série de tempo completa
ts['PAX'].plot(linewidth=0.5);
cols_plot = ['PAX']
axes = ts[cols_plot].plot(marker='.', alpha=0.5, linestyle='None', figsize=(11, 9), subplots=True)
for ax in axes:
ax.set_ylabel('PAX Total')
# * Há certa sazonalidade, alguns períodos se repetindo apesar de alguns picos em algumas datas (provável influência de feriados).
ax = ts.loc['2016', 'PAX'].plot()
ax.set_ylabel('PAX')
# pico em 2016-11
ax = ts.loc['2016-11', 'PAX'].plot(marker='o', linestyle='-')
ax.set_ylabel('PAX')
# * Véspera de feriado.
ax = ts.loc['2017', 'PAX'].plot()
ax.set_ylabel('PAX')
# pico em 2017-01
ax = ts.loc['2017-01', 'PAX'].plot(marker='o', linestyle='-')
ax.set_ylabel('PAX')
# * Pico em 2017-01-09.
# ### 5. Supondo que você precisa gerar um estudo para a área responsável, com base em qualquer modelo ou premissa, qual ‘Local de Venda’ você considera mais crítico. Por quê?
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# Monerário Vendido por Canal de Venda - período total
df_.groupby('Local de Venda').agg({'Monetário Vendido': ['mean', 'median', 'sum']})
sns.barplot(x='Local de Venda', y='Monetário Vendido', data=df_)
# * O local de venda mais crítico é Ellipsis, possui um total de vendas no período mais baixo comparando-o com os outros locais.
# ### 6. Criar modelo relacionando o comportamento de venda com variáveis não apresentadas nos dados (Ex: PIB, Dolar, etc.)
# [Retorna à Tabela de conteúdo](#Tabela-de-conteúdo)
# derive the sale year (used to pick the correct rate table per row)
df_['ano'] = df['Data Venda'].dt.year
# distinct years present
df_['ano'].unique().tolist()
# distinct months present
df_['mês'].unique().tolist()
# +
# https://economia.acspservicos.com.br/indicadores_iegv/iegv_dolar.html
## dict = {month: USD/BRL rate}
dolar_2016 = {1: 4.054, 2: 3.978, 3: 3.694, 4: 3.551, 11: 3.339, 12: 3.351}
dolar_2017 = {1: 3.197, 2: 3.103, 3: 3.127, 4: 3.140, 11: 3.257, 12: 3.297}
# BUG FIX: the original loop re-assigned the whole 'dólar' column on every
# iteration, so whichever branch ran on the *last* iteration decided the
# rate table for ALL rows -- each row's own year was ignored (it also
# indexed df_['ano'][d] with the year *value*, not a row position).
# Build the column row by row, choosing the table that matches the year;
# months absent from the table become NaN, matching Series.map behavior.
df_['dólar'] = [
    (dolar_2016 if ano == 2016 else dolar_2017).get(mes, np.nan)
    for ano, mes in zip(df_['ano'], df_['mês'])
]
# +
# https://www.ibge.gov.br/estatisticas/economicas/precos-e-custos/9256-indice-nacional-de-precos-ao-consumidor-amplo.html?=&t=series-historicas
## dict = {month: ipca(%)}
ipca_2016 = {1: 1.27, 2: 0.90, 3: 0.43, 4: 0.61, 11: 0.18, 12: 0.30}
ipca_2017 = {1: 0.38, 2: 0.33, 3: 0.25, 4: 0.14, 11: 0.28, 12: 0.44}
# same per-row, year-aware mapping for the IPCA column
df_['ipca'] = [
    (ipca_2016 if ano == 2016 else ipca_2017).get(mes, np.nan)
    for ano, mes in zip(df_['ano'], df_['mês'])
]
# -
df_.sample(3)
df_[['Monetário Vendido','mês', 'ano', 'dólar', 'ipca']].describe()
corr = df_[['Monetário Vendido','mês', 'ano', 'dólar', 'ipca']].corr()
corr
sns.clustermap(corr, cmap='Oranges', annot = True, figsize=(6, 6))
# +
#df_['PAX'].to_csv('pax.txt', sep='\t', index=False, header=False)
# -
| case_gol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Text classification with an RNN
#
# Referenced from https://www.tensorflow.org/tutorials/text/text_classification_rnn
#
# This text classification tutorial trains a recurrent neural network on the IMDB large movie review dataset for sentiment analysis.
# # Setup
#
# ``` bash
# # install dataset
# pip3 install -q tensorflow_datasets
# ```
# +
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# -
# # Dataset
# +
# Load the IMDB reviews dataset with metadata; as_supervised gives (text, label) pairs.
dataset, info = tfds.load('imdb_reviews', with_info=True, as_supervised=True)
train_dataset, test_dataset = dataset['train'], dataset['test']

print(f"Description:\n\n{info.description}\n")
print(f"Features:\n\n{info.features}\n")
print(f"Train Element:\n\n{train_dataset.element_spec}\n")
# fixed message: the second count is the *test* split size
print(f"{len(train_dataset)} train samples and {len(test_dataset)} test samples\n")
# -
for example, label in train_dataset.take(1):
print('text: ', example.numpy())
print('label: ', label.numpy())
# # Prepare data for training
# +
BUFFER_SIZE = 10000
BATCH_SIZE = 64
# shuffle and batch data
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
# -
for example, label in train_dataset.take(1):
print(f"{len(example.numpy())} text and {len(label.numpy())} in a batch")
# +
VOCAB_SIZE = 1000
# encode text data
encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(max_tokens=VOCAB_SIZE)
encoder.adapt(train_dataset.map(lambda text, label: text))
# -
# get vocabulary using encoder
vocab = np.array(encoder.get_vocabulary())
print(f"vocabulary: {vocab[:5]}")
# tokenize string with encoder
for text, label in train_dataset.take(1):
original = text.numpy()[0]
tokenized = encoder(original).numpy()
recovered = vocab[tokenized]
print("original\n", original)
print("\ntokenize\n", tokenized)
print("\nrecovered\n", recovered)
# # Model
# +
VOCAB_SIZE = 1000
encoder = tf.keras.layers.experimental.preprocessing.TextVectorization(max_tokens=VOCAB_SIZE)
encoder.adapt(train_dataset.map(lambda text, label: text))
model = tf.keras.Sequential([
encoder,
tf.keras.layers.Embedding(
input_dim=len(encoder.get_vocabulary()),
output_dim=64,
mask_zero=True
),
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64)
),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1),
])
# -
print([layer.supports_masking for layer in model.layers])
# +
# predict on a sample text without padding.
sample_text = ('The movie was cool. The animation and the graphics '
'were out of this world. I would recommend this movie.')
predictions = model.predict(np.array([sample_text]))
print(predictions[0])
# predict on a sample text with padding
padding = "the " * 2000
predictions = model.predict(np.array([sample_text, padding]))
print(predictions[0])
# -
# # Train Model
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy']
)
history = model.fit(
train_dataset,
epochs=10,
validation_data=test_dataset,
validation_steps=30
)
# # Evaluate Model
# +
test_loss, test_acc = model.evaluate(test_dataset)
print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))
# +
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
    """Plot a training metric and its validation counterpart from a Keras History.

    Draws history.history[metric] and history.history['val_' + metric] on the
    current axes, labelling the x-axis in epochs and adding a legend.
    """
    val_key = 'val_' + metric
    plt.plot(history.history[metric])
    plt.plot(history.history[val_key], '')
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, val_key])
# -
plt.figure(figsize=(16,6))
plt.subplot(1,2,1)
plot_graphs(history, 'accuracy')
plt.subplot(1,2,2)
plot_graphs(history, 'loss')
| modelmaker/resources/templates/text_classification/notebooks/explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="4iDcj6TIgbzI"
# # Creation of molecular embeddings for classifying compounds using Transformers
# + [markdown] id="Gx6C47FhgCHA"
# ## Obtaining Data
# + [markdown] id="8KALcmt7p0Ax"
# Get the latest version of the dataset from the repository
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="jFRrBSPmpjCQ" outputId="0fc00b44-854d-4d73-80ae-e00ebd645ca8"
import numpy as np
import pandas as pd
import requests
import io
url = 'https://github.com/GLambard/Molecules_Dataset_Collection/raw/master/originals/HIV.csv'
data = requests.get(url).content
df = pd.read_csv(io.StringIO(data.decode('utf-8')), index_col = 0)
df.reset_index(inplace=True)
df
# + [markdown] id="H8xfVjtAgOOu"
# ## Pre-Processing Data
# + [markdown] id="kTbjNjsZrfT9"
# Create a vocabulary of tokens based on the SMILES specifications
#
# http://opensmiles.org/opensmiles.html
# + id="VRuKPbvQhrAs"
elements = 'H,He,Li,Be,B,C,N,O,F,Ne,Na,Mg,Al,Si,P,S,Cl,Ar,K,Ca,Sc,Ti,V,Cr,Mn,Fe,Co,Ni,Cu,Zn,Ga,Ge,As,Se,Br,Kr,Rb,Sr,Y,Zr,Nb,Mo,Tc,Ru,Rh,Pd,Ag,Cd,In,Sn,Sb,Te,I,Xe,Cs,Ba,La,Ce,Pr,Nd,Pm,Sm,Eu,Gd,Tb,Dy,Ho,Er,Tm,Yb,Lu,Hf,Ta,W,Re,Os,Ir,Pt,Au,Hg,Tl,Pb,Bi,Po,At,Rn,Fr,Ra,Ac,Th,Pa,U,Np,Pu,Am,Cm,Bk,Cf,Es,Fm,Md,No,Lr,Rf,Db,Sg,Bh,Hs,Mt,Ds,Rg,Cn,Uut,Fl,Uup,Lv,Uus,Uuo'
aromatic_atoms = 'b,c,n,o,p,s,as,se,te'
symbols = '[,],(,),=,+,-,#,:,@,.,%'
isotopes = '0,1,2,3,4,5,6,7,8,9'
elements = str(elements).split(',')
aromatic_atoms = str(aromatic_atoms).split(',')
symbols = str(symbols).split(',')
isotopes = str(isotopes).split(',')
smiles_vocabulary = elements + aromatic_atoms + symbols + isotopes
# + [markdown] id="2iEuhSs0rno9"
# Method to process a SMILES by spliting it into an array of tokens that are part of the SMILES vocabulary
# + id="Z1TWKgxjNExR"
def process_smiles(smiles, vocabulary):
    """Greedily tokenize a SMILES string into tokens from *vocabulary*.

    At each position the longest match wins: a 3-character candidate is
    tried first, then 2 characters, then 1. On a character that starts no
    known token, an error is printed and the tokens collected so far are
    returned (same best-effort behavior as the original notebook code).
    """
    tokens = []
    pos = 0
    while pos < len(smiles):
        for width in (3, 2, 1):
            candidate = smiles[pos:pos + width]
            if len(candidate) == width and candidate in vocabulary:
                tokens.append(candidate)
                pos += width
                break
        else:
            # no vocabulary token starts here: report and stop early
            print('Error in value', smiles[pos])
            print(smiles)
            break
    return tokens
# + [markdown] id="6cktfXtsQXIy"
# Method to process an array of SMILES into a list of processed SMILES and respective lengths (number of tokens)
# + id="PYOkDDrCMFVG"
def process_smiles_array(smiles_array):
    """Tokenize every SMILES string in *smiles_array*.

    Returns a pair (processed, lengths): processed[i] is the space-joined
    token string for smiles_array[i], lengths[i] its token count.
    Relies on the module-level `smiles_vocabulary`.
    """
    processed_list = []
    lengths = []
    for smiles in smiles_array:
        tokens = process_smiles(smiles, smiles_vocabulary)
        processed_list.append(' '.join(tokens))
        lengths.append(len(tokens))
    return processed_list, lengths
# + [markdown] id="KMlCOV7idlhO"
# Process all SMILES in the dataset
# + id="Zep3WMMROu9e"
processed_smiles, smiles_lengths = process_smiles_array(df['smiles'].values)
# + [markdown] id="f9zXjaETdpsZ"
# Insert processed SMILES and respective lengths into the dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="lw6OXkW8PQqh" outputId="abdb23d8-283d-4080-b2c1-180d9b3f99bb"
df['processed_smiles'] = processed_smiles
df['smiles_length'] = smiles_lengths
df
# + [markdown] id="aSZ4FYa9W8NR"
# Plot a histogram with the distribution of the lengths of the SMILES in the dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="pnzLW9gJRzLj" outputId="80a7d817-d722-493c-cf5b-b992775d799a"
import matplotlib.pyplot as plt
plt.hist(smiles_lengths, bins=100)
plt.ylabel('Number of SMILES')
plt.xlabel('Length of SMILES')
plt.show()
# + [markdown] id="kBcIlhUBj3zV"
# Check the percentage of the instances from the dataset where the length of the SMILES is too small/big
# + colab={"base_uri": "https://localhost:8080/"} id="AKBinb_7kpfI" outputId="c980c47d-e2e0-4822-fa84-35da03521a2b"
length_range = (15, 125)
filtered = filter(lambda x: length_range[0] <= x <= length_range[1], smiles_lengths)
percentage = len(list(filtered)) / len(processed_smiles)
print('Percentage of instances with SMILES\' length between %s and %s: %s' % (length_range[0], length_range[1], percentage))
sequence_length = length_range[1]
# + [markdown] id="rV4L9JilCK4e"
# Remove instances from the dataset where the length of the SMILES is too small/big
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="JTSV4eLXCM8V" outputId="db4134cf-2530-443e-dfd7-09d349446c6f"
df = df[(df['smiles_length'] >= length_range[0]) & (df['smiles_length'] <= length_range[1])]
df = df.drop('smiles_length', axis='columns')
df
# + [markdown] id="q2q8y9mlCJRm"
# ## Transformer model to extract embeddings and use them as input to another classifier
# + [markdown] id="f6M_xnKWpnOn"
# Install necessary packages
# + colab={"base_uri": "https://localhost:8080/"} id="pp8HBXwqCNQC" outputId="ffaa1da6-10a3-4b9c-f310-0e50249c0e2a"
# ! pip install transformers
# + [markdown] id="F_VBm96dxaQO"
# Check if an GPU is being used
# + colab={"base_uri": "https://localhost:8080/"} id="Gr7icVnyCN7I" outputId="ad793d50-bf9a-4b73-92b7-c63d50c678ed"
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# + [markdown] id="9wBZpsKmxf_x"
# Initialize model and tokenizer
#
# > The model selected is the BERT cased model since the tokens in the SMILES vocabulary are cased dependent, for example, the token for the chemical element Carbon 'C' must be differentiated from the token for the aromatic element 'c'.
# + colab={"base_uri": "https://localhost:8080/"} id="cYOMwU12CQOy" outputId="fbd1b377-d55b-47c8-8819-8090566dbac4"
from transformers import TFAutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
model = TFAutoModel.from_pretrained('bert-base-cased')
# + [markdown] id="KGNraIVaxr0X"
# Method to handle tokenization
# + id="2Jmw5CqXCS4P"
def tokenize(sequence):
    """Tokenize a SMILES string for BERT.

    Pads/truncates to the global ``sequence_length`` and returns the
    encoding (input_ids, attention_mask) as TensorFlow tensors.
    """
    # The original version had an unreachable `return tokens` after this
    # return, referencing an undefined name; it has been removed.
    return tokenizer.encode_plus(sequence,                    # sequence to tokenize
                                 max_length=sequence_length,  # maximum length for the sequence
                                 truncation=True,             # truncate any sequence longer than the maximum length
                                 padding='max_length',        # allow any sequence shorter than the maximum length to be padded
                                 add_special_tokens=True,     # allow special tokens (important for BERT)
                                 return_attention_mask=True,  # output attention_mask needed
                                 return_token_type_ids=False, # output token_type_ids not needed
                                 return_tensors='tf')         # return TensorFlow tensors
# + [markdown] id="GP9JQA3F79Nv"
# Demonstration of BERT's tokenization
# + colab={"base_uri": "https://localhost:8080/"} id="pA0emsLM54J6" outputId="ef309957-9cfa-4352-845f-cecb3c449ede"
processed_smiles = df['processed_smiles'].values
smiles = processed_smiles[0]
input_ids = tokenize(smiles)
ids = input_ids['input_ids'][0].numpy()
print('Sequence of tokens:\n')
print(smiles)
print('\n')
print('List of BERT\'s vocabulary indeces:\n')
print(ids)
print('\n')
print('Decoding of the indeces:\n')
print(tokenizer.decode(ids))
# + [markdown] id="12NTJLurTaAb"
# Method to generate an embedding for a sequence as a single vector
#
# > The model BERT has 12 hidden states, each being the output of its 12 layers (transformer blocks).
#
# > Each token is represented by a vector with 768 features. This means that each sequence of tokens is represented by an array with the dimensions (length of the sequence, 768).
#
# > The strategy to generate an embedding from a sequence of tokens is to average, for each token, the values of its vector. This means that each sequence of tokens is represented by a one-dimensional vector with 768 features.
# + id="FN02lZ-TSTl1"
def get_embedding(sequence):
    """Embed a SMILES string as a single 768-dimensional vector.

    The sequence is tokenized, run through BERT, and the token vectors
    of the last hidden state are mean-pooled into one vector.
    """
    encoded = tokenize(sequence)           # sequence tokenization
    bert_output = model(encoded)           # run the sequence through BERT
    token_vectors = bert_output[0][0]      # token vectors of the last hidden state
    # Mean-pool over the token axis -> one vector with 768 features.
    pooled = tf.reduce_mean(token_vectors, axis=0)
    return pooled.numpy()
# + [markdown] id="F9k_k2et5FRl"
# Generate embeddings for all the SMILES in the dataset
# + colab={"base_uri": "https://localhost:8080/"} id="qFU_qccTTpP_" outputId="a5dc84fa-7d63-41aa-fce6-09974b3c0336"
from tqdm import tqdm
embeddings = []
# NOTE(review): only the first 9 molecules are embedded here (demo subset);
# swap in the commented-out line to process the full dataset.
#for smiles in tqdm(processed_smiles):
for smiles in tqdm(processed_smiles[0:9]):
    embedding = get_embedding(smiles)
    embeddings.append(embedding)
# + [markdown] id="jEHffPvzpUp-"
# ## References
# + [markdown] id="ShBNX0YMpYLv"
# https://towardsdatascience.com/tensorflow-and-transformers-df6fceaf57cc
#
# https://www.kaggle.com/sameerpixelbot/bert-embeddings-with-tensorflow-2-0-example
#
# https://betterprogramming.pub/build-a-natural-language-classifier-with-bert-and-tensorflow-4770d4442d41
#
# https://medium.com/@dhartidhami/understanding-bert-word-embeddings-7dc4d2ea54ca
#
# https://www.sbert.net/examples/applications/computing-embeddings/README.html
#
# https://www.analyticsvidhya.com/blog/2020/07/transfer-learning-for-nlp-fine-tuning-bert-for-text-classification/
#
| smiles_transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ** Notebook - Run SQL Queries for EDA, Data Wrangling, Data Visualization and Modeling ** <br>
# Dataset: Wyoming Oil and Gas Conservation Commission
#
# Importing the basic modules <br>
# Data Analysis Library: Pandas, https://pandas.pydata.org/
# Visualization Library: Matplotlib, https://matplotlib.org/
# +
import sqlite3
import pandas as pd # data processing and csv file IO library
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns # python graphing library
plt.style.use('seaborn')
sns.set(style="white", color_codes=True)
# plt.rcdefaults() # resest to default matplotlib parameters
import warnings #ignore unwanted messages
warnings.filterwarnings("ignore")
# -
# In order to access the database components, we need to open the connection to the .sqlite file and use the cursor
# Open the connection to the .sqlite database file; the cursor executes statements.
conn = sqlite3.connect(r'C:\Users\AyushRastogi\Downloads\PowderDb.sqlite')
cur = conn.cursor()
# Now we need to bring the entire table into Pandas dataframe, so we can run SQL queries and obtain the desired part of data
# - https://sqlite.org/docs.html
# - https://sqlite.org/optoverview.html
# SQL Query - Entire data from database converted to a dataframe
data = pd.read_sql_query(''' SELECT * FROM Production;''', conn)
print (data.head(10)) #default head() function prints 5 results
print (data.shape) # in the form of rows x columns
index_data = data.index
print(index_data)
col = data.columns # good idea to print this if multiple columns are present
print(col)
# looking into the number of companies we have in the dataset - descending order
print (data['Formation'].value_counts())
data['Sum'] = data[['Jan_oil', 'Feb_oil', 'Mar_oil', 'Apr_oil', 'May_oil', 'Jun_oil', 'Jul_oil', 'Aug_oil', 'Sep_oil',
'Oct_oil', 'Nov_oil', 'Dec_oil']].sum(axis=1)
data = data.sort_values(by=['Sum'], ascending = False)
data.head() # will show top 5 results
data_top25 = data.iloc[0:25] #Selecting the top 25 rows of the dataframe
data_top25
# +
# Install using this command -> conda install -c conda-forge basemap
# https://matplotlib.org/basemap/
#Identify the location of top 25 wells
from mpl_toolkits.basemap import Basemap
lon = data_top25.Lon.values # remove the index column
lat = data_top25.Lat.values # remove the index column
plt.figure(figsize=(10,15))
m = Basemap(projection='merc',llcrnrlat=30,urcrnrlat=50,\
llcrnrlon=-120,urcrnrlon=-100,lat_ts=20,resolution='l')
m.shadedrelief()
#m.etopo()
#m.bluemarble()
m.drawcounties(linewidth=0.1, linestyle='solid', color='k', antialiased=1, facecolor='none', ax=None, zorder=None, drawbounds=False)
m.drawstates(linewidth=0.5, linestyle='solid', color='k', antialiased=1, ax=None, zorder=None)
#m.readshapefile(shapefile=r'C:\Users\ayush\Desktop\WOGCC\WY Shape Files\tl_2013_56_cousub',name = m)
plt.title("Rockies State Map",fontname="Times New Roman",fontweight="bold", fontsize = 20)
x,y = m(lon, lat)
m.plot(x,y, 'ro', markersize=5, alpha=1, zorder=0.8)
plt.show()
# +
# Plot 1: Identify the company with the highest production
x = np.arange(len(data_top25['Company']))
fig, ax = plt.subplots()
axes = plt.gca()
fig.set_size_inches(10, 10)
plt.xticks(x, rotation=90)
plt.bar(data_top25.Company,data_top25.Sum, align='center', alpha=0.5, color = 'g')
ax.set_title('Plot 1:Companies with Highest Production', fontsize=14, fontweight='bold')
ax.set_xlabel('Company', fontsize = 12, fontweight='bold')
ax.set_ylabel('Production', fontsize = 12, fontweight='bold')
# +
#Query to sum the monthly production
data2 = pd.read_sql_query('''
SELECT Yr, Sum(Jan_oil), Sum(Feb_oil), Sum(Mar_oil), Sum(Apr_oil), Sum(May_oil), Sum(Jun_oil),
Sum(Jul_oil), Sum(Aug_oil), Sum(Sep_oil), Sum(Oct_oil), Sum(Nov_oil), Sum(Dec_oil)
FROM Production
GROUP BY Yr
;''', conn)
data2
# +
#data2 = data2[data2.Yr != 2018] # Get rid of 2018 row since its incomplete
# -
data2.set_index('Yr').plot(figsize=(10,10), grid=True)
# +
# Visualization to identify top producing fields, here we will look at top 100 wells
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
fig.subplots_adjust(top=0.85)
ax.set_title('Plot 3: Activity (Histogram)', fontsize=14, fontweight='bold')
ax.set_xlabel('Field Name', fontsize = 12, fontweight='bold')
ax.set_ylabel('Frequency', fontsize = 12, fontweight='bold')
plt.grid(True)
plt.xticks(rotation=90)
ax.hist(data[:100]['Formation'], bins = 25)
plt.show()
# -
# **Start of the session 2**
# +
data_sess2= pd.read_sql_query('''SELECT Apino, Yr, Jan_oil, Feb_oil, Mar_oil, Apr_oil, May_oil, Jun_oil, Jul_oil, Aug_oil, Sep_oil, Oct_oil, Nov_oil, Dec_oil, Jan_days, Feb_days, Mar_days, Apr_days, May_days, Jun_days, Jul_days, Aug_days, Sep_days, Oct_days, Nov_days, Dec_days
FROM Production
ORDER BY Apino;''', conn)
data_sess2
# -
data_test = pd.read_sql_query('''SELECT Apino, Yr, Jan_oil, Dec_oil,Jan_days, Dec_days
FROM Production
ORDER BY Apino;''', conn)
data_test
p = data_test.pivot_table(index='Apino', columns = 'Yr')
p
# Running Query for Data Visualization
# SQL Query - Categorical data with highest Jan production
data_viz = pd.read_sql_query(''' SELECT Wellname, County, Company, Field_name, Yr, Jan_oil, Formation, Wellclass
FROM Production
ORDER BY Jan_oil DESC
LIMIT 25
;''', conn)
print (data_viz.head(10)) #default head() function prints 5 results
print (data_viz.shape) # in the form of rows x columns
# Trying different types of ways we can plot - Scatter
data_viz.plot(kind="scatter", x="Yr", y="Jan_oil") # Style 1
sns.jointplot(x="Yr", y="Jan_oil", data=data, size=5) # Style 2 - using seaborn
# We'll use seaborn's FacetGrid to color the scatterplot by County
sns.FacetGrid(data, hue="County", size=5) \
.map(plt.scatter, "Yr", "Jan_oil") \
.add_legend()
'''Boxplot: Adding a layer of individual points on top of it through
Seaborn's striplot. We'll use jitter=True so that all the points don't fall in single vertical lines
above the species. Saving the resulting axes as ax each time causes the resulting plot to be shown
on top of the previous axes'''
ax = sns.boxplot(x="Yr", y="Jan_oil", data=data_viz)
ax = sns.stripplot(x="Yr", y="Jan_oil", data=data_viz, jitter=True, edgecolor='gray')
'''A violin plot combines the benefits of the previous two plots and simplifies them.
Denser regions of the data are fatter, and sparser thiner in a violin plot'''
sns.violinplot(x="Yr", y="Jan_oil", data=data_viz, size=6)
# +
'''A final seaborn plot useful for looking at univariate relations is the kdeplot,
Creates and visualizes a kernel density estimate of the underlying feature'''
sns.FacetGrid(data_viz, hue="County", size=6) \
.map(sns.kdeplot, "Yr") \
.add_legend()
# -
# pairplot, which shows the bivariate relation
# between each pair of features
sns.pairplot(data_viz, hue="Yr", size=3)
# The diagonal elements in a pairplot show the histogram by default
# We can update these elements to show other things, such as a kde
sns.pairplot(data_viz, hue="Yr", size=3, diag_kind="kde")
# Now that we've covered seaborn, let's go back to some of the ones we can make with Pandas
# We can quickly make a boxplot with Pandas on each feature split out by species
data_viz.drop("County", axis=1).boxplot(by="Yr", figsize=(12, 6))
# +
# One cool more sophisticated technique pandas has available is called Andrews Curves
# Andrews Curves involve using attributes of samples as coefficients for Fourier series and then plotting these
# Parallel coordinates plots each feature on a separate column & then draws lines connecting the features for each data sample
# Radviz puts each feature as a point on a 2D plane, and then simulates having each sample attached to those points through a spring weighted by the relative value for that feature
# NOTE: pandas.tools.plotting was deprecated in pandas 0.20 and removed in
# 0.24; these helpers now live in pandas.plotting.
from pandas.plotting import andrews_curves
from pandas.plotting import parallel_coordinates
from pandas.plotting import radviz
#andrews_curves(data_viz, "Jan_oil")
#parallel_coordinates(data_viz.drop("County", axis=1), "Jan_oil")
#radviz(data_viz.drop("County", axis=1), "Jan_oil")
| QueryPB.Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bonus
# Following is the pytorch implementation of the of the given network architecture.
#
# #### Note
# With the given architecture, the model did not converge.
import torch
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the dataset; each row is one passenger.
df = pd.read_csv("titanic.csv")
df.head()
# Check for null values
df.isnull().sum()
# +
X = df.drop(["Price", "PassengerId"], axis=1) # independent variable
y = df["Price"]
X.shape, y.shape
# -
X = df.drop(["Price", "PassengerId"], axis=1).values # independent variable
y = df["Price"].values
# +
## Data split
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=12)
## Creating tensors
# Convert the NumPy arrays to float32 tensors for PyTorch.
X_train = torch.FloatTensor(X_train)
y_train = torch.FloatTensor(y_train)
X_test = torch.FloatTensor(X_test)
y_test = torch.FloatTensor(y_test)
# +
### Creating ANN Model class with PyTorch
class ANN_Model(nn.Module):
    """Minimal feed-forward regressor: one ReLU hidden layer.

    Architecture: input_features -> hidden1 (ReLU) -> out_feature.
    """
    def __init__(self, input_features=3, hidden1=4, out_feature=1):
        super().__init__()
        # Hidden fully connected layer, then the linear output layer.
        self.f_connected1 = nn.Linear(input_features, hidden1)
        self.out = nn.Linear(hidden1, out_feature)

    def forward(self, x):
        """Forward pass: ReLU activation on the hidden layer, linear output."""
        return self.out(F.relu(self.f_connected1(x)))
# -
### Instantiate my ANN module
torch.manual_seed(42)
model = ANN_Model()
## Print the network architecture
model.parameters
# +
## Backward propgation
## -- Define the loss function and define the optimizer
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
# +
epochs = 500
final_losses = []
# Training loop: forward pass, loss, backprop, parameter update.
for i in range(1, epochs + 1):
    y_pred = model.forward(X_train)
    loss = loss_function(y_pred, y_train)
    # Store the Python scalar (loss.item()), not the tensor: keeping the
    # tensor retains the whole autograd graph for every epoch (memory
    # leak) and breaks plt.plot on newer PyTorch versions.
    final_losses.append(loss.item())
    if i % 100 == 0:
        print("Epoch no. {} and the loss is: {} ".format(i, loss.item()))
    ## Important step: we need to reduce the loss. So after epoch this line
    # erases the gradient of all the optimized class
    optimizer.zero_grad()
    loss.backward()  ## backpropagate to compute the gradients
    optimizer.step()
# -
## Plot the loss function
plt.plot(range(epochs), final_losses)
plt.ylabel("Loss")
plt.xlabel("Epoch")
## Pickout the model
predictions = []
# Inference: no_grad() disables gradient tracking during evaluation.
with torch.no_grad():
    for i, data in enumerate(X_test):
        y_pred = model(data)
        predictions.append(y_pred.item())
# NOTE(review): this rebinds `df`, shadowing the original titanic DataFrame.
d = {'Predicted Price':predictions,'Actual Price':y_test.tolist()}
df = pd.DataFrame(d, columns=['Predicted Price','Actual Price'])
df
| neural-networks/assignment6/ass6_Bonus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] nbpresent={"id": "34f921f8-5eb3-4ef2-8586-508ee5b7ea58"}
# # Talktorial 9
#
# # Ligand-based pharmacophores
#
# #### Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin
#
# <NAME>, <NAME> and <NAME>
# -
# **Note**: Please run this notebook cell by cell. Running all cells at once is also possible; however, a few PyMol images might not turn out as intended.
# + [markdown] nbpresent={"id": "7f9bc5ae-85af-41dd-ab26-d16fb282caf4"}
# ## Aim of this talktorial
#
# In this talktorial, we use known EGFR ligands, which were selected and aligned in the previous talktorial, to identify donor, acceptor, and hydrophobic pharmacophoric features for each ligand. Those features are then clustered to define an ensemble pharmacophore, which represents the properties of the set of known EGFR ligands and can be used to search for novel EGFR ligands via virtual screening.
#
# ## Learning goals
#
# ### Theory
#
# * Pharmacophore modeling
# * Structure- and ligand-based pharmacophore modeling
# * Virtual screening with pharmacophores
# * Clustering: k means
#
# ### Practical
#
# * Get pre-aligned ligands from previous talktorial
# * Start PyMoL
# * Show ligands with PyMol
# * Extract pharmacophore features
# * Show the pharmacophore features of all ligands
# * Hydrogen bond donors
# * Hydrogen bond acceptors
# * Hydrophobic contacts
# * Collect coordinates of features per feature type
# * Generate ensemble pharmacophores
# * Set static parameters for k-means clustering
# * Set static parameters for cluster selection
# * Define k-means clustering and cluster selection functions
# * Cluster features
# * Select relevant clusters
# * Get selected cluster coordinates
# * Show clusters
# * Hydrogen bond donors
# * Hydrogen bond acceptors
# * Hydrophobic contacts
# * Show ensemble pharmacophore
#
#
# ## References
#
# * IUPAC pharmacophore definition
# ([<i>Pure & Appl. Chem</i> (1998), <b>70</b>, 1129-43](https://iupac.org/publications/pac/70/5/1129/))
# * 3D pharmacophores in LigandScout
# ([<i>J. Chem. Inf. Model.</i> (2005), <b>45</b>, 160-9](http://pubs.acs.org/doi/pdf/10.1021/ci049885e))
# * Book chapter: Pharmacophore Perception and Applications
# ([Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 259-82](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6f))
# * Book chapter: Structure-Based Virtual Screening ([Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 313-31](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6h)).
# * <NAME> and the origin of the pharmacophore concept
# ([<i>Internet Electron. J. Mol. Des.</i> (2007), <b>6</b>, 271-9](http://biochempress.com/Files/iejmd_2007_6_0271.pdf))
# * PyMol integration with RDKit ([rdkit.Chem.PyMol documentation](http://rdkit.org/docs/source/rdkit.Chem.PyMol.html))
# * <NAME>'s demonstration of pharmacophore modeling with RDKit
# ([RDKit UGM 2016 on GitHub](https://github.com/rdkit/UGM_2016/blob/master/Notebooks/Stiefl_RDKitPh4FullPublication.ipynb))
# -
# ## Theory
#
# ### Pharmacophores
#
# In computer-aided drug design, the description of drug-target interactions with pharmacophores is a well-established method. The term pharmacophore was defined in 1998 by a IUPAC working party:
# "A pharmacophore is the ensemble of steric and electronic features that is necessary to ensure the optimal supramolecular interactions with a specific biological target structure and to trigger (or to block) its biological response."
# ([<i>Pure & Appl. Chem.</i> (1998), <b>70</b>, 1129-43](https://iupac.org/publications/pac/70/5/1129/))
#
# In other words, a *pharmacophore* consists of several *pharmacophoric features*, which describe important steric and physico-chemical properties of a ligand observed to bind a target under investigation.
# Such *physico-chemical properties* (also called feature types) can be hydrogen bond donors/acceptors, hydrophobic/aromatic interactions, or positively/negatively charged groups, and the *steric properties* are defined by the 3D arrangement of these features.
#
# #### Structure- and ligand-based pharmacophore modeling
#
# In pharmacophore modeling, two main approaches are used, depending on the biological question and available data sources, i.e. structure- and ligand-based pharmacophore modeling.
#
# *Structure-based pharmacophore models* are derived from protein-ligand complexes. Features are defined by observed interactions between the protein and ligand, ensuring that only those ligand moieties are used for virtual screening that have already been shown to be involved in ligand binding.
# However, structures of protein-ligand complexes are not available for all targets.
# In this case, either complex structures can be generated by modeling the ligand into the target binding site, e.g. via molecular docking, or pharmacophore modeling methods can be invoked that only use the target binding site to detect potential protein-ligand interaction sites.
#
# *Ligand-based pharmacophore models* are based on a set of ligands known to bind the target under investigation. The common chemical features of these ligands build the pharmacophore model. This method is used for targets with multiple known ligands and in case of missing protein-ligand complex structures. In this talktorial, we will use ligand-based pharmacophore modeling using a set of known EGFR ligands.
#
# For more information on pharmacophore modeling, we recommend ([Pharmacophore Perception and Applications: Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 259-82](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6f)) and ([<i>J. Chem. Inf. Model.</i> (2005), <b>45</b>, 160-9](http://pubs.acs.org/doi/pdf/10.1021/ci049885e)).
#
# <img src="images/target+ligand+pharma_3x_feather.png" align="above" alt="Image cannot be shown" width="400">
# <div align="center"> Figure 1: Structure-based pharmacophore representing protein-ligand interactions (figure by <NAME>).</div>
#
# ### Virtual screening with pharmacophores
#
# As described earlier in **talktorial 4**, virtual screening (VS) describes the screening of a query (e.g. here in **talktorial 9** a pharmacophore model or in **talktorial 4** a query compound) against a large library of compounds, in order to identify those small molecules (in the library) that are most likely to bind a target under investigation (represented by the query). In pharmacophore-based virtual screening, the compound library is matched compound-by-compound into a pharmacophore model and ranked by the best matching results ([Structure-Based Virtual Screening: Applied Chemoinformatics, Wiley-VCH Verlag GmbH & Co. KGaA, Weinheim, (2018), **1**, 313-31](https://onlinelibrary.wiley.com/doi/10.1002/9783527806539.ch6h)).
#
# ### Clustering: k means
#
# In this talktorial, we will generate an ensemble pharmacophore by clustering the feature points of several ligand-based pharmacophores. The clustering algorithm used is the k means clustering, which aims to cluster a data set into k clusters:
#
# 1. k different centroids are selected and each point of the data set is assigned to its closest centroids.
# 2. New centroids are calculated based on the current clusters and each point of the data set is newly assigned to its closest centroids.
# 3. This procedure is repeated until the centroids are stable.
#
# ([K means wikipedia](https://de.wikipedia.org/wiki/K-Means-Algorithmus))
# + [markdown] nbpresent={"id": "ff3cfaf6-567f-4d95-8ee6-f489c6b77d96"}
# ## Practical
# + nbpresent={"id": "cd835152-951a-4233-9752-c613475177ab"}
import os, glob
# RDKit
from rdkit import RDConfig, Chem, Geometry, DistanceGeometry
from rdkit.Chem import ChemicalFeatures, rdDistGeom, Draw, rdMolTransforms, AllChem
from rdkit.Chem.Draw import IPythonConsole, DrawingOptions
from rdkit.Chem.Pharm3D import Pharmacophore, EmbedLib
from rdkit.Numerics import rdAlignment
IPythonConsole.ipython_useSVG=True
# PyMOL related
from rdkit.Chem import PyMol
from pymol import *
import time # Needed for waiting a second
from PIL import Image # For export the image to disk
import collections
import pandas as pd
import math
from sklearn import datasets, cluster
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from collections import Counter # For handling the labels
import operator
# -
# ### Get pre-aligned ligands from previous talktorial
#
# We retrieve all ligands that were aligned in the previous talktorial.
#
# First, we get the file paths to all ligand PDB files.
# + nbpresent={"id": "12cac0a1-ddd6-4a2c-b21a-f69d18e98919"}
# Collect the file paths of all pre-aligned ligand PDB files.
mol_files = []
for file in glob.glob("../data/T8/*_lig.pdb"):
    mol_files.append(file)
mol_files
# -
# Derive the PDB ID from each file name, e.g. ".../1XYZ_lig.pdb" -> "1XYZ".
# os.path.basename is used instead of splitting on "/" so this also works
# with the backslash-separated paths glob returns on Windows.
pdb_ids = [os.path.basename(i).split("_")[0] for i in mol_files]
pdb_ids
# Second, we read all ligands from these PDB files using RDKit.
# + nbpresent={"id": "42814bc4-7ab7-4a4c-971b-21f4c7df9656"}
mols = []
for mol_file in mol_files:
mol = Chem.MolFromPDBFile(mol_file, removeHs=False)
if mol is None:
print(mol_file, 'could not be read')
else:
Chem.SanitizeMol(mol)
print(Chem.MolToSmiles(mol))
mols.append(mol)
rangeMols = range(1, len(mols)+1)
print('Number of molecules: ', len(mols))
# + nbpresent={"id": "f4747057-291c-4ec5-b3b3-6db07d56d9b5"}
Draw.MolsToGridImage(mols, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])
# -
# We encounter a problem here: When loading ligands from a PDB file, RDKit does not assign e.g. aromatic rings to the ligand. We use the RDKit function `AssignBondOrdersFromTemplate`, which assigns bonds to a molecule based on a reference molecule, e.g. in our case based on the SMILES pattern of the molecule.
#
# Check for further information: ([RDKit discussion on "Aromaticity of non-protein molecules in PDB not detected"](https://github.com/rdkit/rdkit/issues/1031)) and ([RDKit documentation on `AssignBondOrdersFromTemplate`](http://rdkit.org/docs/source/rdkit.Chem.AllChem.html#AssignBondOrdersFromTemplate)).
# +
# Load SMILES for PDB ligand structures
ligs = pd.read_csv("../data/T8/PDB_top_ligands.csv", sep="\t")
# Get SMILES in the same order as in pdb_ids
ligs_smiles = [ligs[ligs["@structureId"]==pdb_id]["smiles"].values[0] for pdb_id in pdb_ids]
# Generate RDKit Mol object from SMILES
refmols = [Chem.MolFromSmiles(smiles) for smiles in ligs_smiles]
# Assign bond orders to molecules (mols) based on SMILES patterns (refmols)
mols = [AllChem.AssignBondOrdersFromTemplate(refmol, mol) for refmol, mol in zip(refmols, mols)]
# -
Draw.MolsToGridImage(mols, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])
# We can also have a look at the molecules in 2D (we copy the molecules for this example to keep the original coordinates).
mols_2D = []
for mol in mols:
tmp=Chem.Mol(mol)
AllChem.Compute2DCoords(tmp)
mols_2D.append(tmp)
Draw.MolsToGridImage(mols_2D, molsPerRow=4, legends=["From PDB ID: "+i for i in pdb_ids])
# + [markdown] nbpresent={"id": "d5eff4a8-0f9d-4493-8429-496ebee43102"}
# ### Start PyMol
#
# We start PyMol within the terminal.
# + nbpresent={"id": "745bab97-c916-4324-ad98-6eed0c309fff"}
# Open PyMol in shell
os.popen('pymol -R')
# -
# Note: If no separate PyMol window opens at this stage, something may be wrong with your PyMol installation and needs to be fixed first! You can also try to manually start PyMol by typing `pymol -R` in your shell.
# We need to wait until PyMol is launched completely. Then we can link PyMol to the Jupyter notebook via RDKit: `objPMV = PyMol.MolViewer()`. Check general functionalities of PyMol integration with RDKit on ([rdkit.Chem.PyMol documentation](http://rdkit.org/docs/source/rdkit.Chem.PyMol.html)).
# +
# Error handling: wait until PyMol is loaded
nrTry = 0 # Number of current attempt
ttw = 10 # Time to wait in seconds
while nrTry < ttw: # Try until PyMol is loaded and the object can be saved
nrTry += 1
try:
objPMV = PyMol.MolViewer() # Save the PyMol object
break # Stop the loop when PyMol is loaded
except ConnectionRefusedError: # Exception handling if PyMol is not loaded yet
time.sleep(1) # Wait...
if nrTry == ttw: # After ttw trys: print error message
print("Error: PyMol did not start correctly.\n" +
"Try again and/or check if PyMol is installed completely.")
# -
# This is the RDKit PyMol object, which we will be using in the following in order to control PyMol from within the Jupyter notebook:
objPMV
# The two most important commands of the RDKit PyMol integration are:
# * `objPMV.ShowMol(object, name, showOnly)` to load the object in PyMol
# * `objPMV.GetPNG(h=height)` to show the figure in the jupyter notebook
# + [markdown] nbpresent={"id": "a6f67949-7456-404f-8aae-d00535774585"}
# ### Show ligands with PyMol
#
# We show all ligands (pre-aligned in previous talktorial) with PyMol.
# We load each molecule individually into PyMol and set a unique PyMol name (m1, m2, ...).
# + nbpresent={"id": "443dc920-abf9-4468-9f69-b0cfdb5a8dd7"}
# Load each ligand into PyMol under a unique object name (m1, m2, ...).
# enumerate replaces the zip-with-rangeMols pattern; the original's
# trailing `i += 1` was dead code (i is reassigned each iteration).
for i, mol in enumerate(mols, start=1):
    objPMV.ShowMol(mol, name='m%d'%i, showOnly=False)
# -
# All ligands should be visible now in the PyMol window. We display a picture of the PyMol window in this talktorial via retrieving a *png* picture.
objPMV.GetPNG(h=300)
# Next, we define a function from the steps described above.
# We add some styling PyMol commands. You can pass PyMol commands from RDKit to PyMol using:
#
# `objPMV.server.do("any_pymol_command")`
def visualize_ligands(objPMV, molecules):
    '''
    Show all input molecules in PyMol (within the Jupyter notebook).

    Parameters:
        objPMV: RDKit PyMol.MolViewer handle to a running PyMol instance.
        molecules: iterable of RDKit Mol objects with 3D coordinates.

    Returns the PyMol window as a PNG for display in the notebook and
    saves a high-resolution copy to ../data/T9/ligands.png.
    '''
    # Initialize PyMol in order to remove all previous objects
    objPMV.server.do("reinitialize")
    # Load the ligands one by one under unique object names (mol_1, mol_2, ...).
    # enumerate replaces the original zip/rangeMols pattern, whose trailing
    # `i += 1` was dead code.
    for i, mol in enumerate(molecules, start=1):
        objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False)
        toStickCmd='cmd.show("sticks","mol_'+str(i)+'")'
        objPMV.server.do(toStickCmd)
    # Turn camera
    objPMV.server.do("turn x, -40")
    # Set background to white
    objPMV.server.do("bg_color white")
    # Zoom in on ligands
    objPMV.server.do("zoom")
    # Turn on ray tracing for better image quality
    objPMV.server.do("ray 1800, 1000")
    # Export as PNG file
    outputPNG = objPMV.GetPNG(w=1800, h=1000)
    outputPNG.save("../data/T9/ligands.png")
    # Display in Jupyter notebook
    return objPMV.GetPNG(h=300)
visualize_ligands(objPMV, mols)
# + [markdown] nbpresent={"id": "664c7a54-f015-4cf2-be85-53a3c4e2ba26"}
# ### Extract pharmacophore features
#
# As described above, the aim of this talktorial is to generate a ligand-based ensemble pharmacophore from a set of ligands.
# First, we need to extract pharmacophore features per ligand.
# Therefore, we load a feature factory (with the default feature definitions).
#
# See also [rdkit docu on chemical features and pharmacophores](https://rdkit.readthedocs.io/en/latest/GettingStartedInPython.html#chemical-features-and-pharmacophores).
# + nbpresent={"id": "429a52f1-4203-4a49-a84c-951881850a49"}
ffact = AllChem.BuildFeatureFactory(os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'))
# -
# We take a look at the pharmacophore features that are implemented in RDKit:
list(ffact.GetFeatureDefs().keys())
# As an example, we get all feature for an example molecule.
# + nbpresent={"id": "90070fb5-064b-4bf6-ba19-11cd9f8be392"}
m1 = mols[0]
feats = ffact.GetFeaturesForMol(m1)
print('Number of features found:',len(feats))
# -
# The type (in RDKit called family) of a feature can be retrieved with `GetFamily()`.
feats[0].GetFamily()
# We get the frequency of features types for our example molecule.
feats_freq = collections.Counter([x.GetFamily() for x in feats])
feats_freq
# We apply the functions shown above to all molecules in our ligand set. We display the frequency of feature types per molecule as DataFrame.
# +
# Get feature type frequency per molecule
mols_feats_freq = []
for i in mols:
feats = [x.GetFamily() for x in ffact.GetFeaturesForMol(i)]
feats_freq = collections.Counter(feats)
mols_feats_freq.append(feats_freq)
# Show data as DataFrame
p = pd.DataFrame(mols_feats_freq, index=["m"+str(i) for i in range(1, len(mols)+1)]).fillna(0).astype(int)
p.transpose()
# -
# Furtheron, we concentrate in this talktorial only on the following feature types: hydrogen bond acceptors (acceptors), hydrogen bond donors (donors), and hydrophobic contacts (hydrophobics).
#
# We retrieve the feature RDKit objects per feature type and per molecule.
# + nbpresent={"id": "f9137113-0afc-4729-adec-34d5f3b68c4a"}
acceptors = []
donors = []
hydrophobics = []
for i in mols:
acceptors.append(ffact.GetFeaturesForMol(i, includeOnly='Acceptor'))
donors.append(ffact.GetFeaturesForMol(i, includeOnly='Donor'))
hydrophobics.append(ffact.GetFeaturesForMol(i, includeOnly='Hydrophobe'))
features = {"donors": donors,
"acceptors": acceptors,
"hydrophobics": hydrophobics}
# -
# ### Show the pharmacophore features of all ligands
#
# Pharmacophore feature types usually are displayed in defined colors, e.g. usually hydrogen bond donors, hydrogen bond acceptors, and hydrophobic contacts are colored green, red, and yellow, respectively.
feature_colors = {"donors": (0,0.9,0), # Green
"acceptors": (0.9,0,0), # Red
"hydrophobics": (1,0.9,0)} # Yellow
# RDKit's PyMol integration allows us to draw spheres (representing pharmacophore features) in PyMoL using the following command:
#
# `objPMV.server.sphere(loc, sphereRad, colors[i], label, 1)`
def visualize_features(objPMV, molecules, feature_type, features, feature_color):
    '''
    Show all input molecules together with all input features
    (drawn as small spheres) in PyMOL.

    A high-resolution PNG of the PyMOL window is written to disc and a
    smaller version is returned for inline display in the Jupyter notebook.
    The PyMOL session is reinitialized first, so no previous objects remain.
    '''
    # Start from a clean PyMOL session
    objPMV.server.do("reinitialize")
    n_features = sum(len(mol_feats) for mol_feats in features)
    print("Number of " + feature_type + " in all ligands: " + str(n_features))
    # Draw every ligand and render it as sticks
    for idx, mol in enumerate(molecules, start=1):
        objPMV.ShowMol(mol, name='mol_%d' % idx, showOnly=False)
        objPMV.server.do('cmd.show("sticks","mol_' + str(idx) + '")')
    # Draw every pharmacophore feature as a small sphere, labeled per molecule
    for mol_idx, mol_features in enumerate(features):
        for feature in mol_features:
            position = list(feature.GetPos())
            sphere_label = feature_type + '_%d' % (mol_idx + 1)
            objPMV.server.sphere(position, 0.5, feature_color, sphere_label, 1)
    # Camera and styling
    objPMV.server.do("turn x, -40")
    objPMV.server.do("bg_color white")
    objPMV.server.do("zoom")
    # Ray tracing for better image quality
    objPMV.server.do("ray 1800, 1000")
    # Save a high-resolution PNG to disc ...
    objPMV.GetPNG(w=1800, h=1000).save("../data/T9/ligands_features_" + feature_type + ".png")
    # ... and return a small one for the notebook
    return objPMV.GetPNG(h=300)
# We use this function to visualize the features for the feature types under consideration.
# #### Hydrogen bond donors
feature_type = "donors"
visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])
# #### Hydrogen bond acceptors
feature_type = "acceptors"
visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])
# #### Hydrophobic contacts
feature_type = "hydrophobics"
visualize_features(objPMV, mols, feature_type, features[feature_type], feature_colors[feature_type])
# ### Collect coordinates of features per feature type
#
# Since we want to cluster features (per feature type), we now collect all coordinates of features (per feature type).
features_coord = {"donors": [list(item.GetPos()) for sublist in features["donors"] for item in sublist],
"acceptors": [list(item.GetPos()) for sublist in features["acceptors"] for item in sublist],
"hydrophobics": [list(item.GetPos()) for sublist in features["hydrophobics"] for item in sublist]}
# Now, we have the positions of e.g. all acceptor features:
features_coord["acceptors"]
# + [markdown] nbpresent={"id": "158eefe4-423d-4245-bf13-a2c043badad9"}
# ### Generate ensemble pharmacophores
#
# In order to generate ensemble pharmacophores, we use k-means clustering to cluster features per feature type.
# -
# #### Set static parameters for k-means clustering
#
# `kq`: With this parameter, we determine the number of clusters `k` per feature type depending on the number of feature points, i.e. per feature type:
#
# `k` = number_of_features / `kq`
# k quotient (kq) used to determine k in k-means: k = number of feature points / kq
# kq should be selected so that k (feature clusters) is for all clusters at least 1 and not larger than 4-5 clusters
kq = 7
# #### Set static parameters for cluster selection
#
# `min_cluster_size`: We only want to retain clusters that potentially contain features from most molecules in our ligand ensemble. Therefore, we set this variable to 75% of the number of molecules in our ligand ensemble.
#
# `top_cluster_number`: With this parameter, we select only the largest cluster.
# +
# Threshold for clustering: number = percentage of threshold value
min_cluster_size = int(len(mols) * 0.75)
# Show only top features
top_cluster_number = 4
# + [markdown] nbpresent={"id": "d6398f57-7ee2-4e9c-bc19-271c975c5882"}
# #### Define k-means clustering and cluster selection functions
#
# We define a function that calculates the centers of clusters, which are derived from k-means clustering.
# -
def clustering(feature_coord, kd):
    '''
    Compute a k-means clustering of the input feature coordinates.

    Parameters
    ----------
    feature_coord : list of [x, y, z] feature positions to cluster.
    kd : "k quotient" used to derive the number of clusters
         (k = number of features / kd, rounded up).

    Returns
    -------
    The fitted KMeans object.
    '''
    # Define parameter k as feature number divided by the "k quotient".
    # Bug fix: use the kd argument instead of silently reading the global kq
    # (the parameter was previously ignored).
    k = math.ceil(len(feature_coord) / kd)
    k = 2 if k == 1 else k # Tailor-made adaption of k for hydrophobics in the example of this talktorial
    print('Clustering: \nVariable k in k-means: %d of %d points\n'%(k, len(feature_coord)))
    # Initialize of k-means
    k_means = cluster.KMeans(n_clusters=k)
    # Compute the k-means clustering
    k_means.fit(feature_coord)
    # Return the clusters
    return k_means
# We define a function that sorts the clusters by size and outputs a list of indices of the largest clusters.
# + nbpresent={"id": "2d15b7c3-846f-4c64-b454-bde719d5aaff"}
def get_clusters(k_means, min_cluster_size, top_cluster_number):
    '''
    Inspect a fitted k-means clustering and pick the relevant clusters:
    * read the cluster label of every feature
    * count cluster sizes and order cluster indices by decreasing size
    * keep clusters that are at least min_cluster_size large,
      up to top_cluster_number of them
    * return the indices of the kept clusters
    '''
    labels = k_means.labels_
    print('Cluster labels for all features: \n%s\n'% labels)
    label_counter = Counter(labels)
    print('Cluster label counter: \n%s\n'% label_counter)
    # most_common() yields (cluster_index, cluster_size) pairs, largest first
    ranked_clusters = label_counter.most_common()
    print('Sorted cluster label counters: \n%s\n'% ranked_clusters)
    # Keep at most top_cluster_number clusters that are larger than the threshold
    selected_indices = []
    remaining = top_cluster_number
    for cluster_index, cluster_size in ranked_clusters:
        if cluster_size >= min_cluster_size and remaining > 0:
            selected_indices.append(cluster_index)
            remaining -= 1
    print('Cluster indices of selected clusters: \n%s\n'% selected_indices)
    return selected_indices
# -
# #### Cluster features
#
# For each feature type, we perform the k-means clustering with our defined `clustering` function.
k_means = {"donors": clustering(features_coord["donors"], kq),
"acceptors": clustering(features_coord["acceptors"], kq),
"hydrophobics": clustering(features_coord["hydrophobics"], kq)}
# #### Select relevant clusters
#
# For each feature type, we select relevant clusters with our defined `get_clusters` function.
print("Hydrogen bond donors\n")
cluster_indices_sel_don = get_clusters(k_means["donors"], min_cluster_size, top_cluster_number)
print("Hydrogen bond acceptors\n")
cluster_indices_sel_acc = get_clusters(k_means["acceptors"], min_cluster_size, top_cluster_number)
print("Hydrophobic contacts\n")
cluster_indices_sel_h = get_clusters(k_means["hydrophobics"], min_cluster_size, top_cluster_number)
cluster_indices_sel = {"donors": cluster_indices_sel_don,
"acceptors": cluster_indices_sel_acc,
"hydrophobics": cluster_indices_sel_h}
# #### Get selected cluster coordinates
def get_selected_cluster_center_coords(k_means, cluster_indices_sel, feature_type):
    '''
    Look up the cluster center coordinates of the selected clusters
    (given by their indices) for one feature type.
    '''
    # All cluster centers of this feature type, as a pandas Series of
    # coordinate lists so that elements can be picked by arbitrary indices
    centers = pd.Series(k_means[feature_type].cluster_centers_.tolist())
    # Keep only the centers whose cluster index was selected
    selected = centers[cluster_indices_sel[feature_type]]
    return list(selected)
cluster_centers_sel = {"donors": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "donors"),
"acceptors": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "acceptors"),
"hydrophobics": get_selected_cluster_center_coords(k_means, cluster_indices_sel, "hydrophobics")}
cluster_centers_sel["acceptors"]
# ### Show clusters
#
# Per feature type, we visualize cluster centers alongside with all molecules and all feature points.
def visualize_clusters(objPMV, molecules, feature_type, features, cluster_centers_sel, feature_color):
    '''
    This function displays
    * all input molecules,
    * all input features as spheres, and
    * the resulting cluster centers in PyMoL.
    A png picture from the PyMoL window is loaded into the Jupyter notebook and saved as file to disc.

    Parameters
    ----------
    objPMV : RDKit PyMol viewer connected to a running PyMOL server.
    molecules : iterable of RDKit molecules to display.
    feature_type : str, feature type name (used for sphere labels and the output file name).
    features : list (one entry per molecule) of lists of RDKit feature objects.
    cluster_centers_sel : list of [x, y, z] coordinates of the selected cluster centers.
    feature_color : (r, g, b) tuple used for both feature and cluster spheres.

    Returns
    -------
    PNG image for inline display in the notebook.
    '''
    # Initialize PyMol in order to remove all previous objects
    objPMV.server.do("reinitialize")
    print("Number of " + feature_type + " in all ligands: " + str(sum([len(i) for i in features])))
    # Load ligands
    rangeMols = range(1, len(molecules)+1)
    for mol, i in zip(molecules, rangeMols):
        objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False)
        toStickCmd='cmd.show("sticks","mol_'+str(i)+'")'
        objPMV.server.do(toStickCmd)
        i += 1  # NOTE(review): has no effect — zip reassigns the loop variable on every iteration
    # Load features (small spheres, radius 0.5, one label per molecule)
    for i in range(len(features)):
        for feature in features[i]:
            loc = list(feature.GetPos())
            sphere_radius = 0.5
            label = feature_type + '_%d'%(i+1)
            objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1) # show the sphere (pharmacophore feature)
    # Load clusters (larger spheres, radius 1, same color as the features)
    for i in range(len(cluster_centers_sel)):
        loc = cluster_centers_sel[i]
        sphere_radius = 1
        label = feature_type + '_c%d'%(i+1)
        objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1)
    # Turn camera
    objPMV.server.do("turn x, -40")
    # Set PyMol styling
    objPMV.server.do("bg_color white")
    objPMV.server.do("zoom")
    objPMV.server.do("ray 1800, 1000")
    # Export as PNG file
    outputPNG = objPMV.GetPNG(w=1800, h=1000)
    outputPNG.save("../data/T9/ligands_features_clusters_"+feature_type+".png")
    # Display in Jupyter notebook
    return objPMV.GetPNG(h=300)
# #### Hydrogen bond donors
feature_type = "donors"
visualize_clusters(objPMV, mols, feature_type, features[feature_type],
cluster_centers_sel[feature_type],
feature_colors[feature_type])
# #### Hydrogen bond acceptor
feature_type = "acceptors"
visualize_clusters(objPMV, mols, feature_type, features[feature_type],
cluster_centers_sel[feature_type],
feature_colors[feature_type])
# #### Hydrophobic contacts
feature_type = "hydrophobics"
visualize_clusters(objPMV, mols, feature_type, features[feature_type],
cluster_centers_sel[feature_type],
feature_colors[feature_type])
# ### Show ensemble pharmacophore
#
# In this last step, we combine the clustered pharmacophoric features (i.e. hydrogen bond donors and acceptors as well as hydrophobic contacts), to one ensemble pharmacophore, representing the pharmacophoric properties of the four selected ligands.
# +
# Build the ensemble pharmacophore view: all ligands plus the selected
# cluster centers of every feature type, each in its feature-type color.
# Initialize PyMol in order to remove all previous objects
objPMV.server.do("reinitialize")
# Load ligands
rangeMols = range(1, len(mols)+1)
for mol, i in zip(mols, rangeMols):
    objPMV.ShowMol(mol, name='mol_%d'%i, showOnly=False)
    toStickCmd='cmd.show("sticks","mol_'+str(i)+'")'
    objPMV.server.do(toStickCmd)
    i += 1  # NOTE(review): has no effect — zip reassigns the loop variable on every iteration
# Load clusters (one sphere per selected cluster center, colored per feature type)
for feature_type in cluster_indices_sel.keys():
    centers = cluster_centers_sel[feature_type]
    for i in range(len(centers)):
        loc = centers[i]
        sphere_radius = 1
        feature_color = feature_colors[feature_type]
        label = feature_type + '_c%d'%(i+1)
        objPMV.server.sphere(loc, sphere_radius, feature_color, label, 1)
# Turn camera
objPMV.server.do("turn x, -40")
# Set PyMol styling
objPMV.server.do("bg_color white")
objPMV.server.do("zoom")
objPMV.server.do("ray 1800, 1000")
# Export as PNG file
outputPNG = objPMV.GetPNG(w=1800, h=1000)
outputPNG.save("../data/T9/ligands_ensemble_ph4.png")
# Display in Jupyter notebook
objPMV.GetPNG(h=300)
# -
# ## Discussion
#
# In this talktorial, we used a set of pre-aligned ligands, which are known to bind EGFR, to generate an ensemble pharmacophore model. This model could now be used for virtual screening against a large library of small molecules, in order to find novel small molecules that show the observed steric and physico-chemical properties and might therefore also bind to the EGFR binding site.
#
# Before screening, the pharmacophore models are usually further optimized, e.g. features might be omitted in order to reduce the number of features for screening based on biological knowledge (some interaction might be reported as important whereas others are not) or based on chemical expertise.
#
# We do not cover the virtual screening in this talktorial, however refer to an excellent tutorial by <NAME>, demonstrating pharmacophore modeling and virtual screening with RDKit ([RDKit UGM 2016 on GitHub](https://github.com/rdkit/UGM_2016/blob/master/Notebooks/Stiefl_RDKitPh4FullPublication.ipynb)).
#
# We used k-means clustering to cluster pharmacophore features. This clustering approach has the disadvantage that the user needs to define the number of clusters beforehand, which is usually based on a visual inspection of the point distribution before clustering (or during cluster refinement) and is therefore a hindrance to automated pharmacophore generation. Density-based clustering methods (also in combination with k-means clustering) can be a solution for this.
#
# ## Quiz
#
# 1. Explain the terms pharmacophoric features and pharmacophore.
# 2. Explain the difference between structure- and ligand-based pharmacophore modeling.
# 3. Explain how we derived an ensemble pharmacophore.
| talktorials/9_ligand_based_pharmacophores/T9_ligand_based_pharmacophores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import sampler
run -i sampler.py
sampler = Sampler(z_dim = 4, c_dim = 1, scale = 8.0, net_size = 32)
sampler.generate()
sampler.show_image()
img = sampler.generate()
sampler.show_image(img)
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.0 64-bit ('3.7')
# name: python3
# ---
# ## CartPole Skating
#
# > **Problem**: If Peter wants to escape from the wolf, he needs to be able to move faster than him. We will see how Peter can learn to skate, in particular, to keep balance, using Q-Learning.
#
# First, let's install the gym and import required libraries:
# +
import sys
# !pip install gym
import gym
import matplotlib.pyplot as plt
import numpy as np
import random
# -
# ## Create a cartpole environment
env = gym.make("CartPole-v1")
print(env.action_space)
print(env.observation_space)
print(env.action_space.sample())
# To see how the environment works, let's run a short simulation for 100 steps.
# +
env.reset()
for i in range(100):
env.render()
env.step(env.action_space.sample())
env.close()
# -
# During simulation, we need to get observations in order to decide how to act. In fact, `step` function returns us back current observations, reward function, and the `done` flag that indicates whether it makes sense to continue the simulation or not:
# +
env.reset()
done = False
while not done:
env.render()
obs, rew, done, info = env.step(env.action_space.sample())
print(f"{obs} -> {rew}")
env.close()
# -
# We can get min and max value of those numbers:
print(env.observation_space.low)
print(env.observation_space.high)
# ## State Discretization
def discretize(x):
    """Map a continuous CartPole observation to a discrete state tuple.

    Each of the four observation components is divided by a hand-picked
    bin width and truncated to an integer, so nearby observations share
    the same discrete state (usable as a Q-Table key).
    """
    # Bug fix: np.int was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype and behaves identically here.
    return tuple((x/np.array([0.25, 0.25, 0.01, 0.1])).astype(int))
# Let's also explore other discretization method using bins:
# +
def create_bins(i,num):
return np.arange(num+1)*(i[1]-i[0])/num+i[0]
print("Sample bins for interval (-5,5) with 10 bins\n",create_bins((-5,5),10))
ints = [(-5,5),(-2,2),(-0.5,0.5),(-2,2)] # intervals of values for each parameter
nbins = [20,20,10,10] # number of bins for each parameter
bins = [create_bins(ints[i],nbins[i]) for i in range(4)]
def discretize_bins(x):
return tuple(np.digitize(x[i],bins[i]) for i in range(4))
# -
# Let's now run a short simulation and observe those discrete environment values.
# +
env.reset()
done = False
while not done:
#env.render()
obs, rew, done, info = env.step(env.action_space.sample())
#print(discretize_bins(obs))
print(discretize(obs))
env.close()
# -
# ## Q-Table Structure
# +
# Q-Table: maps (state, action) pairs to learned values; unseen pairs default to 0
Q = {}
actions = (0, 1)  # the two CartPole actions: push left / push right

def qvalues(state):
    """Return the list of Q-values for every action in the given state."""
    return [Q.get((state, action), 0) for action in actions]
# -
# ## Let's Start Q-Learning!
# hyperparameters
alpha = 0.3
gamma = 0.9
epsilon = 0.90
# +
def probs(v, eps=1e-4):
    """Turn a vector of Q-values into a probability distribution.

    Shifts the values so they are all positive (eps prevents an
    all-zero vector) and normalizes them to sum to 1.
    """
    shifted = v - v.min() + eps
    return shifted / shifted.sum()
# Q-learning main loop: run full CartPole episodes and update the Q-Table
# after every step with the temporal-difference rule.
Qmax = 0
cum_rewards = []
rewards = []
for epoch in range(100000):
    obs = env.reset()  # NOTE(review): classic gym API (reset returns obs only); newer gym returns (obs, info) — confirm gym version
    done = False
    cum_reward=0
    # == do the simulation ==
    while not done:
        s = discretize(obs)
        if random.random()<epsilon:
            # exploitation - chose the action according to Q-Table probabilities
            v = probs(np.array(qvalues(s)))
            a = random.choices(actions,weights=v)[0]
        else:
            # exploration - randomly chose the action
            a = np.random.randint(env.action_space.n)
        obs, rew, done, info = env.step(a)
        cum_reward+=rew
        ns = discretize(obs)
        # TD update: move Q(s, a) toward the observed reward plus the
        # discounted best value of the next state
        Q[(s,a)] = (1 - alpha) * Q.get((s,a),0) + alpha * (rew + gamma * max(qvalues(ns)))
    cum_rewards.append(cum_reward)
    rewards.append(cum_reward)
    # == Periodically print results and calculate average reward ==
    if epoch%5000==0:
        print(f"{epoch}: {np.average(cum_rewards)}, alpha={alpha}, epsilon={epsilon}")
        if np.average(cum_rewards) > Qmax:
            Qmax = np.average(cum_rewards)
            Qbest = Q  # NOTE(review): stores a reference, not a copy — Qbest keeps changing as Q is updated; confirm this is intended
        cum_rewards=[]
# -
# ## Plotting Training Progress
plt.plot(rewards)
# From this graph, it is not possible to tell anything, because due to the nature of stochastic training process the length of training sessions varies greatly. To make more sense of this graph, we can calculate **running average** over series of experiments, let's say 100. This can be done conveniently using `np.convolve`:
# +
def running_average(x, window):
    """Smooth x with a simple moving average of the given window size."""
    kernel = np.ones(window) / window
    return np.convolve(x, kernel, mode='valid')
plt.plot(running_average(rewards,100))
# -
# ## Varying Hyperparameters and Seeing the Result in Action
#
# Now it would be interesting to actually see how the trained model behaves. Let's run the simulation, and we will be following the same action selection strategy as during training: sampling according to the probability distribution in Q-Table:
obs = env.reset()
done = False
while not done:
s = discretize(obs)
env.render()
v = probs(np.array(qvalues(s)))
a = random.choices(actions,weights=v)[0]
obs,_,done,_ = env.step(a)
env.close()
#
# ## Saving result to an animated GIF
#
# If you want to impress your friends, you may want to send them the animated GIF picture of the balancing pole. To do this, we can invoke `env.render` to produce an image frame, and then save those to animated GIF using PIL library:
from PIL import Image
obs = env.reset()
done = False
i=0
ims = []
while not done:
s = discretize(obs)
img=env.render(mode='rgb_array')
ims.append(Image.fromarray(img))
v = probs(np.array([Qbest.get((s,a),0) for a in actions]))
a = random.choices(actions,weights=v)[0]
obs,_,done,_ = env.step(a)
i+=1
env.close()
ims[0].save('images/cartpole-balance.gif',save_all=True,append_images=ims[1::2],loop=0,duration=5)
print(i)
| 8-Reinforcement/2-Gym/solution/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regularization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import scale
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
import warnings
warnings.filterwarnings("ignore")
# Bug fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
data_credit = pd.read_csv("C:\\Users\\apoghosyan\\Desktop\\YSU_ML\\ISLR_Data\\Hitters.csv")
data_credit = data_credit.dropna()
print(data_credit.shape)
print(data_credit.columns)
data_credit = data_credit.drop(['Unnamed: 0'], axis = 1)
print(data_credit.columns)
data_credit.head(5)
# +
cat_list = ["League","Division","NewLeague"]
df_dummies= pd.get_dummies(data_credit[cat_list], drop_first = True)
print(df_dummies.head())
add = pd.concat([data_credit, df_dummies], axis = 1)
add.drop(cat_list, inplace = True, axis = 1)
print(add.head())
X = add.drop("Salary", axis = 1)
names = X.columns
y = add.Salary
# -
# ## Ridge Regression
names
# +
ridge = Ridge()
ridge.set_params(alpha = 0)
ridge.fit(scale(X),y)
print("Ridge Coefficients -> ", ridge.coef_)
coef_list = ridge.coef_
print(np.sum(coef_list**2))
print("score->", ridge.score(scale(X),y))
lin = LinearRegression()
lin.fit(scale(X),y)
print("Linear Regression Coefficients ->", lin.coef_)
print(np.sum(coef_list**2))
print(lin.score(scale(X),y))
# -
ridge = Ridge()
ridge.set_params(alpha = 0.400170056022409)
ridge.fit(scale(X),y)
print(ridge.coef_)
coef_list = ridge.coef_
print(np.sum(coef_list**2))
print("score ->", ridge.score(scale(X),y))
ridge = Ridge()
ridge.set_params(alpha = 100)
ridge.fit(scale(X),y)
print(ridge.coef_)
coef_list = ridge.coef_
print(np.sum(coef_list**2))
print("score -> ", ridge.score(scale(X),y))
ridge = Ridge()
ridge.set_params(alpha = 1000000)
ridge.fit(scale(X),y)
print(ridge.coef_)
coef_list = ridge.coef_
print(np.sum(coef_list**2))
print("score ->", ridge.score(scale(X),y))
# +
alphas = np.linspace(0,200,1000)
ridge = Ridge()
coef_list = []
for a in alphas:
ridge.set_params(alpha = a)
ridge.fit(scale(X),y)
coef_list.append(ridge.coef_)
coef_list
plt.figure(figsize = (15,12))
plt.plot(alphas, coef_list)
plt.legend(names)
plt.show()
# -
# ## Lasso
# +
alphas = np.linspace(0.0001,200,1000)
lasso = Lasso( max_iter=100000)
coef_list = []
for a in alphas:
lasso.set_params(alpha = a)
lasso.fit(scale(X),y)
coef_list.append(lasso.coef_)
coef_list
plt.figure(figsize = (15,12))
plt.plot(alphas, coef_list)
plt.legend(names)
plt.show()
# -
# ## Ridge CV
alphas = np.linspace(0.00001,500,2500)
ridgecv = RidgeCV(alphas = alphas, scoring = 'neg_mean_squared_error', cv = 10)
ridgecv.fit(scale(X),y)
ridgecv.alpha_
ridgecv.coef_
ridgecv.score(scale(X),y)
pd.Series(ridgecv.coef_.flatten(), index = names)
plt.figure(figsize = (15,8))
plt.plot(ridgecv.predict(scale(X)),color = "red")
plt.xlim(0,100)
plt.plot(y)
# ## Lasso CV
alphas = np.linspace(0.001,100,2500)
lassocv = LassoCV(alphas = alphas, cv = 10, max_iter = 100000, verbose = True)
lassocv.fit(scale(X),y)
lassocv.alpha_
lassocv.coef_
lassocv.score(scale(X),y)
pd.Series(lassocv.coef_.flatten(), index = names)
plt.figure(figsize = (15,8))
plt.plot(lassocv.predict(scale(X)),color = "red")
plt.xlim(0,100)
plt.plot(y)
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 42)
X_train.shape
X_test.shape
X.shape
X_test.shape[0]/X.shape[0]
| Class/Regression/L2_Regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Advanced Wind Turbine Simulation Workflow
#
# * A more advanced turbine workflow will include many steps (Which have been showcased in previous examples)
# * The process exemplified here includes:
# 1. Weather data extraction from a MERRA dataset (windspeed, pressure, temperature)
# 2. Spatial adjustment of the windspeeds
# 3. Vertical projection of the wind speeds
# 4. Wind speed density correction
# 5. Power curve convolution
# 6. Capacity Factor Estimation
# +
import reskit as rk
from reskit import windpower
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# # Simulate a Single Location
# +
# Set some constants for later
TURBINE_CAPACITY = 4200 # kW
TURBINE_HUB_HEIGHT = 120 # meters
TURBINE_ROTOR_DIAMETER = 136 # meters
TURBINE_LOCATION = (6.0,50.5) # (lon, lat)
# +
# 1. Create a weather source, load, and extract weather variables
src = rk.weather.sources.MerraSource(rk._TEST_DATA_["weather_data"], bounds=[5,49,7,52], verbose=False)
src.loadWindSpeed()
src.loadPressure()
src.loadTemperature()
raw_windspeeds = src.get("windspeed", locations=TURBINE_LOCATION, interpolation='bilinear')
raw_pressure = src.get("pressure", locations=TURBINE_LOCATION, interpolation='bilinear')
raw_temperature = src.get("air_temp", locations=TURBINE_LOCATION, interpolation='bilinear')
print(raw_windspeeds.head())
# +
# 2. Adjust wind speeds to turbine location
spatially_adjusted_windspeeds = rk.weather.windutil.adjustLraToGwa(windspeed=raw_windspeeds,
targetLoc=TURBINE_LOCATION,
gwa=rk._TEST_DATA_['gwa50-like.tif'],
longRunAverage=src.LONG_RUN_AVERAGE_50M_SOURCE,
interpolation='bilinear')
spatially_adjusted_windspeeds.head()
# +
# 3. Vertically project wind speeds to hub height
roughness = rk.weather.windutil.roughnessFromCLC(clcPath=rk._TEST_DATA_['clc-aachen_clipped.tif'],
loc=TURBINE_LOCATION)
projected_windspeed = rk.weather.windutil.projectByLogLaw(measuredWindspeed=spatially_adjusted_windspeeds,
measuredHeight=50, # The MERRA dataset offers windspeeds at 50m
targetHeight=TURBINE_HUB_HEIGHT,
roughness=roughness)
print(projected_windspeed.head())
# +
# 4. Apply density correction
pressure_corrected_windspeeds = rk.weather.windutil.densityAdjustment(windspeed=projected_windspeed,
pressure=raw_pressure,
temperature=raw_temperature,
height=TURBINE_HUB_HEIGHT)
pressure_corrected_windspeeds.head()
# +
# 5 Power curve estimation and convolution
power_curve = windpower.SyntheticPowerCurve(capacity=TURBINE_CAPACITY, rotordiam=TURBINE_ROTOR_DIAMETER)
convoluted_power_curve = windpower.convolutePowerCurveByGuassian(powerCurve=power_curve, stdScaling=0.06, stdBase=0.1)
# +
# 6 Capacity factor estimation
capacity_factors = windpower.simulateTurbine(windspeed=pressure_corrected_windspeeds,
powerCurve=convoluted_power_curve)
capacity_factors.head()
# -
capacity_factors.plot()
plt.show()
# # Simulate multiple locations at once (recommended)
# +
TURBINE_CAPACITY = 4200 # kW
TURBINE_HUB_HEIGHT = 120 # meters
TURBINE_ROTOR_DIAMETER = 136 # meters
TURBINE_LOCATION = [(6.25,51.), (6.50,51.), (6.25,50.75)] # (lon,lat)
# 1
raw_windspeeds = src.get("windspeed", locations=TURBINE_LOCATION, interpolation='bilinear')
raw_pressure = src.get("pressure", locations=TURBINE_LOCATION, interpolation='bilinear')
raw_temperature = src.get("air_temp", locations=TURBINE_LOCATION, interpolation='bilinear')
#2
spatially_adjusted_windspeeds = rk.weather.windutil.adjustLraToGwa(windspeed=raw_windspeeds,
targetLoc=TURBINE_LOCATION,
gwa=rk._TEST_DATA_['gwa50-like.tif'],
longRunAverage=src.LONG_RUN_AVERAGE_50M_SOURCE,
interpolation='bilinear')
# 3
roughness = rk.weather.windutil.roughnessFromCLC(clcPath=rk._TEST_DATA_['clc-aachen_clipped.tif'],
loc=TURBINE_LOCATION)
projected_windspeed = rk.weather.windutil.projectByLogLaw(measuredWindspeed=spatially_adjusted_windspeeds,
measuredHeight=50, # The MERRA dataset offers windspeeds at 50m
targetHeight=TURBINE_HUB_HEIGHT,
roughness=np.array(roughness))
# 4
pressure_corrected_windspeeds = rk.weather.windutil.densityAdjustment(windspeed=projected_windspeed,
pressure=raw_pressure,
temperature=raw_temperature,
height=TURBINE_HUB_HEIGHT)
# 5
power_curve = windpower.SyntheticPowerCurve(capacity=TURBINE_CAPACITY, rotordiam=TURBINE_ROTOR_DIAMETER)
convoluted_power_curve = windpower.convolutePowerCurveByGuassian(powerCurve=power_curve, stdScaling=0.06, stdBase=0.1)
# 6
capacity_factors = windpower.simulateTurbine(windspeed=pressure_corrected_windspeeds,
powerCurve=convoluted_power_curve)
# Print result
capacity_factors.head()
# -
capacity_factors.plot()
plt.show()
| examples/3.05b-WindPower-Turbine_Simulation_Workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import StockDataManager as sm
settings = sm.Settings()
jobmgr = sm.JobManager(settings)
import TimeSeries as ts
tsmgr = ts.TimeSeries(settings)
data = tsmgr.get_stock_series(['300382', '603008'], start='2015-01-01', fields=['open', 'close'])
# +
import matplotlib.pyplot as plt
# %matplotlib inline
data['close'].plot()
# -
# # CSV loading
# +
import pandas as pd
price = pd.read_csv('/Users/jianboxue/Downloads/sh600000.csv')
# -
import Data.StockDataManager as sm
settings = sm.Settings()
price.to_sql('yc', settings.get_mysql_conn(), if_exists='append')
| Trading_Strategies/Data/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dgpg
# language: python
# name: dgpg
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import os
import matplotlib.pyplot as plt
from numpy import exp, abs, log
from metrics import *
import universal as up
from universal import tools, algos
from universal.algos import *
sns.set_context("notebook")
plt.rcParams["figure.figsize"] = (10, 5)
# +
metrics_lst =['cumulative_return(rt_v)', 'daily_return(rt_v)',
'cumulative_return_fee(rt_v, x_vec, paras["c"])', 'daily_return_fee(rt_v, x_vec, paras["c"])',
'max_redraw(rt_v)', 'sharpe_ratio(rt_v, paras["rf"])', 'volatility(rt_v)', 'turnover(rt_v)']
paras = {'c':0.0025, 'rf':0.001}
# -
# ## data
# filename='./stock_data/artificial/arma_0.005_1.csv'
# filename='./stock_data/artificial/arma_0.005_2.csv'
# filename='./stock_data/artificial/arma_-0.005_1.csv'
filename='./stock_data/artificial/arma_-0.005_2.csv'
df_close = pd.read_csv(filename, index_col=0)
df_close=df_close.iloc[:, :20]
data = df_close[:1301].to_numpy()
for i in list(range(0, len(data)))[::-1]:
data[i, :] = data[i, :] / data[0, :]
data = data[:1300]
data = pd.DataFrame(data)
# plot first three of them as example
ax=pd.DataFrame(data).iloc[:,:].plot(legend=False, figsize=(10,4))
ax.set(xlabel='Trading Period', ylabel='Relative Price')
fig=ax.get_figure()
# fig.savefig('syn3.eps')
data
# ## baselines
# ### BAH
# +
# set algo parameters
algo = algos.BAH()
# run
result = algo.run(data)
# -
print(result.summary())
print('Total wealth:', result.total_wealth)
ax = result.plot(weights=False, assets=False, ucrp=True, logy=False);
# ### UCRP
# +
# set algo parameters
algo = algos.CRP()
# run
result = algo.run(data)
# -
print(result.summary())
print('Total wealth:', result.total_wealth)
# ax = result.plot(weights=False, assets=False, ucrp=True, logy=False);
# ### PAMR
# +
# set algo parameters
algo = algos.PAMR()
# run
result = algo.run(data)
# -
result.fee = 0.0
print(result.summary())
print('Total wealth:', result.total_wealth)
ax = result.plot(weights=False, assets=False, ucrp=True, logy=False);
# ### OLMAR
# +
# set algo parameters
algo = algos.OLMAR(window=5, eps=10)
# run
result = algo.run(data)
# -
result.fee = 0.0
print(result.summary())
print('Total wealth:', result.total_wealth)
ax = result.plot(weights=False, assets=False, ucrp=True, logy=False);
| baselines-synthetic-fee0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intesive training
#
# Here I will try to train the convolutional neural network as much as possible and I will hope that the result will be more accurate than when trained during active learning.
import h5py
from IPython.display import SVG
from keras import callbacks, utils
import numpy as np
import pandas as pd
from active_cnn import model
from active_cnn import performance
from active_cnn import plot
# load the training data saved by the last (17th) active-learning iteration
with h5py.File('data/run-09-11-2018/learning-data.h5', 'r') as f:
    it_grp = f['iteration_17']
    X = it_grp['X'][...]            # unlabelled pool of spectra
    ids = it_grp['ids'][...]        # identifiers matching the rows of X
    X_tr = it_grp['X_tr'][...]      # labelled training spectra
    y_tr = it_grp['y_tr'][...]      # integer class labels (3 classes)
    ids_tr = it_grp['ids_tr'][...]
    labels_old = it_grp['labels'][...]  # predictions from active learning
# encode the vector of labels into one-hot-encoded vector
one_hot_y_tr = utils.to_categorical(y_tr, num_classes=3)
one_hot_y_tr.shape
# Stop once training loss plateaus and restore the best weights.
# NOTE: min_delta=10e-5 is 1e-4, not 1e-5.
callback = callbacks.EarlyStopping(
    monitor='loss',
    min_delta=10e-5,
    patience=100,
    restore_best_weights=True
)
# NOTE(review): `cnn` is never constructed in this notebook -- presumably it
# should be built via the `active_cnn.model` module imported above; confirm
# before running.
cnn.fit(
    X_tr.reshape(-1, 140, 1),
    one_hot_y_tr,
    epochs=1000,
    callbacks=[callback],
    verbose=1
)
cnn.save('candidates/cnn.h5')
# classify the whole pool; each spectrum has 140 flux samples
y = cnn.predict(X.reshape(-1, 140, 1), verbose=1, batch_size=2 ** 14)
labels = np.argmax(y, axis=1)
np.unique(labels, return_counts=True)
# prediction from active learning, for comparison
np.unique(labels_old, return_counts=True)
# ## Estimate Performance
#
# Because there are no true labels in LAMOST data, the accuracy needs to be estimated. Here I use confidence interval to estimate the uncertainty in the estimate. I provide the 95% lower bound.
#
# ### Double-peak
# candidates the CNN classified as double-peak (class 2)
double_peak_ids = ids[labels == 2]
double_peak_ids.shape
np.random.shuffle(double_peak_ids)
# generator so that re-running the preview cell walks through the shuffled ids
shuffled_double_peak_ids = (f for f in double_peak_ids)
double_peak_sample = []
# visually inspect the next spectrum, then record 1 (correct) or 0 (wrong)
plot.preview_lamost_spectrum(next(shuffled_double_peak_ids), 2)
double_peak_sample.append(1)
double_peak_sample.append(0)
performance.lower_confidence_limit(double_peak_sample)
# ### Emission
# candidates the CNN classified as emission (class 1)
emission_ids = ids[labels == 1]
emission_ids.shape
np.random.shuffle(emission_ids)
shuffled_emission_ids = (f for f in emission_ids)
emission_sample = []
plot.preview_lamost_spectrum(next(shuffled_emission_ids), 1)
emission_sample.append(1)
emission_sample.append(0)
performance.lower_confidence_limit(emission_sample)
# ## Candidates
# +
# Build the candidate catalogue from the CNN predictions
# (label != 0 means emission or double-peak).
cans = pd.DataFrame(ids[labels != 0], columns=['filename'])


def _lamost_path(filename):
    # LAMOST FITS files live in a directory named after the plan/date part
    # of the file name (the token between the second '-' and '_sp').
    directory = filename.split('-')[2].split('_sp')[0]
    return '/lamost/fits/' + directory + '/' + filename


cans['path'] = cans['filename'].map(_lamost_path)
cans['label'] = labels[labels != 0]
cans['label'] = cans['label'].replace({1: 'emission', 2: 'double-peak'})
cans = cans.drop('filename', axis=1)
# add spectra confirmed by the human oracle during active learning
# NOTE(review): other cells use 'run-09-11-2018' (zero-padded month) --
# confirm this path is correct.
oracle_df = pd.read_csv('data/run-9-11-2018/oracle-df.csv')
oracle_df = oracle_df[oracle_df['correct'] != 'not-interesting']
# FIX: DataFrame.append was removed in pandas 2.0; build the oracle rows
# as one frame and concatenate once instead of appending row by row.
oracle_rows = pd.DataFrame({
    'path': oracle_df['identifier'].map(_lamost_path),
    'label': oracle_df['correct'],
})
cans = pd.concat([cans, oracle_rows], ignore_index=True)
cans.to_csv('candidates.csv', index=False)
cans.shape
| intensive-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yohanesnuwara/66DaysOfData/blob/main/D01_PCA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yoB1IWeXiU6J"
# # Principal Component Analysis
# + id="xAdFrg2lPsT0"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits, fetch_lfw_people
from sklearn.preprocessing import StandardScaler
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="GlnBhshvQNWR" outputId="c491e7ff-d169-41f9-9325-5ec52b652a65"
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 5), rng.randn(5, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="QJdRa0K-Si2M" outputId="17242cc5-5de6-46d4-de71-7a1e8c7ef1e2"
pca = PCA(n_components=2)
pca.fit(X)
# + [markdown] id="Nv3vu6Y8WcHX"
# PCA components are called eigenvectors.
# + colab={"base_uri": "https://localhost:8080/"} id="F-fqeC1ySsMW" outputId="d4a15df0-7888-4d1e-8ac3-8844dafc8729"
print(pca.components_)
print(pca.explained_variance_)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="2ekcRV4zTW2b" outputId="c3a582fb-c77b-4816-fe09-1e9f376ffd02"
def draw_vector(v0, v1, ax=None):
    """Draw an arrow from point *v0* to point *v1* on *ax* (default: current axes)."""
    if ax is None:
        ax = plt.gca()
    style = {'arrowstyle': '->', 'linewidth': 2, 'shrinkA': 0, 'shrinkB': 0}
    ax.annotate('', v1, v0, arrowprops=style)
# plot data with the principal axes overlaid; arrow length is proportional
# to the standard deviation (sqrt of the explained variance)
plt.scatter(X[:, 0], X[:, 1])
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');
# + [markdown] id="3Wc5c3w6W0TA"
# ## PCA to reduce dimension.
# + colab={"base_uri": "https://localhost:8080/"} id="7-3dZqcXUaM9" outputId="6d86406e-615f-44b2-d8c0-a23bf28a8b6c"
# Project the 2-D data onto its single dominant component.
pca = PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape:   ", X.shape)
print("transformed shape:", X_pca.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="KnRjduyMUIyG" outputId="07483301-c66e-4829-acad-d890d5464e00"
# Map the 1-D projection back to 2-D: the reconstruction lies on the first
# principal axis (information along the discarded axis is lost).
X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)
plt.axis('equal')
plt.show()
# + [markdown] id="bsLNYz0LXblH"
# ## PCA for digit classification.
# + colab={"base_uri": "https://localhost:8080/"} id="YbA41B4WW_9X" outputId="2b6465d9-bdaf-419d-e7d5-5db97aeeaac9"
# 8x8 digit images flattened to 64 features per sample
digits = load_digits()
print(digits.data.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="aeZOq3HsXnBT" outputId="57de4023-c703-42b9-8390-38cc075d1adf"
pca = PCA(2)  # project from 64 to 2 dimensions
projected = pca.fit_transform(digits.data)
print(digits.data.shape)
print(projected.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="I-apsb0yXvRX" outputId="198b3132-7ac9-4931-f683-7d020b720150"
# Scatter the 2-D projection coloured by digit class (10 discrete colours).
# NOTE(review): plt.cm.get_cmap was removed in matplotlib 3.9 -- on newer
# matplotlib use plt.get_cmap('jet', 10) instead.
plt.scatter(projected[:, 0], projected[:, 1],
            c=digits.target, edgecolor='none', alpha=0.5,
            cmap=plt.cm.get_cmap('jet', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar()
plt.show()
# + [markdown] id="cytMfei9hgBC"
# Here, PCA can be used to approximate a digit. For instance, a 64-pixel image can be approximated by a dimensionality reduced 8-pixel image. Reconstructing using PCA as a basis function:
#
# $$image(x)=mean+x1⋅(basis 1)+x2⋅(basis 2)+x3⋅(basis 3)⋯$$
# + id="NI-FSqNQaGTO"
def plot_pca_components(x, coefficients=None, mean=0, components=None,
                        imshape=(8, 8), n_components=8, fontsize=12,
                        show_mean=True):
    """Visualise *x* as mean + sum of coefficient-weighted basis components.

    The top row shows the basis images, the bottom row the running
    approximation after each component is added.  With the defaults the
    image is reconstructed from the standard (identity) basis.
    Returns the created matplotlib figure.
    """
    if coefficients is None:
        coefficients = x
    if components is None:
        # Standard basis: each "component" is a single pixel.
        components = np.eye(len(coefficients), len(x))
    mean = np.zeros_like(x) + mean
    fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
    g = plt.GridSpec(2, 4 + bool(show_mean) + n_components, hspace=0.3)
    def show(i, j, x, title=None):
        # Draw one image cell at grid position (i, j).
        ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
        ax.imshow(x.reshape(imshape), interpolation='nearest', cmap='binary')
        if title:
            ax.set_title(title, fontsize=fontsize)
    # Left: the true image, spanning both rows.
    show(slice(2), slice(2), x, "True")
    approx = mean.copy()
    counter = 2
    if show_mean:
        show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
        show(1, 2, approx, r'$1 \cdot \mu$')
        counter += 1
    for i in range(n_components):
        # Accumulate one more weighted component into the approximation.
        approx = approx + coefficients[i] * components[i]
        show(0, i + counter, components[i], r'$c_{0}$'.format(i + 1))
        show(1, i + counter, approx,
             r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
        if show_mean or i > 0:
            plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
                           transform=plt.gca().transAxes, fontsize=fontsize)
    # Right: the final approximation, spanning both rows.
    show(slice(2), slice(-2, None), approx, "Approx")
    return fig
# + colab={"base_uri": "https://localhost:8080/", "height": 181} id="3V8FQ3q_afub" outputId="1b30261b-1cd8-4624-f0a3-38ce19edf244"
# Show how the 4th digit is rebuilt from 8 weighted principal components.
pca = PCA(n_components=8)
Xproj = pca.fit_transform(digits.data)
fig = plot_pca_components(digits.data[3], Xproj[3],
                          pca.mean_, pca.components_, show_mean=False)
# + [markdown] id="yrbypxMfbSov"
# Choose the optimum number of components. 20 is good to account for over 90% of variance.
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="O0Ug1XEEbNEA" outputId="59859980-de62-4340-991f-111c7a8d4649"
# Cumulative explained-variance curve used to pick the component count.
pca = PCA().fit(digits.data)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()
# + [markdown] id="DEDCHW4Sico_"
# ## PCA for noise filtering
# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="m6fCBVBQd3kH" outputId="7f52accf-519e-41d3-ebb6-ce8bb28e31ce"
def plot_digits(data):
    """Show the first 40 rows of *data* (64-pixel digit images) on a 4x10 grid."""
    fig, axes = plt.subplots(4, 10, figsize=(10, 4),
                             subplot_kw={'xticks': [], 'yticks': []},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for idx, axis in enumerate(axes.flat):
        image = data[idx].reshape(8, 8)
        axis.imshow(image, cmap='binary', interpolation='nearest',
                    clim=(0, 16))
plot_digits(digits.data)
# + [markdown] id="5WNjlF5enl5Y"
# Add random noise.
# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="RFUJ6mybnYFl" outputId="62711d68-05fb-498f-90a3-3da6e5778c76"
# Corrupt the images with Gaussian noise (standard deviation 5).
np.random.seed(42)
noisy = np.random.normal(digits.data, 5)  # Tweak this number as level of noise
plot_digits(noisy)
# + [markdown] id="r1eIQ6naqat0"
# Make the PCA preserve 50% of the variance. There are 12 components the most fit one.
# + colab={"base_uri": "https://localhost:8080/"} id="Ud5XOKompQIQ" outputId="a3017954-7436-4960-9e5d-7b878afb9539"
# PCA(0.50) keeps just enough components to explain 50% of the variance.
pca = PCA(0.50).fit(noisy)
print(pca.n_components_)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="VpU3w-uAqe0l" outputId="79211fb3-ec2c-48e4-f596-f9df02dd28e6"
# See the number of components given % preservations
x = np.linspace(0.1, 0.9, 19)
comp = [(PCA(i).fit(noisy)).n_components_ for i in x]
plt.plot(x, comp)
plt.xlabel('Preservation')
plt.ylabel('Number of components fit')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="vsnQb_xlpaPw" outputId="385efc8c-94dc-40ca-fc57-d50285cde449"
# Reconstructing from the reduced representation acts as a denoiser: the
# discarded low-variance components hold mostly noise.
components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
# + [markdown] id="gvMTjJuX-vlJ"
# ## Eigenfaces
# + colab={"base_uri": "https://localhost:8080/"} id="wL62lMFe6OpV" outputId="48666e39-6314-4919-961b-41ee2b2cee5d"
# LFW faces restricted to people with at least 60 images each.
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
# + [markdown] id="D38OesVM_aEm"
# There are 3,000 dimensions. Take a look at first 150 components.
# + colab={"base_uri": "https://localhost:8080/"} id="ObQJsf2__ZC3" outputId="950c2622-343b-4647-ca0d-ea5c4c561dba"
pca = PCA(150)
pca.fit(faces.data)
# + [markdown] id="0GWehEM7WU6L"
# Look at the first 24 components (eigenvectors or "eigenfaces").
# + colab={"base_uri": "https://localhost:8080/", "height": 252} id="zo9NhC-0_9NW" outputId="2fb39665-f618-4ada-a774-d8bd4ba443c7"
# Each component is reshaped back to the 62x47 image grid.
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
    ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone')
# + [markdown] id="BZjKtYlPA2fk"
# 150 is good to account for 90% of variance. Using these 150 components, we would recover most of the essential characteristics of the data.
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="8VAiIskRAnpG" outputId="5c2b519c-5f7a-4674-f7e0-a7213719e048"
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
# + id="sLxZxpx3F1xy"
# Compute the components and projected faces
pca = PCA(150).fit(faces.data)
components = pca.transform(faces.data)
projected = pca.inverse_transform(components)
# + [markdown] id="wYbBAXnYXGTE"
# Reconstructing the full 3,000 pixel input image reduced to 150.
# + colab={"base_uri": "https://localhost:8080/", "height": 171} id="G7bDmaaWF464" outputId="c69c580d-665f-4408-a7d5-3d795b096853"
# Plot the results: originals (top row) vs 150-component reconstructions
# (bottom row)
fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),
                       subplot_kw={'xticks':[], 'yticks':[]},
                       gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i in range(10):
    ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')
    ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')
ax[0, 0].set_ylabel('full-dim\ninput')
ax[1, 0].set_ylabel('150-dim\nreconstruction');
# + [markdown] id="8NTyw0Ydd2Ge"
# ## Feature selection
# + colab={"base_uri": "https://localhost:8080/", "height": 261} id="pWkMKoNPj5Ql" outputId="2b99cc3b-9e5c-49ca-f647-1d95faad6eba"
# UCI Wine dataset: 13 chemical measurements, first column is the class.
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases/wine/wine.data',
                 header=None)
df.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
              'Alcalinity of ash', 'Magnesium', 'Total phenols',
              'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
              'Color intensity', 'Hue',
              'OD280/OD315 of diluted wines', 'Proline']
df.head()
# + id="uwASWMEzkB5B"
# Standardise the features before PCA (PCA is scale sensitive).
X, y = df.iloc[:, 1:], df.iloc[:, 0]
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
# + id="meQh35RHkb44"
pca=PCA()
Xt = pca.fit_transform(X_std)
# + colab={"base_uri": "https://localhost:8080/"} id="ym923CZAkk2n" outputId="16ecbf27-f295-4588-cadb-062f7370303d"
pca.explained_variance_ratio_
# + [markdown] id="TdzM2fBGlM0j"
# From the bar plot below, 6 features are important, until it reaches 90% of variance (red curve).
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="740vgjJOkojP" outputId="ba92fe99-83b7-47d8-b534-374b9531de9e"
# Per-component variance (bars) with its cumulative sum (red step).
plt.bar(range(1,14),pca.explained_variance_ratio_,label='Variance Explained')
plt.step(range(1,14),np.cumsum(pca.explained_variance_ratio_),label='CumSum Variance Explained',c='r')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
# + [markdown] id="9aIxMD2Pc-gv"
# References:
#
# * https://jakevdp.github.io/PythonDataScienceHandbook/05.10-manifold-learning.html
# * https://github.com/dishaaagarwal/Dimensionality-Reduction-Techniques
# * Other resources:
# * https://www.ritchieng.com/machine-learning-dimensionality-reduction-feature-transform/
# * https://medium.com/analytics-vidhya/implementing-pca-in-python-with-sklearn-4f757fb4429e
| D01_PCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from sympy import *
init_printing()
def printcode(vs, es, names):
    # Emit the CSE temporaries (vs) followed by the named output
    # expressions (es) as indented assignment source lines.
    # NOTE: Python 2 print statements -- the notebook kernel is Python 2.
    for var, val in vs:
        print ' %s = %s' % (var, val)
    for i, val in enumerate(es):
        print ' %s = %s' % (names[i], val)
# +
# Landmark state (just its position)
lx, ly = symbols("l_x l_y")
X = Matrix([lx, ly])
# Particle state variables
x, y, theta = symbols("p_x p_y theta", real=True)
# Bearing measurement of landmark
def bearing_measurement():
    # Symbolic bearing from the particle at (x, y, theta) to the
    # landmark at (lx, ly), in the particle's frame.
    s, c = sin(theta), cos(theta)
    # Note: the Y coordinate of the rotation matrix is flipped here
    # because the relative bearing of the cone w.r.t. the center of the camera is flipped
    # or something, I forget
    R = Matrix([[c, s], [s, -c]])
    lo = R * Matrix([lx - x, ly - y])
    return Matrix([atan2(lo[1], lo[0])])
h_x_bearing = bearing_measurement()
# Measured horizontal pixel position of the landmark in the camera image.
l_px = symbols("l_px")
h_z_bearing = Matrix([l_px])
z_bearing = Matrix([l_px])
R_bearing = symbols("lm_R")
def generate_measurement_1d(X, h_x, h_z, z_k):
    # Derive the scalar-measurement EKF update for landmark state X:
    # innovation y_k, innovation variance S, measurement Jacobian H and a
    # log-likelihood LL, emitted as common-subexpression (CSE) code.
    H = h_x.jacobian(X)
    M = h_z.jacobian(z_k) + h_x.jacobian(z_k)
    y_k = h_z - h_x
    #vs, es = cse([y_k, H, M], optimizations='basic',
    #             symbols=numbered_symbols("k"))
    #return vs, es
    # Symmetric 2x2 landmark covariance P and scalar measurement noise r.
    p11, p12, p22 = symbols("p11 p12 p22")
    r = symbols("r")
    P = Matrix([[p11, p12], [p12, p22]])
    S = H*P*H.T + Matrix([[r]])
    # NOTE(review): a Gaussian log-likelihood would be -y^2/(2S) - log(2*pi*S)/2;
    # here the quadratic term is not halved -- confirm this weighting is intended.
    LL = -y_k[0,0]**2/S[0,0] - 0.5*log((2*pi)**2 * S[0,0])
    K = P*H.T / S[0,0]
    Pnew = (eye(2) - K*H)*P
    Pvec = Matrix([Pnew[0,0], Pnew[0,1], Pnew[1,1]])
    #vs, es = cse([y_k, K, LL, Pvec], optimizations='basic',
    #             symbols=numbered_symbols("k"))
    vs, es = cse([y_k[0], S[0,0], H[0,0], H[0,1], LL], # optimizations='basic',
                 symbols=numbered_symbols("k"))
    return vs, es, ["y_k", "S", "H1", "H2", "LL"]
printcode(*generate_measurement_1d(X, h_x_bearing, h_z_bearing, z_bearing))
# +
def generate_xP():
    # Derive the EKF state and covariance update given a precomputed
    # innovation y_k, Jacobian H = [H1 H2] and innovation variance S.
    p11, p12, p22, H1, H2, S, yk = symbols("p11 p12 p22 H1 H2 S y_k")
    P = Matrix([[p11, p12], [p12, p22]])
    H = Matrix([[H1, H2]])
    K = P*H.T / S            # Kalman gain
    dx = K*yk                # state correction
    Pnew = (eye(2) - K*H)*P  # a-posteriori covariance
    vs, es = cse([dx[0], dx[1], Pnew[0, 0], Pnew[0,1], Pnew[1,1]], optimizations='basic',
                 symbols=numbered_symbols("k"))
    return vs, es, ["dx", "dy", "p11", "p12", "p22"]
printcode(*generate_xP())
# -
| design/coneslam/rbekf-deriv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# ## Define couopled ODE for Cash-Karp
def dfdx(x, f):
    """Right-hand side of y'' = -y written as a first-order system.

    The state vector is f = [y, dy/dx]; the return value is
    [dy/dx, d2y/dx2] = [f[1], -f[0]].  The abscissa x is unused
    (the system is autonomous) but kept for the integrator's signature.
    """
    derivs = np.zeros_like(f)
    derivs[0] = f[1]
    derivs[1] = -f[0]
    return derivs
# ## Cash-Karp Core
def ck_core_mv(x_i, y_i, nv, h, f):
    """Take one Cash-Karp RK45 step of size h for nv coupled ODEs.

    Parameters
    ----------
    x_i, y_i : current abscissa and state vector (length nv)
    nv       : number of ODE variables
    h        : step size
    f        : derivative function f(x, y) -> dy/dx

    Returns (ynpo, Delta): the 5th-order solution estimate and the
    element-wise |5th - 4th order| error estimate.  Index 0 of the
    tableau arrays is unused padding so indices match the usual
    1-based Butcher notation.
    """
    ni, nj = 7, 6

    # Cash-Karp Butcher tableau: nodes c_i, stage couplings a_ij,
    # 5th-order weights b_i and embedded 4th-order weights b*_i.
    ci = np.array([0., 0., 1./5., 3./10., 3./5., 1., 7./8.])
    aij = np.zeros((ni, nj))
    aij[2, 1] = 1./5.
    aij[3, 1], aij[3, 2] = 3./40., 9./40.
    aij[4, 1], aij[4, 2], aij[4, 3] = 3./10., -9./10., 6./5.
    aij[5, 1], aij[5, 2], aij[5, 3], aij[5, 4] = \
        -11./54., 5./2., -70./27., 35./27.
    aij[6, 1], aij[6, 2], aij[6, 3], aij[6, 4], aij[6, 5] = \
        1631./55296., 175./512., 575./13824., 44275./110592., 253./4096.
    bi = np.array([0., 37./378., 0., 250./621., 125./594., 0., 512./1771.])
    bis = np.array([0., 2825./27648., 0., 18575./48384., 13525./55296.,
                    277./14336., 1./4.])

    # Evaluate the six stage derivatives k_i.
    ki = np.zeros((ni, nv))
    for i in range(1, ni):
        xn = x_i + ci[i]*h
        yn = y_i.copy()
        for j in range(1, i):
            yn += aij[i, j]*ki[j, :]
        ki[i, :] = h*f(xn, yn)

    # Combine the stages into the 5th-order (ynpo) and the embedded
    # 4th-order (ynpos) solution estimates.
    ynpo = y_i.copy()
    ynpos = y_i.copy()
    for i in range(1, ni):
        ynpo += bi[i]*ki[i, :]
        ynpos += bis[i]*ki[i, :]

    # The error estimate is the difference between the two orders.
    Delta = np.fabs(ynpo - ynpos)
    return ynpo, Delta
# ## Adaptive Step Size Driver
def ck_mv_ad(dfdx, x_i, y_i, nv, h, tol):
    """Take one adaptive Cash-Karp step, shrinking h until the error <= tol.

    Returns (y_ipo, h_new, h_step): the accepted solution, the suggested
    size for the *next* step, and the step size actually taken.
    """
    # Define safety scale
    SAFETY = 0.9
    H_NEW_FAC = 2.0  # never grow the next step by more than this factor
    # Set max # of iterations
    imax = 1000
    # Set iteration variable
    i = 0
    # Seed the error above tol so the loop body runs at least once.
    Delta = np.full(nv, 2*tol)
    # Remember step
    h_step = h
    # Shrink the step until the largest component error is within tol.
    while(Delta.max()/tol > 1.0):
        # Get new y and error estimate
        y_ipo, Delta = ck_core_mv(x_i, y_i, nv, h_step, dfdx)
        # If error too large, take smaller step
        if(Delta.max()/tol > 1.0):
            # Error too large, decrease step (standard RK45 exponent -1/4)
            h_step *= SAFETY*(Delta.max()/tol)**(-0.25)
        # Check iteration
        if(i >= imax):
            print("Too many iterations in ck_mv_ad()")
            raise StopIteration("Ending after i = ", i)
        # Iterate
        i += 1
    # Next time, try bigger step (capped at H_NEW_FAC times the current one)
    h_new = np.fmin(h_step*(Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
    # Return answer and step info
    return y_ipo, h_new, h_step
def ck_mv(dfdx, a, b, y_a, tol, verbose = False):
    """Integrate y' = dfdx(x, y) from a to b with adaptive Cash-Karp RK45.

    dfdx is deriv wrt x, a is lower bound, b is upper bound,
    y_a are boundary conditions at a, tol is the per-step tolerance.
    Returns (x, y): abscissae and the solution, one row per step.
    """
    # Define starting step
    xi = a
    yi = y_a.copy()
    # Define initial starting step
    h = 1.0e-4*(b-a)
    # Set max # iterations and iteration variable
    imax = 1000
    i = 0
    # number of variables
    nv = len(y_a)
    # Set initial conditions
    x = np.full(1,a)
    y = np.full((1,nv), y_a)
    # Set flag
    flag = True
    # Loop till upper bound b reached
    while(flag):
        # Calculate y_i+1, step info
        y_ipo, h_new, h_step = ck_mv_ad(dfdx, xi, yi, nv, h, tol)
        # Update step for next time
        h = h_new
        # Prevent overshoot of upper bound
        if(xi+h_step > b):
            # Limit step to end at b
            h = b - xi
            # Recompute y_i+1
            y_ipo, h_new, h_step = ck_mv_ad(dfdx, xi, yi, nv, h, tol)
            # Done
            flag = False
        # Update values
        xi += h_step
        yi = y_ipo.copy()
        # Add the step
        x = np.append(x, xi)
        # Grow the solution history by one row (copy-and-replace; this is
        # O(n^2) over the whole integration but simple).
        y_ipo = np.zeros((len(x), nv))
        y_ipo[0:len(x)-1,:] = y[:]
        y_ipo[-1,:] = yi[:]
        del y
        y = y_ipo
        # Prevent too many iterations
        if(i > imax):
            print("Maximum iterations reached.")
            raise StopIteration("Iteration number = ", i)
        # Iterate
        i += 1
        # Output some info
        if(verbose):
            s = "i = %3d\tx = %9.8f\ty = %9.8f\th = %9.8f\tb = %9.8f" % (i, xi, yi[0], h_step, b)
            print(s)
        # If finished, exit
        if(xi == b):
            flag = False
    # Return answer
    return x, y
# ## Wrapper
# +
# Integrate y'' = -y on [0, 2*pi] with y(0) = 0, y'(0) = 1; the exact
# solution is y = sin(x), z = y' = cos(x).
a = 0.0
b = 2.0*np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
x, y = ck_mv(dfdx, a, b, y_0, tolerance, verbose = True)
# Compare the numerical solution (markers) with the analytic one (lines).
plt.plot(x, y[:,0], 'o', label = 'y(x)')
plt.plot(x, y[:,1], 'o', label = 'z(x)')
plt.plot(x, np.sin(x), label = 'sin(x)')
plt.plot(x, np.cos(x), label = 'cos(x)')
plt.xlim([0, 2*np.pi])
plt.ylim([-1.2, 1.2])
plt.legend(frameon = False)
| astr-119-hw-6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/icarogga/Projeto-Imersao-de-Dados-Alura-3/blob/main/EDA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="fmNsHjz2MeZA"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# + [markdown] id="HvzdrTdDMeZD"
# ## Loading the experiment results data
# + id="07X0F721MeZE"
dados_resultados = pd.read_csv('https://www.dropbox.com/s/g6o6b2x77532q0c/dados_resultados.csv?dl=1')
# + id="UiHwBLtYMeZE" colab={"base_uri": "https://localhost:8080/", "height": 270} outputId="8679a277-95d4-4a5a-918f-9e1f234e98bf"
dados_resultados.head()
# + [markdown] id="yG2jO-EPMeZF"
# #### The call below gives a feel for the distribution of our data
# + id="vxIRwc_aMeZG" colab={"base_uri": "https://localhost:8080/", "height": 363} outputId="adebe4ff-2d4a-4e08-8621-1cdf0dfeb082"
dados_resultados.describe()
# + [markdown] id="5W-GumnGMeZG"
# ## Loading the experiments data
# + id="JtuzmS_HMeZH"
dados_experimentos = pd.read_csv('https://www.dropbox.com/s/5hix6c2oo43d4jj/dados_experimentos.csv?dl=1')
# + id="4MAJ-cChMeZH" colab={"base_uri": "https://localhost:8080/", "height": 253} outputId="d643e7c1-90b5-4832-a39d-2911aabcd482"
dados_experimentos.head()
# + [markdown] id="sMPEWxJ9MeZI"
# #### Understanding the distribution of the experiment data
# + id="XgZefOn4MeZI" colab={"base_uri": "https://localhost:8080/", "height": 346} outputId="e26ee422-c50f-4120-97f6-3a578e28020d"
dados_experimentos.describe()
# + [markdown] id="XGlZFPLCMeZJ"
# ### Feature engineering: add the activation flag and the activation count
# + id="GgBuFtP3MeZJ"
# n_moa = number of mechanisms of action activated in each experiment
dados_resultados['n_moa'] = dados_resultados.drop('id', axis=1).sum(axis=1)
# + id="OiY1KJ2UMeZK" colab={"base_uri": "https://localhost:8080/", "height": 270} outputId="529a1792-ab15-405d-d723-14a7ebf74f6a"
# ativo_moa = True when at least one mechanism was activated
dados_resultados['ativo_moa'] = (dados_resultados['n_moa'] != 0)
dados_resultados.head()
# + [markdown] id="exVw6wnWMeZK"
# ### Merge n_moa and ativo_moa into the experiments table so we can look at correlations
# + id="VZYnOiJ-MeZL"
dados_combinados = pd.merge(dados_experimentos, dados_resultados[['id','n_moa', 'ativo_moa']], on='id')
# + id="X2lKXYoCMeZL" colab={"base_uri": "https://localhost:8080/", "height": 253} outputId="9e70eca0-f676-4258-8e58-a6fb694a1504"
dados_combinados.head()
# + [markdown] id="hJbEhnWRMeZL"
# ### Objetivo:
#
# Com esses dados podemos tentar prever algumas informações, como por exemplo qual tipo de ativação poderá ocorrer para uma determinada assinatura de experimento, para o nosso trabalho vamos tentar prever o número de ativações para uma dada assinatura, ou seja, utilizando os dados do DataFrame, tabela dados_experimentos queremos prever a coluna n_moa do DataFrame, tabela dados_resultados
# + [markdown] id="q8ioyvphwwLq"
# ## Correlação
# + [markdown] id="YhvqLIbKwzEW"
# Antes de partirmos para a modelagem podemos tentar entender um pouco melhor nossos dados, sobretudo as correlações das colunas com a coluna que queremos prever, n_moa
# + colab={"base_uri": "https://localhost:8080/"} id="eLtwyx3WxClD" outputId="2e3fc553-fd8f-4c11-9134-e7697653b750"
# Select the numeric (float64) columns
features = dados_combinados.select_dtypes('float64').columns
features
# + colab={"base_uri": "https://localhost:8080/"} id="EyYYlJYiw89x" outputId="58be412c-036e-41d0-b4fa-a6adfc927825"
# Correlation of every numeric feature with the target n_moa, sorted.
correlation = dados_combinados[features].corrwith(dados_combinados.n_moa).sort_values(axis=0, ascending=False)
print(correlation)
print(type(dados_combinados[features].corrwith(dados_combinados.n_moa)))
# + [markdown] id="jVZJmn5JxKIy"
# From the output above, gene features such as g-100, g-731 and g-349 have the most positive correlation with the value we want to predict, while c-65, c-98 and c-17 correlate negatively, so these columns matter most for predicting the target. In the future we could build a model using only the features with the strongest positive and negative correlations. We can also inspect the correlation between these columns themselves.
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="jVqrtNzrw_4x" outputId="0c5d3ea9-e660-44bf-cbe1-92257ef92f3d"
# Pairwise absolute correlations among the strongest features and n_moa.
corr = dados_combinados[['g-100', 'g-731', 'g-349', 'c-65', 'c-98', 'c-17', 'n_moa']].corr().abs()
corr
# + colab={"base_uri": "https://localhost:8080/", "height": 717} id="WRdY4KbwyANl" outputId="d1be2e64-43f2-422d-e6ec-e5a8bf74539d"
## correlation heat map of the features selected above
import plotly.figure_factory as ff
primary_bgcolor = "#f4f0ea"
# Mask the upper triangle so each feature pair is drawn only once.
# FIX: the np.bool alias was removed in NumPy 1.24 -- use the builtin bool.
mask = np.triu(np.ones_like(corr, dtype=bool))
corr1 = corr.mask(mask)
fig = ff.create_annotated_heatmap(
    z=corr1.to_numpy().round(2),
    x=list(corr1.index.values),
    y=list(corr1.columns.values),
    xgap=3, ygap=3,
    zmin=0, zmax=1,
    colorscale='blugrn',
    colorbar_thickness=30,
    colorbar_ticklen=3,
)
fig.update_layout(
    title_text='<span style="font-size:32px; font-family:Times New Roman">Features Correlation Matrix</span>',
    font_family="Serif",
    titlefont={'size': 24},
    width=800, height=700,
    xaxis_showgrid=False,
    yaxis_showgrid=False,
    yaxis_autorange='reversed',
    paper_bgcolor=primary_bgcolor,
    plot_bgcolor=primary_bgcolor,
    margin=dict(l=70, r=70, t=70, b=70, pad=1),
)
fig.show()
# + [markdown] id="walAm1H0uEVd"
# ## Antes de montarmos nosso modelo vamos plotar a distribuição das nossas classes de ativação
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="1D3xMTKNTqib" outputId="0e4f8ff6-83cb-4007-97a9-525f0f17a88b"
# Histogram of the activation counts.
# FIX: sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot (which draws no KDE by default) is the direct replacement.
sns.histplot(x=dados_combinados['n_moa'])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="DSOtADTluL_-" outputId="7f22898f-0bd0-4607-f59c-4162c42fc0bb"
# KDE plot (kernel density estimate) of the activation counts.
# FIX: the `shade=` keyword was removed in seaborn 0.14 -- use `fill=`.
sns.kdeplot(data=dados_combinados['n_moa'], fill=True)
# + [markdown] id="L9bK4pwuuPs1"
# Podemos ver que nossos dados não são muito bem distribuídos, ou seja, não seguem uma distribuição normal e são altamente concentrados entre os números 0 e 1, portanto é bem provável que nossos modelos irão errar muito, prevendo classes que deveriam ser entre 2 a 7 como sendo algo entre 0 e 1
# + [markdown] id="eUmlg3YTTrWF"
# ### First, turn the categorical variables into columns
# + id="zOel8fHLTzml"
# One-hot encode the categorical experiment descriptors; drop the id,
# target-related columns and the drug identifier from the feature matrix.
x = dados_combinados.drop(['id', 'n_moa', 'ativo_moa', 'droga'], axis=1)
x = pd.get_dummies(x, columns=['tratamento', 'dose', 'tempo'])
# + [markdown] id="dUAWPZUBMeZO"
# ### Now split the data into train and test sets
# + id="g5jVV1v4MeZO"
from sklearn.model_selection import train_test_split
y = dados_combinados['n_moa']
# Stratify on the target so rare activation counts appear in both splits.
x_treino, x_teste, y_treino, y_teste = train_test_split(x, y, test_size = 0.2, stratify=y, random_state=42)
# + [markdown] id="FSKokOySMeZP"
# #### Para a nossa previsão vamos experimentar 3 algoritmos: RandomForestClassifier, ExtraTreesClassifier e DecisionTreeClassifier, a fim de validarmos como três modelos baseados em uma hierarquia de decisão em árvore irão se comportar, e vamos utilizar o GridSearchCV do sklearn para tunar os modelos
# + id="PAPw8xGyMeZP"
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
# + [markdown] id="AmWJiXOXMeZQ"
# ### Hyper-parameters we want to tune
# + id="I8UbHpv0MeZQ"
# Reduced grids; the commented-out lines are the fuller, slower searches.
# parameters_rfr = {'criterion': ('mse', 'mae'), 'n_estimators':[100, 200, 400], 'max_depth': [None, 5, 10, 15]}
parameters_rfr = {'n_estimators':[100, 400], 'max_depth': (None, 5, 15)}
# parameters_dtr = {'criterion': ('mse', 'mae'), 'splitter': ('best', 'random'), 'max_depth': [None, 5, 10, 15]}
parameters_dtr = {'splitter': ('best', 'random'), 'max_depth': [None, 5, 10, 15]}
#parameters_etr = {'n_estimators':[100, 200, 400], 'max_depth': [None, 5, 10, 15], 'criterion': ('gini', 'entropy')}
parameters_etr = {'n_estimators':[100, 400], 'max_depth': [None, 5, 15]}
# + [markdown] id="zakjjastMeZQ"
# ### Instantiating the models
# + id="zjuwBgO6MeZR"
rfr = RandomForestClassifier(verbose=1, n_jobs=-1)
etr = ExtraTreesClassifier(verbose=1, n_jobs=-1)
dtr = DecisionTreeClassifier()
# + [markdown] id="Wte7bqtjMeZR"
# ### Passing the parameter grids to GridSearchCV
# + id="kAhn5zjrMeZR"
rfr = GridSearchCV(rfr, parameters_rfr, verbose=3)
etr = GridSearchCV(etr, parameters_etr, verbose=3)
dtr = GridSearchCV(dtr, parameters_dtr, verbose=3)
# + id="GmebEb1WMeZS" colab={"base_uri": "https://localhost:8080/"} outputId="526fc32d-11a4-4ba0-a0b8-cf5308057f8a"
rfr.fit(x_treino, y_treino)
# + id="7pBmoLREMeZS" colab={"base_uri": "https://localhost:8080/"} outputId="c7946141-74d5-456d-d5d5-625d8038249f"
etr.fit(x_treino, y_treino)
# + id="QN4MQ2zTMeZT" colab={"base_uri": "https://localhost:8080/"} outputId="e5004a4f-350e-4433-f519-544320f78cde"
dtr.fit(x_treino, y_treino)
# + colab={"base_uri": "https://localhost:8080/", "height": 103} id="Ol18GJFEpJiI" outputId="ffd0cb3f-df79-4e6d-9a03-d7d9842da403"
'''
GridSearchCV(cv=None, error_score=nan,
estimator=RandomForestClassifier(bootstrap=True, ccp_alpha=0.0,
class_weight=None,
criterion='gini', max_depth=None,
max_features='auto',
max_leaf_nodes=None,
max_samples=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=-1,
oob_score=False,
random_state=None, verbose=1,
warm_start=False),
iid='deprecated', n_jobs=None,
param_grid={'max_depth': (None, 5, 15),
'n_estimators': [100, 400]},
pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
scoring=None, verbose=3)
GridSearchCV(cv=None, error_score=nan,
estimator=ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0,
class_weight=None, criterion='gini',
max_depth=None, max_features='auto',
max_leaf_nodes=None,
max_samples=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=-1,
oob_score=False, random_state=None,
verbose=1, warm_start=False),
iid='deprecated', n_jobs=None,
param_grid={'max_depth': [None, 5, 15],
'n_estimators': [100, 400]},
pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
scoring=None, verbose=3)
GridSearchCV(cv=None, error_score=nan,
estimator=DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=None,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
min_samples_leaf=1,
min_samples_split=2,
min_weight_fraction_leaf=0.0,
presort='deprecated',
random_state=None,
splitter='best'),
iid='deprecated', n_jobs=None,
param_grid={'max_depth': [None, 5, 10, 15],
'splitter': ('best', 'random')},
pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
scoring=None, verbose=3)
'''
# + [markdown] id="B0EE-8k0MeZT"
# ### Com os modelos treinados e tunados, vamos tentar prever os números de ativações para cada assinatura de experimento do nosso conjunto de testes
# + id="zM2h1s0QMeZU" colab={"base_uri": "https://localhost:8080/"} outputId="64d7b0c3-9428-4844-e50b-6a9e1db0f3ae"
rfr_predictions = rfr.predict(x_teste)
# + id="vZl9VHCCMeZU" colab={"base_uri": "https://localhost:8080/"} outputId="131f79c4-9075-4f6e-a3ea-797cc60cf788"
etr_predictions = etr.predict(x_teste)
# + id="-Jli7M0VMeZU"
dtr_predictions = dtr.predict(x_teste)
# + [markdown] id="NcF3007yMeZV"
# ### Agora vamos avaliar a precisão do nossos modelos comparando os reultados das predições acima com os valores reais guardados em y_test, utilizando a função accuracy_score do sklearn para calcular a precisão
# + id="-GqNLzX_MeZV" colab={"base_uri": "https://localhost:8080/"} outputId="fd557b14-12bd-4c9a-d41a-c886d4e1ccb1"
from sklearn.metrics import accuracy_score

# Accuracy of each tuned model on the held-out test set.
rfr_accuracy = accuracy_score(y_teste, rfr_predictions)
etr_accuracy = accuracy_score(y_teste, etr_predictions)
dtr_accuracy = accuracy_score(y_teste, dtr_predictions)
# Report each score (f-strings produce the same text as the original str() concatenation).
print(f"A precisão do modelo RandomForestClassifier nos dados de teste é de: {rfr_accuracy}")
print(f"\nA precisão do modelo ExtraTreesClassifier nos dados de teste é de: {etr_accuracy}")
print(f"\nA precisão do modelo DecisionTreeClassifier nos dados de teste é de: {dtr_accuracy}")
# + [markdown] id="fVwT0Ffqrc8j"
# Alcançamos um resultado muito semelhante entre os três modelos, mesmo tunando os parâmetros, logo podemos concluir que neste cenário o melhor modelo possa ser o modelo mais simples, DecisionTreeClassifier, uma vez que este performou muito próximo aos demais modelos mais complexos. Como o ganho na precisão não é relevante e o tempo de treino dos modelos é, a escolha do DecisionTreeClassifier é interessante em cenários com pouco poder de processamento e pouco tempo
# + [markdown] id="fDg_y5_tsG9A"
# ### Para entendermos melhor como nosso modelo do DecisionTreeClassifier performa, podemos ver nossa matriz de confusão para saber onde estamos errando e acertando mais e posteriormente podemos tunar nosso modelo em busca de um modelo que erre mais uma classificação e acerte mais outras
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="Y9wM2YTcsF0J" outputId="a7b0ec08-7645-429f-d8ee-91d8cfdefb21"
from sklearn.metrics import confusion_matrix

# Confusion matrix of the DecisionTree predictions, shown as a DataFrame
# whose rows/columns are labelled with the sorted unique target classes.
labels = np.unique(y_teste)
cm = confusion_matrix(y_teste, dtr_predictions, labels=labels)
pd.DataFrame(cm, index=labels, columns=labels)
# + colab={"base_uri": "https://localhost:8080/"} id="vxoNC7brtARH" outputId="969881b5-edfe-4db0-c345-e0922c8f9611"
# Class distribution of the test target (reveals the class imbalance).
y_teste.value_counts()
# + [markdown] id="4ZdxmjT7uryv"
# ## Conclusão
# + [markdown] id="zJ3eLbjxuwY7"
# Como esperado vimos que o modelo tende a predizer a maior parte das vezes um resultado entre as classes 0 e 1, tendendo mais fortemente a classificações na classe 1. Quando olhamos as assinaturas que deveriam ser previstas como 1, vimos que o modelo acerta 2487 de 2506, ou seja, errando apenas 19. Já para os números de ativações maior que 2, o modelo errou todas as predições.
# + [markdown] id="oGt945HcwBg_"
# Fica claro que uma das nossas maiores limitações é por conta do conjunto de dados altamente desbalanceado, para trabalhos futuros podemos tentar aplicar alguma forma de normalização em busca de uma distribuição mais normal ou aplicarmos algoritmos que sejam específicos para problemas de classes desbalanceadas.
# + [markdown] id="0OZPw2OMzogL"
# Além disso, podemos também posteriormente desenvolver modelos utilizando apenas as features com correlação alta com a coluna que queremos predizer
| EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: codeforecon
# language: python
# name: codeforecon
# ---
# (text-intro)=
# # Introduction to Text
#
# This chapter covers how to use code to work with text as data, including opening files with text in, changing and cleaning text, regular expressions, and vectorised operations on text.
#
# It has benefitted from the [Python String Cook Book](https://mkaz.blog/code/python-string-format-cookbook/) and Jake VanderPlas' [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/03.10-working-with-strings.html).
#
# ## An aside on encodings
#
# Before we get to the good stuff, we need to talk about string encodings. Whether you're using code or a text editor (Notepad, Word, Pages, Visual Studio Code), every bit of text that you see on a computer will have an encoding behind the scenes that tells the computer how to display the underlying data. There is no such thing as 'plain' text: all text on computers is the result of an encoding. Oftentimes, a computer programme (email reader, Word, whatever) will guess the encoding and show you what it thinks the text should look like. But it doesn't always know, or get it right: *that is what is happening when you get an email or open an file full of weird symbols and question marks*. If a computer doesn't know whether a particular string is encoded using UTF-8 or ASCII or ISO 8859-1 (Latin 1) or Windows 1252 (Western European), it simply cannot display it correctly and you get gibberish.
#
# When it comes to encodings, there are just two things to remember: i) you should use UTF-8 (aka Unicode), it's the international standard. ii) the Windows operating system tends to use either Latin 1 or Windows 1252 but (and this is good news) is moving to UTF-8.
#
# [Unicode](https://www.unicode.org/) is a specification that aims to list every character used by human languages and give each character its own unique code. The Unicode specifications are continually revised and updated to add new languages and symbols.
#
# Take special care when saving CSV files containing text on a Windows machine using Excel; unless you specify it, the text may not be saved in UTF-8. If your computer and you get confused enough about encodings and re-save a file with the wrong ones, you could lose data.
#
# Hopefully you'll never have to worry about string encodings. But if you *do* see weird symbols appearing in your text, at least you'll know that there's an encoding problem and will know where to start Googling. You can find a much more in-depth explanation of text encodings [here](https://kunststube.net/encoding/).
#
# ## Strings
#
# Note that there are many built-in functions for using strings in Python, you can find a comprehensive list [here](https://www.w3schools.com/python/python_ref_string.asp).
#
# Strings are the basic data type for text in Python. They can be of any length. A string can be signalled by quote marks or double quote marks like so:
#
# `'text'`
#
# or
#
#
# `"text"`
#
# Style guides tend to prefer the latter but some coders (ahem!) have a bad habit of using the former. We can put this into a variable like so:
var = "banana"  # a simple str variable used in the examples below
# Now, if we check the type of the variable:
type(var)
# We see that it is `str`, which is short for string.
#
# Strings in Python can be indexed, so we can get certain characters out by using square brackets to say which positions we would like.
var[:3]  # first three characters: 'ban'
# The usual slicing tricks that apply to lists work for strings too, i.e. the positions you want to get can be retrieved using the `var[start:stop:step]` syntax. Here's an example of getting every other character from the string starting from the 2nd position.
var[1::2]  # every other character from index 1: 'aaa'
# Strings do have many similarities to lists but one way in which they are different is that they are immutable. This means commands like `var[1] = "B"` will result in an error. If you want to change a single character, you will have to replace the entire string. In this example, the command to do that would be `var = "Banana"`.
#
# Like lists, you can find the length of a string using `len`:
len(var)  # number of characters: 6
# The `+` operator concatenates two or more strings:
second_word = 'panther'
first_word = 'black'
print(first_word + " " + second_word)  # `+` concatenates; " " supplies the separating space
# Note that we added a space so that the noun made sense. Another way of achieving the same end that scales to many words more efficiently (if you have them in a list) is:
#
" ".join([first_word, second_word])  # join scales better than repeated `+`
# Three useful functions to know about are `upper`, `lower`, and `title`. Let's see what they do
#
var = 'input TEXT'
var_list = [var.upper(), var.lower(), var.title()]  # ['INPUT TEXT', 'input text', 'Input Text']
print(var_list)
# ```{admonition} Exercise
# Reverse the string `"gnirts desrever a si sihT"` using indexing operations.
# ```
# While we're using `print()`, it has a few tricks. If we have a list, we can print out entries with a given separator:
#
print(*var_list, sep="; and \n")  # unpack the list; `sep` is inserted between the printed entries
# (We'll find out more about what '\n' does shortly.) To turn variables of other kinds into strings, use the `str()` function, for example
'A boolean is either ' + str(True) + ' or ' + str(False) + ', there are only ' + str(2) + ' options.'  # str() converts non-strings before concatenation
# In this example two boolean variables and one integer variable were converted to strings. `str` generally makes an intelligent guess at how you'd like to convert your non-string type variable into a string type. You can pass a variable or a literal value to `str`.
# ### f-strings
#
# The example above is quite verbose. Another way of combining strings with variables is via *f-strings*. A simple f-string looks like this:
variable = 15.32399
print(f"You scored {variable}")  # f-string interpolation
# This is similar to calling `str` on variable and using `+` for concatenation but much shorter to write. You can add expressions to f-strings too:
print(f"You scored {variable**2}")  # expressions are evaluated inside the braces
# This also works with functions; after all `**2` is just a function with its own special syntax.
# In this example, the score number that came out had a lot of (probably) uninteresting decimal places. So how do we polish the printed output? You can pass more information to the f-string to get the output formatted just the way you want. Let's say we wanted two decimal places and a sign (although you always write `+` in the formatting, the sign comes out as + or - depending on the value):
print(f"You scored {variable:+.2f}")  # format spec: explicit sign, two decimal places
# There are a whole range of formatting options for numbers as shown in the following table:
#
# | Number | Format | Output | Description |
# |------------ |--------- |------------ |----------------------------------------------- |
# | 15.32347 | {:.2f} | 15.32 | Format float 2 decimal places |
# | 15.32347 | {:+.2f} | +15.32 | Format float 2 decimal places with sign |
# | -1 | {:+.2f} | -1.00 | Format float 2 decimal places with sign |
# | 15.32347 | {:.0f} | 15 | Format float with no decimal places |
# | 3 | {:0>2d} | 03 | Pad number with zeros (left padding, width 2) |
# | 3 | {:*<4d} | 3*** | Pad number with *’s (right padding, width 4) |
# | 13 | {:*<4d} | 13** | Pad number with *’s (right padding, width 4) |
# | 1000000 | {:,} | 1,000,000 | Number format with comma separator |
# | 0.25 | {:.1%} | 25.0% | Format percentage |
# | 1000000000 | {:.2e} | 1.00e+09 | Exponent notation |
# | 12 | {:10d} | 12 | Right aligned (default, width 10) |
# | 12 | {:<10d} | 12 | Left aligned (width 10) |
# | 12 | {:^10d} | 12 | Center aligned (width 10) |
#
# As well as using this page interactively through the Colab and Binder links at the top of the page, or downloading this page and using it on your own computer, you can play around with some of these options over at [this link](https://www.python-utils.com/).
# ### Special characters
#
# Python has a string module that comes with some useful built-in strings and characters. For example
# +
import string
string.punctuation  # all ASCII punctuation characters in one string
# -
# gives you all of the punctuation,
string.ascii_letters  # lowercase followed by uppercase ASCII letters
# returns all of the basic letters in the 'ASCII' encoding (with `.ascii_lowercase` and `.ascii_uppercase` variants), and
string.digits  # '0123456789'
# gives you the numbers from 0 to 9. Finally, though less impressive visually, `string.whitespace` gives a string containing all of the different (there is more than one!) types of whitespace.
# There are other special characters around; in fact, we already met the most famous of them: "\n" for new line. To actually print "\n" we have to 'escape' the backward slash by adding another backward slash:
print('Here is a \n new line')
print('Here is an \\n escaped new line ')  # the doubled backslash prints a literal \n
# The table below shows the most important escape commands:
#
# | Code | Result |
# |------ |----------------- |
# | `\'` | Single Quote (useful if using `'` for strings) |
# | `\"` | Double Quote (useful if using `"` for strings) |
# | `\\` | Backslash |
# | `\n` | New Line |
# | `\r` | Carriage Return |
# | `\t` | Tab |
# ## Cleaning Text
#
# You often want to make changes to the text you're working with. In this section, we'll look at the various options to do this.
#
# ### Replacing sub-strings
#
# A common text task is to replace a substring within a longer string. Let's say you have a string variable `var`. You can use `.replace(old_text, new_text)` to do this.
#
"Value is objective".replace("objective", "subjective")
# As with any variable of a specific type (here, string), this would also work with variables:
text = "Value is objective"
old_substr = "objective"
new_substr = "subjective"
text.replace(old_substr, new_substr)
# Note that `.replace` performs an exact replace and so is case-sensitive.
# ### Replacing characters with translate
#
# A character is an individual entry within a string, like the 'l' in 'equilibrium'. You can always count the number of characters in a string variable called `var` by using `len(var)`. A very fast method for replacing individual characters in a string is `str.translate`.
#
# Replacing characters is extremely useful in certain situations, most commonly when you wish to remove all punctuation prior to doing other text analysis. You can use the built-in `string.punctuation` for this.
#
# Let's see how to use it to remove all of the vowels from some text. With apologies to economist Lisa Cook, we'll use the abstract from {cite}`cook2011inventing` as the text we'll modify and we'll first create a dictionary of translations of vowels to nothing, i.e. `""`.
example_text = "Much recent work has focused on the influence of social capital on innovative outcomes. Little research has been done on disadvantaged groups who were often restricted from participation in social networks that provide information necessary for invention and innovation. Unique new data on African American inventors and patentees between 1843 and 1930 permit an empirical investigation of the relation between social capital and economic outcomes. I find that African Americans used both traditional, i.e., occupation-based, and nontraditional, i.e., civic, networks to maximize inventive output and that laws constraining social-capital formation are most negatively correlated with economically important inventive activity."
vowels = 'aeiou'
translation_dict = {x: "" for x in vowels}  # map each vowel to the empty string (i.e. delete it)
translation_dict
# Now we turn our dictionary into a string translator and apply it to our text:
#
translator = example_text.maketrans(translation_dict)
example_text.translate(translator)  # a single C-level pass removes every vowel
# ```{admonition} Exercise
# Use `translate` to replace all punctuation from the following sentence with spaces: "The well-known story I told at the conferences [about hypocondria] in Boston, New York, Philadelphia,...and Richmond went as follows: It amused people who knew Tommy to hear this; however, it distressed Suzi when Tommy (1982--2019) asked, \"How can I find out who yelled, 'Fire!' in the theater?\" and then didn't wait to hear Missy give the answer---'Dick Tracy.'"
# ```
# Generally, `str.translate` is very fast at replacing individual characters in strings. But you can also do it using a list comprehension and a `join` of the resulting list, like so:
''.join([ch for ch in "Example. string. with- excess_ [punctuation]/," if ch not in string.punctuation])
# ### Slugifying
#
# A special case of string cleaning occurs when you are given text with lots of non-standard characters in, and spaces, and other symbols; and what you want is a clean string suitable for a filename or column heading in a dataframe. Remember that it's best practice to have filenames that don't have spaces in. Slugiyfing is the process of creating the latter from the former and we can use the [**slugify**](https://github.com/un33k/python-slugify) package to do it.
#
# Here are some examples of slugifying text:
from slugify import slugify
txt = 'the quick brown fox jumps over the lazy dog'
slugify(txt, stopwords=['the'])  # drops the stopwords and hyphenates: 'quick-brown-fox-jumps-over-lazy-dog'
# In this very simple example, the words listed in the `stopwords=` keyword argument (a list), are removed and spaces are replaced by hyphens. Let's now see a more complicated example:
#
slugify('当我的信息改变时... àccêntæd tËXT ')  # transliterates to latin characters, strips accents and whitespace
# Slugify converts text to latin characters, while also removing accents and whitespace (of all kinds-the last whitespace is a tab). There's also a `replacement=` keyword argument that will replace specific strings with other strings using a list of lists format, eg `replacement=[['old_text', 'new_text']]`
# ### Splitting strings
#
# If you want to split a string at a certain position, there are two quick ways to do it. The first is to use indexing methods, which work well if you know at which position you want to split text, eg
#
"This is a sentence and we will split it at character 18"[:18]
# Next up we can use the built-in `split` function, which returns a list of places where a given sub-string occurs:
#
"This is a sentence. And another sentence. And a third sentence".split(".")
# Note that the character used to split the string is removed from the resulting list of strings. Let's see an example with a string used for splitting instead of a single character:
#
"This is a sentence. And another sentence. And a third sentence".split("sentence")
# A useful extra function to know about is `splitlines()`, which splits a string at line breaks and returns the split parts as a list.
# ### count and find
#
# Let's do some simple counting of words within text using `str.count`. Let's use the first verse of Elizabeth Bishop's sestina 'A Miracle for Breakfast' for our text.
text = "At six o'clock we were waiting for coffee, \n waiting for coffee and the charitable crumb \n that was going to be served from a certain balcony \n --like kings of old, or like a miracle. \n It was still dark. One foot of the sun \n steadied itself on a long ripple in the river."
word = "coffee"
print(f'The word "{word}" appears {text.count(word)} times.')
# Meanwhile, `find` returns the position where a particular word or character occurs.
text.find(word)  # index of the first occurrence (-1 if absent)
# We can check this using the number we get and some string indexing:
text[text.find(word):text.find(word) + len(word)]  # slice out exactly the found word
# But this isn't the only place where the word 'coffee' appears. If we want to find the last occurrence, it's
text.rfind(word)  # index of the last occurrence
# ## Regular expressions
#
# Regex, aka regular expressions, provide a way to both search and change text. Their advantages are that they are concise, they run very quickly, they can be ported across languages (they are definitely not just a Python thing!), and they are very powerful. The disadvantage is that they are confusing and take some getting used to!
#
# You can live code regex in a couple of places, the first is within Visual Studio Code itself. Do this by clicking the magnifying glass in the left-hand side panel of options. When the search strip appears, you can put a search term in. To the right of the text entry box, there are three buttons, one of which is a period (full stop) followed by an asterisk. This option allows the Visual Studio text search function to accept regular expressions. This will apply regex to all of the text in your current Visual Studio workspace.
#
# Another approach is to head over to [https://regex101.com/](https://regex101.com/) and begin typing your regular expression there. You will need to add some text in the box for the regex to be applied to.
#
# Try either of the above with the regex `string \w+\s`. This matches any occurrence of the word 'string' that is followed by another word and then a whitespace. As an example, 'string cleaning ' would be picked up as a match when using this regex.
#
# Within Python, the `re` library provides support for regular expressions. Let's try it:
#
import re

# Example text containing two occurrences of "string <word> ".
text = "It is true that string cleaning is a topic in this chapter. string editing is another."
# Raw string (r"...") keeps \w and \s literal for the regex engine; the original
# non-raw "string \w+\s" relies on invalid escape sequences, which emit a
# DeprecationWarning and will become a SyntaxError in future Python versions.
re.findall(r"string \w+\s", text)
# `re.findall` returns all matches. There are several useful search-like functions in `re` to be aware of that have a similar syntax of `re.function(regex, text)`. The table shows what they all do
#
#
# | Function | What it does | Example of use | Output for given value of `text` |
# |--------------|-----------------------------------------------------------------|---------------------------------------------|-----------------------------------------------------------------------|
# | `re.match` | Declares whether there is a match at the beginning of a string. | `re.match("string \w+\s" , text) is True` | `None` |
# | `re.search` | Declares whether there is a match anywhere in the string. | `re.search("string \w+\s" , text) is True` | `True` |
# | `re.findall` | Returns all matches. | `re.findall("string \w+\s" , text)` | `['string cleaning ', 'string editing ']` |
# | `re.split` | Splits text wherever a match occurs. | `re.split("string \w+\s" , text)` | `['It is true that ', 'is a topic in this chapter. ', 'is another.']` |
#
# Another really handy regex function is `re.sub`, which substitutes one bit of text for another if it finds a match. Here's an example:
# Substitute the replacement text wherever the pattern matches.
new_text = 'new text here! '
# Raw string avoids invalid-escape warnings for \w and \s (same fix as above cells).
re.sub(r"string \w+\s", new_text, text)
# #### Special Characters
#
# So far, we've only seen a very simple application of regex involving a vanilla word, `string`, the code for another word `\w+` and the code for a whitespace `\s`. Let's take a more comprehensive look at the regex special characters:
#
# | Character | Description | Example Text | Example Regex | Example Match Text |
# |-----------|--------------------------------------------------------|----------------------------------------|-----------------------|---------------------|
# | \d | One Unicode digit in any script | "file_93 is open" | `file_\d\d` | "file_93" |
# | \w | "word character": Unicode letter, digit, or underscore | "blah hello-world blah" | `\w-\w` | "hello-world" |
# | \s | "whitespace character": any Unicode separator | "these are some words with spaces" | `words\swith\sspaces` | "words with spaces" |
# | \D | Non-digit character (opposite of \d) | "ABC 10323982328" | `\D\D\D` | "ABC" |
# | \W | Non-word character (opposite of \w) | "Once upon a time *" | `\W` | "*" |
# | \S | Non-whitespace character (opposite of \s) | "y " | `\S` | "y" |
# | \Z | End of string | "End of a string" | `\w+\Z` | "string" |
# | . | Match any character except the newline | "ab=def" | `ab.def` | "ab=def" |
#
#
# Note that whitespace characters include newlines, `\n`, and tabs, `\t`.
#
# #### Quantifiers
#
# As well as these special characters, there are quantifiers which ask for more than one occurence of a character. For example, in the above, `\w\w` asked for two word characters, while `\d\d` asks for two digits. The next table shows all of the quantifiers.
#
# | Quantifier | Role | Example Text | Example Regex | Example Match |
# |------------|--------------------------------------------|----------------------------|---------------|--------------------|
# | {m} | Exactly m repetitions | "936 and 42 are the codes" | `\d{3}` | "936" |
# | {m,n} | From m (default 0) to n (default infinity) | "Words up to four letters" | `\b\w{1,4}\b` | "up", "to", "four" |
# | * | 0 or more. Same as {,} | "42 is the code" | `\d*\s` | "42" |
# | + | 1 or more. Same as {1,} | "4 323 hello" | `\d+` | "4", "323" |
# | ? | Optional, so 0 or 1. Same as {,1}. | "4 323 hello" | `\d?\s` | "4" |
#
# ```{admonition} Exercise
# Find a single regex that will pick out only the percentage numbers from both "Inflation in year 3 was 2 percent" and "Interest rates were as high as 12 percent".
# ```
#
# #### Metacharacters
#
# Now, as well as special characters and quantifiers, we can have meta-character matches. These are not characters *per se*, but starts, ends, and other bits of words. For example, `\b` matches strings at a word (`\w+`) boundary, so if we took the text "Three letter words only are captured" and ran `\b\w\w\w\b` we would return "are". `\B` matches strings not at word (`\w+`) boundaries so the text "Bricks" with `\B\w\w\B` applied would yield "ri". The next table contains some useful metacharacters.
#
# | Metacharacter Sequence | Meaning | Example Regex | Example Match |
# |------------------------|-------------------------------|--------------------|------------------------------------------------------------------------------|
# | ^ | Start of string or line | `^abc` | "abc" (appearing at start of string or line) |
# | $ | End of string, or end of line | `xyz$` | "xyz" (appearing at end of string or line) |
# | \b | Match string at word (\w+) boundary | `ing\b` | "match**ing**" (matches ing if it is at the end of a word) |
# | \B | Match string not at word (\w+) boundary | `\Bing\B` | "st**ing**er" (matches ing if it is not at the beginning or end of the word) |
#
# Because so many characters have special meaning in regex, if you want to look for, say, a dollar sign or a dot, you need to escape the character first with a backward slash. So `\${1}\d+` would look for a single dollar sign followed by some digits and would pick up the '\$50' in 'she made \$50 dollars'.
#
# ```{admonition} Exercise
# Find the regex that will pick out only the first instance of the word 'money' and any word subsequent to 'money' from the following: "money supply has grown considerably. money demand has not kept up.".
# ```
#
# #### Ranges
#
# You probably think you're done with regex, but not so fast! There are more metacharacters to come. This time, they will represent *ranges* of characters.
#
# | Metacharacter Sequence | Description | Example Expression | Example Match |
# |------------------------|---------------------------------------------------------|--------------------|-----------------------------------|
# | \[characters\] | The characters inside the brackets are part of a matching-character set | `[abcd]` | a, b, c, d, abcd |
# | \[^...\] | Characters inside brackets are a non-matching set; a character not inside is a matching character. | `[^abcd]` | Any occurrence of any character EXCEPT a, b, c, d. |
# | \[character-character\] | Any character in the range between two characters (inclusive) is part of the set | `[a-z]` | Any lowercase letter |
# | \[^character\] | Any character that is not the listed character | `[^A]` | Any character EXCEPT capital A |
#
# Ranges have two more neat tricks. The first is that they can be concatenated. For example, `[a-c1-5]` would match any of a, b, c, 1, 2, 3, 4, 5. They can also be modified with a quantifier, so `[a-c0-2]{2}` would match "a0" and "ab".
#
#
# #### Greedy versus lazy regexes
#
# Buckle up, because this one is a bit tricky to grasp. Adding a `?` after a regex will make it go from being 'greedy' to being 'lazy'. Greedy means that you will match the longest possible string that hits the condition. Lazy will mean that you get the shortest possible string matching the condition. It's easiest to demonstrate with an example:
#
# +
test_string = "stackoverflow"
greedy_regex = "s.*o"
lazy_regex = "s.*?o"
print(f'The greedy match is {re.findall(greedy_regex, test_string)[0]}')
print(f'The lazy match is {re.findall(lazy_regex, test_string)[0]}')
# -
# In the former (greedy) case, we get from an 's' all the way to the last 'o' within the same word. In the latter (lazy) case we just get everything between the start and first occurrence of an 'o'.
# #### Matches versus capture groups
#
# There is often a difference between what you might want to match and what you actually want to *grab* with your regex. Let's say, for example, we're parsing some text and we want any numbers that follow the format '$xx.xx', where the 'x' are numbers but we don't want the dollar sign. To do this, we can create a *capture group* using brackets. Here's an example:
#
text = "Product 1 was $45.34, while product 2 came in at $50.00 however it was assessed that the $4.66 difference did not make up for the higher quality of product 2."
re.findall("\$(\d{2}.\d{2})", text)
# Let's pick apart the regex here. First, we asked for a literal dollar sign using `\$`. Next, we opened up a capture group with `(`. Then we said only give us the numbers that are 2 digits, a period, and another 2 digits (thus excluding \$4.66). Finally, we closed the capture group with `)`.
#
# So while we specify a *match* using regex, we only want running the regex to return the *capture group*.
#
# Let's see a more complicated example.
# Captures a salary range like "30500.00 to 35000" only when followed by "per";
# the (?:...) groups match but are not captured.
sal_r_per = r"\b([0-9]{1,6}(?:\.)?(?:[0-9]{1,2})?(?:\s?-\s?|\s?to\s?)[0-9]{1,6}(?:\.)?(?:[0-9]{1,2})?)(?:\s?per)\b"
text = "This job pays gbp 30500.00 to 35000 per year. Apply at number 100 per the below address."
re.findall(sal_r_per, text)  # "100 per" is not a range, so it is not matched
# In this case, the regex first looks for up to 6 digits, then optionally a period, then optionally another couple of digits, then either a dash or 'to' using the '|' operator (which means or), followed by a similar number, followed by 'per'.
#
# But the capture group is only the subset of the match that is the number range-we discard most of the rest. Note also that other numbers, even if they are followed by 'per', are not picked up. `(?:)` begins a *non-capture group*, which matches only but does not capture, so that although `(?:\s?per)` looks for " per" after a salary (with the space optional due to the second `?`), it does not get returned.
#
# ```{admonition} Exercise
# Find a regex that captures the wage range from "Salary Pay in range $9.00 - $12.02 but you must start at 8.00 - 8.30 every morning.".
# ```
#
# This has been a whirlwind tour of regexes. Although regex looks a lot like gobbledygook, it is a really useful tool to be able to deploy for more complex string cleaning and extraction tasks.
# ## Scaling up from a single string to a corpus
#
# For this section, it's useful to be familiar with the **pandas** package, which is covered in the [Data Analysis Quickstart](data-quickstart) and [Data Analysis](data-analysis) sections. This section will closely follow the treatment by Jake VanderPlas.
#
# We've seen how to work with individual strings. But often we want to work with a group of strings, otherwise known as a corpus, that is a collection of texts. It could be a collection of words, sentences, paragraphs, or some domain-based grouping (eg job descriptions).
#
# Fortunately, many of the methods that we have seen deployed on a single string can be straightforwardly scaled up to hundreds, thousands, or millions of strings using **pandas** or other tools. This scaling up is achieved via *vectorisation*, in analogy with going from a single value (a scalar) to multiple values in a list (a vector).
#
# As a very minimal example, here is capitalisation of names vectorised using a list comprehension:
#
# Vectorise str.capitalize over a list of names (equivalent to the comprehension form).
list(map(str.capitalize, ['ada', 'adam', 'elinor', 'grace', 'jean']))
# A **pandas** series can be used in place of a list. Let's create the series first:
import pandas as pd
# Use the dedicated nullable 'string' dtype rather than the generic object dtype
dfs = pd.Series(['ada lovelace', '<NAME>', 'elinor ostrom', 'grace hopper', 'jean bartik'], dtype="string")
dfs
# Now we use the syntax series.str.function to change the text series:
#
dfs.str.title()
# If we had a dataframe and not a series, the syntax would change to refer just to the column of interest like so:
df = pd.DataFrame(dfs, columns=['names'])
df['names'].str.title()
# The table below shows a non-exhaustive list of the string methods that are available in **pandas**.
#
# | Function (preceded by `.str.`) | What it does |
# |-----------------------------|-------------------------|
# | `len()` | Length of string. |
# | `lower()` | Put string in lower case. |
# | `upper()` | Put string in upper case. |
# | `capitalize()` | Put string in leading upper case. |
# | `swapcase()` | Swap cases in a string. |
# | `translate()` | Returns a copy of the string in which each character has been mapped through a given translation table. |
# | `ljust()` | Left pad a string (default is to pad with spaces) |
# | `rjust()` | Right pad a string (default is to pad with spaces) |
# | `center()` | Pad such that string appears in centre (default is to pad with spaces) |
# | `zfill()` | Pad with zeros |
# | `strip()` | Strip out leading and trailing whitespace |
# | `rstrip()` | Strip out trailing whitespace |
# | `lstrip()` | Strip out leading whitespace |
# | `find()` | Return the lowest index in the data where a substring appears |
# | `split()` | Split the string using a passed substring as the delimiter |
# | `isupper()` | Check whether string is upper case |
# | `isdigit()` | Check whether string is composed of digits |
# | `islower()` | Check whether string is lower case |
# | `startswith()` | Check whether string starts with a given sub-string |
#
# Regular expressions can also be scaled up with **pandas**. The below table shows vectorised regular expressions.
#
# | Function | What it does |
# |-|----------------------------------|
# | `match()` | Call `re.match()` on each element, returning a boolean. |
# | `extract()` | Call `re.match()` on each element, returning matched groups as strings. |
# | `findall()` | Call `re.findall()` on each element |
# | `replace()` | Replace occurrences of pattern with some other string |
# | `contains()` | Call `re.search()` on each element, returning a boolean |
# | `count()` | Count occurrences of pattern |
# | `split()` | Equivalent to `str.split()`, but accepts regexes |
# | `rsplit()` | Equivalent to `str.rsplit()`, but accepts regexes |
#
#
# Let's see a couple of these in action. First, splitting on a given sub-string:
df['names'].str.split(' ')
# It's fairly common that you want to split out strings and save the results to new columns in your dataframe. You can specify a (max) number of splits via the `n=` kwarg and you can get the pieces as separate columns using `expand=True`:
df['names'].str.split(' ', n=2, expand=True)
# ```{admonition} Exercise
# Using vectorised operations, create a new column with the index position where the first vowel occurs for each row in the `names` column.
# ```
#
# Here's an example of using a regex function with **pandas**:
# Raw string for the pattern so `\w` isn't treated as a (deprecated) string escape
df['names'].str.extract(r'(\w+)', expand=False)
# There are a few more vectorised string operations that are useful.
#
# | Method | Description |
# |-|-|
# | `get()` | Index each element |
# | `slice()` | Slice each element |
# | `slice_replace()` | Replace slice in each element with passed value |
# | `cat()` | Concatenate strings |
# | `repeat()` | Repeat values |
# | `normalize()` | Return Unicode form of string |
# | `pad()` | Add whitespace to left, right, or both sides of strings |
# | `wrap()` | Split long strings into lines with length less than a given width |
# | `join()` | Join strings in each element of the Series with passed separator |
# | `get_dummies()` | extract dummy variables as a dataframe |
#
#
# The `get()` and `slice()` methods give access to elements of the lists returned by `split()`. Here's an example that combines `split()` and `get()`:
#
#
# split() returns a list per row; get(-1) indexes each list, here taking the surname
df['names'].str.split().str.get(-1)
# We already saw `get_dummies()` in the [Regression](regression) chapter, but it's worth revisiting it here with strings. If we have a column with tags split by a symbol, we can use this function to split it out. For example, let's create a dataframe with a single column that mixes subject and nationality tags:
#
df = pd.DataFrame({'names': ['ada lovelace', '<NAME>', '<NAME>', '<NAME>', '<NAME>'], 'tags': ['uk; cs', 'uk; econ', 'usa; econ', 'usa; cs', 'usa; cs']})
df
# If we now use `str.get_dummies` and split on `;` we can get a dataframe of dummies.
df['tags'].str.get_dummies(';')
# ## Reading and Writing Text
#
# ### Text file
#
# If you have just a plain text file, you can read it in like so:
#
# ```python
# fname = 'book.txt'
# with open(fname, encoding='utf-8') as f:
# text_of_book = f.read()
# ```
#
# You can also read a text file directly into a **pandas** dataframe using
#
# ```python
# df = pd.read_csv('book.txt', delimiter = "\n")
# ```
#
# In the above, the delimiter for different rows of the dataframe is set as "\n", which means new line, but you could use whatever delimiter you prefer.
#
# ```{admonition} Exercise
# Download the file 'smith_won.txt' from this book's github repository using this [link](https://github.com/aeturrell/coding-for-economists/blob/main/data/smith_won.txt) (use right-click and save as). Then read the text in using **pandas**.
# ```
#
# ### CSV file
#
# CSV files are already split into rows. By far the easiest way to read in csv files is using **pandas**,
#
# ```python
# df = pd.read_csv('book.csv')
# ```
#
# Remember that **pandas** can read many other file types too.
| text-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from riotApi import GetSummoner, Getmatchlist, GetmatchStats, GetmatchPerChamp, GetmatchPerqueue, GetTierbysummoner
import pandas as pd
api_key = 'API-KEY'
data_summoner = GetSummoner(api_key)
match_lists = Getmatchlist(api_key)
match_stats = GetmatchStats(api_key)
match_filter_list = GetmatchPerChamp(api_key)
match_filter_queue = GetmatchPerqueue(api_key)
tier = GetTierbysummoner(api_key)
# -
def get_summoners(user):
    """Collect summoner names appearing in the seed user's recent matches.

    Args:
        user: Summoner name used as the crawl seed.

    Returns:
        List of summoner names (up to 10 per match; duplicates possible).
    """
    seed = data_summoner.get_summoner_by_name(user)  # seed account data
    accountID_seed = seed['accountId']
    matchlist_seed = match_lists.get_summoner_account(accountID_seed)  # seed's match list
    # gameIds of every match in the seed's match list
    list_gameID = [match['gameId'] for match in matchlist_seed['matches']]
    # collect the other summoners participating in each of those matches
    list_summoners_names = []
    for game_id in list_gameID:
        stats_seed = match_stats.get_match_stats(game_id)  # per-match stats
        for c in range(10):  # a match has at most 10 participants
            try:
                list_summoners_names.append(
                    stats_seed['participantIdentities'][c]['player']['summonerName'])
            except (KeyError, IndexError):
                # participant entry missing/anonymised; was a bare `except`
                print('error')
    return list_summoners_names
def get_accounts(lista):
    """Resolve account/summoner IDs and ranked tier for each summoner name.

    API calls may fail (rate limits, unknown names); a failed lookup is
    recorded as the sentinel string 'erro' so row alignment is preserved.

    Args:
        lista: List of summoner names.

    Returns:
        DataFrame with columns ['summoners', 'accountID', 'summonerID', 'tier'].
    """
    accountID = []
    summonerID = []
    for summoner in lista:
        try:
            lolzeiro = data_summoner.get_summoner_by_name(summoner)
            accountID.append(lolzeiro['accountId'])
            summonerID.append(lolzeiro['id'])
        except Exception:
            # request failed (commonly API rate limiting); keep the row aligned
            accountID.append('erro')
            summonerID.append('erro')
    tiers = []
    for Id in summonerID:
        try:
            tier_lolzeiro = tier.get_summonerTier_by_id(Id)
            tiers.append(tier_lolzeiro[0]['tier'])
        except Exception:
            # unranked player, 'erro' sentinel id, or failed request
            tiers.append('erro')
    dataset = pd.DataFrame(
        list(zip(lista, accountID, summonerID, tiers)),
        columns=['summoners', 'accountID', 'summonerID', 'tier'])
    return dataset
# preciso dos gameIds desses summoners
# preciso dos gameIds desses summoners
def get_gameid(dataframe):
    """Attach each account's most recent ranked-solo (queue 420) gameId.

    Args:
        dataframe: Output of get_accounts (must have an 'accountID' column).

    Returns:
        Copy of the input with an extra 'game_ids' column; failed lookups
        are recorded as the sentinel string 'error'.
    """
    game_id_lists = []
    for account_id in dataframe['accountID']:
        try:
            game_id = match_filter_queue.get_summoner_accountPerqueue(account_id, 420)
            game_id_lists.append(game_id['matches'][0]['gameId'])
        except Exception:
            # request failed or no ranked match found; was a bare `except`
            game_id_lists.append('error')
    new_df = dataframe.copy()
    new_df['game_ids'] = game_id_lists
    return new_df
def get_stats(dataframe):
    """Fetch and flatten per-match statistics for every gameId in *dataframe*.

    Four field families are extracted per game: match-level info (duration),
    per-team objective stats, champion bans, and per-participant picks and
    end-of-game stats. Fields missing from a response are recorded as the
    sentinel string 'error' so the column layout stays aligned.

    Bug fixes vs. the original:
    - a failed stats request now skips the game (previously the first failure
      raised a NameError on the unbound `gameid_object`, and later failures
      silently reused the previous game's response);
    - the final concat uses the accumulators, so an empty input returns an
      empty DataFrame instead of raising NameError;
    - the participant-stats sentinel is unified to 'error' (was 'erro').

    Args:
        dataframe: Output of get_gameid (must have a 'game_ids' column).

    Returns:
        DataFrame with one row per successfully fetched game; rows are
        prepended, so the last processed game appears first.
    """
    keys_object = ['gameDuration']
    keys_teams = ['win', 'firstBlood', 'firstTower', 'firstInhibitor', 'firstBaron', 'firstDragon',
                  'firstRiftHerald', 'towerKills', 'inhibitorKills', 'baronKills', 'dragonKills',
                  'vilemawKills', 'riftHeraldKills']
    keys_participants = ['championId', 'spell1Id', 'spell2Id']
    keys_stats_participants = [
        'win', 'kills', 'deaths', 'assists',
        'largestKillingSpree', 'largestMultiKill', 'killingSprees', 'longestTimeSpentLiving',
        'doubleKills', 'tripleKills', 'quadraKills', 'pentaKills', 'totalDamageDealt',
        'magicDamageDealt', 'physicalDamageDealt', 'trueDamageDealt', 'largestCriticalStrike',
        'totalDamageDealtToChampions', 'magicDamageDealtToChampions', 'physicalDamageDealtToChampions',
        'trueDamageDealtToChampions', 'totalHeal', 'damageSelfMitigated', 'damageDealtToObjectives',
        'damageDealtToTurrets', 'visionScore', 'totalDamageTaken', 'magicalDamageTaken', 'physicalDamageTaken',
        'trueDamageTaken', 'goldEarned', 'goldSpent', 'turretKills', 'inhibitorKills', 'totalMinionsKilled',
        'neutralMinionsKilled', 'neutralMinionsKilledTeamJungle', 'neutralMinionsKilledEnemyJungle',
        'visionWardsBoughtInGame', 'sightWardsBoughtInGame', 'wardsPlaced', 'wardsKilled', 'firstBloodKill',
        'firstBloodAssist', 'firstTowerKill', 'firstTowerAssist', 'firstInhibitorKill', 'firstInhibitorAssist']
    # one accumulator per field family; each iteration prepends one row
    df_final_obj = pd.DataFrame()
    df_final_teams = pd.DataFrame()
    df_final_bans = pd.DataFrame()
    df_final_participants0 = pd.DataFrame()
    df_final_participants1 = pd.DataFrame()
    for game_ids in dataframe['game_ids']:
        try:
            gameid_object = match_stats.get_match_stats(game_ids)
        except Exception:
            # request failed ('error' sentinel id, rate limit, ...):
            # skip this game instead of reusing stale data
            print('key_error')
            continue
        lista_object = []
        lista_bans = []
        lista_teams = []
        lista_participants_0 = []
        lista_participants_1 = []
        column_names_obj = []
        column_names_teams = []
        column_names_bans = []
        column_names_participants0 = []
        column_names_participants1 = []
        # match-level fields
        for obj in keys_object:
            try:
                lista_object.append(gameid_object[obj])
            except Exception:
                lista_object.append('error')
            column_names_obj.append(obj)
        df_object = pd.DataFrame([lista_object], columns=column_names_obj)
        df_final_obj = pd.concat((df_object, df_final_obj), axis=0)
        # per-team fields; column name suffix 0/1 identifies the team
        for key in keys_teams:
            for i in range(2):
                try:
                    lista_teams.append(gameid_object['teams'][i][key])
                except Exception:
                    lista_teams.append('error')
                column_names_teams.append(key + str(i))
        df_teams = pd.DataFrame([lista_teams], columns=column_names_teams)
        df_final_teams = pd.concat((df_teams, df_final_teams), axis=0)
        # bans, columns named ban<team>_<slot>
        for a in range(2):
            for b in range(5):
                try:
                    lista_bans.append(gameid_object['teams'][a]['bans'][b]['championId'])
                except Exception:
                    lista_bans.append('error')
                column_names_bans.append('ban' + str(a) + '_' + str(b))
        df_bans = pd.DataFrame([lista_bans], columns=column_names_bans)
        df_final_bans = pd.concat((df_bans, df_final_bans), axis=0)
        # participant picks, columns named <field><participant 0-9>
        for key2 in keys_participants:
            for c in range(10):
                try:
                    lista_participants_0.append(gameid_object['participants'][c][key2])
                except Exception:
                    lista_participants_0.append('error')
                column_names_participants0.append(key2 + str(c))
        df_participants0 = pd.DataFrame([lista_participants_0], columns=column_names_participants0)
        df_final_participants0 = pd.concat((df_participants0, df_final_participants0), axis=0)
        # participant end-of-game stats, columns named <field><participant 0-9>
        for key3 in keys_stats_participants:
            for d in range(10):
                try:
                    lista_participants_1.append(gameid_object['participants'][d]['stats'][key3])
                except Exception:
                    lista_participants_1.append('error')
                column_names_participants1.append(key3 + str(d))
        df_participants1 = pd.DataFrame([lista_participants_1], columns=column_names_participants1)
        df_final_participants1 = pd.concat((df_participants1, df_final_participants1), axis=0)
    df = pd.concat((df_final_obj, df_final_teams, df_final_bans,
                    df_final_participants0, df_final_participants1), axis=1)
    return df
def get_all(seed):
    """Run the full collection pipeline starting from a seed summoner.

    Writes an intermediate 'pre_stats.csv' (accounts + gameIds) and the
    final 'df_final.csv' (accounts joined with per-match statistics).

    Returns:
        The final combined DataFrame.
    """
    summoner_names = get_summoners(seed)
    accounts = get_accounts(summoner_names)
    accounts = get_gameid(accounts)
    accounts.to_csv('pre_stats.csv')
    stats = get_stats(accounts)
    stats.reset_index(drop=True, inplace=True)
    combined = pd.concat((accounts, stats), axis=1)
    combined.to_csv('df_final.csv')
    return combined
# Seed summoner name that starts the crawl; replace with a real summoner name.
seed = 'seed_para_iniciar_a_coleta'
df = get_all(seed)
| coleta/league_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Demographic Shares
#
# Adds percentages to the demographics data
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# We read the data from the csv file, replace the asterisk (protecting values between 1 and 4) with a 2 and add the column 'Total' which we need to calculate the share of each demographic group.
file = r'../data/unhcr_popstats_export_demographics_all_data.csv'
df = pd.read_csv(file, header=3)  # the real header row is the 4th line of the export
# '*' masks protected small counts (1-4) in the UNHCR export; substitute the value 2
df = df.replace(to_replace='*', value='2')
# columns 3..18 hold the demographic counts; coerce them from strings to numbers
df.iloc[:, 3:19] = df.iloc[:, 3:19].apply(pd.to_numeric)
df['Total'] = df['F: Total'] + df['M: Total']
df['Total'] = pd.to_numeric(df['Total']).astype(np.int64)
df.dtypes
# We calculate the share of each demographic group and add a column for it (`<original name>_share`).
def calculate_share(demographic):
    """Append a '<demographic>_share' column: the group's count over the row total."""
    df[f'{demographic}_share'] = df[demographic] / df['Total']

# compute a share column for each demographic count column (3..18)
for column in df.columns[3:19]:
    calculate_share(column)
print(df)
# We save the dataframe as a csv file.
df.to_csv('../data/unhcr_demographics_share.csv', encoding='utf-8', index=False)
| notebooks/Demographic Shares.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append("./")
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
pd.set_option("display.width", 5000)
pd.set_option("display.max_columns", None)
pd.set_option("display.expand_frame_repr", False)
import black_scholes.option_greeks as og
import options.risk_adjustments as ra
import options.decay_functions as dcf
# import test option portfolio
optPort = pd.read_csv("../test_port.csv")
calls = optPort[optPort.OptType == "C"]
puts = optPort[optPort.OptType == "P"]
optPort
# static variables
tau = 8./365           # time to expiry in years (8 calendar days)
r = 0.03               # risk-free rate
spot = 2.8745          # underlying price
spotTickIncr = 0.001   # minimum underlying price increment
spotTickValue = 1.0    # dollar value of one underlying tick
numDaysPerYear = 365   # day-count convention
# get option greeks: one row of greeks per option in the portfolio
greekList = []
opt = og.Option(r, tau, spotTickValue, spotTickIncr, numDaysPerYear, modelName="black_scholes")
for strike, impVol, optType in zip(optPort.Strike, optPort.ImpVol, optPort.OptType):
    isCall = optType == "C"
    greekList.append(opt.getOptionGreeks(spot, strike, r, impVol, tau, isCall))
optGreeks = pd.concat(greekList)
# first 3 rows are the calls, the rest are puts (portfolio file ordering)
callGreeks = optGreeks.iloc[:3]
putGreeks = optGreeks.iloc[3:]
callGreeks
putGreeks
# get option current positions as column vectors
# (Series.as_matrix was removed in pandas 1.0; to_numpy is the replacement)
optPos = optPort.CurrentPosition.to_numpy().reshape((len(optPort), 1))
callPos = calls.CurrentPosition.to_numpy()
callPos = callPos.reshape((len(callPos), 1))
putPos = puts.CurrentPosition.to_numpy()
putPos = putPos.reshape((len(putPos), 1))
# get decay matrix: weighting applied across strikes when spreading an adjustment
decayKwargs = {"maxDecayLimit":len(callPos),
               "decayRate":0.85}
# one matrix per decay profile, for side-by-side comparison
linearDecayMatrix = dcf.getDecayMatrix(callPos, decayType="linear", **decayKwargs)
expDecayMatrix = dcf.getDecayMatrix(callPos, decayType="exponential", **decayKwargs)
parabolicDecayMatrix = dcf.getDecayMatrix(callPos, decayType="parabolic", **decayKwargs)
powerDecayMatrix = dcf.getDecayMatrix(callPos, decayType="power", **decayKwargs)
print(linearDecayMatrix)
print(expDecayMatrix)
print(parabolicDecayMatrix)
print(powerDecayMatrix)
## shape checker: confirm positions and greeks align before broadcasting
print("optPos", np.shape(optPos))
print("optGreeks", np.shape(optGreeks))
print("callPos", np.shape(callPos))
print("callGreeks", np.shape(callGreeks))
optGreeks
# calc position greeks: per-contract greeks scaled by current position size
optPosGreeks = optPos * optGreeks
print(optPosGreeks.shape)
optPosGreeks
# get optRisk inputs
aggressiveness = 0.5    # fraction of the max adjustment to apply
optTickIncr = 0.0001    # option price tick increment
optTickValue = 1        # dollar value of one option tick
# convert position greeks into tick-normalised portfolio risk
posRiskGreeks = ra.convertPortfolioRiskGreeks(optPosGreeks, spotTickIncr, spotTickValue, optTickIncr, optTickValue)
posRiskGreeks
optPos.sum()
posRiskGreeks.sum(axis=0)
# get maxRisk Adj: widest price concession allowed per strike
# (Series.as_matrix was removed in pandas 1.0; to_numpy is the replacement)
maxRiskAdj = optPort.BidAskSpread.to_numpy().reshape((len(optPort), 1))
print(np.shape(maxRiskAdj))
# get optionPositionLimit: per-strike position limits
strikePosLimit = optPort.StrikeLimit.to_numpy().reshape((len(optPort), 1))
print(np.shape(strikePosLimit))
# opt_greek_limit = strike_position_limit * opt_greek
opt_greek_limit = ra.convertPortfolioRiskGreeks(strikePosLimit * optGreeks, spotTickIncr, spotTickValue, optTickIncr, optTickValue)
opt_greek_limit
risk_limits = pd.read_csv("../test_portfolio_limits.csv")
risk_limits.set_index("greeks", inplace=True)
risk_limits
# calc minGreekLimit = min(term_greek_limit, opt_greek_limit, pos_greek_limit), row by row
minGreekLimit = np.zeros(np.shape(opt_greek_limit))
for i in range(len(opt_greek_limit)):
    # bug fix: was abs(opt_greek_limit.iloc[0]), which reused row 0 for every strike
    minGreekLimit[i] = np.min([risk_limits.termLimit.T, risk_limits.positionLimit.T, abs(opt_greek_limit.iloc[i])], axis=0)
minGreekLimit
# get optRiskAdj = -posRiskGreeks/minGreekLimit * maxRiskAdj * aggressiveness
optRiskAdj = maxRiskAdj*(-posRiskGreeks / minGreekLimit)*aggressiveness
optRiskAdj
# get decayedRiskAdj = np.dot(decayMatrix, optRiskAdj): spread the call adjustments across strikes
call_optRiskAdj = optRiskAdj.iloc[:3]
decayedRiskAdj = np.dot(linearDecayMatrix, call_optRiskAdj)
print(np.sum(decayedRiskAdj, axis=1))
# bug fix: the DataFrame `columns` argument must be list-like; a bare string raises a TypeError
call_totalRiskAdj = pd.DataFrame(data=np.sum(decayedRiskAdj, axis=1), index=optRiskAdj.index[:3], columns=["totalRiskAdj"])
call_totalRiskAdj
| options/models/.ipynb_checkpoints/risk_adjustment_tester-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# 
# # Uploading a Qiskit runtime program
# Here we provide an overview on how to construct and upload a runtime program. A runtime program is a piece of Python code that lives in the cloud and can be invoked by passing in just its parameters. Runtime programs are private by default, which means only you can see and access your programs. Some authorized users can also mark their programs as public, making them visible and accessible by everyone.
# ## Constructing a runtime program
# Below is a template of a runtime program. You can find the template file in the
# [qiskit-ibm-runtime](https://github.com/Qiskit/qiskit-ibm-runtime/blob/main/qiskit_ibm_runtime/program/program_template.py) repository.
# +
import sys
import json
from qiskit_ibm_runtime.program import UserMessenger, ProgramBackend
def program(backend: ProgramBackend, user_messenger: UserMessenger, **kwargs):
    """Function that does classical-quantum calculation.

    Args:
        backend: Backend for the circuits to run on (unused by this stub).
        user_messenger: Used to communicate with the program user.
        kwargs: User inputs.

    Returns:
        The final result of the calculation.
    """
    # UserMessenger can be used to publish interim results.
    user_messenger.publish("This is an interim result.")
    return "final result"
def main(backend: ProgramBackend, user_messenger: UserMessenger, **kwargs):
    """This is the main entry point of a runtime program.

    The name of this method must not change. It also must have ``backend``
    and ``user_messenger`` as the first two positional arguments.

    Args:
        backend: Backend for the circuits to run on.
        user_messenger: Used to communicate with the program user.
        kwargs: User inputs.

    Returns:
        The program result, returned directly to the caller.
    """
    # Massage the input if necessary.
    result = program(backend, user_messenger, **kwargs)
    # Final result can be directly returned
    return result
# -
# Each runtime program must have a `main()` function, which serves as the entry point to the program. This function must have `backend` and `user_messenger` as the first two positional arguments:
#
# - `backend` is an instance of [ProgramBackend](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.ProgramBackend.html#qiskit_ibm_runtime.ProgramBackend) and has a [run()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.ProgramBackend.html#qiskit_ibm_runtime.ProgramBackend.run) method that can be used to submit circuits.
# - `user_messenger` is an instance of [UserMessenger](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.UserMessenger.html#qiskit_ibm_runtime.UserMessenger) and has a [publish()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.UserMessenger.html#qiskit_ibm_runtime.UserMessenger.publish) method that can be used to send interim and final results to the program user. This method takes a parameter `final` that indicates whether it's a final result. However, it is recommended to return the final result directly from the `main()` function. Currently only final results are stored after a program execution finishes.
# There are several runtime program source code in the `program_source` directory in this repository. `program_source/hello_world/hello_world.py` is one of them. It is a sample runtime program that submits random circuits for user-specified iterations:
# +
"""A sample runtime program that submits random circuits for user-specified iterations."""
import random
from qiskit import transpile
from qiskit.circuit.random import random_circuit
def prepare_circuits(backend):
    """Generate a random 5-qubit circuit transpiled for the given backend.

    Args:
        backend: Backend used for transpilation.

    Returns:
        Generated circuit.
    """
    seed = random.randint(0, 1000)
    qc = random_circuit(num_qubits=5, depth=4, measure=True, seed=seed)
    return transpile(qc, backend)
def main(backend, user_messenger, **kwargs):
    """Main entry point of the program.

    Runs a user-specified number of random circuits (default 5), publishing
    the counts of each iteration as an interim result.

    Args:
        backend: Backend to submit the circuits to.
        user_messenger: Used to communicate with the program consumer.
        kwargs: User inputs.
    """
    iterations = kwargs.pop("iterations", 5)
    for iteration in range(iterations):
        circuit = prepare_circuits(backend)
        counts = backend.run(circuit).result().get_counts()
        user_messenger.publish({"iteration": iteration, "counts": counts})
    return "Hello, World!"
# -
# ## Data serialization
# Runtime programs live in the cloud, and JSON is the standard way of passing data to and from cloud services. Therefore, when a user invokes a runtime program, the input parameters must first be serialized into the JSON format and then deserialized once received by the server. By default, this serialization and deserialization is done automatically using the [RuntimeEncoder](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.RuntimeEncoder.html#qiskit_ibm_runtime.RuntimeEncoder) and [RuntimeDecoder](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.RuntimeDecoder.html#qiskit_ibm_runtime.RuntimeDecoder) classes.
#
# ### Custom classes
# `RuntimeEncoder` and `RuntimeDecoder` only support types commonly used in Qiskit, such as complex numbers and numpy arrays. If your program uses custom Python classes for input or output, these two methods only have partial support for that.
#
# Your custom class should have the following methods:
#
# - a `to_json()` method that returns a JSON string representation of the object
# - a `from_json()` class method that accepts a JSON string and returns the corresponding object.
#
# When `RuntimeEncoder` serializes a Python object, it checks whether the object has a `to_json()` method. If so, it calls the method to serialize the object. `RuntimeDecoder`, however, does _not_ invoke `from_json()` to convert the data back because it doesn't know how to import your custom class. Therefore the deserialization needs to be done explicitly.
# Here is an example of serializing and deserializing a custom class. First we define the class `MyCustomClass`:
# +
import json
class MyCustomClass:
    """Example custom class that is runtime-serializable via JSON."""

    def __init__(self, foo, bar):
        self._foo = foo
        self._bar = bar

    def to_json(self):
        """Convert this instance to a JSON string."""
        payload = {"foo": self._foo, "bar": self._bar}
        return json.dumps(payload)

    @classmethod
    def from_json(cls, json_str):
        """Return a MyCustomClass instance based on the input JSON string."""
        attributes = json.loads(json_str)
        return cls(**attributes)
# -
# Note that it has the `to_json()` method that converts a `MyCustomClass` instance to a JSON string, and a `from_json()` class method that converts a JSON string back to a `MyCustomClass` instance.
# Here is how one would use `MyCustomClass` as an **input** to your program:
# ```
# program_inputs = {
# 'my_obj': MyCustomClass("my foo", "my bar")
# }
#
# options = {"backend_name": "ibmq_qasm_simulator"}
# job = service.run(program_id="some-program",
# options=options,
# inputs=program_inputs
# )
# ```
# Since `MyCustomClass` has a `to_json()` method, the method is automatically called to convert the instance to a JSON string when `service.run()` is invoked.
#
# Your program can then use the `from_json()` method to restore the JSON string back to a `MyCustomClass` instance:
def main(backend, user_messenger, **kwargs):
    """Main entry point of the program.

    The runtime passes 'my_obj' through as the JSON string produced by
    to_json(); deserialize it back into a MyCustomClass instance here.
    """
    my_obj_str = kwargs.pop("my_obj")
    my_obj = MyCustomClass.from_json(my_obj_str)
# Similarly, if you pass a `MyCustomClass` instance as an **output** of your program, it is automatically converted to a JSON string (via the `to_json()` method):
def main(backend, user_messenger, **kwargs):
    """Main entry point of the program.

    Returns a MyCustomClass instance; the runtime encoder serializes it
    automatically via its to_json() method.
    """
    return MyCustomClass("this foo", "that bar")
# Now when the user of this program calls `job.result()`, they will receive a JSON string rather than a `MyCustomClass` instance. The user can convert the string back to `MyCustomClass` themselves:
# ```
# output_str = job.result()
# output = MyCustomClass.from_json(output_str)
# ```
# Alternatively, you can provide a decoder for the users. Your decoder class should inherit [ResultDecoder](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.ResultDecoder.html#qiskit_ibm_runtime.ResultDecoder) and overwrite the `decode()` method:
# +
from qiskit_ibm_runtime.program import ResultDecoder
class MyResultDecoder(ResultDecoder):
    """Decode program output back into a MyCustomClass instance."""

    @classmethod
    def decode(cls, data):
        """Deserialize *data* after the base class's preprocessing."""
        # Bug fix: the base-class hook is decode(), not decoded()
        data = super().decode(data)  # Perform any preprocessing.
        return MyCustomClass.from_json(data)
# -
# Your user can then use this `MyResultDecoder` to decode the result of your program:
#
# ```
# output = job.result(decoder=MyResultDecoder)
# ```
# ## Testing your runtime program
# You can test your runtime program using a local simulator or a real backend before uploading it. Simply import and invoke the `main()` function of your program and pass the following parameters:
#
# - the `backend` instance you want to use
# - a new `UserMessenger` instance.
# - program input parameters that are serialized and then deserialized using the correct encoder and decoder. While this may seem redundant, it is to ensure input parameters can be passed to your program properly once it's uploaded to the cloud.
#
# The following example tests the `hello-world` program we saw earlier. It uses the `qasm_simulator` from Qiskit Aer as the test backend. It serializes and deserializes input data using `RuntimeEncoder` and `RuntimeDecoder`, which are the default en/decoders used by runtime.
# +
import sys

sys.path.insert(0, "..")  # Add source_program directory to the path
from program_source.hello_world import hello_world
from qiskit import Aer
from qiskit_ibm_runtime import RuntimeEncoder, RuntimeDecoder, UserMessenger

inputs = {"iterations": 3}
backend = Aer.get_backend("qasm_simulator")
user_messenger = UserMessenger()
# Round-trip the inputs through the runtime en/decoders to mimic exactly what
# the cloud service does before invoking the program.
serialized_inputs = json.dumps(inputs, cls=RuntimeEncoder)
deserialized_inputs = json.loads(serialized_inputs, cls=RuntimeDecoder)
hello_world.main(backend, user_messenger, **deserialized_inputs)
# -
# ## Defining program metadata
# Program metadata helps users to understand how to use your program. It includes:
#
# - `name`: Name of the program.
# - `max_execution_time`: Maximum amount of time, in seconds, a program can run before being forcibly terminated.
# - `description`: Describes the program.
# - `spec`: Detailed information about the program, which includes the following attributes:
# - `backend_requirements`: Describes the backend attributes needed to run the program.
# - `parameters`: Describes the program input parameters as a JSON schema
# - `return_values`: Describes the return values as a JSON schema
# - `interim_results`: Describes the interim results as a JSON schema
#
# When uploading a program, you must specify at least `name`, `max_execution_time`, and `description`. It is strongly encouraged to also specify `parameters`, `return_values`, and `interim_results` within `spec` if the program has them.
# Below shows the metadata JSON file of the `hello-world` program as an example:
# +
import os

# Path to the hello-world program's metadata file, relative to this notebook
hello_world_json = os.path.join(
    os.getcwd(), "../../program_source/hello_world/hello_world.json"
)
with open(hello_world_json, "r") as file:
    data = file.read()
print(data)
# -
# ## Uploading a program
# You can use the [QiskitRuntimeService.upload_program()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.QiskitRuntimeService.html#qiskit_ibm_runtime.QiskitRuntimeService.upload_program) method to upload your program. In the example below, the program data lives in the file `hello_world.py`, and its metadata, as described above, is in `hello_world.json`.
# +
import os
from qiskit_ibm_runtime import QiskitRuntimeService
service = QiskitRuntimeService()
hello_world_data = os.path.join(
os.getcwd(), "../../program_source/hello_world/hello_world.py"
)
hello_world_json = os.path.join(
os.getcwd(), "../../program_source/hello_world/hello_world.json"
)
program_id = service.upload_program(data=hello_world_data, metadata=hello_world_json)
print(program_id)
# -
# `upload_program()` returns a program ID, which uniquely identifies the program. It is derived from the program name, usually with a randomly-generated suffix. Program ID is needed to invoke the program.
# ## Updating a program
# You can use the [QiskitRuntimeService.update_program()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.QiskitRuntimeService.update_program.html#qiskit_ibm_runtime.QiskitRuntimeService.update_program) method to update the source code and/or metadata of a program:
service.update_program(program_id=program_id, description="A new description.")
# This method allows you to make changes to your program while retaining the same program ID.
# ## Deleting a program
# You can use the [QiskitRuntimeService.delete_program()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.QiskitRuntimeService.html#qiskit_ibm_runtime.QiskitRuntimeService.delete_program) method to delete a program. Only the person who uploaded the program can delete it.
#
service.delete_program(program_id)
# ## Additional materials
# This is an introductory tutorial on creating and uploading a very simple custom program. [sample_vqe_program/qiskit_runtime_vqe_program.ipynb](sample_vqe_program/qiskit_runtime_vqe_program.ipynb) provides a more in-depth tutorial on creating a real-world Qiskit Runtime program.
# +
from qiskit.tools.jupyter import *
# %qiskit_copyright
| docs/tutorials/05_uploading_program.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
import pandas as pd
from fbprophet import Prophet
# Load the stock-price CSV (Sony, ticker 6758, 2015-2020; Shift-JIS encoded)
df = pd.read_csv(r"C:\Users\Windows\prophet\stockdata\6758_2015_2020.csv", encoding = 'shift-jis')
# Name the columns and display the dataframe
df.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
df
# -
# Rename the columns to ds and y (the input schema Prophet requires)
df = df.rename(columns={'Date':'ds', 'Adj Close':'y'})
df
# +
# Instantiate Prophet
m = Prophet()
# Fit the model to the data
m.fit(df)
# Create a future dataframe covering the forecast horizon (250 periods)
future = m.make_future_dataframe(periods=250)
# Monday=0, Tuesday=1, Wednesday=2, Thursday=3, Friday=4, Saturday=5, Sunday=6
# Keep weekdays only, since the market is closed on weekends
future = future[future['ds'].dt.weekday < 5]
# Predict
forecast = m.predict(future)
# Plot the forecast
fig1 = m.plot(forecast)
# Plot the trend and seasonality components
fig2 = m.plot_components(forecast)
# Plot interactively with plotly (optional)
#from fbprophet.plot import plot_plotly
#import plotly.offline as py
#fig1 = plot_plotly(m, forecast)
#py.plot(fig1)
# -
| prophet/stock_prophet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
# +
import numpy as np
import pandas as pd
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
# -
# Load the scraped articles; the file is assumed to hold 600 rows,
# 100 per category, in category order (see the Category assignment below).
article_df = pd.read_csv('article.csv')
article_df.tail(2)
article_df['Category'] = [100]*100 + [101]*100 + [102]*100 + [103]*100 + [104]*100 + [105]*100
# Preprocessing: split the dataset into train/test sets
X_train, X_test, Y_train, Y_test = train_test_split(article_df.Text, article_df.Category, test_size = 0.1, random_state = 1)
# +
# Build the model: TF-IDF features feeding a multinomial naive Bayes classifier
clf = Pipeline([
    ('vect', TfidfVectorizer()),
    ('clf', MultinomialNB(alpha = 0.1)),
])
# -
# Train
model = clf.fit(X_train.values.astype('str'), Y_train.values)
y_pred = model.predict(X_test)
print(classification_report(Y_test, y_pred))
# #### Using the model
# Category code -> Korean category label (runtime display strings)
categories = {
    100 : '정치',
    101 : '경제',
    102 : '사회',
    103 : '생활/문화',
    104 : '세계',
    105 : 'IT/과학'
}
# Sample Korean news snippets to classify
contents= [
    '트럼프 대통령은 UN 제재를 선언하였다. 문재인 대통령은 이에 성명을 발표했다.',
    '요즘 환율 주가 예측이 불가능하다. '
]
datas = {
    'content': contents,
    'category_code' : model.predict(contents)
}
result = pd.DataFrame(datas)
result['category'] = result.category_code.apply(lambda x: categories[x])
# Highest class probability, rounded to 2 decimals, as a rough confidence score
result['proba'] = result.content.apply(lambda x :round(max(model.predict_proba([x])[0]),2))
result
# Save the model
pickle.dump(model, open('clf.pkl', 'wb'))
| python/webcrawl/4. Multinomial classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import collections
from wordsplit import WordSplit, showDistribution, showDistributionFreq
# -
# Load the corpus
W = WordSplit(4)
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# # Morpheme splitting by information reduction
#
# Idea: pick a word and try to split off morpheme-like bits from the start and from the end of a word.
# We do it in rounds, in each round at most one morf per side of the word is split off.
#
# What we do is:
#
# ```
# word = morf + main
# ```
#
# and/or
#
# ```
# word = main + morf
# ```
#
# How do we determine whether a possible split is a good split?
# By computing certain qualities of a split:
#
# 1. the quality of the *morf*.
# 1. the quality of the *main*
#
# We then weigh the values found for these indicators, and take a decision.
#
# We use an upper bound for morf lengths.
# + [markdown] tags=[]
# # Preparation
#
# We compute data in advance that we will need frequently: the compositions of all word forms.
# -
W.getCompositions()
# # Quality of a word split
#
# We define the quality of word splits in an almost cyclical manner.
#
# First we define a very coarse notion of quality.
# Based on that we refine the notion.
# And again, and again.
#
# We split the word into two parts one called morf and one called lex.
#
# The morf is no longer than a limited amount of characters, e.g. 5.
#
# The morf is either at the end or at the start of a word.
# The lex is the rest of the word.
#
# A morf may be empty.
# A lex may not be empty, but it may be short.
#
# ## Round 1
#
# We start with the quality of a lex:
#
# We look at the number of morfs that combine with a lex at each side.
#
# If for a lex that number is 1, and it is not the empty morf,
# it means that the lex is not a word itself, and that it can be extended to a word
# in exactly one way.
# Hence the lex is not a real lex, but just a fragment of a lex.
# We give it quality 0.
# If the lex can be extended in very many ways, it is also a sure sign that the lex is not
# a real lex, but just a fragment of a lex.
# We give it quality 0.
# With a positive, moderate amount of possible completions, we give it a positive quality.
#
# We count only the longest extensions, i.e. if one suffix is a prefix of another suffix, we only count the latter suffix.
# And if one prefix is a suffix of another prefix, we only count the former prefix.
#
# We assign a quality as follows:
#
# * if there are no morfs that complete the lex: 0
# * else if the empty morf completes the lex: 1
# * else if n is the number of morfs that completes the lex:
# * if n = 1: 0
# * else if n = 2: 1
# * else if n < 5: 2
# * else if n < 8: 1
# * else 0
#
# We do this separately for both sides and take the maximum of them.
#
# Then the quality of a morf.
#
# Morfs at the end are separately calculated as morfs at the beginning.
#
# We look at each lex-candidate that can complete it.
# For each lex-candidate we compute the round-1-quality.
# Then we take the average of all those qualities.
# That is the quality of the morf.
#
# ## Round n + 1
#
# We start with the quality of a lex. If the lex has round-n quality 0 it also has round n+1 quality zero.
# Otherwise: it is the average of the round-n quality of the morphs that combine with it,
#
# Then the quality of a morf.
# We take the average of all the round-n+1 qualities of the lex that combine with it.
#
#
W.clearQuality()
# # Quality of a word split
#
# The computation of the quality measures is potentially costly.
# We do not compute them in advance for the whole corpus, but on a just-in-time basis.
# Whenever we have computed a quality measure, we memoize it, so that it can be retrieved rather than
# computed when it is needed again.
#
# This pays off, because the quality of an item is typically computed by applying a formula to
# the qualities of a (big) number of other items.
#
# We experiment with notions of quality.
#
# Suppose we have `word = morf + main`.
#
# A simple notion of quality is the amount of possible *mains* that are possible after the *morf*.
#
# But this will give an enormous quality to one-letter morfs. In general, there is a big number of words starting with the
# same letter.
# A bit more sophisticated is to weigh all *mains*, by counting how many other *morfs* can precede it.
# If `mainOther` is another main that can follow `morf`, but `mainOther` cannot be preceded by any other *morf* than `morf`,
# it does not contribute to the quality of `morf` as morpheme.
# But if it can be preceded by multiple *morfs*, it contributes to the fact that `morf` is a morpheme.
#
# So the quality of a *morf* is the weighted sum of all its (non-empty) *mains* with which it forms a word,
# where each main is weighted by the number of other *morfs* with which it can form a word, *including* the empty *morf*.
#
# The quality of a *main* is the non-weighted sum of all its other (possibly empty) *morfs* with which it forms a word.
#
# **Note the asymmetry:**
#
# In the quality of a *morph*, we weigh its *mains* in such a way that a *main* that is a word on its own does contribute.
#
# In the quality of a *main*, we weigh its *morfs* in such a way that a *morf* that is a word on its own does *not* contribute.
#
# In the quality of a *morf*, we do weigh the possible mains.
#
# In the quality of a *main*, we just take the number of other morfs, not the sum of their qualities.
# If a *main* has exactly one morf, then the result is 0; such mains are not good mains.
#
# How do we weigh an empty morf exactly? If we do not say anything, it will be the sum of all words in the corpus.
# Instead, the empty morf has weight 1.
#
# **Recursive closure?**
#
# Note that we could also have defined the notion of quality in a recursive way:
#
# The quality of a *morf* is the sum of the qualities of its *mains*.
#
# The quality of a *main* is the sum of the qualities of its *morfs*.
#
# The question is then: how does this recursion stop?
#
# We can approach this definition by the one we have given, by computing the qualities in rounds, where in each round we
# compute new qualities based on the old qualities.
# And then we stop when the qualities do not change anymore.
#
# But at present I have no idea whether this will ever stop.
#
# So we stick with just one iteration.
#
# # Building intuition
#
# We show first these quality measures in some examples, in order to get intuition.
# When we have seen enough examples, we can proceed to define a decision procedure
# based on the quality measures.
# Some examples.
# Historical Dutch word forms used to probe the morf/lex split heuristics.
EXAMPLES = """
aanneemt
neemt
aengeschreeven
H
hunner
monterende
gelegentheyt
ongelegentheyt
heeft
gesanten
ootmoedige
""".strip().split()
# NOTE(review): `P` is never defined in this notebook; only `W = WordSplit(4)`
# exists above. Presumably every `P.` below should be `W.` — confirm before running.
P.COMPOSE[True]["eemt"]
P.clearQuality()
len(P.getFills("aann", "eemt", False, False))
P.getQuality("vr", "eemt", False, False)
# Examine the examples:
len(P.COMPOSE[False]["n"])
len(P.getFills("n", None, True, False))
P.clearQuality()
P.showExamples(EXAMPLES)
| programs/legacy/morph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: morpheus
# language: python
# name: morpheus
# ---
# # Recursion in Morpheus
# # Preliminaries
# ## Imports
# +
import os
import sys
import numpy as np
import pprint
from os.path import dirname
from networkx.drawing.nx_pydot import to_pydot
# Import morpheus
note_dir = dirname(os.getcwd())
root_dir = dirname(note_dir)
src_dir = os.path.join(root_dir, "src")
sys.path.append(src_dir)
import morpheus
from morpheus import Morpheus
from morpheus.tests import (default_dataset,
default_m_list_for_mercs,
random_m_list_for_mercs)
from morpheus.algo.inference import get_predict
# Visuals
from morpheus.graph import to_dot
from morpheus.visuals import show_diagram
from morpheus.graph.network import *
from morpheus.composition import o as ω
from morpheus.composition import x as χ
# Ipython things
from IPython.display import Image, display
from IPython.core.display import HTML
pp = pprint.PrettyPrinter(indent=4)
# -
# ## Methods
# ## General Variables
# # Prepare sandbox
#
# Train a morpheus on a dataset and extract an interesting composite model.
# +
# Train a single Morpheus model on the default dataset.
m = Morpheus(random_state=802,
             prediction_algorithm='it',
             clf_criterion='entropy',
             rgr_criterion='mae',
             selection_algorithm='random',
             nb_targets=1,
             nb_iterations=10,
             fraction_missing=0.4,
             max_depth=15,
             regression_max_depth=25)
df_train, df_test = default_dataset()
test = df_test.copy().values
# Query code over the 8 columns; values 0 / -1 / 1 flag column roles
# (presumably descriptive / missing / target — confirm with morpheus docs).
q_code = np.array([0,0,-1,-1,-1,-1,0,1])
m.fit(df_train.values)
# -
Y = m.predict(test, q_code=q_code)
m.show_q_diagram(fi=True)
q_compose = m.q_compose
q_compose.desc_ids, q_compose.targ_ids
# Register the learned composite model back into Morpheus as an extra
# component model (graph + model lists), under idx=99.
cg = model_to_graph(q_compose, idx=99)
m.g_list.append(cg)
m.m_list.append(q_compose)
show_diagram(m.g_list[-1], fi=True)
# Re-run the same query; Y - Y2 should sum to 0 if behavior is unchanged.
Y2 = m.predict(df_test.values, q_code=q_code)
np.sum(Y-Y2)
m.show_q_diagram(fi=True)
# + active=""
# %%timeit
# Y2 = m.predict(df_test.values, q_code=q_code)
# print(np.sum(Y-Y2))
# + active=""
# %%timeit
# Y = m.predict(test, q_code=q_code)
# -
| note/dev/190521 - recursion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
import pandas as pd
# +
# Fetch the 20-newsgroups corpus, stripped of metadata that leaks the label.
newsgroups = fetch_20newsgroups(remove=('headers', 'footers', 'quotes'))
# +
# Map integer class ids to newsgroup names and assemble a (text, newsgroup) frame.
target_dict = dict(enumerate(newsgroups['target_names']))
df = pd.DataFrame(zip(newsgroups['data'], newsgroups['target']),
                  columns = ['text','newsgroup'])
df['newsgroup'].replace(target_dict, inplace = True)
df.sample(5)
# +
# https://stackoverflow.com/questions/52986253/scoring-strategy-of-sklearn-model-selection-gridsearchcv-for-latentdirichletallo
class LDAp(LatentDirichletAllocation):
    """LDA variant whose ``score`` returns *negative* perplexity.

    ``GridSearchCV`` maximizes the estimator score, while perplexity is
    better when lower; negating it makes model selection prefer the
    lowest-perplexity fit.
    """
    def score(self, X, y=None):
        # You can change the options passed to perplexity here
        perplexity = super(LDAp, self).perplexity(X, sub_sampling=False)
        # Lower perplexity is better, so flip the sign for maximization.
        return -perplexity
# -
# Grid-search topic count and vocabulary size, scoring by negative perplexity.
vectorizer = CountVectorizer(stop_words='english')
tm = LDAp(verbose=1, evaluate_every = 5, perp_tol = 1, max_iter = 100, )
pipe = Pipeline(steps=[('vectorizer', vectorizer),
                       ('tm', tm)])
param_grid = {'tm__n_components': [15,16,17],
              'vectorizer__max_features' : [500, 750, 1000]}
search = GridSearchCV(pipe, param_grid, n_jobs=-1, cv=3, verbose=1)
search.fit(df['text'])
gs_df = pd.DataFrame(search.cv_results_)
gs_df.sort_values(by='rank_test_score')
# +
# %matplotlib inline
# NOTE(review): this rebinds `df` from the newsgroup text frame to the CV
# results table; the cell below calls cvectorizer.fit(df['text']), which will
# fail (no 'text' column) once this cell has run — confirm intended cell order.
df = gs_df.copy()
df.set_index('param_tm__n_components', inplace=True)
df.groupby('param_vectorizer__max_features')['mean_test_score'].plot(legend=True);
# -
# NOTE(review): x='' / y='' are placeholder column names — this line raises as written.
gs_df.set_index('param_tm__n_components').plot.line(x='', y='')
# Count how many terms survive a 0.5% minimum document frequency.
cvectorizer = CountVectorizer(stop_words='english', min_df = .005 )
cvectorizer.fit(df['text'])
len(cvectorizer.get_feature_names())
# +
# CountVectorizer?
# -
| Notebooks/Uncategorized/LDA Topic Perplixity II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seaborn Exercises - Solutions
#
# Time to practice your new seaborn skills! Try to recreate the plots below (don't worry about color schemes, just the plot itself).
# ## The Data
#
# We will be working with a famous titanic data set for these exercises. Later on in the Machine Learning section of the course, we will revisit this data, and use it to predict survival rates of passengers. For now, we'll just focus on the visualization of the data with seaborn:
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set_style('whitegrid')
# Seaborn ships the titanic dataset as a built-in example frame.
titanic = sns.load_dataset('titanic')
titanic.head()
# # Exercises
#
# ** Recreate the plots below using the titanic dataframe. There are very few hints since most of the plots can be done with just one or two lines of code and a hint would basically give away the solution. Keep careful attention to the x and y labels for hints.**
#
# ** *Note! In order to not lose the plot image, make sure you don't code in the cell that is directly above the plot, there is an extra cell above that one which won't overwrite that plot!* **
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: joint scatter of fare vs. age
sns.jointplot(x='fare',y='age',data=titanic)
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: fare histogram.
# NOTE(review): distplot is deprecated in seaborn >= 0.11 (use histplot/displot).
sns.distplot(titanic['fare'],bins=30,kde=False,color='red')
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: age distribution per passenger class
sns.boxplot(x='class',y='age',data=titanic,palette='rainbow')
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: per-point age distribution per class
sns.swarmplot(x='class',y='age',data=titanic,palette='Set2')
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: passenger counts by sex
sns.countplot(x='sex',data=titanic)
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: correlation heatmap.
# NOTE(review): pandas >= 2.0 needs DataFrame.corr(numeric_only=True) on
# mixed-type frames like titanic — confirm the pandas version in use.
sns.heatmap(titanic.corr(),cmap='coolwarm')
plt.title('titanic.corr()')
# +
# CODE HERE
# REPLICATE EXERCISE PLOT IMAGE BELOW
# BE CAREFUL NOT TO OVERWRITE CELL BELOW
# THAT WOULD REMOVE THE EXERCISE PLOT IMAGE!
# -
# Solution: age histograms faceted by sex
g = sns.FacetGrid(data=titanic,col='sex')
g.map(plt.hist,'age')
# # Great Job!
#
# ### That is it for now! We'll see a lot more of seaborn practice problems in the machine learning section!
| 06-Data-Visualization-with-Seaborn/.ipynb_checkpoints/08-Seaborn Exercises - Solutions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import division
import os
import urllib, cStringIO
import pymongo as pm
import matplotlib
from matplotlib import pylab, mlab, pyplot
# %matplotlib inline
from IPython.core.pylabtools import figsize, getfigs
plt = pyplot
import seaborn as sns
sns.set_context('poster')
sns.set_style('white')
import numpy as np
import scipy.stats as stats
import pandas as pd
import json
import re
from PIL import Image
import base64
import sys
from svgpathtools import parse_path
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# -
# ## setup
# +
# directory & file hierarchy
proj_dir = os.path.abspath('../../..')
analysis_dir = os.getcwd()
results_dir = os.path.join(proj_dir,'results')
plot_dir = os.path.join(results_dir,'plots')
csv_dir = os.path.join(results_dir,'csv')
exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments'))
sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches'))
## add helpers to python path
if os.path.join(proj_dir,'analysis','python') not in sys.path:
    sys.path.append(os.path.join(proj_dir,'analysis','python'))
if not os.path.exists(results_dir):
    os.makedirs(results_dir)
if not os.path.exists(plot_dir):
    os.makedirs(plot_dir)
if not os.path.exists(csv_dir):
    os.makedirs(csv_dir)
# Assign variables within imported analysis helpers
import analysis_helpers as h
if sys.version_info[0]>=3:
    from importlib import reload
    reload(h)
# +
# set vars
# NOTE(review): database password is read from a plaintext auth.txt in the
# working directory; keep that file out of version control.
auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user
pswd = auth.values[0][0]
user = 'sketchloop'
host = 'rxdhawkins.me' ## cocolab ip address
# have to fix this to be able to analyze from local
import pymongo as pm
conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1')
db = conn['3dObjects']
coll = db['graphical_conventions']
# which iteration name should we use?
iterationName = 'run2_chairs1k_size6'
# -
## get total number of stroke and clickedObj events in the collection as a whole
S = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'stroke'}]}).sort('time')
C = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'clickedObj'}]}).sort('time')
print str(S.count()) + ' stroke records in the database.'
print str(C.count()) + ' clickedObj records in the database.'
# ## generate group dataframe
## list of researcher mturk worker ID's to ignore
jefan = ['A1MMCS8S8CTWKU','A1MMCS8S8CTWKV','A1MMCS8S8CTWKS']
hawkrobe = ['A1BOIDKD33QSDK']
megsano = ['A1DVQQLVZR7W6I']
researchers = jefan + hawkrobe + megsano
# +
reload(h)
## get list of all candidate games
games = coll.distinct('gameid')
## get list of complete and valid games
complete_games = h.get_complete_and_valid_games(games,coll,iterationName,
                                                researchers=researchers,
                                                tolerate_undefined_worker=False,
                                                verbose=False)
# -
print complete_games
# Work with a single game while developing the preprocessing below.
short_list = complete_games[2:3]
# +
reload(h)
# preprocessing: flatten mongo stroke/clickedObj records into parallel
# per-trial lists; these are assembled into a dataframe in a later cell,
# so every list must receive exactly one append per clickedObj event.
TrialNum = []
GameID = []
Condition = []
Target = []
Category = []
Distractor1 = []
Distractor2 = []
Distractor3 = []
Outcome = []
Response = []
Repetition = []
Phase = []
numStrokes = []
drawDuration = [] # in seconds
svgStringLength = [] # sum of svg string for whole sketch
svgStringLengthPerStroke = [] # svg string length per stroke
numCurvesPerSketch = [] # number of curve segments per sketch
numCurvesPerStroke = [] # mean number of curve segments per stroke
svgStringStd = [] # std of svg string length across strokes for this sketch
Outcome = [] #accuracy (True or False)  # NOTE: re-initializes the Outcome list declared above
png=[] # the sketch
timedOut=[] # True if sketchers didn't draw anything, False o.w.
meanPixelIntensity=[]
for i,g in enumerate(short_list):
    print 'Analyzing game {} | {} of {}: '.format(g, i, len(complete_games))
    # collection of all clickedObj events in a particular game
    X = coll.find({ '$and': [{'gameid': g}, {'eventType': 'clickedObj'}]}).sort('time')
    print X.count()
    # collection of all stroke events in a particular game
    Y = coll.find({ '$and': [{'gameid': g}, {'eventType': 'stroke'}]}).sort('time')
    for t in X: # for each clickedObj event
        targetname = t['intendedName']
        category = h.OBJECT_TO_CATEGORY_run2[targetname]
        Phase.append(t['phase'])
        Repetition.append(t['repetition'])
        distractors = [t['object2Name'],t['object3Name'],t['object4Name']]
        full_list = [t['intendedName'],t['object2Name'],t['object3Name'],t['object4Name']]
        png.append(t['pngString'])
        #for each stroke event with same trial number as this particular clickedObj event
        y = coll.find({ '$and': [{'gameid': g}, {'eventType': 'stroke'}, {'trialNum': t['trialNum']}]}).sort('time')
        # have to account for cases in which sketchers do not draw anything
        if (y.count() == 0):
            numStrokes.append(float('NaN'))
            drawDuration.append(float('NaN'))
            svgStringLength.append(float('NaN'))
            svgStringLengthPerStroke.append(float('NaN'))
            numCurvesPerSketch.append(float('NaN'))
            numCurvesPerStroke.append(float('NaN'))
            svgStringStd.append(float('NaN'))
            meanPixelIntensity.append('NaN')
            timedOut.append(True)
        else:
            # calculate numStrokes
            lastStrokeNum = float(y[y.count() - 1]['currStrokeNum']) # get currStrokeNum at last stroke
            # numStrokes.append(lastStrokeNum)
            ns = y.count()
            assert lastStrokeNum == ns
            numStrokes.append(ns)
            # calculate drawDuration
            startStrokeTime = float(y[0]['startStrokeTime'])
            endStrokeTime = float(y[y.count() - 1]['endStrokeTime']) ## took out negative 1
            duration = (endStrokeTime - startStrokeTime) / 1000
            drawDuration.append(duration)
            # calculate other measures that have to do with sketch
            ls = [len(_y['svgData']) for _y in y]
            svgStringLength.append(sum(ls))
            # re-query: the cursor above was consumed by the comprehension
            y = coll.find({ '$and': [{'gameid': g}, {'eventType': 'stroke'}, {'trialNum': t['trialNum']}]}).sort('time')
            num_curves = [len([m.start() for m in re.finditer('c',str(_y['svgData']))]) for _y in y] ## gotcha: need to call string on _y['svgData'], o/w its unicode and re cant do anything with it
            numCurvesPerSketch.append(sum(num_curves))
            numCurvesPerStroke.append(sum(num_curves)/lastStrokeNum)
            svgStringLengthPerStroke.append(sum(ls)/lastStrokeNum)
            svgStringStd.append(np.std(ls))
            timedOut.append(False)
            ## calculate pixel intensity (amount of ink spilled)
            imsize = 100
            numpix = imsize**2
            thresh = 250
            imgData = t['pngString']
            filestr = base64.b64decode(imgData)
            fname = os.path.join('sketch.png')
            with open(fname, "wb") as fh:
                fh.write(imgData.decode('base64'))
            im = Image.open(fname).resize((imsize,imsize))
            _im = np.array(im)
            # fraction of pixels whose alpha channel exceeds thresh
            meanPixelIntensity.append(len(np.where(_im[:,:,3].flatten()>thresh)[0])/numpix)
        ### aggregate game metadata
        TrialNum.append(t['trialNum'])
        GameID.append(t['gameid'])
        Target.append(targetname)
        Category.append(category)
        Condition.append(t['condition'])
        Response.append(t['clickedName'])
        Outcome.append(t['correct'])
        Distractor1.append(distractors[0])
        Distractor2.append(distractors[1])
        Distractor3.append(distractors[2])
# -
# ##### svg rendering
import svg_rendering_helpers as srh
reload(srh)
g = '3511-727c76f4-f755-48e9-8fc8-b61efbe785c9'
## example sketch from trial one of some game
stroke_recs = coll.find({ '$and': [{'gameid': g}, {'eventType': 'stroke'}, {'trialNum':25}]}).sort('time')
stroke_recs.count()
svg_list = srh.make_svg_list(stroke_recs)
from matplotlib.path import Path
import matplotlib.patches as patches
svg_list
# NOTE(review): `parsed` is referenced here before it is first assigned
# (inside the loop below); this cell errors if the notebook is run top-to-bottom.
type(parsed[0])
# Flatten each stroke's SVG path into a list of (x, y) control points.
curves = []
Verts = []
Codes = []
for stroke_ind,stroke in enumerate(svg_list):
    x = []
    y = []
    parsed = parse_path(stroke)
    for i,p in enumerate(parsed):
        # each cubic Bezier segment contributes four points:
        # start, control1, control2, end
        x.append(p.start.real)
        y.append(p.start.imag)
        x.append(p.control1.real)
        y.append(p.control1.imag)
        x.append(p.control2.real)
        y.append(p.control2.imag)
        x.append(p.end.real)
        y.append(p.end.imag)
    assert len(zip(x,y))%4==0
    curves.append(zip(x,y))
parsed
len(curves)
parsed
svg_list[stroke_ind]
curves[0]
def polycurve_pathmaker(curves):
    """Flatten per-stroke control-point lists into matplotlib Path data.

    Parameters
    ----------
    curves : list of strokes; each stroke is a flat list of (x, y) points in
        groups of four (start, control1, control2, end) per cubic segment.

    Returns
    -------
    (verts, codes, stroke_ind) :
        verts      -- list of (x, y) vertices
        codes      -- matching Path codes (MOVETO at each segment start,
                      CURVE4 for the remaining three points of the segment)
        stroke_ind -- index of the stroke each vertex belongs to
    """
    x = []
    y = []
    codes = []
    stroke_ind = []
    for i,l in enumerate(curves):
        for _i,_l in enumerate(l):
            x.append(_l[0])
            y.append(_l[1])
            stroke_ind.append(i)
            if _i%4==0:
                # first point of each 4-point cubic segment starts a new subpath
                codes.append(Path.MOVETO)
            else:
                codes.append(Path.CURVE4) # remaining control and endpoints for each spline
    # BUGFIX (py3 compatibility): zip() is a lazy iterator under Python 3,
    # which breaks the downstream np.array conversion and boolean indexing
    # (verts[stroke_ind==0]); materialize it as a list. No-op under Python 2.
    verts = list(zip(x,y))
    return verts, codes, stroke_ind
verts, codes, stroke_ind = polycurve_pathmaker(curves)
# convert to numpy arrays so boolean masks (stroke_ind<=i) can select vertices
verts, codes, stroke_ind = map(np.array,[verts, codes, stroke_ind])
assert len(stroke_ind)==len(verts)
verts[stroke_ind==0]
# +
imsize=6
canvas_size=600
line_width=5
### render sketch so far
# one figure per stroke, each showing all strokes drawn up to that point
unique_stroke_inds = np.unique(stroke_ind)
for i,ind in enumerate(unique_stroke_inds):
    fig = plt.figure(figsize=(imsize,imsize))
    ax = plt.subplot(111)
    ax.axis('off')
    ax.set_xlim(0,canvas_size)
    ax.set_ylim(0,canvas_size)
    # cumulative selection: every vertex belonging to strokes 0..i
    these_verts = verts[stroke_ind<=i]
    these_codes = codes[stroke_ind<=i]
    path = Path(these_verts, these_codes)
    patch = patches.PathPatch(path, facecolor='none', edgecolor='black', lw=line_width)
    ax.add_patch(patch)
    plt.gca().invert_yaxis() # y values increase as you go down in image
    plt.show()
# -
# +
# Walk the first complete game and extract verts/codes for every trial's sketch
# (the actual PNG rendering call is left commented out below).
for i,g in enumerate(complete_games[:1]):
    image_recs = coll.find({'$and': [{'gameid':g}, {'eventType':'clickedObj'}]}).sort('time')
    num_drawings = image_recs.count()
    if num_drawings > 3:
        print 'Great! Number of drawings made by this person: {}'.format(image_recs.count())
        ### now try looping through all images made during this session and render out
        try:
            # re-query: the cursor above was consumed by count()/inspection
            image_recs = coll.find({'$and': [{'gameid':g}, {'eventType':'clickedObj'}]}).sort('time')
            for imrec in image_recs:
                ## now loop through all the strokes comprising this sketch
                stroke_recs = coll.find({'$and': [
                    {'gameid':g},
                    {'eventType':'stroke'},
                    {'trialNum': imrec['trialNum']}]}).sort('time')
                ## check to make sure that there is at least one stroke!
                assert stroke_recs.count()>0
                ## some handy metadata for writing out the sketch PNG filename
                trial_num = stroke_recs[0]['trialNum']
                category = stroke_recs[0]['category']
                game_id = stroke_recs[0]['gameid']
                ## now make an svg list!
                svg_list = srh.make_svg_list(stroke_recs)
                ## now get me some verts and codes!
                Verts, Codes = srh.get_verts_and_codes(svg_list)
                # ## now render out your cumulative sketches and save out as pngs!
                # srh.render_and_save(Verts,
                #                     Codes,
                #                     line_width=5,
                #                     imsize=8,
                #                     canvas_size=600,
                #                     game_id=game_id,
                #                     trial_num=trial_num,
                #                     category=category)
        except Exception as e:
            print 'Oops, something went wrong! Here is the error:'
            print e
            pass
# -
# +
## now actually make dataframe
# (Python 2: map() returns a list here; under Python 3 these would need list() wrappers.)
GameID,TrialNum,Condition, Target, Category, Repetition, Phase, drawDuration, Outcome, Response, numStrokes, meanPixelIntensity, svgStringLength, svgStringLengthPerStroke, svgStringStd, numCurvesPerSketch, numCurvesPerStroke, timedOut, png = map(np.array, \
[GameID,TrialNum,Condition, Target, Category, Repetition, Phase, drawDuration, Outcome, Response, numStrokes, meanPixelIntensity,svgStringLength, svgStringLengthPerStroke, svgStringStd, numCurvesPerSketch, numCurvesPerStroke, timedOut,png])
Repetition = map(int,Repetition)
# rows are variables, so transpose below to get one row per trial
D = pd.DataFrame([GameID,TrialNum,Condition, Target, Category, Repetition, Phase, drawDuration, Outcome, Response, numStrokes, meanPixelIntensity,svgStringLength, svgStringLengthPerStroke, svgStringStd, numCurvesPerSketch, numCurvesPerStroke, timedOut, png],
                 index = ['gameID','trialNum','condition', 'target', 'category', 'repetition', 'phase', 'drawDuration', 'outcome', 'response', 'numStrokes', 'meanPixelIntensity', 'svgStringLength', 'svgStringLengthPerStroke', 'svgStringStd', 'numCurvesPerSketch', 'numCurvesPerStroke', 'timedOut', 'png'])
D = D.transpose()
## save out dataframe to be able to load in and analyze later w/o doing the above mongo querying ...
D.to_csv(os.path.join(results_dir,'graphical_conventions_group_data_{}.csv'.format(iterationName)))
# Just look at one game
#D = D[D['gameID'] == '3511-727c76f4-f755-48e9-8fc8-b61efbe785c9']
# Just look at repeated trials and sort them by target and reps
# _D = D[(D.condition=='repeated')]
# _D = _D.sort_values(by=['target','repetition'])
D
# -
# ##### seeing how previous trial accuracy affects numStrokes in the current trial
D.shape[0]
falseNumStrokes = []
trueNumStrokes = []
for i, d in D.iterrows():
if (i != D.shape[0] - 1):
if d['outcome'] == False:
nextNumStrokes = D['outcome'][i+1]
falseNumStrokes.append(nextNumStrokes)
else:
nextNumStrokes = D['outcome'][i+1]
trueNumStrokes.append(nextNumStrokes)
meanNumStrokesGivenFalse = sum(falseNumStrokes)/float(len(falseNumStrokes))
meanNumStrokesGivenTrue = sum(trueNumStrokes)/float(len(trueNumStrokes))
print meanNumStrokesGivenFalse, meanNumStrokesGivenTrue
## adding previous outcomes to each trial
# First row has no predecessor, so it gets the placeholder 'NaN' string.
previousOutcome = []
previousOutcome.append('NaN')
for i, d in D.iterrows():
    if (i != D.shape[0] - 1):
        previousOutcome.append(d['outcome'])
D['previousOutcome'] = pd.Series(previousOutcome)
## conditioning on previous trial outcome being true
D_true = D[D['previousOutcome'] == True]
reload(h)
dv = 'numStrokes'
# Time series of the chosen measure, restricted to trials after a correct trial.
h.ts_repeated_control(D_true,
                      var=dv,
                      numReps = 6,
                      limit=10, # recommended limits: 'numStrokes' : 6, 'drawDuration' : 10, 'numCurvesPerSketch' : 20, 'numCurvesPerStroke' : 6
                      save_plot=False,
                      plot_dir=plot_dir)
# ### confusion matrix for each category
reload(h)
h.get_confusion_matrix(D, 'armchair', 6)
h.get_confusion_matrix(D, 'waiting', 6)
h.get_confusion_matrix(D, 'dining', 6)
h.get_confusion_matrix(D, 'deck', 6)
# +
# Inline version of the confusion-matrix computation for one category,
# restricted to late (repetition >= 5) repeated-condition trials.
category = 'waiting'
set_size = 6
_D = D[D['condition'] == 'repeated']
_D = _D[_D['repetition'] >= 5]
target_list = _D['target'].tolist()
obj_list_ = []
obj_list = []
objlist = h.CATEGORY_TO_OBJECT_run2[category]
# keep only candidate objects that actually appeared as targets
for obj in objlist[:set_size*2]:
    obj_list_.append(obj)
for i in obj_list_:
    if i in target_list:
        obj_list.append(i)
## initialize confusion matrix
confusion = np.zeros((len(obj_list), len(obj_list)))
## generate confusion matrix by incrementing each cell
for i, d in _D.iterrows():
    if d['category'] == category:
        targ_ind = obj_list.index(d['target'])
        chosen_ind = obj_list.index(d['response'])
        confusion[targ_ind, chosen_ind] += 1
## normalize confusion matrix
# row-normalize so each row sums to 1 (choice distribution per target)
normed = np.zeros((len(obj_list), len(obj_list)))
for i in np.arange(len(confusion)):
    normed[i,:] = confusion[i,:]/np.sum(confusion[i,:])
## plot confusion matrix
from matplotlib import cm
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(111)
cax = ax.matshow(normed,vmin=0,vmax=1,cmap=cm.viridis)
plt.xticks(range(len(normed)), obj_list, fontsize=12,rotation='vertical')
plt.yticks(range(len(normed)), obj_list, fontsize=12)
plt.colorbar(cax,shrink=0.8)
plt.tight_layout()
#plt.savefig('./plots/confusion_matrix_all.pdf')
#plt.close(fig)
# -
# ## visualize behavioral measures over time
# +
# reload analysis_helpers just to be on the safe side
reload(h)
## set global seaborn style params here (affects all plots below)
sns.set_context('poster')
sns.set_style('white')
# -
# ##### Accuracy over time
## overall accuracy across all trials.
## Series.mean() replaces the old running-total loop, which divided an int by
## an int and therefore floor-divided to 0 under Python 2.
meanAccuracy = D['outcome'].mean()
meanAccuracy
## mean accuracy for each of the six repetitions (0..5)
mean_accuracy_list = [D.loc[D['repetition'] == i, 'outcome'].mean() for i in range(0, 6)]
D_mean = pd.DataFrame()
D_mean['meanAccuracy'] = mean_accuracy_list
D_mean['repetition'] = range(0, 6)
D_mean
## linear trend of accuracy over repetitions (no confidence band)
plt.figure(figsize=(6,6))
sns.regplot(data=D_mean,
            x='repetition',
            y='meanAccuracy',
            ci = None)
plt.ylim([0.5,1.0])
# ### plot time series during repetition phase
# ##### individual measures across reps (repeated trials only)
# What is the measure that you want to visualize across reps?
## dependent variable consumed by the plotting cells below
dv = 'numCurvesPerSketch'
# +
# D0 = h.ts_repeated(D,
# var=dv,
# limit=20, # recommended limits: 'numStrokes' : 6, 'drawDuration' : 10, 'numCurvesPerSketch' : 20, 'numCurvesPerStroke' : 6
# save_plot=False,
# plot_dir=plot_dir)
# -
# ##### individual measures across reps (repeated + control)
reload(h)
## repeated vs. control time series for the selected measure dv
h.ts_repeated_control(D,
                      var=dv,
                      numReps = 6,
                      limit=40, # recommended limits: 'numStrokes' : 6, 'drawDuration' : 10, 'numCurvesPerSketch' : 20, 'numCurvesPerStroke' : 6
                      save_plot=False,
                      plot_dir=plot_dir)
# ##### 4 measures (2x2) across reps (repeated + control)
# What are the four measures that you want to visualize across reps?
## the four measures shown in the 2x2 grid below
var0='numStrokes'
var1='drawDuration'
var2='numCurvesPerSketch'
var3='numCurvesPerStroke'
# +
def convert_numeric(X, column_id):
    """Cast one DataFrame column to a numeric dtype so it can be aggregated.

    Mutates *X* in place and also returns it, which allows call chaining.
    """
    coerced = pd.to_numeric(X[column_id])
    X[column_id] = coerced
    return X
### Subhelper 1
def collapse_within_repetition(D, var, condition, numReps):
    """Average *var* within each (gameID, repetition, condition, category) cell
    for trials of the given *condition*.

    For the 'control' condition the repetition label 1 is remapped to
    numReps-1 so the two control timepoints align with the repeated axis.
    NOTE(review): .replace runs over the whole aggregated frame, so a mean of
    *var* exactly equal to 1 would be remapped too.
    """
    cells = (D[D['condition'] == condition]
             .groupby(['gameID', 'repetition', 'condition', 'category'])[var]
             .mean()
             .reset_index())
    if condition != 'repeated':
        cells = cells.replace(1, numReps - 1)
    return cells
## coerce all four measure columns to numeric dtypes before aggregating
D = convert_numeric(convert_numeric(convert_numeric(convert_numeric(D,var0),var1),var2),var3)
## collapsing across objects within repetition (within pair)
## and only aggregating repeated trials into this sub-dataframe
D0 = collapse_within_repetition(D, var0, 'repeated', 6)
D1 = collapse_within_repetition(D, var1, 'repeated', 6)
D2 = collapse_within_repetition(D, var2, 'repeated', 6)
D3 = collapse_within_repetition(D, var3, 'repeated', 6)
#fig = plt.figure(figsize=(12,12))
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(10,10))
## make sure that the number of timepoints now per gameID is equal to the number of repetitions in the game
num_reps = len(np.unique(D.repetition.values))
assert D0.groupby('gameID')['gameID'].count()[0]==num_reps
## one line plot per measure, colored by category; legend only on the last panel
sns.lineplot(data=D0,
             x='repetition',
             hue='category',
             #unit='gameID',
             y=var0,
             ax=ax0,
             legend = False)
sns.lineplot(data=D1,
             x='repetition',
             hue='category',
             #unit='gameID',
             y=var1,
             ax=ax1,
             legend = False)
sns.lineplot(data=D2,
             x='repetition',
             hue='category',
             #unit='gameID',
             y=var2,
             ax=ax2,
             legend = False)
sns.lineplot(data=D3,
             x='repetition',
             hue='category',
             #unit='gameID',
             y=var3,
             ax=ax3)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.xticks(np.arange(np.max(D0['repetition'])+1))
ax3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# -
reload(h)
## same 2x2 grid, via the analysis_helpers implementation
h.ts_grid_repeated_control(D,
                           var0, var1, var2, var3,
                           numReps=6,
                           save_plot=False,
                           plot_dir=plot_dir)
# ### compare conditions in pre and post phases
## drawDuration by condition in the pre vs. post phases
D1 = h.compare_conditions_prepost(D,
                                  var='drawDuration',
                                  limit=20,
                                  save_plot=False,
                                  plot_dir=plot_dir)
# ##### compare accuracy in pre and post phases (repeated vs control conditions)
## convert boolean trial outcomes to 0/1 integers.
## Vectorized replacement for the old row-wise loop that used
## DataFrame.set_value (deprecated in pandas 0.21, removed in 1.0).
## `(D['outcome'] == True)` reproduces the original mapping exactly: only
## values equal to True become 1, everything else becomes 0.
D['outcome'] = (D['outcome'] == True).astype(int)
# +
## accuracy by phase x condition, averaged within game
_D1 = D[D['phase']!='repeated'] ## exclude "repetition-phase" trials
D1 = _D1.groupby(['gameID','phase','condition'])['outcome'].mean()
D1 = D1.reset_index()
plt.figure(figsize=(6,6))
sns.pointplot(data=D1,
              x='phase',
              y='outcome',
              hue='condition',
              order=['pre','post'])
plt.ylim([0,1.1])
#plt.savefig(os.path.join(plot_dir,'timeseries_across_reps_{}.pdf'.format(var)))
#return D1
# -
# ### visualize how sketches are changing across repetitions
# +
# print_repeated_sketches(D,
# complete_games,
# sketch_dir)
# +
# print_control_sketches(D,
# complete_games,
# sketch_dir)
# -
# ##### printing actual sketches next to repeated sketches
## print each game's repeated sketches (6 reps per target) next to a cropped
## render of the target object, in a 6x7 grid; green border = correct trial,
## red = incorrect.
## NOTE(review): Python 2 only -- `print` statements, str.decode('base64'),
## and subscripting `new_index` (a py2 filter() result, which is a list)
## would all fail on Python 3.
index = list(range(1, 43))
new_index = filter(lambda x: x%7!=0, index)  # grid cells, skipping every 7th column
for g in complete_games:
    print 'Printing out sketches from game: ' + g
    trial_types = ['repeated']
    for tt in trial_types:
        _D = D[(D.condition=='repeated') & (D.gameID==g)]
        all_targs = np.unique(_D.target.values) ## use this later to name the file
        _D = _D.sort_values(by=['target','repetition'])
        _i = 0
        textsize=12
        fig = plt.figure(figsize=(10,10))
        for i,_d in _D.iterrows():
            true_index = new_index[_i]
            ## once per target row, render the cropped target photo in column 7
            if _i %6 == 0:
                target = _d['target']
                dir_path = 'chairs1k_pilot'
                png_name = target + '.png'
                path = os.path.join(dir_path, png_name)
                im = Image.open(path)
                cropped_im = im.crop((350, 150, 600, 400))
                p = plt.subplot(6,7,true_index+6)
                plt.imshow(cropped_im)
                sns.set_style('white')
                k = p.get_xaxis().set_ticklabels([])
                k = p.get_yaxis().set_ticklabels([])
                k = p.get_xaxis().set_ticks([])
                k = p.get_yaxis().set_ticks([])
            ## decode the base64-encoded sketch PNG to a scratch file
            imgData = _d['png']
            filestr = base64.b64decode(imgData)  # NOTE(review): unused; duplicated by the py2 decode below
            fname = 'sketch.png'
            with open(fname, "wb") as fh:
                fh.write(imgData.decode('base64'))
            textsize = 16
            # first plot the target
            im = Image.open(fname)
            p = plt.subplot(6,7,true_index)
            plt.imshow(im)
            sns.set_style('white')
            k = p.get_xaxis().set_ticklabels([])
            k = p.get_yaxis().set_ticklabels([])
            k = p.get_xaxis().set_ticks([])
            k = p.get_yaxis().set_ticks([])
            ## color the subplot border by trial outcome
            outcome = _d['outcome']
            category = _d['category']
            if outcome == 1:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.4,0.8,0.4))
                    p.spines[s].set_linewidth(4)
            else:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.9,0.2,0.2))
                    p.spines[s].set_linewidth(4)
            if (_i < 6) & (tt in 'repeated'):
                plt.title('rep ' + str(_d['repetition']) ,fontsize=textsize)
            if _i%6==0:
                plt.ylabel(_d['target'] ,fontsize=textsize)
            _i = _i + 1
        filepath = os.path.join(sketch_dir,'repeated','{}_{}.pdf'.format(g,category))
        if not os.path.exists(os.path.join(sketch_dir,'repeated')):
            os.makedirs(os.path.join(sketch_dir,'repeated'))
        plt.tight_layout()
## spot-check: outcomes of post-phase control trials for one specific game
D_ = D[(D.condition=='control') & (D.gameID=='9276-b328e584-c3fb-4a8f-b5a9-1b5f88292993')]
D__ = D_[(D_.phase == 'post')]
D__['outcome']
# ##### printing last sketch of control condition next to repeated sketches
## print each game's repeated sketches next to the LAST (post-phase) control
## sketch of the matched control target; green border = correct, red = wrong.
## NOTE(review): Python 2 only (print statements, str.decode('base64')).
## Indentation here is reconstructed from a flattened source; in particular
## `control_index = control_index + 1` is assumed to advance once per target
## row (inside the `_i%6==0` guard) -- confirm against the original notebook.
for g in complete_games:
    print 'Printing out sketches from game: ' + g
    trial_types = ['repeated']
    for tt in trial_types:
        _D = D[(D.condition=='repeated') & (D.gameID==g)]
        D_ = D[(D.condition=='control') & (D.gameID==g)]
        all_targs = np.unique(_D.target.values) ## use this later to name the file
        _D = _D.sort_values(by=['target','repetition'])
        _i = 0
        control_index = 0
        textsize=12
        fig = plt.figure(figsize=(10,10))
        for i,_d in _D.iterrows():
            true_index = new_index[_i]
            if _i %6 == 0:
                # plot last of control sketch
                target = _d['target']
                D__ = D_[D_.phase == 'post']
                imgData = D__['png'].iloc[control_index]
                filestr = base64.b64decode(imgData)  # NOTE(review): unused
                fname = 'sketch.png'
                with open(fname, "wb") as fh:
                    fh.write(imgData.decode('base64'))
                textsize = 16
                # first plot the target
                im = Image.open(fname)
                p = plt.subplot(6,7,true_index+6)
                plt.imshow(im)
                if (_i < 6):
                    plt.title('control' ,fontsize=textsize)
                sns.set_style('white')
                k = p.get_xaxis().set_ticklabels([])
                k = p.get_yaxis().set_ticklabels([])
                k = p.get_xaxis().set_ticks([])
                k = p.get_yaxis().set_ticks([])
                ## border color encodes the control trial's outcome
                outcome = D__['outcome'].iloc[control_index]
                if outcome == 1:
                    sides = ['bottom','top','right','left']
                    for s in sides:
                        p.spines[s].set_color((0.4,0.8,0.4))
                        p.spines[s].set_linewidth(4)
                else:
                    sides = ['bottom','top','right','left']
                    for s in sides:
                        p.spines[s].set_color((0.9,0.2,0.2))
                        p.spines[s].set_linewidth(4)
            ## every iteration: render this repeated-trial sketch
            imgData = _d['png']
            filestr = base64.b64decode(imgData)  # NOTE(review): unused
            fname = 'sketch.png'
            with open(fname, "wb") as fh:
                fh.write(imgData.decode('base64'))
            textsize = 16
            # first plot the target
            im = Image.open(fname)
            p = plt.subplot(6,7,true_index)
            plt.imshow(im)
            sns.set_style('white')
            k = p.get_xaxis().set_ticklabels([])
            k = p.get_yaxis().set_ticklabels([])
            k = p.get_xaxis().set_ticks([])
            k = p.get_yaxis().set_ticks([])
            outcome = _d['outcome']
            category = _d['category']
            if outcome == 1:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.4,0.8,0.4))
                    p.spines[s].set_linewidth(4)
            else:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.9,0.2,0.2))
                    p.spines[s].set_linewidth(4)
            if (_i < 6) & (tt in 'repeated'):
                plt.title('rep ' + str(_d['repetition']) ,fontsize=textsize)
            if _i%6==0:
                plt.ylabel(_d['target'] ,fontsize=textsize)
                control_index = control_index + 1
            _i = _i + 1
        filepath = os.path.join(sketch_dir,'repeated','{}_{}.pdf'.format(g,category))
        if not os.path.exists(os.path.join(sketch_dir,'repeated')):
            os.makedirs(os.path.join(sketch_dir,'repeated'))
        plt.tight_layout()
# ##### printing control sketches
# +
## print each game's control sketches (2 repetitions per target) in a 6x2 grid;
## green border = correct trial, red = incorrect.  Python 2 only.
_valid_gameids = complete_games
for g in _valid_gameids:
    print 'Printing out sketches from game: ' + g
    trial_types = ['control']
    for tt in trial_types:
        _D = D[(D.condition=='control') & (D.gameID==g)]
        all_targs = np.unique(_D.target.values) ## use this later to name the file
        _D = _D.sort_values(by=['target','repetition'])
        _i = 1
        textsize=12
        fig = plt.figure(figsize=(5,10))
        for i,_d in _D.iterrows():
            ## decode the base64-encoded sketch PNG to a scratch file
            imgData = _d['png']
            filestr = base64.b64decode(imgData)  # NOTE(review): unused
            fname = 'sketch.png'
            with open(fname, "wb") as fh:
                fh.write(imgData.decode('base64'))
            textsize = 16
            # first plot the target
            im = Image.open(fname)
            p = plt.subplot(6,2,_i)
            plt.imshow(im)
            sns.set_style('white')
            k = p.get_xaxis().set_ticklabels([])
            k = p.get_yaxis().set_ticklabels([])
            k = p.get_xaxis().set_ticks([])
            k = p.get_yaxis().set_ticks([])
            ## border color encodes the trial's outcome
            outcome = _d['outcome']
            category = _d['category']
            if outcome == 1:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.4,0.8,0.4))
                    p.spines[s].set_linewidth(4)
            else:
                sides = ['bottom','top','right','left']
                for s in sides:
                    p.spines[s].set_color((0.9,0.2,0.2))
                    p.spines[s].set_linewidth(4)
            if (_i-1 < 2) & (tt in 'control'):
                plt.title('rep ' + str(_d['repetition']) ,fontsize=textsize)
            if (_i-1)%2==0:
                plt.ylabel(_d['target'] ,fontsize=textsize)
            _i = _i + 1
        filepath = os.path.join(sketch_dir,'control','{}_{}.pdf'.format(g,category))
        if not os.path.exists(os.path.join(sketch_dir,'control')):
            os.makedirs(os.path.join(sketch_dir,'control'))
        #plt.savefig(os.path.join(sketch_dir,'control',filepath))
        #plt.close(fig)
# -
# ### WORKING AREA
# +
## collapsing across objects within repetition (within pair)
## NOTE(review): D_repeated / D_control are assumed to be defined by an
## earlier (not shown) cell.  Control repetition labels are rescaled 1 -> 7.
D0_repeated = (D_repeated.groupby(['gameID','repetition','condition'])[var0].mean()).reset_index()
D1_repeated = (D_repeated.groupby(['gameID','repetition','condition'])[var1].mean()).reset_index()
D2_repeated = (D_repeated.groupby(['gameID','repetition','condition'])[var2].mean()).reset_index()
D3_repeated = (D_repeated.groupby(['gameID','repetition','condition'])[var3].mean()).reset_index()
D0_control = ((D_control.groupby(['gameID','repetition','condition'])[var0].mean()).reset_index()).replace(1, 7) # rescale control reps
D1_control = ((D_control.groupby(['gameID','repetition','condition'])[var1].mean()).reset_index()).replace(1, 7) # rescale control reps
D2_control = ((D_control.groupby(['gameID','repetition','condition'])[var2].mean()).reset_index()).replace(1, 7) # rescale control reps
D3_control = ((D_control.groupby(['gameID','repetition','condition'])[var3].mean()).reset_index()).replace(1, 7) # rescale control reps
# +
# sns.tsplot(data=D0_repeated,
# time='repetition',
# unit='gameID',
# value=var0,
# ax=ax0)
# sns.tsplot(data=D0_control,
# time='repetition',
# unit='gameID',
# value=var0,
# err_style='ci_bars',
# interpolate=False,
# ax=ax0,
# color='r')
# sns.tsplot(data=D1_repeated,
# time='repetition',
# unit='gameID',
# value=var1,
# ax=ax1)
# sns.tsplot(data=D1_control,
# time='repetition',
# unit='gameID',
# value=var1,
# err_style='ci_bars',
# interpolate=False,
# ax=ax1,
# color='r')
# sns.tsplot(data=D2_repeated,
# time='repetition',
# unit='gameID',
# value=var2,
# ax=ax2)
# sns.tsplot(data=D2_control,
# time='repetition',
# unit='gameID',
# value=var2,
# err_style='ci_bars',
# interpolate=False,
# ax=ax2,
# color='r')
# sns.tsplot(data=D3_repeated,
# time='repetition',
# unit='gameID',
# value=var3,
# ax=ax3)
# sns.tsplot(data=D3_control,
# time='repetition',
# unit='gameID',
# value=var3,
# err_style='ci_bars',
# interpolate=False,
# ax=ax3,
# color='r')
# plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# ax0.set(xlim=(-0.5, 7.5), xticks=range(0,8))
# ax1.set(xlim=(-0.5, 7.5), xticks=range(0,8))
# ax2.set(xlim=(-0.5, 7.5), xticks=range(0,8))
# ax3.set(xlim=(-0.5, 7.5), xticks=range(0,8))
# +
# for i in range(0, 4):
# var = 'var'+ i
# print var
# 'D' + str(i) + '_' + condition = collapse_within_repetition(D, var, condition)
# +
def convert_numeric(X, column_id):
    """Make column *column_id* of *X* numeric for aggregation; returns the mutated frame."""
    X[column_id] = X[column_id].pipe(pd.to_numeric)
    return X
## coerce the four measure columns to numeric dtypes before aggregating
D = convert_numeric(convert_numeric(convert_numeric(convert_numeric(D,var0),var1),var2),var3)
def collapse_within_repetition(D, var, condition):
    """Mean of *var* per (gameID, repetition, condition) cell for one condition.

    For non-'repeated' trials the repetition label 1 is remapped to 7 so the
    two control timepoints line up with the repeated-condition axis.
    NOTE(review): .replace(1, 7) runs over the whole aggregated frame, so a
    mean of *var* exactly equal to 1 would be remapped as well.
    """
    cells = (D[D['condition'] == condition]
             .groupby(['gameID', 'repetition', 'condition'])[var]
             .mean()
             .reset_index())
    if condition != 'repeated':
        cells = cells.replace(1, 7)
    return cells
def plot_repeated_control(D_repeated, D_control, var, ax):
    """Overlay repeated (line) and control (red CI bars) time series of *var* on *ax*.

    NOTE(review): sns.tsplot was removed in seaborn 0.9 -- this helper only
    runs against old seaborn versions.
    """
    sns.tsplot(data=D_repeated,
               time='repetition',
               unit='gameID',
               value=var,
               ax=ax)
    sns.tsplot(data=D_control,
               time='repetition',
               unit='gameID',
               value=var,
               err_style='ci_bars',
               interpolate=False,
               ax=ax,
               color='r')
    ax.set(xlim=(-0.5, 7.5), xticks=range(0,8))
# +
## scratch: build a one-row frame whose every cell is the target's S3 image link
## NOTE(review): `d` here is a leftover binding from a previous cell's loop
_newD = pd.DataFrame()
target = d['target']
link = 'https://s3.amazonaws.com/shapenet-graphical-conventions/52f0514f5c38bc96f51f77a6d7299806.png'
extra_row = [link] * D.shape[1]
extra_df = pd.DataFrame(extra_row, index = list(D.columns.values))
extra_df = extra_df.transpose()
extra_df
_newD = pd.concat([_newD, extra_df])
# -
## rebuild D as newD, inserting an image-link row after every 6th trial row
newD = pd.DataFrame()
for i, d in D.iterrows():
    d = d.to_frame()
    d = d.transpose()
    # BUGFIX: the original condition `i%6 == 0 & i != 0` is parsed by Python
    # as the chained comparison `i%6 == (0 & i) != 0` (bitwise & binds tighter
    # than comparisons), which is always False, so the link row was never
    # inserted.  Use explicit boolean `and`.
    if (i % 6 == 0) and (i != 0):
        newD = pd.concat([newD, d])
        target = d['target']
        link = 'https://s3.amazonaws.com/shapenet-graphical-conventions/52f0514f5c38bc96f51f77a6d7299806.png'
        extra_row = [link] * D.shape[1]
        extra_df = pd.DataFrame(extra_row, index = list(D.columns.values))
        extra_df = extra_df.transpose()
        newD = pd.concat([newD, extra_df])
    else:
        newD = pd.concat([newD, d])
# # Miscellaneous
# +
#seaborn plotting
## mean of one measure per repetition, repeated vs. control trials
d = pd.DataFrame()
numReps = 7
variable = 'numStrokes'
# repeated conditions
criteria_repeated = (D['condition']=='repeated')
repeated = D[criteria_repeated] # get all repeated condition trials
repeats = list(range(0, numReps)) # number of repeats
repeated_means = []
for rep in repeats:
    mean = ((repeated[(repeated['repetition'] == rep)])[variable]).mean() # get mean across trials with same repetition number
    repeated_means.append(mean)
# control conditions
criteria_control = (D['condition']=='control')
control = D[criteria_control] # get all control condition trials
controls = [0, numReps - 1] # number of controls scaled up to total number of repeats
control_means= []
for i in [0, 1]: # only 2 controls, but should scale up to numReps
    mean = ((control[(control['repetition'] == i)])[variable]).mean() # get mean across trials with same repetition number
    control_means.append(mean)
d['numReps'] = repeats
d[variable] = repeated_means
#d['control'] = control_means
facet = sns.lmplot(data=d, x='numReps', y=variable, fit_reg=False)
# add error bars
# -
## summary means, repeated vs. control (Python 2 print statements);
## the *_strokes / *_svg* / *_drawDuration / *_accuracy series are assumed to
## come from an earlier (not shown) cell
print repeated_strokes.mean(), control_strokes.mean()
print repeated_svgLength.mean(), control_svgLength.mean()
print repeated_svgStd.mean(), control_svgStd.mean()
print repeated_svgLengthPS.mean(), control_svgLengthPS.mean()
print repeated_drawDuration.mean(), control_drawDuration.mean()
print repeated_accuracy.mean(), control_accuracy.mean()
## 2x2 scatter grid: control (x) vs repeated (y) values with an identity line;
## points above the dashed line mean repeated > control
fig = plt.figure(figsize=(10,10))
plt.subplot(2,2,1)
lb = 0
ub = 16
plt.plot([lb,ub],[lb,ub],'k--')
plt.scatter(control_strokes,repeated_strokes,64,(0.8,0.4,0.4))
plt.xlim([lb,ub])
plt.ylim([lb,ub])
plt.title('number of strokes')
plt.xlabel('control')
plt.ylabel('repeated')
plt.subplot(2,2,2)
lb = 0
ub = 3000
plt.plot([lb,ub],[lb,ub],'k--')
plt.scatter(control_svgLength,repeated_svgLength,64,(0.8,0.4,0.4))
plt.xlim([lb,ub])
plt.ylim([lb,ub])
plt.tight_layout()
plt.title('svg string length')
plt.xlabel('control')
plt.ylabel('repeated')
plt.subplot(2,2,3)
lb = 0
ub = 300
plt.plot([lb,ub],[lb,ub],'k--')
plt.scatter(control_svgStd,repeated_svgStd,64,(0.8,0.4,0.4))
plt.xlim([lb,ub])
plt.ylim([lb,ub])
plt.title('stroke variability')
plt.xlabel('control')
plt.ylabel('repeated')
plt.subplot(2,2,4)
lb = 0
ub = 600
plt.plot([lb,ub],[lb,ub],'k--')
plt.scatter(control_svgLengthPS,repeated_svgLengthPS,64,(0.8,0.4,0.4))
plt.xlim([lb,ub])
plt.ylim([lb,ub])
plt.tight_layout()
plt.title('svg length per stroke')
plt.xlabel('control')
plt.ylabel('repeated')
## pull clickedObj events for one session time window from mongo, sorted by time
## (Python 2 print statement below)
a = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'clickedObj'}, {'time':{'$gt':1531160581750, '$lt': 1531161932801}}]}).sort('time')
for rec in a:
    print rec['phase'], rec['condition'],rec['repetition']
| analysis/ipynb/jefan/run2_chairs1k_size6_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All of these python notebooks are available at [https://gitlab.erc.monash.edu.au/andrease/Python4Maths.git]
# </i></small></small>
# # Getting started
#
# Python can be used like a calculator. Simply type in expressions to get them evaluated.
#
# ## Basic syntax for statements
# The basic rules for writing simple statements and expressions in Python are:
# 1. No **spaces or tab characters allowed at the start** of a statement: Indentation plays a special role in Python (see the section on control statements). For now simply ensure that all statements start at the beginning of the line.
# 2. The '**#**' character indicates that the rest of the line is a **comment**
# 3. Statements **finish at the end of the line**:
# * **Except** when there is an open bracket or parenthesis:
# ```python
# 1+2
# +3 #illegal continuation of the sum
# (1+2
# + 3) # perfectly OK even with spaces
# ```
# * **Or** when there is a single backslash at the end of the line. It is used to indicate that a statement is still incomplete
# ```python
# 1 + \
# 2 + 3 # this is also OK
# ```
# The Jupyter notebook system for writing Python intersperses text (like this) with Python statements. Try typing something into the cell (box) below and press the 'run cell' button above (triangle+line symbol) to execute it.
1+2+3
# Python has extensive help built in. You can execute **help()** for an overview or **help(x)** for any library, object or type **x** to get more information. For example:
help()
# # Variables & Values
# A name that is used to denote something or a value is called a variable. In Python, variables can be declared and values can be assigned to them as follows:
x = 2 # anything after a '#' is a comment
y = 5
xy = 'Hey'
print(x+y, xy) # not really necessary as the last value in a bit of code is displayed by default
# Multiple variables can be assigned with the same value.
x = y = 1
print(x,y)
# The basic types built into Python include `float` (floating point numbers), `int` (integers), `str` (unicode character strings) and `bool` (boolean). Some examples of each:
2.0 # a simple floating point number
1e100 # a googol
-1234567890 # an integer
True or False # the two possible boolean values
'This is a string'
"It's another string"
print("""Triple quotes (also with '''), allow strings to break over multiple lines.
Alternatively \n is a newline character (\t for tab, \\ is a single backslash)""")
# Python also has complex numbers that can be written as follows. Note that the brackets are required.
complex(1,2)
(1+2j) # the same number as above
# # Operators
# ## Arithmetic Operators
# | Symbol | Task Performed |
# |----|---|
# | + | Addition |
# | - | Subtraction |
# | / | division |
# | % | mod |
# | * | multiplication |
# | // | floor division |
# | ** | to the power of |
1+2
2-1
1*2
3/4
# In many languages (and older versions of python) 1/2 = 0 (truncated division). In Python 3 this behaviour is captured by a separate operator that rounds down: (ie a // b$=\lfloor \frac{a}{b}\rfloor$)
3//4.0
15%10
# Python natively allows (nearly) infinite length integers while floating point numbers are double precision numbers:
11**300
11.0**300
# ## Relational Operators
# | Symbol | Task Performed |
# |----|---|
# | == | True, if it is equal |
# | != | True, if not equal to |
# | < | less than |
# | > | greater than |
# | <= | less than or equal to |
# | >= | greater than or equal to |
#
# Note the difference between `==` (equality test) and `=` (assignment)
z = 2
z == 2
z > 2
# Comparisons can also be chained in the mathematically obvious way. The following will work as expected in Python (but not in other languages like C/C++):
0.5 < z <= 1
# ## Boolean and Bitwise Operators
# |Operator|Meaning | | Symbol | Task Performed |
# |----|--- | |----|---|
# |`and`| Logical and | | & | Bitwise And |
# |`or` | Logical or | | $\mid$ | Bitwise OR |
# | | | | ^ | Exclusive or |
# |`not` | Not | | ~ | Negate |
# | | | | >> | Right shift |
# | | | | << | Left shift |
a = 2 #binary: 10
b = 3 #binary: 11
print('a & b =',a & b,"=",bin(a&b))  # bitwise AND: 10 & 11 = 10
print('a | b =',a | b,"=",bin(a|b))  # bitwise OR:  10 | 11 = 11
print('a ^ b =',a ^ b,"=",bin(a^b))  # bitwise XOR: 10 ^ 11 = 01
print( not (True and False), "==", not True or not False)  # De Morgan's law: both sides are True
# # Built-in Functions
# Python comes with a wide range of functions. However many of these are part of standard libraries like the `math` library rather than built-in.
# ## Converting values
#
# Conversion from hexadecimal to decimal is done by adding prefix **0x** to the hexadecimal value or vice versa by using built in **hex( )**, Octal to decimal by adding prefix **0** to the octal value or vice versa by using built in function **oct( )**.
hex(170)
0xAA
# **int( )** converts a number to an integer. This can be a single floating point number, integer or a string. For strings the base can optionally be specified:
print(int(7.7), int('111',2),int('7'))
# Similarly, the function **str( )** can be used to convert almost anything to a string
print(str(True),str(1.2345678),str(-2))
# ## Mathematical functions
# Mathematical functions include the usual suspects like logarithms, trigonometric functions, the constant $\pi$ and so on.
import math
math.sin(math.pi/2)
from math import * # avoid having to put a math. in front of every mathematical function
sin(pi/2) # equivalent to the statement above
# ## Simplifying Arithmetic Operations
# **round( )** function rounds the input value to a specified number of places or to the nearest integer.
print( round(5.6231) )
print( round(4.55892, 2) )
# **complex( )** is used to define a complex number and **abs( )** outputs the absolute value of the same.
c =complex('5+2j')
print( abs(c) )
# **divmod(x,y)** outputs the quotient and the remainder in a tuple (tuples are covered in a later chapter) in the format (quotient, remainder).
divmod(9,2)
# ## Accepting User Inputs
# **input(prompt)**, prompts for and returns input as a string. A useful function to use in conjunction with this is **eval()** which takes a string and evaluates it as a python expression.
# Read a string from the user, then evaluate it as a Python expression.
# NOTE(review): eval() on user input executes arbitrary code -- acceptable in
# an interactive tutorial, but never do this on untrusted input.
abc = input("abc = ")
abcValue=eval(abc)
print(abc,'=',abcValue)
| Documentos.bak/01-python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports and Constants
# +
# File system
import os, os.path
import pickle
from glob import glob
import sys
# Workflow
import random
import tqdm
import tqdm.notebook
# Computation
import numpy as np
import torch
import torch.nn as nn
# Data visualization
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set(rc={"figure.dpi":100, 'savefig.dpi':100})
sns.set_context('notebook')
torch.__version__
# +
# Keys to the pickle objects (one scene dict per pickle file)
CITY = 'city'
LANE = 'lane'
LANE_NORM = 'lane_norm'
SCENE_IDX = 'scene_idx'
AGENT_ID = 'agent_id'
P_IN = 'p_in'        # input positions
V_IN = 'v_in'        # input velocities
P_OUT = 'p_out'      # ground-truth output positions
V_OUT = 'v_out'      # ground-truth output velocities
CAR_MASK = 'car_mask'
TRACK_ID = 'track_id'
# Encode Miami as 0, Pittsburgh as 1
MIA = 'MIA'
PIT = 'PIT'
CITY_MAP = {MIA: 0, PIT: 1}
# Transformed data keys (added by the offline preprocessing step)
LANE_IN = 'closest_lanes_in'
NORM_IN = 'closest_lane_norms_in'
LANE_OUT = 'closest_lanes_out'
NORM_OUT = 'closest_lane_norms_out'
# +
# Dataset variables
VALIDATION_PATH = './val_data/'
ORIGINAL_PATH = './original_train_data/'
TRANSFORMED_TRAIN_PATH = './transformed_train_data'
TRANSFORMED_VAL_PATH = './transformed_val_data'
train_path = TRANSFORMED_TRAIN_PATH
val_path = TRANSFORMED_VAL_PATH
# Path to model predictions on test set
PREDICTION_PATH = './my_submission.csv'
# Header of predictions CSV file; the pieces already contain the commas and a
# trailing newline so they can be emitted with writelines() as one header row
CSV_HEADER = ['ID,'] + ['v' + str(i) + ',' for i in range(1, 60)] + ['v60', '\n']
# Get list of all training file names
original_files = glob(os.path.join(train_path, '*'))
# train-test split (random, not seeded -- a new split every run)
TRAIN_TEST_RATIO = 0.8 # Percent that are train files
train_files = random.sample(original_files, int(len(original_files) * TRAIN_TEST_RATIO))
test_files = list(set(original_files) - set(train_files))
# Validation
val_files = glob(os.path.join(val_path, '*'))
# +
# Control variables
NUM_AGENTS = 1
IN_LEN = 19    # input timesteps per scene
OUT_LEN = 30   # output timesteps to predict
# Controls how many features to use
N_FEAT_IN = 4  # x, y position + x, y velocity
N_FEAT_OUT = 2 # x, y position
# Batch variables
BATCH_SIZE_TRAIN = 64
BATCH_SIZE_TEST = BATCH_SIZE_TRAIN
BATCH_SIZE_VAL = 32
N_WORKERS = 4
# Windows doesn't support anything but N_WORKERS = 0 for the DataLoader
if 'win' in sys.platform:
    N_WORKERS = 0
N_WORKERS
# -
# # Dataset Loading and Batching
class ArgoverseDataset(torch.utils.data.Dataset):
    """Map-style dataset: each item is one pickled Argoverse scene dict loaded from disk."""

    def __init__(self, files):
        """files: list of paths to pickle files, one scene per file."""
        super(ArgoverseDataset, self).__init__()
        self.files = files

    def __len__(self):
        # One sample per pickle file.
        return len(self.files)

    def __getitem__(self, idx):
        # Lazily unpickle the requested scene on access.
        with open(self.files[idx], 'rb') as handle:
            return pickle.load(handle)
# +
def collate_train_test(batch):
    """
    Custom collate_fn for the train/test DataLoader.

    Returns [inp, out, scene_idxs, agent_idxs, city_ids]:
      inp  -- (batch, IN_LEN, 4) float tensor of the target agent's input
              positions and velocities,
      out  -- (batch, OUT_LEN, 2) float tensor of its output positions,
      plus per-scene bookkeeping lists.

    BUGFIX: scene_idxs / agent_idxs / city_ids were declared but never
    populated (unlike in collate_val), so consumers always received empty
    lists; they are now filled the same way collate_val fills them.
    """
    inp = []
    out = []
    agent_idxs = []
    scene_idxs = []
    city_ids = []
    for scene in batch:
        # Get the target agent id
        agent_id = scene[AGENT_ID]
        # Get the matrix of all agents
        track_id = scene[TRACK_ID]
        # Row of the target agent in the per-scene agent matrix
        idx = np.nonzero(track_id[:, 0] == agent_id)[0][0]
        # Aliases of scene variables for convenience
        pin, pout, vin, vout = scene[P_IN], scene[P_OUT], scene[V_IN], scene[V_OUT]
        # Alternative feature sets (lane geometry, lane norms, city one-hot)
        # were explored in earlier iterations; only the target agent's
        # position + velocity are currently used as input, and only its
        # output positions as the target.
        inp_tens = np.concatenate((pin[idx], vin[idx]), axis=1)
        out_tens = pout[idx]
        inp.append(inp_tens)
        out.append(out_tens)
        scene_idxs.append(scene[SCENE_IDX])
        agent_idxs.append(idx)
        city_ids.append(CITY_MAP[scene[CITY]])
    inp = torch.FloatTensor(inp)
    out = torch.FloatTensor(out)
    return [inp, out, scene_idxs, agent_idxs, city_ids]
# +
def collate_val(batch):
    """
    Custom collate_fn for the validation DataLoader.

    Validation scenes carry no ground-truth outputs, so only the inputs plus
    per-scene bookkeeping are returned: [inp, scene_idxs, agent_idxs, city_ids].
    """
    inputs, scene_idxs, agent_idxs, city_ids = [], [], [], []
    for scene in batch:
        target_agent = scene[AGENT_ID]
        agents = scene[TRACK_ID]
        # Row of the target agent inside the per-scene agent matrix.
        row = np.nonzero(agents[:, 0] == target_agent)[0][0]
        inlen = scene[P_IN].shape[1]
        # Aliases of scene variables for convenience.
        pin, vin = scene[P_IN], scene[V_IN]
        lanein, normin = scene[LANE_IN], scene[NORM_IN]
        num_agents = scene[P_IN].shape[0]
        # City indicator column (0 = Miami, 1 = Pittsburgh); kept for the
        # commented-out feature combinations below.
        cities = np.zeros((inlen, 1)) if scene[CITY] == MIA else np.ones((inlen, 1))
        # Alternative feature sets (lane geometry, city id) were explored;
        # only the target agent's position + velocity are currently used.
        inputs.append(np.concatenate((pin[row], vin[row]), axis=1))
        scene_idxs.append(scene[SCENE_IDX])
        agent_idxs.append(row)
        city_ids.append(CITY_MAP[scene[CITY]])
    return [torch.FloatTensor(inputs), scene_idxs, agent_idxs, city_ids]
# +
# Initialize datasets and loaders (train shuffled; test/val kept in order)
train_dataset = ArgoverseDataset(train_files)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE_TRAIN,
                                           shuffle=True, collate_fn=collate_train_test,
                                           num_workers=N_WORKERS)
test_dataset = ArgoverseDataset(test_files)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE_TEST,
                                          shuffle=False, collate_fn=collate_train_test,
                                          num_workers=N_WORKERS)
val_dataset = ArgoverseDataset(val_files)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=BATCH_SIZE_VAL,
                                         shuffle=False, collate_fn=collate_val,
                                         num_workers=N_WORKERS)
# -
# Look at one data sample
# NOTE(review): collate_train_test returns [inp, out, scene_idxs, agent_idxs,
# city_ids]; the names agent_idxs/scene_idxs/cities here are swapped relative
# to that order -- only data/target are used, so it is harmless.
for _, (data, target, agent_idxs, scene_idxs, cities) in enumerate(train_loader):
    print(data.shape)
    print(target.shape)
    break
# Look at one data sample
# NOTE(review): collate_val returns [inp, scene_idxs, agent_idxs, city_ids];
# the names here do not match that order -- only `data` is used.
for _, (data, agent_idxs, scene_idxs, masks) in enumerate(val_loader):
    print(data.shape)
    break
# # Training Workflow
def train(model, device, train_loader, optimizer, epoch):
    """Train *model* for *epoch* passes over *train_loader*.

    Uses smooth-L1 (Huber) loss; the optimized quantity is sqrt(loss).
    Returns the accumulated loss rescaled by the module-level
    BATCH_SIZE_TRAIN / len(train_files) globals.
    """
    # Set the model into training mode
    model.train()
    # Define the loss function.
    # criterion = torch.nn.MSELoss(reduction='mean')
    criterion = torch.nn.SmoothL1Loss(reduction='mean')
    total_loss = 0
    for i in range(epoch):
        iterator = tqdm.notebook.tqdm(train_loader, total=int(len(train_loader)))
        for _, batch in enumerate(iterator):
            # NOTE(review): collate_train_test returns [inp, out, scene_idxs,
            # agent_idxs, city_ids]; the name `masks` actually receives
            # city_ids -- confirm intent.
            data, target, scene_idxs, agent_idxs, masks = batch
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            out = model(data)
            loss = torch.sqrt(criterion(out, target))
            total_loss += loss.item()
            # Perform backpropagation
            loss.backward()
            # Update the weights
            optimizer.step()
            # Update the progress bar for tqdm
            iterator.set_postfix(train_loss=loss.item(), rmse=torch.sqrt(nn.functional.mse_loss(out, target)).item())
    return (total_loss * BATCH_SIZE_TRAIN) / len(train_files)
def test(model, device, test_loader):
    """Evaluate *model* on *test_loader* with smooth-L1 loss (sqrt-reported).

    Only the forward pass is wrapped in torch.no_grad().  Returns the
    accumulated loss rescaled by the module-level BATCH_SIZE_TEST /
    len(test_files) globals.
    """
    model.eval()
    # criterion = torch.nn.MSELoss(reduction='mean')
    criterion = torch.nn.SmoothL1Loss(reduction='mean')
    iterator = tqdm.notebook.tqdm(test_loader, total=int(len(test_loader)))
    total_loss = 0
    for _, batch in enumerate(iterator):
        # NOTE(review): `masks` actually receives city_ids (see collate_train_test).
        data, target, scene_idxs, agent_idxs, masks = batch
        data, target = data.to(device), target.to(device)
        with torch.no_grad():
            out = model(data)
        loss = torch.sqrt(criterion(out, target))
        iterator.set_postfix(test_loss=loss.item(), rmse=torch.sqrt(nn.functional.mse_loss(out, target)).item())
        total_loss += loss.item()
    return (total_loss * BATCH_SIZE_TEST) / len(test_files)
def train_test(model, device, train_loader, test_loader, optimizer, NUM_EPOCH):
    """Alternate one training pass and one evaluation pass for NUM_EPOCH epochs,
    appending both losses to the module-level train_losses / test_losses lists.
    """
    epoch = 0
    while epoch < NUM_EPOCH:
        epoch += 1
        tr = train(model, device, train_loader, optimizer, 1)
        te = test(model, device, test_loader)
        train_losses.append(tr)
        test_losses.append(te)
        print(f'Epoch {epoch}: train_loss = {tr}, test_loss = {te}')
def validate(model, device, val_loader, path):
    """Run inference over the validation loader and write predictions to CSV.

    path: path to csv file to write predictions (header + one row per scene).
    """
    model.eval()
    # BUG FIX: the original wrote the header to the global PREDICTION_PATH but
    # appended the predictions to `path`; with any other argument the output
    # file would have no header (or the wrong file would be truncated).
    # Open `path` once in write mode: this truncates, writes the header, and
    # then streams the prediction rows into the same handle.
    with open(path, "w") as pred_file:
        pred_file.writelines(CSV_HEADER)
        # BUG FIX: `from tqdm import tqdm` means `tqdm.notebook.tqdm` raises;
        # call tqdm directly.
        iterator = tqdm(val_loader, total=len(val_loader))
        for _, batch in enumerate(iterator):
            data, scene_idxs, agent_idxs, masks = batch
            data = data.to(device)
            with torch.no_grad():
                output = model(data)
            # Convert the Tensor from GPU -> CPU -> NumPy array
            np_out = output.cpu().detach().numpy()
            batch_size = np_out.shape[0]
            # Keep only the target agent's (x, y) positions: assumes 30 output
            # timesteps x 2 coordinates = 60 values per scene -- TODO confirm
            # against OUT_LEN.
            pred = np.zeros((batch_size, 60))
            # The model output is expected to be (batch, time steps, features),
            # with positions in the first two feature columns.
            for i in range(batch_size):
                pred[i] = np_out[i, :, :2].flatten()
            # One comma-separated line per scene: scene index then 60 floats.
            rows = [','.join([str(scene_idxs[i])] + [str(v) for v in pred[i]]) + '\n'
                    for i in range(pred.shape[0])]
            pred_file.writelines(rows)
# # Model Initialization
# +
# class ArgoNet(torch.nn.Module):
# """
# Neural Network class - CNN with 1 conv and 1 linear layer
# """
# def __init__(self, device):
# super(ArgoNet, self).__init__()
# self.device = device
# self.hid = 12
# self.conv = nn.Sequential(
# nn.Conv1d(IN_LEN, self.hid, 1),
# nn.SELU()
# )
# self.fc = nn.Sequential(
# nn.Linear(self.hid * N_FEAT_IN, OUT_LEN * N_FEAT_OUT),
# )
# def forward(self, x):
# x = self.conv(x)
# x = x.view(x.shape[0], self.hid * N_FEAT_IN)
# x = self.fc(x)
# x = x.view(x.shape[0], OUT_LEN, N_FEAT_OUT)
# return x
# -
class ArgoNet(torch.nn.Module):
    """Linear-regression baseline: flattens every agent's input trajectory and
    maps it to the target agent's output trajectory with a single linear layer.
    """
    def __init__(self, device):
        super(ArgoNet, self).__init__()
        self.device = device
        # One weight per (agent, timestep, input feature) element.
        self.fc = nn.Linear(NUM_AGENTS * IN_LEN * N_FEAT_IN, 1 * OUT_LEN * N_FEAT_OUT)

    def forward(self, x):
        # BUG FIX: the original left debug code in place (a `print` of the
        # weight shape followed by `raise ValueError`), so every forward pass
        # crashed.  Removed.
        x = x.view(x.size(0), -1)  # flatten to (batch, NUM_AGENTS*IN_LEN*N_FEAT_IN)
        x = self.fc(x)
        # NOTE(review): this returns a flat (batch, OUT_LEN*N_FEAT_OUT) tensor,
        # while validate() indexes the output as 3-D -- confirm whether a
        # trailing view(batch, OUT_LEN, N_FEAT_OUT) is needed.
        return x
# +
# class ArgoNet(torch.nn.Module):
# """
# Neural Network class - CNN with 1 conv and 2 linear layers
# """
# def __init__(self, device):
# super(ArgoNet, self).__init__()
# self.device = device
# self.h1 = 8
# self.h2 = 64
# self.conv = nn.Sequential(
# nn.Conv1d(IN_LEN, self.h1, 1),
# nn.ReLU()
# )
# self.fc = nn.Sequential(
# nn.Linear(self.h1 * N_FEAT_IN, self.h2),
# nn.ReLU(),
# nn.Linear(self.h2, OUT_LEN * N_FEAT_OUT)
# )
# def forward(self, x):
# x = self.conv(x)
# x = x.view(x.shape[0], self.h1 * N_FEAT_IN)
# x = self.fc(x)
# x = x.view(x.shape[0], OUT_LEN, 2)
# return x
# +
# class ArgoNet(torch.nn.Module):
# """
# Neural Network class - CNN with multiple conv
# """
# def __init__(self, device):
# super(ArgoNet, self).__init__()
# self.device = device
# self.hidden_size = IN_LEN * 4
# self.outch1 = IN_LEN * 4
# self.outch2 = OUT_LEN
# self.conv = nn.Sequential(
# nn.Conv1d(IN_LEN, self.outch1, 1),
# nn.ReLU(),
# nn.Conv1d(self.outch1, self.outch2, 1),
# nn.ReLU()
# )
# self.fc = nn.Linear(self.outch2 * N_FEAT_IN, OUT_LEN * 2)
# def forward(self, x):
# x = self.conv(x)
# # print(x.shape)
# # raise ValueError
# x = x.view(x.shape[0], self.hidden_size * N_FEAT_OUT)
# x = self.fc(x)
# x = x.view(x.shape[0], OUT_LEN, 2)
# # print(x.shape)
# # raise ValueError
# return x
# -
# Select the GPU when one is available; all .to(DEVICE) calls below use this.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
DEVICE
# +
# Code to save and reload a model
# Keys of the checkpoint dictionary written by save_model().
MODEL_STATE = 'model_state_dict'
OPTIMIZER_STATE = 'optimizer_state_dict'
EPOCH_STATE = 'epoch'
LOSS_STATE = 'loss'
BATCH_STATE = 'batch'

def save_model(path, model_state_dict, optimizer_state_dict, epoch, loss, batch):
    """Serialize model/optimizer state plus training progress to `path`."""
    checkpoint = {
        MODEL_STATE: model_state_dict,
        OPTIMIZER_STATE: optimizer_state_dict,
        EPOCH_STATE: epoch,
        LOSS_STATE: loss,
        BATCH_STATE: batch,
    }
    torch.save(checkpoint, path)

def load_model(path, model_to_load, optimizer_to_load):
    """Restore model/optimizer state dicts in place from `path`.

    Returns the saved (epoch, loss, batch) tuple.
    """
    saved = torch.load(path)
    model_to_load.load_state_dict(saved[MODEL_STATE])
    optimizer_to_load.load_state_dict(saved[OPTIMIZER_STATE])
    return saved[EPOCH_STATE], saved[LOSS_STATE], saved[BATCH_STATE]
# +
# Instantiate the linear baseline on the chosen device.
model = ArgoNet(DEVICE).to(DEVICE)
# NOTE(review): learning_rate and weight_decay are defined but never passed to
# the optimizer below, so Adam runs with its defaults (lr=1e-3, no weight
# decay) -- confirm whether they were meant to be used.
learning_rate = 1e-2
weight_decay = 0.001
optimizer = torch.optim.Adam(model.parameters()) # Best optimizer so far
# optimizer = torch.optim.AdamW(model.parameters()) # Can't decrease training loss below 4
# optimizer = torch.optim.RMSprop(model.parameters()) # Doesn't converge w/o tuning
# momentum = 0.9
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum) # Doesn't converge w/o tuning
# optimizer = torch.optim.Adagrad(model.parameters()) # Better than others, not as good as Adam
# Report total parameter count of the chosen architecture.
num_params = sum(p.numel() for p in model.parameters())
print(f"Number of model parameters is {num_params}")
NUM_EPOCH = 1
# used for visualizing the loss
train_losses = []
test_losses = []
# +
# # Use these two lines to save a model
# save_model('conv_hid12_adam_epoch2_selu_batch1.tar', model.state_dict(), optimizer.state_dict(), NUM_EPOCH,
# (train_losses[-1], test_losses[-1]), BATCH_SIZE_TRAIN)
# +
# Reload a model
# model = ArgoNet(DEVICE).to(DEVICE)
# optimizer = torch.optim.Adam(model.parameters())
# load_model('conv_3_adam_epoch_posvel_outpos_only_wd_001.tar', model, optimizer)
# train_losses.clear()
# test_losses.clear()
# num_params = sum(p.numel() for p in model.parameters())
# print(f"Number of model parameters is {num_params}")
# -
# # Evaluation
# NOTE(review): each extra train_test(..., 1) call continues training the same
# model/optimizer state for one more epoch (6 epochs total with NUM_EPOCH=1).
train_test(model, DEVICE, train_loader, test_loader, optimizer, NUM_EPOCH)
train_test(model, DEVICE, train_loader, test_loader, optimizer, 1)
train_test(model, DEVICE, train_loader, test_loader, optimizer, 1)
train_test(model, DEVICE, train_loader, test_loader, optimizer, 1)
train_test(model, DEVICE, train_loader, test_loader, optimizer, 1)
train_test(model, DEVICE, train_loader, test_loader, optimizer, 1)
# Write validation predictions to the submission csv.
validate(model, DEVICE, val_loader, PREDICTION_PATH)
# # Loss Visualization
def visualize_loss(losses):
    """Scatter-plot the loss recorded after each training iteration.

    Assumes each element of `losses` is the loss after one batch of train().
    """
    iterations = np.arange(1, len(losses) + 1, 1, dtype=int)
    axes = sns.scatterplot(x=iterations, y=losses, alpha=0.5)
    axes.set_xlabel('Batch iteration number')
    axes.set_ylabel('Root-mean-square loss')
    axes.set_title('Batch Iteration vs. Root-Mean-Square Loss')
    plt.savefig('lossViter')
visualize_loss(train_losses)
# # Ground Truth Comparison
def visualize_predictions(model, device, loader):
    """Plot ground-truth vs. predicted output positions for one sample."""
    model.eval()
    # Get a batch of data
    _, (inp, out, scene_idxs, agent_idxs, masks) = next(enumerate(loader))
    # Move tensors to chosen device
    inp, out = inp.to(device), out.to(device)
    # Sample number within the batch
    i = 0
    # Scene idx
    scene_idx = scene_idxs[i]
    # BUG FIX: the original read an undefined global `target`; the ground
    # truth for this batch is `out`.
    truth = out[i].cpu().detach().numpy()
    x = truth[:, 0]
    y = truth[:, 1]
    # Get contiguous arrays of the prediction output positions.
    # NOTE(review): assumes the model output is (batch, OUT_LEN, 2) -- confirm
    # against the model's forward().
    with torch.no_grad():
        output = model(inp)
    pred = output[i].cpu().detach().numpy()
    xh = pred[:, 0]
    # BUG FIX: the original plotted column 0 on both axes; the y-coordinate is
    # column 1.
    yh = pred[:, 1]
    # Plot the ground truth and prediction positions
    fig, (ax) = plt.subplots(nrows=1, ncols=1, figsize=(3, 3))
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('Scene ' + str(scene_idx))
    ax.scatter(x, y, label='Ground Truth')
    ax.scatter(xh, yh, label='Prediction')
    ax.legend()
visualize_predictions(model, DEVICE, train_loader)
| previous/argoverse_cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Consistency check between the MAREDAT data and cyanobacteria abundance data
# We use a recent study by [Flombaum et al.](http://dx.doi.org/10.1073/pnas.1307701110) which estimated the total number of cyanobacteria worldwide. Flombaum et al. estimate ≈$3×10^{27}$ Prochlorococcus cells and ≈$7×10^{26}$ Synechococcus cells.
#
# In order to estimate the total biomass of cyanobacteria, we use data from [Buitenhuis et al](https://ueaeprints.uea.ac.uk/40778/) to estimate the carbon content of Prochlorococcus and Synechococcus. Buitenhuis et al. reports values from the literature on the carbon content of Prochlorococcus and Synechococcus. We use the geometric mean of the estimates from different studies as our best estimate of the carbon content of Prochlorococcus and Synechococcus:
# +
import pandas as pd
import numpy as np
from scipy.stats import gmean
# Load data from Buitenhuis et al.
carbon_content = pd.read_excel('cyanobacteria_data.xlsx',skiprows=1)
# Geometric mean of the reported per-cell carbon content of Prochlorococcus
# and Synechococcus, converted from femtograms to grams (1 fg = 1e-15 g).
pro_cc = gmean(carbon_content['Prochlorococcus [fg C cell^-1]'].dropna())*1e-15
syn_cc = gmean(carbon_content['Synechococcus [fg C cell^-1]'].dropna())*1e-15
# -
# We multiply the total number of cells of Prochlorococcus and Synechococcus by the carbon content of Prochlorococcus and Synechococcus to estimate their total biomass. The total biomass of cyanobacteria is the sum of the total biomass of Prochlorococcus and Synechococcus:
# +
# The total number of Prochlorococcus and Synechococcus from Flombaum et al.
pro_cell_num = 3e27
syn_cell_num = 7e26
# Total biomass (g C) = cell count x per-cell carbon content.
pro_tot_biomass = pro_cc*pro_cell_num
syn_tot_biomass = syn_cc*syn_cell_num
# Calculate the total biomass of cyanobacteria
cyano_biomass = pro_tot_biomass + syn_tot_biomass
# Report in gigatons of carbon (1 Gt = 1e15 g).
print('The total biomass of cyanobacteria is ≈%.1f Gt C' %(cyano_biomass/1e15))
# -
# We note in the section detailing our estimate of the total biomass of marine protists that the total biomass of picophytoplankton based on the MAREDAT database is ≈0.42 Gt C. Buitenhuis et al. estimates, based on data from the MAREDAT database, that cyanobacteria account for 31-51% of the total biomass of picophytoplankton, which is equivalent to:
# +
# The estimate of the biomass of picophytoplankton based on MAREDAT data (g C)
picophyto_biomass = 0.42e15
# The fraction of cyanobacteria out of the total biomass of picophytoplankton
# based on MAREDAT data (lower and upper bounds of the reported 31-51% range)
cyano_fraction = [0.31,0.51]
# Best estimate: apply the midpoint of the fraction range to the picophyto
# biomass.
cyano_maredat = picophyto_biomass*np.mean(cyano_fraction)
print('The estimate of the biomass of cyanobacteria based on the MAREDAT database is %.1f Gt C' %(cyano_maredat/1e15))
# -
# The estimate based on the data from Flombaum et al. and the estimate based on the MAREDAT database are less than 2-fold apart.
| MAREDAT_consistency_check/.ipynb_checkpoints/cyanobacteria_consistency_check-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import psycopg2
## Connect to SQL database
# NOTE(review): connection parameters are hard-coded (password shown as a
# placeholder); move credentials to environment variables or a config file.
conn = psycopg2.connect(database = 'cuzegotk',user = 'cuzegotk',password = '<PASSWORD>',host = 'raja.db.elephantsql.com')
cur = conn.cursor()
## Get Joined table of the company financials, and ratings
cur.execute('SELECT companyfinancials.ticker, year, last_year, revenue, revenue_ly, cogs, cogs_ly, gross_profit, gross_profit_ly, operating_expense, operating_expense_ly, operating_profit, operating_profit_ly, net_profit, net_profit_ly, rating FROM companyfinancials JOIN companyratings ON companyfinancials.ticker = companyratings.ticker')
financials_a = cur.fetchall()
cur.close()
conn.close()
## Connect to SQL database (second connection for the quarterly tables)
conn = psycopg2.connect(database = 'cuzegotk',user = 'cuzegotk',password = '<PASSWORD>',host = 'raja.db.elephantsql.com')
cur = conn.cursor()
## Get Joined table of the company balance sheets, quarter-ends, and shares_outstanding
cur.execute('SELECT Q.ticker, L.sector, B.quarter_end, shares_outstanding, total_assets,total_liabilities,intangible_assets,shareholders_equity,total_equity,revenue,gross_profit,operating_profit,net_income FROM companybalancesheets B JOIN companyquarterends Q ON Q.ticker = B.ticker JOIN sharesoutstanding S ON S.ticker = B.ticker JOIN companylisting L ON L.ticker = S.ticker')
financials_q = cur.fetchall()
cur.close()
conn.close()
# Define columns annual financials
a_columns = ['ticker', 'year', 'last_year', 'revenue', 'revenue_ly', 'cogs', 'cogs_ly', 'gross_profit', 'gross_profit_ly', 'operating_expense', 'operating_expense_ly', 'operating_profit', 'operating_profit_ly', 'net_profit', 'net_profit_ly', 'rating']
# Define columns of quarter financials
q_columns = ['ticker', 'sector', 'quarter_end', 'shares_outstanding', 'total_assets', 'total_liabilities', 'intangible_assets', 'shareholders_equity', 'total_equity', 'revenue', 'gross_profit', 'operating_profit', 'net_income']
# Wrap the raw cursor rows in labelled DataFrames.
financials_a = pd.DataFrame(financials_a, columns=a_columns)
financials_q = pd.DataFrame(financials_q, columns=q_columns)
# Target vector: one analyst rating per annual-financials row.
Y = financials_a.rating.values.reshape(len(financials_a), 1)
financials_a.head()
financials_q.head()
# ###### Feature Selection
## Categorical Feature for the sector that the company is listed under
financials_q.sector.value_counts()
# Rows whose sector came back as the literal string 'nan' (not a real NaN).
financials_q[financials_q.sector == 'nan']
# NOTE(review): sectors patched by hard-coded row position; these indices
# break if the query order or data changes -- prefer matching on ticker.
financials_q.iloc[250,1] = 'Materials'
financials_q.iloc[429,1] = 'Consumer Discretionary'
financials_q.sector.value_counts()
## Encoding categorical data using Pandas get_dummies
X1 = pd.get_dummies(financials_q['sector']).values
## Feature for measuring a company's earnings and its margin of safety, EPS and Book Value per share
X2 = (financials_q.net_income / financials_q.shares_outstanding).values.reshape(len(X1), 1)
X3 = ((financials_q.total_assets - financials_q.total_liabilities - financials_q.intangible_assets) / financials_q.shares_outstanding).values.reshape(len(X1), 1)
## Measure of company's profitability, Return on Equity and Return on Assets
X4 = (financials_q.net_income / financials_q.shareholders_equity).values.reshape(len(X1), 1)
# BUG FIX: X5 was an exact duplicate of X4 (net_income / shareholders_equity);
# Return on Assets divides net income by total assets.
X5 = (financials_q.net_income / financials_q.total_assets).values.reshape(len(X1), 1)
## Measure of company's financial leverage and liquidity, Debt-to-Equity ratio and Debt-to-Asset ratio
X6 = (financials_q.total_liabilities / financials_q.shareholders_equity).values.reshape(len(X1), 1)
X7 = (financials_q.total_liabilities / financials_q.total_assets).values.reshape(len(X1), 1)
## Measure of company's margins, gross, operating, and net
X8 = (financials_q.gross_profit / financials_q.revenue).values.reshape(len(X1), 1)
X9 = (financials_q.operating_profit / financials_q.revenue).values.reshape(len(X1), 1)
X10 = (financials_q.net_income / financials_q.revenue).values.reshape(len(X1), 1)
## Measure of year-over-year growth in sales
# NOTE(review): X11 comes from the annual table while X1-X10 come from the
# quarterly table -- confirm the two share the same row order/length.
X11 = ((financials_a.revenue - financials_a.revenue_ly) / financials_a.revenue_ly).values.reshape(len(X1), 1)
## Set of features we will use for our machine learning algorithm
# Drop the first dummy column (X1[:,1:]) to avoid the dummy-variable trap.
features = np.concatenate((X1[:,1:], X2, X3, X4, X5, X6, X7, X8, X9, X10, X11), axis = 1)
# ###### Testing Features
## Loading and dropping NaNs
df = pd.DataFrame(np.concatenate((features, Y), axis = 1))
df.dropna(inplace=True)
df.info()
# Column 20 is the analyst rating label.
df.iloc[:,20].value_counts()
## Dropping any rows where there are no ratings for the stock
df = df[df.iloc[:,20] != 'No Ratings']
df.iloc[:,20].value_counts()
df.head()
## Standardizing data types of columns
# Columns 0-9 are sector dummies (integers); columns 10-19 are ratio features
# (floats).
for i in range(0,10):
    df.iloc[:,i] = pd.to_numeric(df.iloc[:,i], downcast='integer')
for i in range(10,20):
    df.iloc[:,i] = pd.to_numeric(df.iloc[:,i], downcast='float')
df.info()
df.iloc[:,10:].describe()
## Setting rules for our margin features. Generally margins cannot be greater than 1
new_features = df[(df.iloc[:,18] < 1.0) & (df.iloc[:,17] < 1.0) & (df.iloc[:,16] < 1.0)]
new_features.iloc[:,10:].describe()
# Inspect the 1st-percentile tails of EPS, book value, and debt-to-equity.
new_features.iloc[:,10].quantile(0.01)
new_features.iloc[:,11].quantile(0.01)
new_features.iloc[:,14].quantile(0.01)
# Trim extreme outliers: keep rows between the 1st and 99th percentiles for
# columns 10, 11 and 14.
new_features = new_features[(new_features.iloc[:,10] < new_features.iloc[:,10].quantile(0.99)) & (new_features.iloc[:,11] < new_features.iloc[:,11].quantile(0.99)) & (new_features.iloc[:,14] < new_features.iloc[:,14].quantile(0.99)) & (new_features.iloc[:,10] > new_features.iloc[:,10].quantile(0.01)) & (new_features.iloc[:,11] > new_features.iloc[:,11].quantile(0.01)) & (new_features.iloc[:,14] > new_features.iloc[:,14].quantile(0.01))]
new_features.iloc[:,10:].describe()
new_features[20].value_counts()
## Encoding ratings into numerical data
# Collapse the 5-point analyst scale into 3 classes: 0=sell, 1=hold, 2=buy.
ratings = {
    'Strong Sell': 0,
    'Moderate Sell': 0,
    'Hold': 1,
    'Moderate Buy': 2,
    'Strong Buy': 2
}
# NOTE(review): new_features is a slice of df, so this assignment may trigger
# pandas' SettingWithCopyWarning -- consider taking a .copy() upstream.
new_features[20] = new_features[20].map(ratings)
new_features[20].value_counts()
# Feature matrix (all columns but the last) and encoded label vector.
X = new_features.values[:,:-1]
Y = new_features.values[:,-1]
## Train Test Split
from sklearn.model_selection import train_test_split
# Stratify so the class balance is preserved in both splits.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y)
## Feature Scaling
from sklearn.preprocessing import MinMaxScaler
minmax = MinMaxScaler()
# Fit on training data only; apply the same scaling to the test set.
X_train = minmax.fit_transform(X_train)
X_test = minmax.transform(X_test)
## Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model_lr = LogisticRegression(solver='lbfgs', multi_class='ovr')
model_lr.fit(X_train, Y_train)
y_pred_lr = model_lr.predict(X_test)
print(round(accuracy_score(Y_test, y_pred_lr),2))
## Prediction results for Logistic Regression Model Above. Very biased model that only predicts 'Buy'
np.unique(y_pred_lr, return_counts=True)
# Class counts used as resampling targets below (class 0 = sell is the
# minority, class 2 = buy is the majority).
minority = np.unique(Y_train, return_counts=True)[1][0]
majority = np.unique(Y_train, return_counts=True)[1][2]
## Undersampling: shrink every class down to the minority-class size
from imblearn.under_sampling import RandomUnderSampler
# FIX: the original chained .fit(X_train, Y_train) onto the constructor and
# then called fit_resample, fitting the sampler twice; fit_resample alone
# both fits and resamples.
us = RandomUnderSampler(sampling_strategy={0:minority,1:minority,2:minority})
X_train_us, Y_train_us = us.fit_resample(X_train, Y_train)
# Refit logistic regression on the balanced training set.
model_us = LogisticRegression(solver='lbfgs', multi_class='ovr')
model_us.fit(X_train_us, Y_train_us)
y_pred_us = model_us.predict(X_test)
from sklearn.model_selection import cross_val_score
# 10-fold CV accuracy on the resampled training data.
accuracies_us = cross_val_score(estimator = model_us, X = X_train_us, y = Y_train_us, cv = 10)
print('Undersampling: Average Accuracy of {}, Standard Deviation of {}'.format(round(accuracies_us.mean(),2), round(accuracies_us.std(),2)))
## SMOTE: synthetically oversample every class up to the majority-class size
from imblearn.over_sampling import SMOTE
sm = SMOTE(sampling_strategy={0:majority,1:majority,2:majority})
# BUG FIX: fit_sample was deprecated in imbalanced-learn 0.4 and removed in
# 0.8; fit_resample is the supported API.
X_train_sm, Y_train_sm = sm.fit_resample(X_train, Y_train)
# Refit logistic regression on the oversampled training set.
model_sm = LogisticRegression(solver='lbfgs', multi_class='ovr').fit(X_train_sm, Y_train_sm)
y_pred_sm = model_sm.predict(X_test)
# 10-fold CV accuracy on the resampled training data.
accuracies_sm = cross_val_score(estimator = model_sm, X = X_train_sm, y = Y_train_sm, cv = 10)
print('SMOTE: Average Accuracy of {}, Standard Deviation of {}'.format(round(accuracies_sm.mean(),2), round(accuracies_sm.std(),2)))
## Grid Search and Hyperparameter Tuning
from sklearn.model_selection import GridSearchCV
# Search the regularization strength C and multiclass scheme for the SMOTE
# logistic-regression model.
parameters = [
    {
        'C': [800, 900, 1000, 1100, 1200, 1300, 1400, 1500],
        'solver': ['newton-cg'],
        'multi_class':['ovr', 'multinomial']
    },
]
# 10-fold cross-validated accuracy over the oversampled training data.
grid_search = GridSearchCV(estimator = model_sm, param_grid = parameters, scoring='accuracy', cv = 10)
grid_search = grid_search.fit(X = X_train_sm, y = Y_train_sm)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
best_accuracy
best_parameters
## Feature Selection for Model Optimization Using Backward Elimination Method
# BUG FIX: OLS lives in statsmodels.api; statsmodels.formula.api only exposes
# the lowercase formula interface (ols with a formula string), so sm.OLS
# raised AttributeError on modern statsmodels.
import statsmodels.api as sm
## Features after Backward Elimination Method: We will keep all our features
X_opt = X_train_sm[:, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]]
Y_opt = Y_train_sm
# OLS p-values drive the (manual) backward-elimination decision.
classification_OLS = sm.OLS(endog=Y_opt, exog = X_opt).fit()
classification_OLS.summary()
# Retrain logistic regression with the tuned hyperparameters on the kept
# feature set.
model_opt = LogisticRegression(C= best_parameters['C'], multi_class= best_parameters['multi_class'], solver= best_parameters['solver']).fit(X_opt, Y_opt)
y_pred_opt = model_opt.predict(X_test[:, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]])
accuracies_opt = cross_val_score(estimator = model_opt, X = X_opt, y = Y_opt, cv = 10)
print('Logistic Regression Optimial: Average Accuracy of {}, Standard Deviation of {}'.format(round(accuracies_opt.mean(),2), round(accuracies_opt.std(),2)))
print(round(accuracy_score(Y_test, y_pred_opt),2))
## Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
# 100 entropy-split trees on the same optimized feature set.
model_rf = RandomForestClassifier(n_estimators = 100, criterion = 'entropy').fit(X_opt, Y_opt)
y_pred_rf = model_rf.predict(X_test[:, [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]])
# 10-fold CV accuracy on the training data, then held-out test accuracy.
accuracies_rf = cross_val_score(estimator = model_rf, X = X_opt, y = Y_opt, cv = 10)
print('Random Forest: Average Accuracy of {}, Standard Deviation of {}'.format(round(accuracies_rf.mean(),2), round(accuracies_rf.std(),2)))
print(round(accuracy_score(Y_test, y_pred_rf),2))
# +
## Pickle Model
import pickle
filename = 'model.pkl'
# FIX: close the file handle deterministically instead of leaking it via
# pickle.dump(model_rf, open(filename, 'wb')).
with open(filename, 'wb') as model_file:
    pickle.dump(model_rf, model_file)
## Load Model
# NOTE(review): only unpickle files from trusted sources -- pickle.load can
# execute arbitrary code.
#model = pickle.load(open(filename, 'rb'))
## For MinMax Scaling
# BUG FIX: X_train is a NumPy array after MinMaxScaler.transform (no .iloc),
# and its values are already scaled to [0, 1].  The fitted scaler keeps the
# original per-feature extrema, which are what deployment code needs to
# rescale new inputs -- presumably the intent here; verify against the
# consumer of these csv files.
maximum = pd.Series(minmax.data_max_)
minimum = pd.Series(minmax.data_min_)
maximum.to_csv('./datasets/featurescalingmax.csv', header=True)
minimum.to_csv('./datasets/featurescalingmin.csv', header=True)
# -
| model_evaluation.ipynb |