code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Solution:
    def convertToTitle(self, n: int) -> str:
        """Return the Excel sheet column title for the 1-indexed column number n.

        Examples: 1 -> 'A', 26 -> 'Z', 27 -> 'AA', 52 -> 'AZ', 703 -> 'AAA'.
        """
        # Excel titles form a *bijective* base-26 system: digits run 1..26
        # ('A'..'Z') with no zero digit. Subtracting 1 before each divmod maps
        # 1..26 onto 0..25 so the 'Z' boundary (n % 26 == 0) needs no special case.
        # (The previous version mishandled exact multiples of 26, e.g. n=26 -> 'A',
        # and left a debug print in the n % 26 == 0 branch.)
        res = ''
        while n > 0:
            n, rem = divmod(n - 1, 26)
            res = chr(rem + 65) + res  # 65 == ord('A')
        return res
class Solution:
    def convertToTitle(self, n: int) -> str:
        """Return the Excel sheet column title for the 1-indexed column number n.

        Recursive formulation: peel off the last letter, recurse for the rest.
        (The previous version only produced at most two letters and returned
        'A' instead of 'AA' for n=27.)
        """
        res = ''
        k = (n - 1) // 26  # value encoded by the leading letters (quotient)
        m = (n - 1) % 26   # 0..25: index of the trailing letter (remainder)
        if k > 0:
            # Higher-order letters — works for any title length, not just two.
            res += self.convertToTitle(k)
        res += chr(m + 65)  # 65 == ord('A')
        return res
# + active=""
# Base 10 uses the digits 0–9
# Base 2 uses: 0 and 1
# So base 26 should use the digits 0–25
# +
import math
class Solution:
    def convertToTitle(self, n: int) -> str:
        """Return the Excel sheet column title for the 1-indexed column number n."""
        letters = []
        while n > 0:
            # Shift to 0-based before splitting off the lowest-order letter,
            # since Excel titles have no zero digit ('A'..'Z' encode 1..26).
            n, offset = divmod(n - 1, 26)
            letters.append(chr(65 + offset))
        return ''.join(reversed(letters))
# -
# Smoke test of the final implementation on a large column number.
solution = Solution()
solution.convertToTitle(10000)
# +
# 52 --> AZ
# -
# 26 + 64 == 90, i.e. the character 'Z'.
chr(26 + 64)
|
Math/1225/168. Excel Sheet Column Title.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fake news detection
# Fake news are stories that are false or manipulated, lack solid proof, or do not come from reliable sources.
# Dataset: https://www.kaggle.com/clmentbisaillon/fake-and-real-news-dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# ## Read datasets
fake = pd.read_csv("data/Fake.csv")
true = pd.read_csv("data/True.csv")
fake.shape
true.shape
# ## Data cleaning and preparation
# Add flag to track fake and real (this becomes the classification label)
fake['target'] = 'fake'
true['target'] = 'true'
fake.head()
true.head()
# Concatenate dataframes
data = pd.concat([fake, true]).reset_index(drop = True)
data.shape
data.head(5)
data.tail(5)
# Shuffle the data so fake and real rows are interleaved
from sklearn.utils import shuffle
data = shuffle(data)
data = data.reset_index(drop=True)
# Check the data
data.head()
data.info()
# Removing the date (dropped column — not used below)
data.drop(["date"],axis=1,inplace=True)
data.head()
# Removing the title (classification uses the article body only)
data.drop(["title"],axis=1,inplace=True)
data.head()
# +
# Convert to lowercase
data['text'] = data['text'].apply(lambda x: x.lower())
data.head()
# +
# Remove punctuation
import string
def punctuation_removal(text):
    """Return *text* with every character in string.punctuation removed."""
    kept = (ch for ch in text if ch not in string.punctuation)
    return ''.join(kept)
data['text'] = data['text'].apply(punctuation_removal)
# -
# Check
data.head()
# +
# Removing stopwords
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
# NOTE(review): `stop` is a list, so the membership test below is O(len(stop))
# per word — converting it to a set would speed this apply up considerably.
data['text'] = data['text'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
# -
data.head()
# ## Basic data exploration
# How many articles per subject?
print(data.groupby(['subject'])['text'].count())
data.groupby(['subject'])['text'].count().plot(kind="bar")
plt.show()
# How many fake and real articles?
print(data.groupby(['target'])['text'].count())
data.groupby(['target'])['text'].count().plot(kind="bar")
plt.show()
# +
# # !pip install wordcloud
# +
# Word cloud for fake news
from wordcloud import WordCloud
fake_data = data[data["target"] == "fake"]
all_words = ' '.join([text for text in fake_data.text])
wordcloud = WordCloud(width= 800, height= 500,
                      max_font_size = 110,
                      collocations = False).generate(all_words)
plt.figure(figsize=(10,7))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
# +
# Word cloud for real news
from wordcloud import WordCloud
real_data = data[data["target"] == "true"]
# BUG FIX: this previously joined fake_data.text, so the "real news" cloud
# displayed exactly the same words as the fake-news cloud above.
all_words = ' '.join([text for text in real_data.text])
wordcloud = WordCloud(width= 800, height= 500,
                      max_font_size = 110,
                      collocations = False).generate(all_words)
plt.figure(figsize=(10,7))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
# +
# Most frequent words counter
from nltk import tokenize
token_space = tokenize.WhitespaceTokenizer()  # module-level tokenizer reused by counter() below
def counter(text, column_text, quantity):
    """Bar-plot the *quantity* most frequent whitespace tokens in text[column_text].

    text: DataFrame holding the documents; column_text: name of the text column.
    """
    corpus = ' '.join(doc for doc in text[column_text])
    tokens = token_space.tokenize(corpus)
    freq = nltk.FreqDist(tokens)
    df_frequency = pd.DataFrame({"Word": list(freq.keys()),
                                 "Frequency": list(freq.values())})
    df_frequency = df_frequency.nlargest(columns="Frequency", n=quantity)
    plt.figure(figsize=(12, 8))
    ax = sns.barplot(data=df_frequency, x="Word", y="Frequency", color='blue')
    ax.set(ylabel="Count")
    plt.xticks(rotation='vertical')
    plt.show()
# -
# Most frequent words in fake news
counter(data[data["target"] == "fake"], "text", 20)
# Most frequent words in real news
counter(data[data["target"] == "true"], "text", 20)
# ## Modeling
# +
# Function to plot the confusion matrix
from sklearn import metrics
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as a heatmap with per-cell value labels.

    Parameters:
        cm: square confusion matrix (e.g. from sklearn.metrics.confusion_matrix).
        classes: axis tick labels, one per row/column of cm.
        normalize: if True, each row is converted to rates before plotting.
        title: figure title.
        cmap: matplotlib colormap for the heatmap.
    """
    # BUG FIX: normalization used to happen *after* plt.imshow(), so the heatmap
    # showed raw counts while the text labels and the colour threshold used the
    # normalized values. Normalize first so all three are consistent.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Label every cell; use white text on dark cells for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# -
# ### Split Data
# Split the data (80/20 split, fixed seed for reproducibility)
X_train,X_test,y_train,y_test = train_test_split(data['text'], data.target, test_size=0.2, random_state=42)
X_train.head()
y_train.head()
# ### Decision Tree Classifier
# +
from sklearn.tree import DecisionTreeClassifier
# Vectorizing and applying TF-IDF: bag-of-words counts -> TF-IDF -> decision tree
pipe = Pipeline([('vect', CountVectorizer()),
                 ('tfidf', TfidfTransformer()),
                 ('model', DecisionTreeClassifier(criterion= 'entropy',
                                                  max_depth = 20,
                                                  splitter='best',
                                                  random_state=42))])
# Fitting the model
model = pipe.fit(X_train, y_train)
# Accuracy
prediction = model.predict(X_test)
print("accuracy: {}%".format(round(accuracy_score(y_test, prediction)*100,2)))
# -
cm = metrics.confusion_matrix(y_test, prediction)
plot_confusion_matrix(cm, classes=['Fake', 'Real'])
|
FakeNewsDetection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Steam reforming with water-gas shift
#
# ## Known conversion and temperature in steam reforming
#
# Given temperature and conversion of steam reforming (DR), assume exit composition is in equilibrium with respect to water-gas shift (WGS), and determine WGS-conversion $X_{H_2O,WGS}$.
#
# Parameters:
# * $X_{CH_4}=0.2$
# * $T_{DR}=624~^\circ C$
# * $S/C=2.2$
# +
from numpy import array, log, exp, sqrt, zeros, prod, linspace
from scipy.optimize import bisect
from matplotlib import pyplot as plt
data=[line.split(';') for line in """z_i/(-);h_ig/(J/mol);s_ig/(J/mol/K);g_ig/(J/mol);cp_ig/(J/mol/K);cas_no/();phase/();formula/();formula_name_structure/();ant_name/();poling_no/();poling_formula/();poling_name/();poling_molwt/(g/mol);poling_tfp/(K);poling_tb/(K);poling_tc/(K);poling_pc/(bar);poling_vc/(cm3/mol);poling_zc/();poling_omega/();poling_delhf0/(kJ/mol);poling_delgf0/(kJ/mol);poling_delhb/(kJ/mol);poling_delhm/(kJ/mol);poling_v_liq/(cm3/mol);poling_t_liq/(K);poling_dipole/(Debye);p_ant_a/();p_ant_b/(K);p_ant_c/(K);p_ant_tmin/(K);p_ant_tmax/(K);p_ant_pvpmin/(bar);p_ant_pvpmax/(bar);eant_to/(K);eant_n/();eant_e/();eant_f/();eant_tmin/(K);eant_tmax/(K);eant_pvpmin/(bar);eant_pvpmax/(bar);wagn_a/();wagn_b/();wagn_c/();wagn_d/();wagn_tmin/(K);wagn_tmax/(K);wagn_pvpmin/(bar);wagn_pvpmax/(bar);range_tmin_to_1000/(K);range_1000_to_tmax/(K);molecular_weight/(g/mol);hf298_div_r/();a1_low/();a2_low/(K^-1);a3_low/(K^-2);a4_low/(K^-3);a5_low/(K^-4);a6_low/(K^-1);a7_low/();a1_high/();a2_high/(K^-1);a3_high/(K^-2);a4_high/(K^-3);a5_high/(K^-4);a6_high/(K^-1);a7_high/();reference/();source/();date/();ant_no/();ant_formula/();ant_name/();ant_a/();ant_b/();ant_c/();ant_tmin/(°C);ant_tmax/(°C);ant_code/()
0;0;130,802012845;-38998,6201299;28,8363121833;1333-74-0;G;H2 REF ELEMENT;H2 CALC FROM GURVIC'S TABLE;hydrogen;438;H2;hydrogen;2,016;13,83;20,27;32,98;12,93;64,2;0,303;-0,217;0;0;0,89;0,12;28,39;20;0;2,93954;66,7954;275,65;10,25;22,82;0,05;2;;;;;;;;;;;;;;;;;200;6000;2,01588;0;2,34433112;0,00798052075;-1,9478151e-05;2,01572094e-08;-7,37611761e-12;-917,935173;0,683010238;2,93286575;0,000826608026;-1,46402364e-07;1,54100414e-11;-6,888048e-16;-813,065581;-1,02432865;GURVICH 78;tpis;78;161;H2;hydrogen;6,14858;80,948;277,53;-259,2;-239,97;1,2;
0;-74520;186,034253085;-129986,112557;35,6126668172;74-82-8;G;CH4 RRHO;CH4 METHANE SAME AS THE ANHARMONIC BUT CALCULATED USING THE RRHO METHOD RATHER THAN THE NRRAO2.;methane;26;CH4;methane;16,043;90,69;111,66;190,56;45,992;98,6;0,286;0,011;-74,52;-50,45;8,17;0,94;35,54;90,68;0;3,7687;395,744;266,681;92,64;120,59;0,15;2;;;;;;;;;-6,02242;1,26652;-0,5707;-1,366;;190,55;;45,99;200;6000;16,04246;-8972,26656;5,14825732;-0,013700241;4,93749414e-05;-4,91952339e-08;1,70097299e-11;-10245,3222;-4,63322726;1,911786;0,0096026796;-3,38387841e-06;5,3879724e-10;-3,19306807e-14;-10099,2136;8,48241861;;g;8/99;137;CH4;methane;6,84377;435,4534;271,361;-196,85;-82,59;1,2;
0;-393510;213,789479981;-457251,333456;37,1352585732;124-38-9;G;CO2;CO2 CARBON-DIOXIDE;carbon dioxide;31;CO2;carbon dioxide;44,01;216,58;;304,12;73,74;94,07;0,274;0,225;-393,51;-394,38;;9,02;;;0;;;;;;;;;;;;;;;;;;;;;;;;200;6000;44,0098;-47328,105;2,356813;0,0089841299;-7,1220632e-06;2,4573008e-09;-1,4288548e-13;-48371,971;9,9009035;4,6365111;0,0027414569;-9,9589759e-07;1,6038666e-10;-9,1619857e-15;-49024,904;-1,9348955;GURVICH VOL 2 1991 P.27;L;7/88;164;CO2;carbon dioxide;9,81367;1340,9768;271,883;-119,74;-8,96;1,2;
0;-110530;197,672063199;-169465,925643;29,1409651441;630-08-0;G;CO;CO CARBON-MONOXIDE CALCULATED FROM TSIV TABLE.;carbon monoxide;30;CO;carbon monoxide;28,01;68,15;81,66;132,85;34,94;93,1;0,292;0,045;-110,53;-137,16;6,04;0,84;34,88;81;0,1;3,81912;291,743;267,996;69,73;88,08;0,2;2;;;;;;;;;;;;;;;;;200;6000;28,0104;-13293,628;3,5795335;-0,00061035369;1,0168143e-06;9,0700586e-10;-9,0442449e-13;-14344,086;3,5084093;3,0484859;0,0013517281;-4,8579405e-07;7,8853644e-11;-4,6980746e-15;-14266,117;6,0170977;TSIV 79;RUS;79;161;CO;carbon monoxide;6,72828;295,2279;268,243;-216,7;-140,29;1,2;
0;-241810;188,799783187;-298100,655357;33,58766993;7732-18-5;G;H2O;H2O;water;440;H2O;water;18,015;273,15;373,15;647,14;220,64;55,95;0,229;0,344;-241,81;-228,42;40,66;6,01;18,07;298,15;1,8;5,11564;1687,537;230,17;273,2;473,2;0,01;16;;;;;;;;;-7,77224;1,45684;-2,71942;-1,41336;273,2;647,3;0,01;221;200;6000;18,01528;-29084,817;4,1986352;-0,0020364017;6,5203416e-06;-5,4879269e-09;1,771968e-12;-30293,726;-0,84900901;2,6770389;0,0029731816;-7,7376889e-07;9,4433514e-11;-4,2689991e-15;-29885,894;6,88255;WOOLEY J. RES. NBS 92 (1987), 35. BASED ON HF298(L) FROM COX, WAGMAN & MEDVEDEV CODATA KEY VAL. FOR THERMO, HEMISPHERE 1989 P.21 AND HEAT OF VAP. FROM HAAR, GALLAGHER & KELL NBS/NRC TABLES, HEMISPHERE 1984.;L;5/89;162;H2O;water;8,05573;1723,6425;233,08;0,01;373,98;1,2;
0;0;191,632621307;-57135,2660428;29,124315278;7727-37-9;G;N2 REF ELEMENT;N2 REFERENCE ELEMENT HF=0. FROM TSIV TABLES;nitrogen;455;N2;nitrogen;28,014;63,15;77,35;126,2;34;90,1;0,289;0,037;0;0;5,58;0,72;34,84;78;0;3,61947;255,68;266,55;60,81;83,65;0,08;2;;;;;;;;;-6,11102;1,2189;-0,69366;-1,89893;;126,2;;34;200;6000;28,0134;0;3,53100528;-0,000123660988;-5,02999433e-07;2,43530612e-09;-1,40881235e-12;-1046,97628;2,96747038;2,95257637;0,0013969004;-4,92631603e-07;7,86010195e-11;-4,60755204e-15;-923,948688;5,87188762;;G;8/02;239;N2;nitrogen;6,72531;285,5727;270,09;-210;-147,05;1,2;
0;0;205,095940942;-61149,3547919;29,3783179536;7782-44-7;G;O2 REF ELEMENT;O2 CALCULATED FROM ORIGINAL VALUES;oxygen;460;O2;oxygen;31,999;54,36;90,17;154,58;50,43;73,37;0,288;;0;0;6,82;0,44;27,85;90;0;3,81634;319,013;266,7;64,29;97,2;0,02;2;;;;;;;;;;;;;;;;;200;6000;31,9988;0;3,78245636;-0,00299673416;9,84730201e-06;-9,68129509e-09;3,24372837e-12;-1063,94356;3,65767573;3,66096065;0,000656365811;-1,41149627e-07;2,05797935e-11;-1,29913436e-15;-1215,97718;3,41536279;GURVICH 1989. CORRECTED BY B.MCBRIDE NASA TP-2002-211556;TPIS;89;271;O2;oxygen;6,83706;339,2095;268,7;-218,8;-118,57;1,2;
0;0;154,846476768;-46167,4770484;20,78625;7440-37-1;G;AR REF ELEMENT;AR;argon;1;Ar;argon;39,948;83,8;87,27;150,86;48,98;74,57;0,291;-0,002;0;0;6,43;;29,1;90;0;3,74141;304,227;267,32;82,59;94,26;0,6;2;;;;;;;;;;;;;;;;;200;6000;39,948;0;2,5;0;0;0;0;-745,375;4,37967491;2,5;0;0;0;0;-745,375;4,37967491;C.E. MOORE ATOMIC ENERGY LEVELS NSRDS-NBS 35 (1971) P.211;g;5/97;15;Ar;argon;6,84064;340,2707;271,8;-189,37;-122,29;1,2;
0;0;126,153545211;-37612,6795046;20,78625;7440-59-7;G;He REF ELEMENT;HE;helium-4;450;He;helium;4,003;2,15;4,3;5,19;2,27;57,3;0,301;-0,39;0;0;0,08;;32,54;4,3;0;1,6836;8,1548;273,71;1,85;5,34;0,02;2;;;;;;;;;;;;;;;;;200;6000;4,0026;0;2,5;0;0;0;0;-745,375;0,928723974;2,5;0;0;0;0;-745,375;0,928723974;;g;5/97;175;He;helium-4;5,2712;13,5171;274,58;-271,39;-267,95;1,2;
0;82880;268,325246538;2878,82774459;81,934398014;71-43-2;G;C6H6;[F&W NOTATION A1] C6H6 BENZENE;benzene;187;C6H6;benzene;78,114;278,68;353,24;562,05;48,98;256;0,268;0,21;82,88;129,75;30,72;9,95;89,41;298,15;0;3,98523;1184,24;217,572;279,64;377,06;0,05;2;;;;;;;;;-7,01433;1,55256;-1,8479;-3,713;;562,16;;48,98;200;6000;78,11184;9968,11598;0,504818632;0,0185020642;7,38345881e-05;-1,18135741e-07;5,07210429e-11;8552,47913;21,6412893;11,0809576;0,0207176746;-7,52145991e-06;1,22320984e-09;-7,36091279e-14;4306,41035;-40,041331;SHIMANOUCHI AND PLIVA ET AL J. MOLEC. SPETROS 107,(1984),209;g;6/01;2460;C6H6;benzene;6,81432;1090,4312;197,146;-9,6;103,04;1,2;
0;-123100;295,971523164;-211343,909631;105,342922613;110-82-7;G;C6H12,cyclo-;C6H12 CYCLOHEXANE;cyclohexane;197;C6H!2;cyclohexane;84,161;279,69;353,93;553,5;40,73;308;0,273;0,211;-123,1;32,26;29,97;2,63;108,75;298,15;0,3;3,93002;1182,774;220,618;282,11;378,46;0,06;2;25;3,40407;10,048;-126,96;378,15;553,15;1,9871;40,48;;;;;;;;;200;6000;84,15948;-14829,4969;4,04357527;-0,00619608335;0,000176622274;-2,22968474e-07;8,63668578e-11;-16920,3544;8,52527441;13,214597;0,0358243434;-1,32110852e-05;2,17202521e-09;-1,31730622e-13;-22809,2102;-55,3518322;<NAME> & JORISH JPCRD 15 (1986) 437;g;6/90;2823;C6H12;cyclohexane;6,88938;1200,8256;218,815;-14,92;105,07;1,2;
0;-166920;387,462696222;-282442,002879;142,590479962;110-54-3;G;C6H14,n-hexane;C6H14 N-HEXANE;hexane;216;c6hm;hexane;86,177;177,84;341,88;507,6;30,35;368;0,264;0,3;-166,92;0,15;28,85;13,07;131,59;298,15;0;4,00139;1170,875;224,317;254,24;365,25;0,02;2;;;;;;;;;-7,53998;1,83759;-2,5438;-3,163;;507,9;;30,35;200;6000;86,17536;-20075,7471;9,87121167;-0,00936699002;0,000169887865;-2,1501952e-07;8,45407091e-11;-23718,5495;-12,4999353;19,5158086;0,0267753942;-7,49783741e-06;1,19510646e-09;-7,51957473e-14;-29436,2466;-77,4895497;;g;6/01;3139;C6H14;hexane;6,98978;1216,9154;227,451;-24,29;92,12;1,2;
0;-200940;240,19493704;-272554,120478;42,9167106558;67-56-1;G;CH3OH Methyl alc;CH4O METHANOL (CH3OH);methyl alcohol;27;ch4o;methanol;32,042;175,49;337,69;512,64;80,92;118;0,224;0,565;-200,94;-162,24;35,21;3,18;40,73;298,15;1,7;5,20277;1580,08;239,5;262,59;356;0,02;2;;;;;;;;;-8,63571;1,17982;-2,479;-1,024;;512,64;;80,92;200;6000;32,04216;-24174,6056;5,65851051;-0,0162983419;6,91938156e-05;-7,58372926e-08;2,8042755e-11;-25611,9736;-0,897330508;3,52726795;0,0103178783;-3,62892944e-06;5,77448016e-10;-3,42182632e-14;-26002,8834;5,16758693;CHEN WILHOIT & ZWOLINSKI JPCRD 6,(1977),105;T;06/02;144;CH4O;methyl alcohol;8,08404;1580,4585;239,096;-15,99;199,45;1,2;
0;-352400;285,738660054;-437592,981495;62,4363045378;107-31-3;G;C2H4O2 methylfor;C2H4O2 METHYLFORMATE HCOOCH3;methyl formate;61;C2H4O2;methyl methanoate (methyl formate);60,053;174,15;304,9;487,2;60;172;0,255;;-352,4;-294,9;27,92;;62,14;298,15;1,8;4,29529;1125,2;230,56;230,3;324,29;0,02;2;;;;;;;;;;;;;;;;;200;6000;60,05196;-43032,7223;5,96757028;-0,00938085425;7,07648417e-05;-8,29932227e-08;3,13522917e-11;-44870,9982;0,750341113;6,3336088;0,0134851485;-4,84305805e-06;7,81719241e-10;-4,67917447e-14;-46131,3237;-6,91542601;ATCT C 2011;T;7/11;460;C2H4O2;methyl formate;7,05336;1138,3109;236,959;-48,91;214,05;1,2;
0;-378600;246,867144773;-452203,439214;41,3052664729;64-18-6;G;HCOOH FORMIC ACID;CH2O2 METHANOIC (FORMIC) ACID HCOOH MONOMER;formic acid;22;ch2o2;methanoic acid (formic acid);46,026;281,5;374,04;588;58,07;;;0,316;-378,6;-35,06;22,69;12,72;;;1,5;;;;;;;;;;;;;;;;-7,24917;0,44255;-0,35558;-0,96906;;588;;58,07;200;6000;46,02568;-45531,246;3,8983616;-0,0035587795;3,5520538e-05;-4,3849959e-08;1,7107769e-11;-46770,609;7,3495397;4,6138316;0,0064496364;-2,2908251e-06;3,6716047e-10;-2,1873675e-14;-47514,85;0,84788383;CHAO & ZWOLINSKI JPCRD 7.(1978),363;L;8/88;108;CH2O2;formic acid;6,96405;1162,8529;184,037;8,4;314,85;1,2;
0;-83820;228,779508382;-152030,610424;52,5009107486;74-84-0;G;C2H6;C2H6 ETHANE;ethane;65;C2H6;ethane;30,07;90,35;184,55;305,32;48,71;145,5;0,279;0,099;-83,82;-31,86;14,7;2,86;46,15;90,36;0;3,95405;663,72;256,681;133,8;198,16;0,02;2;;;;;;;;;-6,475;1,41071;-1,144;-1,859;;305,33;;48,71;200;6000;30,06904;-10084,9652;4,29142572;-0,00550154901;5,99438458e-05;-7,08466469e-08;2,68685836e-11;-11522,2056;2,66678994;4,04666411;0,0153538802;-5,47039485e-06;8,77826544e-10;-5,23167531e-14;-12447,3499;-0,968698313;CHAO WILHOIT & ZWOLINSKI JPCRD 2,(1973), 427;g;8/88;491;C2H6;ethane;6,95185;698,9298;260,264;-142,83;32,17;1,2;
0;52500;218,878873835;-12758,7362338;42,8868953326;74-85-1;G;C2H4;C2H4 ETHYLENE;ethylene;55;C2H4;ethene (ethylene);28,054;103,99;169,42;282,34;50,41;131,1;0,282;0,087;52,5;68,48;13,53;3,35;51,07;183,15;0;3,91382;596,526;256,37;123,06;181,9;0,02;2;-99;2,79132;9,717;52,77;188,15;273,15;2,71;40,99;;;;;;;;;200;6000;28,05316;6314,26266;3,95920063;-0,00757051373;5,70989993e-05;-6,91588352e-08;2,6988419e-11;5089,77598;4,09730213;3,99182724;0,0104833908;-3,71721342e-06;5,94628366e-10;-3,53630386e-14;4268,65851;-0,269081762;CHAO & ZWOLINSKY, JPCRD 4,(1975),251;g;1/00;428;C2H4;ethylene;6,96867;649,8057;262,73;-169,15;9,2;1,2;
0;-104680;269,715634825;-185095,716523;73,5887322571;74-98-6;G;C3H8;C3H8 PROPANE CH3CH2CH3;propane;95;C3H8;propane;44,097;91,45;231,02;369,83;42,47;200;0,276;0,152;-104,68;-24,29;19,04;3,53;74,87;233,15;0;3,92828;803,997;247,04;168,9;247,76;0,02;2;;;;;;;;;-6,76368;1,55481;-1,5872;-2,024;;369,85;;42,47;200;6000;44,09562;-12590,0384;4,21093013;0,00170886504;7,06530164e-05;-9,20060565e-08;3,64618453e-11;-14381,0883;5,61004451;6,6691976;0,0206108751;-7,36512349e-06;1,18434262e-09;-7,0691463e-14;-16275,4066;-13,1943379;CHAO WILHOIT & ZWOLINSKI JPCRD 2, (1973),427;g;2/00;959;C3H8;propane;7,02022;889,8642;257,084;-109,27;96,74;1,2;
0;20000;266,234106556;-59377,6988696;64,4333605623;115-07-1;G;C3H6 propylene;C3H6 PROPYLENE;propylene;85;C3H6;propene (propylene);42,081;87,89;225,46;364,9;46;184,6;0,28;0,142;20;62,5;18,42;3;;;0,4;3,95606;789,624;247,58;165,2;241,61;0,02;2;-41;2,67417;22,13;-199,34;238,15;363,15;1,74;44,67;;;;;;;;;200;6000;42,07974;2405,43339;3,83464468;0,00329078952;5,05228001e-05;-6,66251176e-08;2,63707473e-11;788,717123;7,53408013;6,03870234;0,0162963931;-5,821308e-06;9,35936829e-10;-5,58603143e-14;-741,715057;-8,43825992;CHAO & ZWOLINSKI JPCRD 4,(1975) 251;g;2/00;883;C3H6;propylene;7,00725;859,722;255,895;-112,78;91,06;1,2;
0;-20630;205,781166808;-81983,6548839;34,2548560182;7783-06-4;G;H2S anharmonic;H2S ANHARMONIC;hydrogen sulfide;441;H2S;hydrogen sulfide;34,082;187,62;212,84;373,4;89,63;98;0,283;0,09;-20,63;-33,43;18,68;2,38;34,32;214;0,9;4,22882;806,933;251,39;185,51;227,2;0,2;2;;;;;;;;;;;;;;;;;200;6000;34,08188;-2477,59639;4,12024455;-0,00187907426;8,2142665e-06;-7,0642573e-09;2,1423486e-12;-3682,15173;1,53174068;2,9787943;0,00359760372;-1,22803151e-06;1,96833209e-10;-1,16716162e-14;-3516,07638;6,77921228;GURVICH 1989;g;4/01;164;H2S;hydrogen sulfide;7,11958;802,2266;249,61;-85,47;100,38;1,2;
0;nan;311,477408685;nan;90,2348916052;7664-93-9;G;H2SO4;H2SO4 SULFURIC ACID;sulfuric acid;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;200;6000;98,07948;-88123,0524;4,53388173;0,0310347679;-4,10421795e-05;2,95752341e-08;-8,81459071e-12;-90545,9072;3,93961412;11,3355392;0,00560829109;-1,94574192e-06;3,07136054e-10;-1,81109544e-14;-92108,7435;-29,6094003;DOROFEEVA ET AL JPCRD 32 (2003),879 . CALCULATED FROM ORIGINAL TABLES.;T;8/03;165;H2SO4;sulfuric acid;8,28538;2976,837;213,95;25;336,85;1,2;
0;-395720;256,536422994;-472206,334516;50,6194285115;7446-11-9;G;SO3;SO3;sulfur trioxide;463;O3S;sulfur trioxide;80,064;289,95;317,9;490,9;82,1;126,5;0,254;;-395,72;-370,93;40,69;7,53;42,1;298,15;;6,17575;1735,31;236,5;284,5;332,04;0,15;2;;;;;;;;;;;;;;;;;200;6000;80,0642;-47615,554;2,37461122;0,0159543297;-1,26322543e-05;2,81827264e-09;6,23371547e-13;-48926,9231;13,1043046;7,29677572;0,00273576437;-1,06377755e-06;1,80776031e-10;-1,12077527e-14;-50309,6739;-12,4246659;GURVICH 89;tpis;89;361;SO3;sulfur trioxide;6,85638;754,8178;145,11;16,8;217,7;1,2;
0;-296810;248,127032091;-370789,074618;39,8424312772;7446-09-5;G;SO2;SO2 O-S-O;sulfur dioxide;461;O2S;sulfur dioxide;64,065;197,67;263,13;430,8;78,84;122;0,269;;-296,81;-300,14;24,94;7,4;44,03;263,15;1,6;4,4072;999,9;237,19;199,71;279,47;0,02;2;;;;;;;;;;;;;;;;;200;6000;64,0648;-35697,8343;3,67480752;0,00228302107;8,46893049e-06;-1,36562039e-08;5,76271873e-12;-36945,5073;7,9686643;5,38423482;0,0016793056;-6,32062944e-07;1,08465348e-10;-6,66890336e-15;-37606,7022;-1,83130517;GURVICH 89;tpis;89;356;SO2;sulfur dioxide;7,33311;1013,4609;237,65;-75,48;157,6;1,2;
T=298,15 K;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
""".split('\n')]
# Dead exploratory code from an earlier attempt to load the table with pandas:
#df=pd.DataFrame(data)
#df.rename(columns=df.iloc[0])
#new_header=df.iloc[0]
#df=df[1:]
#df.columns=new_header
# Parse the semicolon-separated table above. Per the header row: column 7 is the
# formula, 21/22 are Delta_f H0 / Delta_f G0 in kJ/mol, 55:62 and 62:69 are the
# NASA-7 low-T and high-T coefficient sets. European decimal commas are turned
# into points before float conversion; data[1:-2] skips the header and the two
# trailer rows.
names=[item[7] for item in data[1:-2] if len(item[21])>0] # exclude sulfuric acid with no entry on col. 21
delhf0=array([item[21].replace(',','.') for item in data[1:-2] if len(item[21])>0],dtype=float).T*1000 # exclude sulfuric acid with no entry on col. 21
delhg0=array([item[22].replace(',','.') for item in data[1:-2] if len(item[21])>0],dtype=float).T*1000 # exclude sulfuric acid with no entry on col. 21
a_low=array([[x.replace(',','.') for x in item[55:62]] for item in data[1:-2] if len(item[21])>0],dtype=float).T # exclude sulfuric acid with no entry on col. 21
a_high=array([[x.replace(',','.') for x in item[62:69]] for item in data[1:-2] if len(item[21])>0],dtype=float).T # exclude sulfuric acid with no entry on col. 21
R=8.3145 # J/mol/K
def therm_ig(t=298.15):
    """Return ideal-gas thermodynamic properties at temperature t (K) from NASA-7 polynomials.

    Input: temperature in K.
    Output: h_ig/(J/mol), s_ig/(J/mol/K), g_ig/(J/mol), cp_ig/(J/mol/K), one list
    entry per species in `names`. Relies on module-level a_low, a_high, delhf0, R.
    """
    if t>1000: # poly a_low is for 200 - 1000 K; a_high is for 1000 - 6000 K
        a=a_high
    else:
        a=a_low
    # Cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4
    cp_r=[sum([a[j][i]*t**j for j in range(4+1)]) for i in range(len(names))] # cp/R
    cp_ig=[R*cp_r[i] for i in range(len(names))] # J/mol/K
    # int(Cp/R*dT, 298.15 K, T); the reference integral always uses a_low since 298.15 < 1000 K
    s_cp_r_dt=[
        sum([1/(j+1)*a[j][i]*t**(j+1) for j in range(4+1)])
        -sum([1/(j+1)*a_low[j][i]*298.15**(j+1) for j in range(4+1)])
        for i in range(len(names))]
    # int(Cp/R/T*dT): S/R = a1*ln(T) + a2*T + a3*T^2/2 + a4*T^3/3 + a5*T^4/4 + a7
    # BUG FIX: the j=4 term (a5*T^4/4) was missing — the sum ran over range(1,3+1).
    # With it, e.g. S°(H2O, 298.15 K) evaluates to ~188.83 J/mol/K instead of ~188.80.
    s_cp_r_t_dt=[a[0][i]*log(t)+a[6][i]+
        sum([1/(j)*a[j][i]*t**(j) for j in range(1,4+1)])
        for i in range(len(names))]
    h_ig=[delhf0[i]+R*s_cp_r_dt[i] for i in range(len(names))]
    s_ig=[R*s_cp_r_t_dt[i] for i in range(len(names))]
    g_ig=[h_ig[i]-t*s_ig[i] for i in range(len(names))]
    return h_ig,s_ig,g_ig,cp_ig
therm_ig(1000) # spot check at the a_low/a_high switchover temperature (t>1000 is False, so a_low is used)
# -
# (H2O+CH4, S/C=2.2) -->> DR (X_CH4=0,2) -->> WGS (GGW) -->> Prod
#
# (0) (1) (2)
#
# $\begin{align}y_{0,CH_4}=\frac{1}{1+2.2}=0.3125\\
# y_{0,H_2O}=\frac{2.2}{1+2.2}=0.6875\end{align}$
#
# DR, $X_{CH_4}$ fixed
#
# $\tilde{y}_{i,DR}=\frac{\tilde{y}_{i,0}+\tilde{y}_{CH_4,0}\frac{\nu_{i,DR}}{-\nu_{CH_4,DR}}X_{CH_4}}{1+\underbrace{\tilde{y}_{CH_4,0}\frac{\sum_i(\nu_{i,DR})}{-\nu_{CH_4,DR}}}_{\epsilon_{CH_4}}X_{CH_4}}$
#
# WGS, Equilibrium at $T_{DR}$ fixed
#
# $K(T)=exp\left(\frac{-\Delta_R G^\circ(T)}{R T}\right)=K_\phi \frac{(\tilde{y}_{CO_2}p/p^\circ)(\tilde{y}_{H_2}p/p^\circ)}{(\tilde{y}_{CO}p/p^\circ)(\tilde{y}_{H_2O}p/p^\circ)}=1\cdot\frac{(\tilde{y}_{CO_2,DR}+\tilde{y}_{H_2O,DR}X_{H_2O,WGS})(\tilde{y}_{H_2,DR}+\tilde{y}_{H_2O,DR}X_{H_2O,WGS})}{(\tilde{y}_{CO,DR}-\tilde{y}_{H_2O,DR}X_{H_2O,WGS})(\tilde{y}_{H_2O,DR}-\tilde{y}_{H_2O,DR}X_{H_2O,WGS})}$
#
# $\Rightarrow 0=(1-K(T))\cdot X_{H_2O,WGS}^2+\left(\frac{\tilde{y}_{CO_2,DR}}{\tilde{y}_{H_2O,DR}}+\frac{\tilde{y}_{H_2,DR}}{\tilde{y}_{H_2O,DR}}+K(T)\cdot\left(\frac{\tilde{y}_{CO,DR}}{\tilde{y}_{H_2O,DR}}+1\right)\right)\cdot X_{H_2O,WGS}+\frac{\tilde{y}_{CO_2,DR}\cdot\tilde{y}_{H_2,DR}}{\tilde{y}_{H_2O,DR}^2}-K(T)\frac{\tilde{y}_{CO,DR}}{\tilde{y}_{H_2O,DR}}$
#
# (solve quadratic for $X_{H_2O,WGS} \Rightarrow \tilde{y}_{i,WGS}$)
# +
sc=2.2 # steam to carbon
X_ch4=0.2 # CH4 conversion
T=(624+273.15) # temperature in DR
# Species order (first five rows of the data table): H2, CH4, CO2, CO, H2O.
# The 1-based indices below (y_dr[2-1] etc.) follow that order.
nu=zeros([len(names),2]) # stoich. coefficients
nu[:5,0]=[3,-1,0,1,-1] # H2O+CH4->CO+3H2
nu[:5,1]=[1,0,1,-1,-1] # CO+H2O->CO2+H2
h,s,g,cp=therm_ig(T) # ideal gas therm. properties
delta_r_g=nu.T.dot(g) # Gibbs free energy change, both reactions
y0=zeros(len(names))
y0_ch4=1/(1+sc)
nu_ch4=nu[2-1,0]
y0[2-1]=1/(1+sc) # methane
y0[5-1]=sc/(1+sc) # steam
y_dr=(y0+y0[2-1]*nu[:,0]/-nu[2-1,0]*X_ch4)/(1+y0[2-1]*sum(nu[:,0])/-nu[2-1,0]*X_ch4) # exit composition of DR
K=exp(-delta_r_g/(R*T))
# Quadratic a*X^2 + b*X + c = 0 for X_H2O,WGS, per the derivation above.
a=1-K[1]
b=y_dr[3-1]/y_dr[5-1]+y_dr[1-1]/y_dr[5-1]+K[1]*(y_dr[4-1]/y_dr[5-1]+1)
c=y_dr[1-1]*y_dr[3-1]/y_dr[5-1]**2-K[1]*y_dr[4-1]/y_dr[5-1]
r1,r2=[(-b+sqrt(b**2-4*a*c))/(2*a),(-b-sqrt(b**2-4*a*c))/(2*a)]
X_h2o_wgs=r1 # first root taken — presumably the physically meaningful 0..1 one; verify
y_dr_wgs=(y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)/(1+y_dr[5-1]*sum(nu[:,1])/-nu[5-1,1]*X_h2o_wgs) # exit composition of WGS after DR (sum(nu[:,1])=0, so no mole change)
print('{:10.6s}{:10.10s}{:10.10s}'.format('i','y_dr_i','y_WGS_i'))
for i,name in enumerate(names):
    print('{:10.6s}{:10.5g}{:10.5g}'.format(name,y_dr[i],y_dr_wgs[i]))
print('\nX(CH_4,DR)={:0.5g}, X(H_2O,WGS)={:0.5g}'.format(X_ch4,X_h2o_wgs))
# -
# Residual form of the WGS equilibrium: zero where X satisfies K(T).
f=lambda X_h2o_wgs: K[1]-prod((y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)**nu[:,1])
x=linspace(0,0.8,200)
plt.plot(x,[f(x_var) for x_var in x])
plt.xlabel('$X_{H_2O, WGS}$')
plt.ylabel(r'$K(T)-\Pi_i(y_i p/p^0)^{\nu_i}$');
[f(0.0975),f(0.99)]
# Cross-check the quadratic root against a numerical bracketing solve.
bisect(lambda X_h2o_wgs: K[1]-prod((y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)**nu[:,1]),0.0975,0.99)
a=1-K[1]
b=y_dr[3-1]/y_dr[5-1]+y_dr[1-1]/y_dr[5-1]+K[1]*(y_dr[4-1]/y_dr[5-1]+1)
c=y_dr[1-1]*y_dr[3-1]/y_dr[5-1]**2-K[1]*y_dr[4-1]/y_dr[5-1]
r1,r2=[(-b+sqrt(b**2-4*a*c))/(2*a),(-b-sqrt(b**2-4*a*c))/(2*a)]
[r1,r2,f(r1),f(r2)] # both roots with their equilibrium residuals
# +
# Repeat of the cell above with X_CH4 = 0.5 — only the CH4 conversion differs.
sc=2.2 # steam to carbon
X_ch4=0.5 # CH4 conversion
T=(624+273.15) # temperature in DR
nu=zeros([len(names),2]) # stoich. coefficients
nu[:5,0]=[3,-1,0,1,-1] # H2O+CH4->CO+3H2
nu[:5,1]=[1,0,1,-1,-1] # CO+H2O->CO2+H2
h,s,g,cp=therm_ig(T) # ideal gas therm. properties
delta_r_g=nu.T.dot(g) # Gibbs free energy change, both reactions
y0=zeros(len(names))
y0_ch4=1/(1+sc)
nu_ch4=nu[2-1,0]
y0[2-1]=1/(1+sc) # methane
y0[5-1]=sc/(1+sc) # steam
y_dr=(y0+y0[2-1]*nu[:,0]/-nu[2-1,0]*X_ch4)/(1+y0[2-1]*sum(nu[:,0])/-nu[2-1,0]*X_ch4) # exit composition of DR
K=exp(-delta_r_g/(R*T))
a=1-K[1] # quadratic coefficients for X_H2O,WGS (see derivation above)
b=y_dr[3-1]/y_dr[5-1]+y_dr[1-1]/y_dr[5-1]+K[1]*(y_dr[4-1]/y_dr[5-1]+1)
c=y_dr[1-1]*y_dr[3-1]/y_dr[5-1]**2-K[1]*y_dr[4-1]/y_dr[5-1]
r1,r2=[(-b+sqrt(b**2-4*a*c))/(2*a),(-b-sqrt(b**2-4*a*c))/(2*a)]
X_h2o_wgs=r1 # first root taken — presumably the physical 0..1 one; verify
y_dr_wgs=(y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)/(1+y_dr[5-1]*sum(nu[:,1])/-nu[5-1,1]*X_h2o_wgs) # exit composition of WGS after DR
print('{:10.6s}{:10.10s}{:10.10s}'.format('i','y_dr_i','y_WGS_i'))
for i,name in enumerate(names):
    print('{:10.6s}{:10.5g}{:10.5g}'.format(name,y_dr[i],y_dr_wgs[i]))
print('\nX(CH_4,DR)={:0.5g}, X(H_2O,WGS)={:0.5g}'.format(X_ch4,X_h2o_wgs))
# +
# Repeat of the cell above with X_CH4 = 0.8 — only the CH4 conversion differs.
sc=2.2 # steam to carbon
X_ch4=0.8 # CH4 conversion
T=(624+273.15) # temperature in DR
nu=zeros([len(names),2]) # stoich. coefficients
nu[:5,0]=[3,-1,0,1,-1] # H2O+CH4->CO+3H2
nu[:5,1]=[1,0,1,-1,-1] # CO+H2O->CO2+H2
h,s,g,cp=therm_ig(T) # ideal gas therm. properties
delta_r_g=nu.T.dot(g) # Gibbs free energy change, both reactions
y0=zeros(len(names))
y0_ch4=1/(1+sc)
nu_ch4=nu[2-1,0]
y0[2-1]=1/(1+sc) # methane
y0[5-1]=sc/(1+sc) # steam
y_dr=(y0+y0[2-1]*nu[:,0]/-nu[2-1,0]*X_ch4)/(1+y0[2-1]*sum(nu[:,0])/-nu[2-1,0]*X_ch4) # exit composition of DR
K=exp(-delta_r_g/(R*T))
a=1-K[1] # quadratic coefficients for X_H2O,WGS (see derivation above)
b=y_dr[3-1]/y_dr[5-1]+y_dr[1-1]/y_dr[5-1]+K[1]*(y_dr[4-1]/y_dr[5-1]+1)
c=y_dr[1-1]*y_dr[3-1]/y_dr[5-1]**2-K[1]*y_dr[4-1]/y_dr[5-1]
r1,r2=[(-b+sqrt(b**2-4*a*c))/(2*a),(-b-sqrt(b**2-4*a*c))/(2*a)]
X_h2o_wgs=r1 # first root taken — presumably the physical 0..1 one; verify
y_dr_wgs=(y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)/(1+y_dr[5-1]*sum(nu[:,1])/-nu[5-1,1]*X_h2o_wgs) # exit composition of WGS after DR
print('{:10.6s}{:10.10s}{:10.10s}'.format('i','y_dr_i','y_WGS_i'))
for i,name in enumerate(names):
    print('{:10.6s}{:10.5g}{:10.5g}'.format(name,y_dr[i],y_dr_wgs[i]))
print('\nX(CH_4,DR)={:0.5g}, X(H_2O,WGS)={:0.5g}'.format(X_ch4,X_h2o_wgs))
# -
# ## Known conversion in steam reforming and water-gas shift
#
# Determine $T_{DR}$ for known conversions of both reactions, assuming WGS is in equilibrium.
#
# Parameters:
# * $X_{CH_4}=0.5$
# * $X_{H_2O,WGS}=0.2148$
# * $S/C=2.2$
def f(T,K):
    """Residual K - K_eq(T) for the WGS reaction; zero where K matches equilibrium at T (K in kelvin)."""
    _, _, gibbs, _ = therm_ig(T)     # only the Gibbs energies are needed
    dg_wgs = nu[:,1].T.dot(gibbs)    # Delta_R G of the WGS reaction, J/mol
    return K - exp(-dg_wgs/(R*T))
# +
sc=2.2 # steam to carbon
X_ch4=0.5 # CH4 conversion
nu=zeros([len(names),2]) # stoich. coefficients
nu[:5,0]=[3,-1,0,1,-1] # H2O+CH4->CO+3H2
nu[:5,1]=[1,0,1,-1,-1] # CO+H2O->CO2+H2
y0=zeros(len(names))
y0_ch4=1/(1+sc)
nu_ch4=nu[2-1,0]
y0[2-1]=1/(1+sc) # methane
y0[5-1]=sc/(1+sc) # steam
y_dr=(y0+y0[2-1]*nu[:,0]/-nu[2-1,0]*X_ch4)/(1+y0[2-1]*sum(nu[:,0])/-nu[2-1,0]*X_ch4) # exit composition of DR
# NOTE(review): 0.08694375 is a hard-coded WGS extent of reaction — confirm its source.
X_h2o_wgs=0.08694375/y_dr[5-1]
K=prod((y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)**nu[:,1])
# Invert ln K = -(DeltaH - T*DeltaS)/(R T) for T. The constants -34783 J/mol and
# -31.762 J/mol/K match DeltaH and DeltaS computed at 1000 K in a cell below.
T_wgs=-34783/8.3145/(-31.762/8.3145-log(K))-273.15
print('\nX(CH_4,DR)={:0.5g}, X(H_2O,WGS)={:0.5g}, T(WGS)={:0.5g} °C'.format(X_ch4,X_h2o_wgs,T_wgs))
y_dr_wgs=(y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)/(1+y_dr[5-1]*sum(nu[:,1])/-nu[5-1,1]*X_h2o_wgs) # exit composition of WGS after DR
print('{:10.6s}{:10.10s}{:10.10s}'.format('i','y_dr_i','y_WGS_i'))
for i,name in enumerate(names):
    print('{:10.6s}{:10.5g}{:10.5g}'.format(name,y_dr[i],y_dr_wgs[i]))
# -
-34783/8.3145/(-31.762/8.3145-log(K))-273.15 # same temperature, echoed as cell output
K
def g(T):
    """Return the ideal-gas entropy vector s_ig (J/mol/K) for all species at temperature T in K."""
    return therm_ig(T)[1]
x=linspace(500,800,200)+273.15
y=[g(x_var)[0:3] for x_var in x] # entropies of the first three species (H2, CH4, CO2)
plt.plot(x,y)
K
x=linspace(500,800,200)+273.15
y=[K-f(x_var,K) for x_var in x] # equals exp(-DeltaG/(R T)), i.e. K_eq(T)
plt.plot(x-273.15,y,'o')
# NOTE(review): this curve is ln K (Van 't Hoff line) while the points above are K
# itself — the two series are on different scales; verify the intended comparison.
plt.plot(x-273.15,-(-34783+31.762*x)/(R*x))
# +
T=linspace(298.15,860+273.15,30) # K
# 726,85 °C = 1000 K
DeltaG=(-395.886+0-(-192.590-200.275))*1000 # J/mol
DeltaH=(-394.623+0-(-247.857-111.983))*1000 # J/mol
DeltaS=(DeltaH-DeltaG)/(726.85+273.15) # J/mol/K
plt.semilogy(1/T,exp(-DeltaH/(R*T)+DeltaS/R),label=r'$K=e^{-\Delta_R H^\circ_{726.85°C}/(R T)+\Delta_R S^\circ_{726.85°C}/R}$') # Van 't Hoff estimate anchored at 1000 K
def g(T):
h,s,g,cp=therm_ig(T) # ideal gas therm. properties
return nu[:,1].dot(g)
y=array([g(x_var) for x_var in T])
plt.semilogy(1/T,exp(-y/(R*T)),label=r'$polynomial$') # ref. 800°C
plt.legend();
# +
# Same DR + WGS calculation as the first cell, but at higher methane conversion
sc=2.2 # steam to carbon
X_ch4=0.8 # CH4 conversion
nu=zeros([len(names),2]) # stoich. coefficients
nu[:5,0]=[3,-1,0,1,-1] # H2O+CH4->CO+3H2
nu[:5,1]=[1,0,1,-1,-1] # CO+H2O->CO2+H2
y0=zeros(len(names))
y0_ch4=1/(1+sc)
nu_ch4=nu[2-1,0]
y0[2-1]=1/(1+sc) # methane
y0[5-1]=sc/(1+sc) # steam
y_dr=(y0+y0[2-1]*nu[:,0]/-nu[2-1,0]*X_ch4)/(1+y0[2-1]*sum(nu[:,0])/-nu[2-1,0]*X_ch4) # exit composition of DR
# NOTE(review): 0.07455594 appears to be a precomputed shifted-H2O amount —
# confirm provenance.
X_h2o_wgs=0.07455594/y_dr[5-1]
K=prod((y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)**nu[:,1])
T_wgs=-34783/8.3145/(-31.762/8.3145-log(K))-273.15
(-394.623+0-(-247.857-111.983))*1000
print('\nX(CH_4,DR)={:0.5g}, X(H_2O,WGS)={:0.5g}, T(WGS)={:0.5g} °C'.format(X_ch4,X_h2o_wgs,T_wgs))
y_dr_wgs=(y_dr+y_dr[5-1]*nu[:,1]/-nu[5-1,1]*X_h2o_wgs)/(1+y_dr[5-1]*sum(nu[:,1])/-nu[5-1,1]*X_h2o_wgs) # exit composition of WGS after DR
print('{:10.6s}{:10.10s}{:10.10s}'.format('i','y_dr_i','y_WGS_i'))
for i,name in enumerate(names):
    print('{:10.6s}{:10.5g}{:10.5g}'.format(name,y_dr[i],y_dr_wgs[i]))
# -
K
# Consistency check: recompute K from the post-WGS composition
prod(y_dr_wgs**nu[:,1])
|
DR_WGS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The following example shows you how to display the difference between two observations of each groups in a horizontal lollipop plot using the `hlines()` and the `scatter()` functions.
# +
# libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Toy dataset: 20 groups (A..T), a baseline observation and a second,
# slightly larger one (same RNG call order as the original example).
value1 = np.random.uniform(size=20)
value2 = value1 + np.random.uniform(size=20) / 4
df = pd.DataFrame({'group': list(map(chr, range(65, 85))),
                   'value1': value1,
                   'value2': value2})

# Sort groups by the first observation so the lollipops stack monotonically
df_sorted = df.sort_values(by='value1')
ypos = range(1, len(df.index) + 1)

# Grey connector between the two observations, then one dot per observation
plt.hlines(y=ypos, xmin=df_sorted['value1'], xmax=df_sorted['value2'], color='grey', alpha=0.4)
plt.scatter(df_sorted['value1'], ypos, color='skyblue', alpha=1, label='value1')
plt.scatter(df_sorted['value2'], ypos, color='green', alpha=0.4, label='value2')
plt.legend()

# Title, axis names and group labels on the y axis
plt.yticks(ypos, df_sorted['group'])
plt.title("Comparison of the value 1 and the value 2", loc='left')
plt.xlabel('Value of the variables')
plt.ylabel('Group')

# Show the graph
plt.show()
|
src/notebooks/184-lollipop-plot-with-2-groups.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Example: CanvasXpress meter Chart No. 5
#
# This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
#
# https://www.canvasxpress.org/examples/meter-5.html
#
# This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
#
# Everything required for the chart to render is included in the code below. Simply run the code block.
# +
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook

# Chart definition reproduced from the CanvasXpress "meter-5" example:
# a single "Performance" value rendered as a horizontal meter.
cx = CanvasXpress(
    render_to="meter5",
    data={
        "y": {
            "vars": [
                "Performance"
            ],
            "smps": [
                "January"
            ],
            "data": [
                [
                    85
                ]
            ]
        }
    },
    config={
        "graphType": "Meter",
        "meterType": "horizontal"
    },
    width=613,
    height=613,
    events=CXEvents(),  # no custom JS event handlers
    after_render=[],
    other_init_params={
        "version": 35,
        "events": False,
        "info": False,
        "afterRenderInit": False,
        "noValidate": True  # skip client-side config validation
    }
)

# Render inline in the notebook and also write a standalone HTML copy
display = CXNoteBook(cx)
display.render(output_file="meter_5.html")
|
tutorials/notebook/cx_site_chart_examples/meter_5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import HTML
# Cell visibility - COMPLETE:
#tag = HTML('''<style>
#div.input {
# display:none;
#}
#</style>''')
#display(tag)

#Cell visibility - TOGGLE:
# Injects a jQuery snippet that hides all input cells by default and adds a
# "Toggle cell visibility" link (works in the classic Jupyter Notebook UI).
tag = HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide()
} else {
$('div.input').show()
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<p style="text-align:right">
Toggle cell visibility <a href="javascript:code_toggle()">here</a>.</p>''')
display(tag)
# -
# ## Hitra Fourierjeva transformacija
#
# Hitra Fourierjeva transformacija (ang. *Fast-Fourier Transform*; FFT) je algoritem s katerim izračunamo diskretno Fourierjevo transformacijo ali njen inverz. Pri Fourierjevi analizi preslikamo signal iz njegove izvorne domene (običajno časovne ali prostorske) v frekvenčno domeno in obratno.
#
# Ta primer grafično prikazuje transformacijo signala iz časovne v frekvenčno domeno. V prvem koraku najprej z uporabo spustnega menija izberete tri vhodne funkcije in z uporabo drsnikov določite njihove lastnost. Izbrane funkcije so izrisane na prvem grafu, njihova vsota pa na drugem grafu. Na zadnjem grafu je prikazan rezultat FFT. Dodatno lahko opazujete vpliv šuma na končni rezultat.
#
# V analizo se lahko vključi naslednje funkcije:
# * sinusni val,
# * kosinusni val,
# * dušeni val,
# * rampa,
# * skočna funkcija.
#
# <!-- A fast Fourier transform (FFT) is an algorithm that computes the discrete Fourier transform (DFT) of a sequence, or its inverse (IDFT). Fourier analysis converts a signal from its original domain (often time or space) to a representation in the frequency domain and vice versa.
#
# In this example you can inspect (graphically) transforming signals from time-domain to frequency-domain. You are able to set the properties of three signals, by determining corresponding parameters for selected functions. These functions will be automatically visualized in first two plots - as three distinct signals, and as a combined one. The third plot, showed at the very bottom, represents the FFT output. In addition, you can inspect the effect of adding noise into consideration.
#
# The following functions can be used and combined together in this example:
# * Sine wave
# * Cosine wave
# * Damped wave
# * Ramp function
# * Step function -->
# +
# %matplotlib inline
# #%config InlineBackend.close_figures=False
from ipywidgets import interactive
from ipywidgets import widgets
from IPython.display import Latex, display, Markdown # For displaying Markdown and LaTeX code
import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib.patches as mpatches
from IPython.display import HTML, clear_output
from IPython.display import display
from IPython.display import HTML
from ipywidgets import interactive, interactive_output, VBox, HBox
from ipywidgets import widgets
# NOTE(review): several imports above are duplicates; `from scipy import pi`
# and `scipy.fftpack` are deprecated in modern SciPy (prefer numpy.pi and
# scipy.fft) — confirm the pinned SciPy version before cleaning up.
from scipy import pi
from scipy.fftpack import fft
from scipy import signal

### SLIDER WIDGETS
# Three parameter-slider sets per signal slot (suffix _1.._3): sine, cosine
# and damped wave share amplitude/frequency/phase; the damped wave adds a
# decay ("spust") slider.  Descriptions are Slovenian UI labels.
# Sine widgets
slider_a_1 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_f_1 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_p_1 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Cosine widgets
slider_acos_1 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fcos_1 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pcos_1 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Damping widgets
slider_adamp_1 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fdamp_1 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pdamp_1 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
slider_d_1 = widgets.FloatSlider(description='spust', min=0., max=3., step=0.2, continuous_update=False)
# Sine widgets
slider_a_2 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_f_2 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_p_2 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Cosine widgets
slider_acos_2 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fcos_2 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pcos_2 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Damping widgets
slider_adamp_2 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fdamp_2 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pdamp_2 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
slider_d_2 = widgets.FloatSlider(description='spust', min=0., max=3., step=0.2, continuous_update=False)
# Sine widgets
slider_a_3 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_f_3 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_p_3 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Cosine widgets
slider_acos_3 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fcos_3 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pcos_3 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Damping widgets
slider_adamp_3 = widgets.FloatSlider(description='amplituda', min=0., max=4., step=0.25, continuous_update=False)
slider_fdamp_3 = widgets.FloatSlider(description='frekvenca', min=0., max=30., step=0.5, continuous_update=False)
slider_pdamp_3 = widgets.FloatSlider(description='fazni zamik', min=-10.0, max=10.0, step=0.5, continuous_update=False)
slider_d_3 = widgets.FloatSlider(description='spust', min=0., max=3., step=0.2, continuous_update=False)
# Ramp widgets
slider_aramp_1 = widgets.FloatSlider(description='a', value = 0.0, min=0.0, max=2., step=0.25, continuous_update=False)
slider_aramp_2 = widgets.FloatSlider(description='a', value = 0.0, min=0.0, max=2., step=0.25, continuous_update=False)
slider_aramp_3 = widgets.FloatSlider(description='a', value = 0.0, min=0.0, max=2., step=0.25, continuous_update=False)
# Step widgets
slider_astep_1 = widgets.FloatSlider(description='a', value = 0., min=0, max=2, step=0.1, continuous_update=False)
slider_bstep_1 = widgets.FloatSlider(description='b', value = 1, min=0, max=4., step=0.25, continuous_update=False)
# Step widgets
slider_astep_2 = widgets.FloatSlider(description='a', value = 0., min=0, max=2, step=0.1, continuous_update=False)
slider_bstep_2 = widgets.FloatSlider(description='b', value = 1, min=0, max=4., step=0.25, continuous_update=False)
# Step widgets
slider_astep_3 = widgets.FloatSlider(description='a', value = 0., min=0, max=2, step=0.1, continuous_update=False)
slider_bstep_3 = widgets.FloatSlider(description='b', value = 1, min=0, max=4., step=0.25, continuous_update=False)
# Parameters
# 2 s of signal sampled at 1024 Hz -> N = 2048 samples; `frequency` is the
# one-sided FFT frequency axis (0..512 Hz).
sample_rate = 1024
N = (2 - 0) * sample_rate
time = np.linspace(0, 2, N)
noise = np.random.normal (0, 0.5, N)
frequency = np.linspace (0.0, 512, int (N/2))
waves = [0, 0, 0]  # placeholders; replaced by real signal lists in first()
# +
# Layouts
# Border colours of the three control panels match the plot line colours
# (blue/green/red = signals 1-3).
fun1_layout = widgets.Layout(border='solid blue', width = '33%', height = '400', padding='1px')
fun2_layout = widgets.Layout(border='solid green', width = '33%', height = '400', padding='1px')
fun3_layout = widgets.Layout(border='solid red', width = '33%', height = '400', padding='1px')
outputs_layout = widgets.Layout(border='solid black', width = '100%', height = '200', padding='5px')

# Dropdown widgets
# One function selector per signal slot; options are Slovenian names for
# sine wave, cosine wave, damped wave, ramp and step function.
dd_fun1 = widgets.Dropdown(
    options=['sinusni val', 'kosinusni val', 'dušeni val', 'rampa', 'skočna funkcija'],
    value='sinusni val',
    description='Izberi funkcijo:',
    disabled=False,
    style = {'description_width': 'initial'},
)
dd_fun2 = widgets.Dropdown(
    options=['sinusni val', 'kosinusni val', 'dušeni val', 'rampa', 'skočna funkcija'],
    value='sinusni val',
    description='Izberi funkcijo:',
    disabled=False,
    style = {'description_width': 'initial'},
)
dd_fun3 = widgets.Dropdown(
    options=['sinusni val', 'kosinusni val', 'dušeni val', 'rampa', 'skočna funkcija'],
    value='sinusni val',
    description='Izberi funkcijo:',
    disabled=False,
    style = {'description_width': 'initial'},
)
# Plotting
def plot_everything():
    """Redraw all three output areas from the current global ``waves``.

    Plot 1: the three component signals; Plot 2: their sum (optionally with
    Gaussian noise); Plot 3: one-sided amplitude spectrum of the sum (FFT).
    Reads the globals ``waves``, ``time``, ``noise``, ``frequency``, ``N``
    and the ``noise_widget`` checkbox; writes only to the Output widgets.
    """
    global waves
    wave1 = np.array(waves[0])
    wave2 = np.array(waves[1])
    wave3 = np.array(waves[2])
    output_time1.clear_output(wait=True)
    output_time2.clear_output(wait=True)
    output_fft.clear_output(wait=True)
    # Plot 1: the three individual signals
    with output_time1:
        plt.figure(figsize=(10, 5))
        plt.ylim(-5, 5)
        plt.plot(time, wave1, label="Signal 1", color="b")
        plt.plot(time, wave2, label="Signal 2", color="g")
        plt.plot(time, wave3, label="Signal 3", color="r")
        plt.title('Časovna domena: izbrani signali')
        plt.xlabel('čas [t]')
        plt.ylabel('amplituda')
        plt.grid(True)
        plt.axhline(y=0, lw=0.8, color='k')
        plt.axvline(x=0, lw=0.8, color='k')
        plt.legend(loc="upper right")
        plt.show()
    # Plot 2: combined wave, with optional additive Gaussian noise
    add_noise = noise_widget.value
    if add_noise:
        time_data = wave1 + wave2 + wave3 + noise
    else:
        time_data = wave1 + wave2 + wave3
    with output_time2:
        plt.figure(figsize=(10, 5))
        plt.ylim(-5, 5)
        plt.plot(time, time_data)
        plt.title('Časovna domena: vsota izbranih signalov')
        plt.xlabel('čas [t]')
        plt.ylabel('amplituda')
        plt.grid(True)
        plt.axhline(y=0, lw=0.8, color='k')
        plt.axvline(x=0, lw=0.8, color='k')
        plt.show()
        display(noise_widget)
    # Plot 3: FFT — one-sided amplitude spectrum, 2/N normalization
    freq_data = fft(time_data)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use integer division for the one-sided spectrum length instead.
    y = 2 / N * np.abs(freq_data[0:N // 2])
    with output_fft:
        plt.figure(figsize=(10, 5))
        plt.ylim(0, 5)
        plt.xlim(0, 40)
        plt.plot(frequency, y)
        plt.title('Frekvenčna domena')
        plt.xlabel('frekvenca [Hz]')
        plt.ylabel('magnituda')
        plt.grid(True)
        plt.axhline(y=0, lw=0.8, color='k')
        plt.axvline(x=0, lw=0.8, color='k')
        plt.show()
# Initial view
def first():
    """Populate the UI once at startup.

    Fills all three signal slots with zero-amplitude sine waves, shows the
    default (sine) slider set in each control panel, then triggers one
    initial redraw through f_sine/plot_everything.
    """
    global waves
    amp = freq = shift = 0
    waves = [[amp * np.sin(2 * pi * t * freq + shift) for t in time]
             for _ in range(3)]
    for panel, sliders in ((output_fun1, (slider_a_1, slider_f_1, slider_p_1)),
                           (output_fun2, (slider_a_2, slider_f_2, slider_p_2)),
                           (output_fun3, (slider_a_3, slider_f_3, slider_p_3))):
        with panel:
            display(*sliders)
    f_sine(1, slider_a_1.value, slider_f_1.value, slider_p_1.value)
# Function data: each f_* builds one signal over the global `time` axis,
# stores it in the slot given by `caller` (1-3) and triggers a full redraw.
def f_sine(caller, A, frequency, phase):
    """Store the sine wave A·sin(2π·frequency·t + phase) as signal #caller."""
    wave = [A * np.sin(2 * pi * t * frequency + phase) for t in time]
    if caller in (1, 2, 3):
        waves[caller - 1] = wave
    plot_everything()
def f_cos(caller, A, frequency, phase):
    """Store the cosine wave A·cos(2π·frequency·t + phase) as signal #caller."""
    wave = [A * np.cos(2 * pi * t * frequency + phase) for t in time]
    if caller in (1, 2, 3):
        waves[caller - 1] = wave
    plot_everything()
def f_dwave(caller, A, frequency, phase, decay):
    """Store the damped cosine A·e^(−decay·t)·cos(2π·frequency·t + phase)
    as signal #caller."""
    wave = [A * math.exp(-decay * t) * np.cos(2 * pi * t * frequency + phase)
            for t in time]
    if caller in (1, 2, 3):
        waves[caller - 1] = wave
    plot_everything()
def f_ramp(caller, A):
    """Store a ramp as signal #caller: 0 until t = A, then unit slope."""
    wave = [t - A if t > A else 0 for t in time]
    if caller in (1, 2, 3):
        waves[caller - 1] = wave
    plot_everything()
def f_step(caller, a, b):
    """Store a step as signal #caller: 0 until t = a, height b afterwards."""
    wave = [b if t > a else 0 for t in time]
    if caller in (1, 2, 3):
        waves[caller - 1] = wave
    plot_everything()
# slider observers
def slider_change(change, sender):
    """Route a slider 'value' change to the matching f_* rebuild call.

    ``sender`` encodes function kind, parameter and signal slot, e.g.
    'damp_f_2' = damped-wave frequency of signal 2.  The changed value
    arrives in change['new']; the remaining parameters are re-read from
    their sliders so the whole signal is rebuilt consistently.  Unknown
    senders are silently ignored.
    """
    # --- signal 1 ---
    if sender == 'sin_a_1':
        f_sine(1, change['new'], slider_f_1.value, slider_p_1.value)
    if sender == 'sin_f_1':
        f_sine(1, slider_a_1.value, change['new'], slider_p_1.value)
    if sender == 'sin_p_1':
        f_sine(1, slider_a_1.value, slider_f_1.value, change['new'])
    if sender == 'cos_a_1':
        f_cos(1, change['new'], slider_fcos_1.value, slider_pcos_1.value)
    if sender == 'cos_f_1':
        f_cos(1, slider_acos_1.value, change['new'], slider_pcos_1.value)
    if sender == 'cos_p_1':
        f_cos(1, slider_acos_1.value, slider_fcos_1.value, change['new'])
    if sender == 'damp_a_1':
        f_dwave(1, change['new'], slider_fdamp_1.value, slider_pdamp_1.value, slider_d_1.value)
    if sender == 'damp_f_1':
        f_dwave(1, slider_adamp_1.value, change['new'], slider_pdamp_1.value, slider_d_1.value)
    if sender == 'damp_p_1':
        f_dwave(1, slider_adamp_1.value, slider_fdamp_1.value, change['new'], slider_d_1.value)
    if sender == 'damp_d_1':
        f_dwave(1, slider_adamp_1.value, slider_fdamp_1.value, slider_pdamp_1.value, change['new'])
    if sender == 'ramp_a_1':
        f_ramp(1, change['new'])
    if sender == 'step_a_1':
        f_step(1, change['new'], slider_bstep_1.value)
    if sender == 'step_b_1':
        f_step(1, slider_astep_1.value, change['new'])
    # --- signal 2 ---
    if sender == 'sin_a_2':
        f_sine(2, change['new'], slider_f_2.value, slider_p_2.value)
    if sender == 'sin_f_2':
        f_sine(2, slider_a_2.value, change['new'], slider_p_2.value)
    if sender == 'sin_p_2':
        f_sine(2, slider_a_2.value, slider_f_2.value, change['new'])
    if sender == 'cos_a_2':
        f_cos(2, change['new'], slider_fcos_2.value, slider_pcos_2.value)
    if sender == 'cos_f_2':
        f_cos(2, slider_acos_2.value, change['new'], slider_pcos_2.value)
    if sender == 'cos_p_2':
        f_cos(2, slider_acos_2.value, slider_fcos_2.value, change['new'])
    if sender == 'damp_a_2':
        f_dwave(2, change['new'], slider_fdamp_2.value, slider_pdamp_2.value, slider_d_2.value)
    if sender == 'damp_f_2':
        f_dwave(2, slider_adamp_2.value, change['new'], slider_pdamp_2.value, slider_d_2.value)
    if sender == 'damp_p_2':
        f_dwave(2, slider_adamp_2.value, slider_fdamp_2.value, change['new'], slider_d_2.value)
    if sender == 'damp_d_2':
        f_dwave(2, slider_adamp_2.value, slider_fdamp_2.value, slider_pdamp_2.value, change['new'])
    if sender == 'ramp_a_2':
        f_ramp(2, change['new'])
    if sender == 'step_a_2':
        f_step(2, change['new'], slider_bstep_2.value)
    if sender == 'step_b_2':
        f_step(2, slider_astep_2.value, change['new'])
    # --- signal 3 ---
    if sender == 'sin_a_3':
        f_sine(3, change['new'], slider_f_3.value, slider_p_3.value)
    if sender == 'sin_f_3':
        f_sine(3, slider_a_3.value, change['new'], slider_p_3.value)
    if sender == 'sin_p_3':
        f_sine(3, slider_a_3.value, slider_f_3.value, change['new'])
    if sender == 'cos_a_3':
        f_cos(3, change['new'], slider_fcos_3.value, slider_pcos_3.value)
    if sender == 'cos_f_3':
        f_cos(3, slider_acos_3.value, change['new'], slider_pcos_3.value)
    if sender == 'cos_p_3':
        f_cos(3, slider_acos_3.value, slider_fcos_3.value, change['new'])
    if sender == 'damp_a_3':
        f_dwave(3, change['new'], slider_fdamp_3.value, slider_pdamp_3.value, slider_d_3.value)
    if sender == 'damp_f_3':
        f_dwave(3, slider_adamp_3.value, change['new'], slider_pdamp_3.value, slider_d_3.value)
    if sender == 'damp_p_3':
        f_dwave(3, slider_adamp_3.value, slider_fdamp_3.value, change['new'], slider_d_3.value)
    if sender == 'damp_d_3':
        f_dwave(3, slider_adamp_3.value, slider_fdamp_3.value, slider_pdamp_3.value, change['new'])
    if sender == 'ramp_a_3':
        f_ramp(3, change['new'])
    if sender == 'step_a_3':
        f_step(3, change['new'], slider_bstep_3.value)
    if sender == 'step_b_3':
        f_step(3, slider_astep_3.value, change['new'])
# Wire every slider's 'value' trait to slider_change with its encoded sender id
# --- signal 1 sliders ---
slider_a_1.observe(lambda change: slider_change(change, 'sin_a_1'), names='value')
slider_f_1.observe(lambda change: slider_change(change, 'sin_f_1'), names='value')
slider_p_1.observe(lambda change: slider_change(change, 'sin_p_1'), names='value')
slider_acos_1.observe(lambda change: slider_change(change, 'cos_a_1'), names='value')
slider_fcos_1.observe(lambda change: slider_change(change, 'cos_f_1'), names='value')
slider_pcos_1.observe(lambda change: slider_change(change, 'cos_p_1'), names='value')
slider_adamp_1.observe(lambda change: slider_change(change, 'damp_a_1'), names='value')
slider_fdamp_1.observe(lambda change: slider_change(change, 'damp_f_1'), names='value')
slider_pdamp_1.observe(lambda change: slider_change(change, 'damp_p_1'), names='value')
slider_d_1.observe(lambda change: slider_change(change, 'damp_d_1'), names='value')
slider_aramp_1.observe(lambda change: slider_change(change, 'ramp_a_1'), names='value')
slider_astep_1.observe(lambda change: slider_change(change, 'step_a_1'), names='value')
slider_bstep_1.observe(lambda change: slider_change(change, 'step_b_1'), names='value')
# --- signal 2 sliders ---
slider_a_2.observe(lambda change: slider_change(change, 'sin_a_2'), names='value')
slider_f_2.observe(lambda change: slider_change(change, 'sin_f_2'), names='value')
slider_p_2.observe(lambda change: slider_change(change, 'sin_p_2'), names='value')
slider_acos_2.observe(lambda change: slider_change(change, 'cos_a_2'), names='value')
slider_fcos_2.observe(lambda change: slider_change(change, 'cos_f_2'), names='value')
slider_pcos_2.observe(lambda change: slider_change(change, 'cos_p_2'), names='value')
slider_adamp_2.observe(lambda change: slider_change(change, 'damp_a_2'), names='value')
slider_fdamp_2.observe(lambda change: slider_change(change, 'damp_f_2'), names='value')
slider_pdamp_2.observe(lambda change: slider_change(change, 'damp_p_2'), names='value')
slider_d_2.observe(lambda change: slider_change(change, 'damp_d_2'), names='value')
slider_aramp_2.observe(lambda change: slider_change(change, 'ramp_a_2'), names='value')
slider_astep_2.observe(lambda change: slider_change(change, 'step_a_2'), names='value')
slider_bstep_2.observe(lambda change: slider_change(change, 'step_b_2'), names='value')
# --- signal 3 sliders ---
slider_a_3.observe(lambda change: slider_change(change, 'sin_a_3'), names='value')
slider_f_3.observe(lambda change: slider_change(change, 'sin_f_3'), names='value')
slider_p_3.observe(lambda change: slider_change(change, 'sin_p_3'), names='value')
slider_acos_3.observe(lambda change: slider_change(change, 'cos_a_3'), names='value')
slider_fcos_3.observe(lambda change: slider_change(change, 'cos_f_3'), names='value')
slider_pcos_3.observe(lambda change: slider_change(change, 'cos_p_3'), names='value')
slider_adamp_3.observe(lambda change: slider_change(change, 'damp_a_3'), names='value')
slider_fdamp_3.observe(lambda change: slider_change(change, 'damp_f_3'), names='value')
slider_pdamp_3.observe(lambda change: slider_change(change, 'damp_p_3'), names='value')
slider_d_3.observe(lambda change: slider_change(change, 'damp_d_3'), names='value')
slider_aramp_3.observe(lambda change: slider_change(change, 'ramp_a_3'), names='value')
slider_astep_3.observe(lambda change: slider_change(change, 'step_a_3'), names='value')
slider_bstep_3.observe(lambda change: slider_change(change, 'step_b_3'), names='value')
# dropdown(1) selection change
def fun1_dropdown(change):
    """Show the slider set matching the function chosen for signal 1 and
    redraw that signal from the sliders' current values."""
    choice = dd_fun1.value
    if choice == 'sinusni val':
        with output_fun1:
            output_fun1.clear_output(wait=True)
            display(slider_a_1, slider_f_1, slider_p_1)
        f_sine(1, slider_a_1.value, slider_f_1.value, slider_p_1.value)
    elif choice == 'kosinusni val':
        with output_fun1:
            output_fun1.clear_output(wait=True)
            display(slider_acos_1, slider_fcos_1, slider_pcos_1)
        f_cos(1, slider_acos_1.value, slider_fcos_1.value, slider_pcos_1.value)
    elif choice == 'dušeni val':
        with output_fun1:
            output_fun1.clear_output(wait=True)
            display(slider_adamp_1, slider_fdamp_1, slider_pdamp_1, slider_d_1)
        f_dwave(1, slider_adamp_1.value, slider_fdamp_1.value, slider_pdamp_1.value, slider_d_1.value)
    elif choice == 'rampa':
        with output_fun1:
            output_fun1.clear_output(wait=True)
            display(slider_aramp_1)
        f_ramp(1, slider_aramp_1.value)
    elif choice == 'skočna funkcija':
        with output_fun1:
            output_fun1.clear_output(wait=True)
            display(slider_astep_1, slider_bstep_1)
        f_step(1, slider_astep_1.value, slider_bstep_1.value)
# dropdown(2) selection change
def fun2_dropdown(change):
    """Show the slider set matching the function chosen for signal 2 and
    redraw that signal from the sliders' current values."""
    choice = dd_fun2.value
    if choice == 'sinusni val':
        with output_fun2:
            output_fun2.clear_output(wait=True)
            display(slider_a_2, slider_f_2, slider_p_2)
        f_sine(2, slider_a_2.value, slider_f_2.value, slider_p_2.value)
    elif choice == 'kosinusni val':
        with output_fun2:
            output_fun2.clear_output(wait=True)
            display(slider_acos_2, slider_fcos_2, slider_pcos_2)
        f_cos(2, slider_acos_2.value, slider_fcos_2.value, slider_pcos_2.value)
    elif choice == 'dušeni val':
        with output_fun2:
            output_fun2.clear_output(wait=True)
            display(slider_adamp_2, slider_fdamp_2, slider_pdamp_2, slider_d_2)
        f_dwave(2, slider_adamp_2.value, slider_fdamp_2.value, slider_pdamp_2.value, slider_d_2.value)
    elif choice == 'rampa':
        with output_fun2:
            output_fun2.clear_output(wait=True)
            display(slider_aramp_2)
        f_ramp(2, slider_aramp_2.value)
    elif choice == 'skočna funkcija':
        with output_fun2:
            output_fun2.clear_output(wait=True)
            display(slider_astep_2, slider_bstep_2)
        f_step(2, slider_astep_2.value, slider_bstep_2.value)
# dropdown(3) selection change
def fun3_dropdown(change):
    """Show the slider set matching the function chosen for signal 3 and
    redraw that signal from the sliders' current values."""
    choice = dd_fun3.value
    if choice == 'sinusni val':
        with output_fun3:
            output_fun3.clear_output(wait=True)
            display(slider_a_3, slider_f_3, slider_p_3)
        f_sine(3, slider_a_3.value, slider_f_3.value, slider_p_3.value)
    elif choice == 'kosinusni val':
        with output_fun3:
            output_fun3.clear_output(wait=True)
            display(slider_acos_3, slider_fcos_3, slider_pcos_3)
        f_cos(3, slider_acos_3.value, slider_fcos_3.value, slider_pcos_3.value)
    elif choice == 'dušeni val':
        with output_fun3:
            output_fun3.clear_output(wait=True)
            display(slider_adamp_3, slider_fdamp_3, slider_pdamp_3, slider_d_3)
        f_dwave(3, slider_adamp_3.value, slider_fdamp_3.value, slider_pdamp_3.value, slider_d_3.value)
    elif choice == 'rampa':
        with output_fun3:
            output_fun3.clear_output(wait=True)
            display(slider_aramp_3)
        f_ramp(3, slider_aramp_3.value)
    elif choice == 'skočna funkcija':
        with output_fun3:
            output_fun3.clear_output(wait=True)
            display(slider_astep_3, slider_bstep_3)
        f_step(3, slider_astep_3.value, slider_bstep_3.value)
# dropdown observers
dd_fun1.observe(fun1_dropdown, names = 'value')
dd_fun2.observe(fun2_dropdown, names = 'value')
dd_fun3.observe(fun3_dropdown, names = 'value')
# checkbox widget
def trigger(b):
    # Redraw when the noise checkbox changes.
    # NOTE(review): observe() is called below without names='value', so this
    # also fires for other trait changes — confirm that is intended.
    plot_everything()
noise_widget = widgets.Checkbox(False, description='Dodaj šum')
noise_widget.observe(trigger)
# output layout
output_fun1 = widgets.Output()
output_fun2 = widgets.Output()
output_fun3 = widgets.Output()
output_time1 = widgets.Output(layout = outputs_layout)
output_time2 = widgets.Output(layout = outputs_layout)
output_fft = widgets.Output(layout = outputs_layout)
# Three side-by-side control panels (dropdown + sliders), then the three plots
box1 = widgets.VBox([dd_fun1, output_fun1], layout = fun1_layout)
box2 = widgets.VBox([dd_fun2, output_fun2], layout = fun2_layout)
box3 = widgets.VBox([dd_fun3, output_fun3], layout = fun3_layout)
panel_box = widgets.HBox([box1, widgets.Label(" "), box2, widgets.Label(" "), box3])
display(panel_box, output_time1, output_time2, output_fft)
first()
# -
|
ICCT_si/examples/01/.ipynb_checkpoints/M-08_FFT-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## RF
# import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
from rsnautils import *
# Collect the per-model TTA prediction pickles; `fn` is a sample file used
# below to sanity-check the stored targets.
pkls = L(path_pred.glob('*_tta.pkl'))
fn = pkls[0]
def read_preds(fn):
    """Load one model's TTA predictions into a DataFrame whose columns are
    '<model-stem>:<haemorrhage-type>' (targets in the pickle are ignored)."""
    preds, _targs = fn.load()
    frame = pd.DataFrame(preds.numpy())
    frame.columns = [f'{fn.stem[:-4]}:{c}' for c in htypes]
    return frame
# Build the stacked feature matrix: one block of prediction columns per model
get_data = get_rsna_data_func(1, nw=8)
dbch = get_data(128, None, use_wgt=False)
pred_dfs = pkls.map(read_preds)
Xdf = pd.concat(pred_dfs, axis=1)
def split_data(df):
    """Split row positions of df into (train_positions, valid_positions)
    according to membership of the index in the global `val_sops`."""
    positions = L.range(df)
    in_valid = df.index.isin(set(val_sops))
    return positions[~in_valid], positions[in_valid]
# Sanity-check that the saved targets line up with the labels from df_comb
preds,targs = fn.load()
# NOTE(review): the [:-2] trim suggests two extra rows in the saved targets —
# confirm why they are dropped.
t = targs.numpy()[:-2]
splits = split_data(df_comb)
y_valid = df_comb.iloc[splits[1]][htypes]
np.equal(t, y_valid.values).mean()
np.nonzero(t)
np.nonzero(y_valid.values)
loss = get_loss()
# +
def calc_loss(df):
    # Loss and multilabel accuracy of one model's raw predictions vs y_valid.
    # NOTE(review): the hard-coded slice [:136785] drops trailing rows to
    # match len(y_valid) — confirm this count against the current split.
    ypred = logit(to_device(tensor(df.values)))[:136785]
    targ = to_device(tensor(y_valid))
    return loss(ypred, targ).cpu().item(), accuracy_multi(ypred, targ).cpu().item()

# Rank the individual models by validation loss
losses= dict(list(zip([fn.stem for fn in pkls], L(pred_dfs).map(calc_loss))))
pd.Series(losses).sort_values()
# -
valid_df = df_comb.loc[y_valid.index]
# +
# Patient-level 80/20 split of the validation set, so the stacker is never
# trained and evaluated on slices from the same patient.
set_seed(42)
val_patients = valid_df.PatientID.unique()
np.random.shuffle(val_patients)
split_idx = int(0.8*len(val_patients))
train_p, val_p = val_patients[0:split_idx], val_patients[split_idx:]
train_idx = valid_df.PatientID.isin(train_p).values
val_idx = valid_df.PatientID.isin(val_p).values
train_x = Xdf.loc[train_idx]
train_y = y_valid.loc[train_idx]
val_x = Xdf.loc[val_idx]
val_y = y_valid.loc[val_idx]
# -
# All haemorrhage subtypes except the aggregate 'any' label
labels = [l for l in htypes if l != 'any']
# +
# train label
# +
# Column names are '<model>:<label>'.  These helpers slice the stacked
# prediction matrix by label or by model.
def feature_cols(df, label):
    """Columns whose label part (after ':') equals `label`."""
    return [c for c in df.columns if c.split(':')[1] == label]

def model_cols(df, model):
    """Columns whose model part (before ':') equals `model`."""
    return [c for c in df.columns if c.split(':')[0] == model]

def pull_features(df, label):
    """Features for one subtype: that label's columns from every model,
    followed by every model's 'any' columns."""
    any_cols = feature_cols(df, 'any')
    feat_cols = feature_cols(df, label)
    return df[feat_cols + any_cols]

def pull_any_features(df, model):
    """Features for the 'any' head: all 'any' columns plus all of `model`'s
    columns, deduplicated.

    BUG FIX: the original used list(set(...)), whose ordering varies between
    interpreter runs (string-hash randomization), so the forest saw features
    in a different order every run.  dict.fromkeys deduplicates while
    preserving first-seen order.
    """
    any_cols = feature_cols(df, 'any')
    m_cols = model_cols(df, model)
    cols = list(dict.fromkeys(any_cols + m_cols))
    return df[cols]
# -
def get_data(label, x, y):
    """Return (feature frame, target column) for one label.

    NOTE(review): rebinds the `get_data` loader created earlier in the
    notebook — confirm the earlier name is no longer needed.
    """
    if label == 'any':
        feats = pull_any_features(x, label)
    else:
        feats = pull_features(x, label)
    return feats, y[label]
def train_label(label, x, y):
    """Fit one random forest for `label` on (x, y); stashes the feature
    column order on the model as `feat_imp_cols` for later reporting."""
    print(f'training {label}')
    features, target = get_data(label, x, y)
    model = RandomForestClassifier(n_estimators=100, min_samples_leaf=10,
                                   max_features=0.9, n_jobs=16, oob_score=True)
    model = model.fit(features, target)
    model.feat_imp_cols = features.columns
    return model

def train(label):
    """Train `label` on the global train_x/train_y split."""
    return train_label(label, train_x, train_y)

# One forest per haemorrhage type (including the aggregate 'any')
rfs = {label: train(label) for label in htypes}
def predict_rfs(rfs, x, labels):
    """Predict the positive-class probability of every label in `labels`
    with the fitted forests in `rfs`; returns a DataFrame aligned to x."""
    prob_cols = {}
    for label in labels:
        print(f'predict {label}')
        feats = pull_any_features(x, label) if label == 'any' else pull_features(x, label)
        prob_cols[label] = rfs[label].predict_proba(feats)[:, 1]
    return pd.DataFrame(prob_cols, index=x.index)
ypred = predict_rfs(rfs, val_x, htypes)
# Evaluate the stacked RF predictions on the held-out patients
yp = tensor(ypred)
p,t = to_device((yp,tensor(val_y.values).float()))
loss(logit(p),t).item(), accuracy_multi(logit(p), t).item()
# Per-label feature importances, largest first
for label in htypes:
    print('')
    print(f'feat importances for {label}')
    print('---------------------------------------------------')
    fis = sorted(zip(map(lambda x: round(x, 4), rfs[label].feature_importances_), rfs[label].feat_imp_cols), reverse=True)
    for fi in fis:
        print(fi)
# +
def calc_loss_rf_valid(df):
    """Loss and multilabel accuracy of one model's raw predictions on the
    RF validation rows (val_idx / val_y from the patient-level split)."""
    ypred = logit(to_device(tensor(df.loc[val_idx].values)))
    targ = to_device(tensor(val_y))
    return loss(ypred, targ).cpu().item(), accuracy_multi(ypred, targ).cpu().item()

# BUG FIX: `all_preds` was never defined in this notebook — the per-model
# prediction files live in `pkls` (as used for the earlier loss table).
losses = dict(zip([fn.stem for fn in pkls], L(pred_dfs).map(calc_loss_rf_valid)))
pd.Series(losses).sort_values()
# -
# NOTE(review): duplicate of the feature-importance report above — probably
# left over from re-running cells; consider removing one copy.
for label in htypes:
    print('')
    print(f'feat importances for {label}')
    print('---------------------------------------------------')
    fis = sorted(zip(map(lambda x: round(x, 4), rfs[label].feature_importances_), rfs[label].feat_imp_cols), reverse=True)
    for fi in fis:
        print(fi)
|
orig_files/14_xgboost.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pip install psycopg2-binary
pip install pprint
import psycopg2
import pprint
# ElephantSQL (hosted PostgreSQL) connection settings
dbname = 'hvvolzee'
user = 'hvvolzee'
password = '<PASSWORD>' # Don't commit or share this for security purposes!
host = 'rajje.db.elephantsql.com' # Port should be included or default
pg_conn = psycopg2.connect(dbname=dbname, user=user,
                           password=password, host=host)
#print(pg_conn)
pg_curs = pg_conn.cursor()
# +
import sqlite3
import pandas as pd
df = pd.read_csv('/Users/julie/Desktop/repos/DS-Unit-3-Sprint-2-SQL-and-Databases/module2-sql-for-analysis/titanic.csv')
print(df.head())
print(df.shape)
# Strip single quotes from names: the INSERT further below builds SQL via
# string concatenation, so embedded quotes would break the statements.
df['Name']= df['Name'].str.replace("'","")
print(df.head())
# -
# Mirror the CSV into a local SQLite table, then pull every row back out.
df_conn=sqlite3.connect('titanic.sqlite3')
df_curs = df_conn.cursor()
df.to_sql('titanic',df_conn,if_exists='replace')
titanic=df_curs.execute('SELECT * FROM titanic').fetchall()
print(df_curs.execute('PRAGMA table_info(titanic);').fetchall())
# +
# Create the destination table in PostgreSQL.
# NOTE(review): "index" is a risky column name in SQL — confirm it is accepted,
# or rename/quote it.
create_titanic_table ="""
CREATE TABLE IF NOT EXISTS Titanic(
index INT,
Survived INT,
Pclass INT,
Name TEXT,
Sex Text,
Age REAL,
Siblings_Spouses_Aboard INT,
Parents_Children_Aboard INT,
Fare REAL
);"""
pg_curs.execute(create_titanic_table)
pg_conn.commit()
# List user tables to confirm the Titanic table now exists.
show_tables = """
SELECT
*
FROM
pg_catalog.pg_tables
WHERE
schemaname != 'pg_catalog'
AND schemaname != 'information_schema';
"""
pg_curs.execute(show_tables)
pg_curs.fetchall()
# -
# Copy every SQLite row into PostgreSQL. x[0] is the SQLite index column,
# which the target column list does not include, hence x[1:].
for x in titanic:
    # Use a parameterized query instead of building SQL by string
    # concatenation: it is safe against SQL injection and handles quotes
    # and NULLs correctly (the earlier quote-stripping of names becomes
    # unnecessary).
    pg_curs.execute(
        """INSERT INTO Titanic
        (Survived, Pclass, Name, Sex, Age, Siblings_Spouses_Aboard, Parents_Children_Aboard, Fare)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s);""",
        tuple(x[1:]),
    )
# # What was the average age of each passenger class?
pg_curs.execute('SELECT avg(age), pclass FROM Titanic WHERE survived = 1 GROUP BY pclass')
rows = pg_curs.fetchall()
pprint.pprint(rows)
for row in rows:
print("avg age:", row[0], "class:", row[1])
# # How many passengers survived, and how many died?
pg_curs.execute('SELECT count(*),Survived FROM Titanic GROUP BY Survived')
num = pg_curs.fetchall()
pprint.pprint(num)
print("Total number of passengers survived :", num[0][0])
print("Total number of passengers died :", num[1][0])
# # How many passengers were in each class?
pg_curs.execute('SELECT count(*),Pclass FROM Titanic GROUP BY Pclass')
num1 = pg_curs.fetchall()
pprint.pprint(num1)
for n in num1:
print("Numbers of passengers:", n[0], "class:", n[1])
# # How many passengers survived/died within each class?
pg_curs.execute('SELECT count(*),Survived,Pclass FROM Titanic GROUP BY Survived,Pclass')
num2 = pg_curs.fetchall()
pprint.pprint(num2)
print("Total number of passengers died in class 1:", num2[1][0])
print("Total number of passengers died in class 2:", num2[2][0])
print("Total number of passengers died in class 3:", num2[0][0])
print("Total number of passengers survived in class 1:", num2[5][0])
print("Total number of passengers survived in class 2:", num2[4][0])
print("Total number of passengers survived in class 3:", num2[3][0])
# # What was the average age of survivors vs nonsurvivors?
pg_curs.execute('SELECT AVG(age),Survived FROM Titanic GROUP BY Survived')
num3 = pg_curs.fetchall()
pprint.pprint(num3)
print("Average age of passengers survived :", num3[0][0])
print("Average age of passengers died :", num3[1][0])
# # What was the average fare by passenger class? By survival?
pg_curs.execute('SELECT avg(Fare),Survived,Pclass FROM Titanic GROUP BY Survived,Pclass')
num4 = pg_curs.fetchall()
pprint.pprint(num4)
print("Average fare of passengers survived in class 1 :", num4[5][0])
print("Average fare of passengers survived in class 2 :", num4[4][0])
print("Average fare of passengers survived in class 3 :", num4[3][0])
print("Average fare of passengers died in class 1 :", num4[1][0])
print("Average fare of passengers died in class 2 :", num4[2][0])
print("Average fare of passengers died in class 3 :", num4[0][0])
# # How many siblings/spouses aboard on average, by passenger class? By survival?
pg_curs.execute('SELECT avg(Siblings_Spouses_Aboard),Survived,Pclass FROM Titanic GROUP BY Survived,Pclass')
num5 = pg_curs.fetchall()
pprint.pprint(num5)
print("Average Siblings_Spouses_Aboard survived in class 1 :", num5[5][0])
print("Average Siblings_Spouses_Aboard survived in class 2 :", num5[4][0])
print("Average Siblings_Spouses_Aboard survived in class 3 :", num5[3][0])
print("Average Siblings_Spouses_Aboard died in class 1 :", num5[1][0])
print("Average Siblings_Spouses_Aboard died in class 2 :", num5[2][0])
print("Average Siblings_Spouses_Aboard died in class 3 :", num5[0][0])
# # How many parents/children aboard on average, by passenger class? By survival?
# Average parents/children aboard, grouped by survival and class.
pg_curs.execute('SELECT avg(Parents_Children_Aboard),Survived,Pclass FROM Titanic GROUP BY Survived,Pclass')
num6 = pg_curs.fetchall()
pprint.pprint(num6)
# BUG FIX: these prints previously reused num5 (the siblings/spouses averages);
# they must report num6, the parents/children result fetched just above.
# NOTE(review): indexing relies on GROUP BY row order without an ORDER BY —
# confirm the order, or add ORDER BY Survived, Pclass.
print("Average Parents_Children_Aboard survived in class 1 :", num6[5][0])
print("Average Parents_Children_Aboard survived in class 2 :", num6[4][0])
print("Average Parents_Children_Aboard survived in class 3 :", num6[3][0])
print("Average Parents_Children_Aboard died in class 1 :", num6[1][0])
print("Average Parents_Children_Aboard died in class 2 :", num6[2][0])
print("Average Parents_Children_Aboard died in class 3 :", num6[0][0])
# # Do any passengers have the same name?
# Count how many names appear more than once (inner query groups by name).
pg_curs.execute('SELECT count(*) FROM (SELECT count(*) FROM Titanic GROUP BY Name having count(*) > 1) s')
num7 = pg_curs.fetchall()
pprint.pprint(num7)
print("There are", num7[0][0],"name used more than once.")
|
module4-acid-and-database-scalability-tradeoffs/Julie_DS324_PostgreSQL_Titanic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Procesamiento de los datos
#
# Limpieza y transformaciones, la salida estará lista para modelar.
# settings
import pandas as pd
from itertools import chain
# data path
path_input = "https://raw.githubusercontent.com/yoselalberto/ia_proyecto_final/main/data/celulares.csv"
path_salida = 'work/data/processed/celulares_procesados.csv'
# estos datos tienen el formato adecuado para imprimirlos en pantalla:
path_salida_formato = 'work/data/processed/celulares_formato.csv'
# more dependencies
import janitor
# corrigé un error en el formato de los valores de cada instancia
def replace_string(dataframe, string = ','):
    """Return a copy of *dataframe* with *string* removed from every column's values."""
    result = dataframe.copy()
    for col in result.columns.values:
        # Remove the unwanted character from each (string-typed) column.
        result[col] = result[col].str.replace(string, '')
    return result
# lowercase all dataframe
def df_lowercase(dataframe):
    """Return a copy of *dataframe* with every string column lower-cased."""
    result = dataframe.copy()
    for col in result.columns.values:
        result[col] = result[col].str.lower()
    return result
# coerse columns
def df_numeric(dataframe, columns):
    """Return a copy with *columns* coerced to numeric (unparseable values -> NaN)."""
    result = dataframe.copy()
    result[columns] = result[columns].apply(pd.to_numeric, errors='coerce')
    return result
# agrupo las funciones anteriores
def df_clean(dataframe, string, columns_to_numeric):
    """Full cleaning pipeline: strip *string*, lower-case, coerce *columns_to_numeric*.

    Fix: removed the unused ``df = dataframe.copy()`` — every helper already
    copies its input, so the extra copy was dead code.
    """
    stripped = replace_string(dataframe, string)
    lowered = df_lowercase(stripped)
    return df_numeric(lowered, columns = columns_to_numeric)
# limpieza parcial
def df_clean_parcial(dataframe, string, columns_to_numeric):
    """Partial cleaning: strip *string* and coerce *columns_to_numeric*, keeping case.

    Fix: removed the unused ``df = dataframe.copy()`` (dead code — the helpers copy).
    """
    stripped = replace_string(dataframe, string)
    return df_numeric(stripped, columns = columns_to_numeric)
# los pasos los meto en funciones
def clean_tecnologia(dataframe):
    """Map raw 'tecnologia' strings onto a single normalized generation label."""
    df = dataframe.copy()
    # Lookup table: raw value -> most advanced technology it implies.
    tabla_tecnologias = pd.DataFrame({
        'tecnologia': ['2g/3g/4g/4glte/5g', '4glte', '4g/gsm', '2g/3g/4g/4glte/gsm', '4g', '5g', '3g/4g/gsm', '4g/4glte/gsm/lte', '2g/3g/lte', '3g/lte'],
        'tecnologia_mejor': ['5g', '4glte', '4g', '4glte', '4g', '5g', '4g', '4glte', '4glte', '4glte'],
    })
    merged = df.merge(tabla_tecnologias, how = "left")
    # Replace the raw column by the normalized one, keeping the original name.
    return merged.drop(columns = {'tecnologia'}).rename(columns = {'tecnologia_mejor': 'tecnologia'})
# procesador
def clean_procesador(dataframe):
    """Keep only the first word of 'procesador' and strip the digits from it.

    Fix: pass ``regex=True`` explicitly — modern pandas defaults
    ``Series.str.replace`` to literal matching, which would leave the digits
    in place. The pattern is also a raw string now, so ``\\d`` is not a stray
    string escape.
    """
    df = dataframe.copy()
    df['procesador'] = df.procesador.str.split().str.get(0).str.replace(r'\d+', '', regex=True)
    return df
# clean operative systems
def clean_os(dataframe):
    """Normalize 'sistema_operativo' down to 'android' or 'ios' (NaN otherwise)."""
    result = dataframe.copy()
    # First occurrence of either platform name wins; anything else becomes NaN.
    result['sistema_operativo'] = result['sistema_operativo'].str.extract(r'(android|ios)', expand=False)
    return result
# chain steps
def df_procesamiento(dataframe):
    """Apply the column transformations in order: tecnologia -> procesador -> OS."""
    resultado = clean_tecnologia(dataframe.copy())
    resultado = clean_procesador(resultado)
    return clean_os(resultado)
# Quick peek at the raw file before cleaning.
df_prueba = pd.read_csv(path_input, dtype = 'str')
df_prueba.head(1)
# Data loading (clean_names() comes from pyjanitor).
df_raw = pd.read_csv(path_input, dtype = 'str').clean_names()
df_raw
# Rename columns to shorter internal names.
nombres = {"nombre_del_producto": 'producto_nombre', 'memoria_interna': 'memoria'}
df_inicio = df_raw.rename(columns = nombres)
# Initial cleaning: these columns become numeric.
columns_numeric = ['peso', 'camara_trasera', 'camara_frontal', 'ram', 'memoria', 'precio']
#
df_limpio = df_clean(df_inicio, ',', columns_numeric).drop_duplicates().reset_index(drop = True)
df_limpio
# Transform the categorical columns.
df_procesado = df_procesamiento(df_limpio)
# Save the model-ready dataset.
df_procesado.to_csv(path_salida, index = False)
# ## Recomendación a mostrar
#
# El siguiente procesamiento le da formato al dataframe a mostrar.
# Cleaning for display (keeps original letter case).
df_limpio_parcial_inicio = df_clean_parcial(df_inicio, ',', columns_numeric).drop_duplicates().reset_index(drop = True)
df_limpio_parcial = clean_procesador(df_limpio_parcial_inicio)
# Reorder the columns for display; .copy() avoids pandas'
# SettingWithCopyWarning on the assignments below (a bare column selection
# yields a slice pandas may treat as a view).
df_limpio_parcial_orden = df_limpio_parcial[['producto_nombre', 'marca', 'color', 'sistema_operativo', 'memoria', 'ram', 'precio', 'camara_trasera', 'camara_frontal', 'pantalla', 'tecnologia', 'procesador', 'peso']].copy()
# Friendly column names for the UI.
df_limpio_parcial_orden.columns = ['Nombre', 'Marca', 'Color', 'Sistema operativo', 'Memoria', 'Ram', 'Precio', 'Camara Trasera', 'Camara Frontal', 'Pantalla', 'Tecnologia', 'Procesador', 'Peso']
# Presumably kilograms -> grams — TODO confirm the source unit.
df_limpio_parcial_orden['Peso'] = df_limpio_parcial_orden['Peso'] * 1000
# Lower-cased product name kept alongside the display name for matching.
df_limpio_parcial_orden['producto_nombre'] = df_limpio_parcial_orden['Nombre'].str.lower()
df_limpio_parcial_orden
# Save the nicely formatted data.
df_limpio_parcial_orden.to_csv(path_salida_formato, index = False)
|
data_processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9
# language: python
# name: python3.9
# ---
import os
import logging
from io import BytesIO
import time
import zipfile
import numpy as np
import boto3
from datetime import datetime, timezone
from time import gmtime, strftime
import json
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import math
# The difference between UTC and local timezone
timezone_offset = 0
# + [markdown] tags=[]
# ### Function Name List
# -
# Prefix and count used to build the deployed Lambda function names.
function_prefix = "Structures"
function_count = 4
function_name_list = [function_prefix+'_f'+str(i) for i in range(1, function_count+1)]
print(function_name_list)
# Memory (MB) configured for each Lambda function.
mem_config_list={
'f1':1280,
'f2':896,
'f3':1536,
'f4':1088
}
# + [markdown] tags=[]
# # Execute the Application
# -
sfn_client = boto3.client('stepfunctions')
stateMachineArn='arn:aws:states:us-east-2:499537426559:stateMachine:Sequence'
# + [markdown] pycharm={"name": "#%% md\n"}
# The serverless application workflow can be found in README.md.
# -
# ## Test Run
sfn_client.start_execution(
stateMachineArn=stateMachineArn
)
# ## Configure Logging
# Each log line carries a millisecond timestamp so start times can be parsed back.
logging.basicConfig(filename='AppExecution.log', encoding='utf-8', format='%(asctime)s.%(msecs)03d %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
# ## Execute Sequence
np.random.seed(256)
# Start 5000 state-machine executions, one every 10 seconds, logging the
# HTTP status and request id of every start call.
for i in range(5000):
    response = sfn_client.start_execution(stateMachineArn=stateMachineArn)
    RequestId = response.get('ResponseMetadata', {}).get('RequestId')
    StatusCode = response.get('ResponseMetadata', {}).get('HTTPStatusCode', 'ERR')
    logging.info(f'{i+1} {StatusCode} {RequestId}')
    time.sleep(10)
# + [markdown] tags=[]
# ## Get the start time and the end time
# -
app_exeuction_start_time = ' '.join(os.popen('head -1 AppExecution.log').read().split(' ')[:2])
app_execution_end_time = ' '.join(os.popen('tail -1 AppExecution.log').read().split(' ')[:2])
app_exeuction_start_time = datetime.strptime(app_exeuction_start_time, '%Y-%m-%d %H:%M:%S.%f')
app_execution_end_time = datetime.strptime(app_execution_end_time, '%Y-%m-%d %H:%M:%S.%f')
app_exeuction_start_time
app_execution_end_time
app_exeuction_start_time = int(datetime.timestamp(app_exeuction_start_time))
app_execution_end_time = int(datetime.timestamp(app_execution_end_time)) + 20
# # Retrieve Logs
logclient = boto3.client('logs')
# ## Query Step Functions Logs
query_sfn_Sequence = logclient.start_query(
logGroupName='/aws/vendedlogs/states/{}-Logs'.format('Sequence'),
queryString="fields type, @timestamp| filter type = 'ExecutionStarted' or type = 'ExecutionSucceeded' | sort id desc",
startTime=app_exeuction_start_time,
endTime=app_execution_end_time,
limit = 10000
)
query_results_sfn_Sequence = logclient.get_query_results(
queryId=query_sfn_Sequence['queryId']
)
Sequence_starttimestamp = np.sort([datetime.timestamp(datetime.strptime(item[1]['value'], '%Y-%m-%d %H:%M:%S.%f'))+timezone_offset*3600 for item in query_results_sfn_Sequence['results'] if item[0]['value']=='ExecutionStarted'])
Sequence_endtimestamp = np.sort([datetime.timestamp(datetime.strptime(item[1]['value'], '%Y-%m-%d %H:%M:%S.%f'))+timezone_offset*3600 for item in query_results_sfn_Sequence['results'] if item[0]['value']=='ExecutionSucceeded'])
pd.DataFrame({'Start': Sequence_starttimestamp, 'End':Sequence_endtimestamp}).to_csv('Sequence_SFN_Logs.csv', index=False)
Sequence_sfn_logs = pd.read_csv('Sequence_SFN_Logs.csv', low_memory=False)
Sequence_sfn_logs.shape
# + [markdown] toc-hr-collapsed=false
# ## Query Lambda Function Logs
# -
# ### Functions for parsing Logs
def lambda_report_log_to_dict(log):
    """Parse one Lambda REPORT row into a dict of its numeric fields.

    *log* is a CloudWatch Insights result row: log[0]['value'] holds the
    timestamp string and log[1]['value'] the tab-separated REPORT message.
    """
    fields = [part.split(': ') for part in log[1]['value'].split('\t')]
    parsed = {
        'RequestId': fields[0][1],
        'Duration': float(fields[1][1].split(' ')[0]),
        'Billed_Duration': int(fields[2][1].split(' ')[0]),
        'Memory_Size': int(fields[3][1].split(' ')[0]),
        'Max_Memory_Used': int(fields[4][1].split(' ')[0]),
    }
    # NOTE: time.mktime interprets the timestamp in the *local* timezone; the
    # module-level timezone_offset corrects it to UTC.
    parsed['UTC_Timestamp'] = time.mktime(datetime.strptime(log[0]['value'], "%Y-%m-%d %H:%M:%S.%f").timetuple()) + timezone_offset*3600
    return parsed
# ### Prepare Logs
query_lambda = []
for function in function_name_list:
query_lambda.append(logclient.start_query(
logGroupName='/aws/lambda/{}'.format(function),
queryString="fields @timestamp, @message| filter @message like 'REPORT'| sort @timestamp asc",
startTime=app_exeuction_start_time,
endTime=app_execution_end_time,
limit=10000
))
time.sleep(4)
time.sleep(10)
# ### Retrieve Logs
query_lambda_results = []
for q in query_lambda:
query_lambda_results.append(logclient.get_query_results(
queryId=q['queryId']
))
time.sleep(4)
with open('query_lambda_results.pickle', 'wb') as f:
f.write(pickle.dumps(query_lambda_results))
Sequence_lambda_logs_dict = {'f'+str(i):None for i in range(1, function_count+1)}
for i in range(1, function_count+1):
Sequence_lambda_logs_dict['f'+str(i)] = [lambda_report_log_to_dict(item) for item in query_lambda_results[i-1]['results']]
for item in Sequence_lambda_logs_dict['f'+str(i)]:
item['Function']='f'+str(i)
len(Sequence_lambda_logs_dict['f1'])
# #### Convert Logs into DataFrame and Save as CSV
Sequence_lambda_logs=pd.DataFrame()
for i in range(1, function_count+1):
Sequence_lambda_logs = Sequence_lambda_logs.append(pd.DataFrame(Sequence_lambda_logs_dict['f'+str(i)]))
Sequence_lambda_logs.index=range(Sequence_lambda_logs.shape[0])
Sequence_lambda_logs=Sequence_lambda_logs[['Function', 'Memory_Size', 'Max_Memory_Used', 'Duration', 'Billed_Duration', 'UTC_Timestamp', 'RequestId']]
Sequence_lambda_logs.to_csv('Sequence_lambda_logs.csv',index=False)
Sequence_lambda_logs = pd.read_csv('Sequence_lambda_logs.csv', low_memory=False)
Sequence_lambda_logs.columns = ['Function', 'Memory_Size', 'Max_Memory_Used', 'Duration', 'Billed_Duration', 'UTCTimestamp', 'RequestId']
Sequence_lambda_logs.head()
for i in range(1, function_count+1):
print(f"f{i}", Sequence_lambda_logs.query(f"Function == 'f{i}'").shape[0], Sequence_lambda_logs.query(f"Function == 'f{i}'")['Duration'].mean())
# +
def calculate_cost(rt: float, mem: float, pmms: float = 0.0000166667/1024/1000, ppi: float = 0.0000002) -> float:
    """Lambda cost of one invocation: billed ms * memory (MB) * price-per-MB-ms + per-invocation fee."""
    billed_ms = math.ceil(rt)
    return billed_ms * mem * pmms + ppi
def adjacent_values(vals, q1, q3):
    """Whisker endpoints (Tukey 1.5*IQR rule), clipped to the sorted data range.

    Returns (lower_adjacent, upper_adjacent); *vals* must be sorted ascending.
    """
    iqr = q3 - q1
    upper = np.clip(q3 + 1.5 * iqr, q3, vals[-1])
    lower = np.clip(q1 - 1.5 * iqr, vals[0], q1)
    return lower, upper
# -
# # End-to-end RT Reported by AWS
Sequence_duration = pd.DataFrame((Sequence_sfn_logs['End'] - Sequence_sfn_logs['Start'])*1000, columns=['Duration'])
Sequence_duration.to_csv('Sequence_duration_aws.csv', index=False)
Sequence_duration = pd.read_csv('Sequence_duration_aws.csv', low_memory=False)
print('Number of Executions: ', len(Sequence_duration['Duration']))
Sequence_avg_duration_aws = np.mean(Sequence_duration['Duration'])
Sequence_mid_duration_aws = np.median(Sequence_duration['Duration'])
Sequence_percentile10_aws = np.percentile(Sequence_duration['Duration'], 10)
Sequence_percentile90_aws = np.percentile(Sequence_duration['Duration'], 90)
print('Average Duration Reported by AWS: ', Sequence_avg_duration_aws, 'ms')
print('Median Duration Reported by AWS: ', Sequence_mid_duration_aws, 'ms')
print('10-th percentile of Duration Reported by AWS: ', Sequence_percentile10_aws, 'ms')
print('90-th percentile Duration Reported by AWS: ', Sequence_percentile90_aws, 'ms')
print('Standard Deviation of Duration Reported by AWS: ', np.std(Sequence_duration['Duration']), 'ms')
# # Cost Reported by AWS
Sequence_sfn_logs.head()
Sequence_lambda_logs.head()
cost_list = []
for index, row in Sequence_sfn_logs.iterrows():
cost = 0
app_start = row['Start'] - 2.5
app_end = row['End'] + 2.5
lambda_logs = Sequence_lambda_logs.query(f"""UTCTimestamp>{app_start-4} and UTCTimestamp<{app_end+4}""")
for i, r in lambda_logs.iterrows():
memory_size = r['Memory_Size']
billed_duration = r['Billed_Duration']
cost += calculate_cost(rt=billed_duration, mem=memory_size) * 1000000
cost_list.append(cost)
Sequence_avg_cost_aws = np.mean(cost_list)
Sequence_mid_cost_aws = np.median(cost_list)
Sequence_percentile10_cost_aws = np.percentile(cost_list, 10)
Sequence_percentile90_cost_aws = np.percentile(cost_list, 90)
Sequence_std_cost_aws = np.std(cost_list)
print('Average Cost Reported by AWS: ', Sequence_avg_cost_aws, 'USD')
print('Median Cost Reported by AWS: ', Sequence_mid_cost_aws, 'USD')
print('10-th percentile of Cost Reported by AWS: ', Sequence_percentile10_cost_aws, 'USD')
print('90-th percentile Cost Reported by AWS: ', Sequence_percentile90_cost_aws, 'USD')
print('Standard Deviation of Cost Reported by AWS: ', Sequence_std_cost_aws, 'USD')
# # End-to-end RT and Cost Derived from the Modeling Algorithm
import sys
sys.path.append('../../')
from slappsim.Structures import *
from slappsim.Function import *
from slappsim.PetriApp import *
from slappsim.States import *
Structures_lambda_logs = pd.read_csv('../structures/Structures_lambda_logs.csv', low_memory=False)
Structures_lambda_logs.columns = ['Function', 'Memory_Size', 'Max_Memory_Used', 'Duration', 'Billed_Duration',
'UTCTimestamp', 'RequestId']
scheduling_overhead = pd.read_csv('../sfn-delay/Scheduling_Overhead.csv')
scheduling_overhead = np.array(scheduling_overhead['scheduling_overhead'].to_list())
function_execution_delay = pd.read_csv('../sfn-delay/Function_Execution_Delay.csv')
function_execution_delay = np.array(function_execution_delay['Duration'].to_list())
rs = np.random.RandomState(64)
random.seed(64)
# ## Define the application
f1_rt = np.array(Structures_lambda_logs.query(f"Function=='f1'")['Duration'].to_list()[500:9501])
f1_pp_fun = partial(rs.choice, a=f1_rt)
f1 = Function(pf_fun=f1_pp_fun, mem=mem_config_list['f1'], name='f1')
f2_rt = np.array(Structures_lambda_logs.query(f"Function=='f2'")['Duration'].to_list()[500:9501])
f2_pp_fun = partial(rs.choice, a=f2_rt)
f2 = Function(pf_fun=f2_pp_fun, mem=mem_config_list['f2'], name='f2')
f3_rt = np.array(Structures_lambda_logs.query(f"Function=='f3'")['Duration'].to_list()[500:9501])
f3_pp_fun = partial(rs.choice, a=f3_rt)
f3 = Function(pf_fun=f3_pp_fun, mem=mem_config_list['f3'], name='f3')
f4_rt = np.array(Structures_lambda_logs.query(f"Function=='f4'")['Duration'].to_list()[500:9501])
f4_pp_fun = partial(rs.choice, a=f4_rt)
f4 = Function(pf_fun=f4_pp_fun, mem=mem_config_list['f4'], name='f4')
sfn_scheduling_overhead_fun = partial(rs.choice, a=scheduling_overhead)
function_execution_delay_fun = partial(rs.choice, a=function_execution_delay)
delays = {'FunctionExecution': function_execution_delay_fun, 'SchedulingOverhead': sfn_scheduling_overhead_fun}
start = Start()
end = End()
sequence1 = Sequence(actions=[f1, f2, f3, f4])
structures = [sequence1]
i1 = InArc(place=start)
o1 = OutArc(place=sequence1.structure_start)
t1 = Transition(in_arcs=[i1], out_arcs=[o1])
i2 = InArc(place=sequence1.structure_end)
o2 = OutArc(place=end)
t2 = Transition(in_arcs=[i2], out_arcs=[o2])
transitions = [t1, t2]
transitions += sequence1.transitions
AppSequence = PetriApp(transitions=transitions,
functions=[f1, f2, f3, f4],
structures=structures,
delays=delays)
# ## Run the modeling algorithm
ert = []
ec = []
for i in range(100000):
rt, c, s, logs = AppSequence.execute()
ert.append(rt)
ec.append(c * 1000000)
AppSequence.reset()
Sequence_avg_cost_mdl = np.mean(ec)
Sequence_mid_cost_mdl = np.median(ec)
Sequence_percentile10_cost_mdl = np.percentile(ec, 10)
Sequence_percentile90_cost_mdl = np.percentile(ec, 90)
Sequence_std_cost_mdl = np.std(ec)
Sequence_avg_ert_mdl = np.mean(ert)
Sequence_mid_ert_mdl = np.median(ert)
Sequence_percentile10_ert_mdl = np.percentile(ert, 10)
Sequence_percentile90_ert_mdl = np.percentile(ert, 90)
Sequence_std_ert_mdl = np.std(ert)
print('Average Duration Reported by Algorithm: ', Sequence_avg_ert_mdl, 'ms')
print('Median Duration Reported by Algorithm: ', Sequence_mid_ert_mdl, 'ms')
print('10-th percentile of Duration Reported by Algorithm: ', Sequence_percentile10_ert_mdl, 'ms')
print('90-th percentile Duration Reported by Algorithm: ', Sequence_percentile90_ert_mdl, 'ms')
print('Standard Deviation of Duration Reported by Algorithm: ', Sequence_std_ert_mdl, 'ms')
print('Average Cost Reported by Algorithm: ', Sequence_avg_cost_mdl, 'USD')
print('Median Cost Reported by Algorithm: ', Sequence_mid_cost_mdl, 'USD')
print('10-th percentile of Cost Reported by Algorithm: ', Sequence_percentile10_cost_mdl, 'USD')
print('90-th percentile Cost Reported by Algorithm: ', Sequence_percentile90_cost_mdl, 'USD')
print('Standard Deviation of Cost Reported by Algorithm: ', Sequence_std_cost_mdl, 'USD')
Sequence_cost_aws = pd.DataFrame(pd.Series(cost_list), columns=['Cost'])
Sequence_cost_aws.to_csv('Sequence_cost_aws.csv', index=False)
Sequence_duration_model = pd.DataFrame(pd.Series(ert), columns=['Duration'])
Sequence_duration_model.to_csv('Sequence_duration_model.csv', index=False)
Sequence_cost_model = pd.DataFrame(pd.Series(ec), columns=['Cost'])
Sequence_cost_model.to_csv('Sequence_cost_model.csv', index=False)
|
modeling/Sequence/Sequence.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python385jvsc74a57bd016e3f6170213bc8b93f4af10cda404198c09c84502cee13f546aa00fc3fa8f5f
# ---
# +
## Unidade 2
# -
# ### Exercício 8: Rede Social 👑
# Você irá fazer uma rede social por meio de um vetor de pessoas *pessoas* e matriz *amizades*, similar ao apresentado abaixo:
# 
#
# em que, na matriz *amizades*, cada linha e coluna correspondem a pessoas que podem manter uma relação de amizade entre si. Além disso, a pessoas da i-ésima linha (e coluna!) corresponde a mesma pessoa na posição i no vetor *pessoas*. Exemplo, a pessoa na linha 0 (e coluna 0) é *Alice*, da mesma forma, a pessoa na linha e coluna 2 é a *Carol*.
#
# Nesta matriz o valor é 1 caso uma pessoa, representada na linha, e amiga da pessoa representada pela coluna. Por exemplo, *Alice* é amiga apenas de *Danielle* (veja a linha 0). Nesta rede social (assim como na vida 💁) pode ocorrer falta de reciprocidade entre as amizades, por exemplo: *Bob* é amigo de *Alice* mas *Alice* não é amiga de *Bob*.
#
# Aqui vai duas funções que você deverá fazer e testá-las por meio da matriz e vetor:
#
# - `exibe_amigos`: por meio do vetor *pessoas*, matriz *amizades* e o nome de uma pessoa, exibe o amigos desta pessoa. Você deverá armazenar esta lista de amigos em um vetor e retorná-lo.
#
# - `exibe_amigos_em_comum`: por meio do vetor *pessoas*, da matriz *amizades* e do nome de uma pessoa X e uma pessoa Y, exibe os amigos em comum entre essas duas pessoas. Você deverá armazenar essa lista de amigos em comum em um vetor e retorná-lo.
#
# Lembre-se de imprimir todas as saídas de forma que fique claro de qual entrada ela representa. A função *obtem_posicao_elemento* (exercício 3) poderá ajudar, porém *CHAME-A* e *não implemente novamente*.
#
# #implemente sua função aqui!
#
# def exibe_amigos(pessoas, amizades, nome_pessoa):
# indice = pessoas.index(nome_pessoa)
# amigos = []
#
# for ind, number in enumerate(amizades[indice]):
# if number == 1:
# amigos.append(pessoas[ind])
#
# return amigos
#
# def exibe_amigos_em_comum(pessoas, amizades, nome_pessoa_1, nome_pessoa_2):
# indice_1 = pessoas.index(nome_pessoa_1)
# indice_2 = pessoas.index(nome_pessoa_2)
#
# amigos = []
#
# for column in range(len(amizades)):
# if amizades[indice_1][column] == 1 and amizades[indice_2][column] == 1:
# amigos.append(pessoas[column])
#
# return amigos
#
# +
#crie os testes aqui!
# People and their (possibly non-reciprocal) friendship matrix.
pessoas = ["Alice","Bob","Carol"]
amizades = [[0,1,0],
[1,1,0],
[1,1,0]]
# NOTE(review): exibe_amigos / exibe_amigos_em_comum only exist as
# commented-out code inside the markdown cell above — running this cell
# as-is raises NameError. Move those definitions into a code cell first.
amigos = exibe_amigos(pessoas, amizades,"Bob")
print(f"Amigos de Bob: {amigos}")
# Add more tests for exibe_amigos and for exibe_amigos_em_comum.
amigos = exibe_amigos(pessoas, amizades,"Carol")
print(f"Amigos de Carol: {amigos}")
amigos = exibe_amigos(pessoas, amizades,"Alice")
print(f"Amigos de Alice: {amigos}")
print(f"Amigos de Alice e Carol: {exibe_amigos_em_comum(pessoas, amizades, 'Alice', 'Carol')}")
# -
# ## Unidade 3
# ### Exercício 2: Operações em string
# Nesse exercício você irá implementar duas funções que auxiliarão nos exercícios posteriores:
#
#
# - **elimina_caracteres** (2 parâmetros) Elimina todas as ocorrências dos caracteres especificados de um texto. Tanto o texto quanto os caracteres a serem removidos são passados como parâmetros. Exemplo: `elimina_caracteres('correndo contra o tempo', 'coe')` devem resultar na string `rrnd cntra tmp`. Não altere o *for* já criado! O método *.replace* pode te ajudar! consulte [documentação](https://docs.python.org/3.5/library/stdtypes.html#string-methods) ou os [slides](https://daniel-hasan.github.io/cefet-web-grad/classes/python2/#5).
#
# DICA: Será necessario usar o *.replace* uma vez para cada caractere especificado (ou seja, dentro de um laço).
def elimina_caracteres(texto, caracteres_para_eliminar):
    """Return *texto* with every occurrence of each character in
    *caracteres_para_eliminar* removed."""
    # str.translate deletes all listed characters in a single pass.
    tabla = str.maketrans('', '', caracteres_para_eliminar)
    return texto.translate(tabla)
# +
# Faça os testes
resultado_1 = elimina_caracteres('correndo contra o tempo', 'coe')
resultado_2 = elimina_caracteres('trigo para tigres tristes', 'ieo') #saída: trg para tgrs trsts
#lembre-se de imprimir os resultados
print(resultado_1)
print(resultado_2)
# -
# - **substitua_caracteres**: (3 parâmetros) Dado um texto, uma string de procura e uma string de reposição, sendo que a string de procura e reposição são de mesmo tamanho. Substitua no texto o caractere na posição i da string de procura pelo caractere na mesma posição i na string de reposição. O texto, os caracteres a serem procurados no texto e os caracteres a serem colocados no lugar serão passados como parâmetro.
#
# Exemplo: `substitua_caracteres('o sapo nao lava o pe', 'aoe', 'iiu')` devem resultar na string `'i sipi nii livi i pu'`.
#
# DICA: Será necessario usar o *.replace* uma vez para cada caractere especificado (ou seja, dentro de um laço).
def substitua_caracteres(texto, caracteres_procura, caracteres_substituir_por):
    """Replace each character of *caracteres_procura* with the character at the
    same position in *caracteres_substituir_por*, applied left to right.

    Note: substitutions are sequential, so an inserted character may itself be
    replaced by a later pair — this matches the original behavior.
    """
    resultado = texto
    for procurado, substituto in zip(caracteres_procura, caracteres_substituir_por):
        resultado = resultado.replace(procurado, substituto)
    return resultado
# +
#testes! Faça mais para verificar todos os casos.
# Lembre-se de imprimir o resultado.
resultado_1 = substitua_caracteres('o sapo nao lava o pe', 'aoe', 'iiu')
resultado_2 = substitua_caracteres('ana comprou uma ariranha', 'aoi', 'uee') #saida: unu cempreu umu urerunhu
print(resultado_1)
print(resultado_2)
# -
# ### Exercício 3: Palíndromos 👑
# Segundo a [Oxford Languages](https://languages.oup.com/google-dictionary-pt/), *palíndromos* são textos que se pode ler, indiferentemente, da esquerda para a direita ou vice-versa. Por exemplo: "arara", "asa", "mirim"... E também as frases: "A cara rajada da jararaca", "Socorram-me, subi no ônibus em Marrocos"...Perceba que, em frases, deve-se ignorar a pontuação, acentuação, espaços e maiúsculas/minúsculas.
#
# Implemente a função *verifica_palindromo* que recebe como parâmetro uma string (texto ou única palavra) e retorna *true* caso o parâmetro seja um palíndromo ou *false*, caso contrário.
#
# As funções do exercício anterior podem lhe ajudar para processamento de string, para isso *chame-as* e *não implemente-as*. Ignore as seguintes pontuações: `, - ! ?` e o espaço em branco. Além disso, acentos agudos, til e circunflexo em vogais devem ser ignorados.
#
#implemete sua função aqui!
def verifica_palindromo(texto: str) -> bool:
    """True if *texto* reads the same forwards and backwards, ignoring spaces,
    the punctuation , - ! ?, letter case and accents on vowels."""
    # One translate pass does the same normalization the original performed
    # via elimina_caracteres / substitua_caracteres: drop punctuation and
    # spaces, and map accented vowels onto their plain forms.
    tabela = str.maketrans("óõôáãâéêíîúû", "oooaaaeeiiuu", " ,-!?")
    limpo = texto.lower().translate(tabela)
    return limpo == limpo[::-1]
#testes
# Quick checks: a phrase palindrome, a non-palindrome and a word palindrome.
print(verifica_palindromo("após a sopa"))
print(verifica_palindromo("azul"))
print(verifica_palindromo("arara"))
# ### Exercício 5: Funções como parâmetro 2
# Você irá implementar a função *executa_operacao_vetor* que recebe como parâmetro uma operação (função) e dois vetores, *vetor1* e *vetor2*, de mesmo tamanho, e executa uma operação entre os elementos da mesma posição deste vetor.
# Esta operação é uma função que será chamada para cada posição deste vetor (ex: *operacao(vetor1[i], vetor2[i])*) e retornará o resultado desta operação. Caso os dois vetores sejam de tamanho diferentes, deve-se exibir uma *mensagem de erro* e não realizar a operação.
#
# Veja o exemplo abaixo, considerando a operação soma (implementada no exercício 0.2).
#
# 
#
# Você deverá implementar a função *executa_operacao_vetor* e três operações distintas (por exemplo, *soma(a, b)*, *multiplica(a, b)* e *subtrai(a, b)*), cada uma, criando uma função de forma uma diferente, siga o tutorial e o exercício anterior.
#
# PS: Lembre-se de sempre rodar a função antes e trocar os null
# +
#implemente sua função aqui!
def soma(a, b):
    """Return the sum a + b."""
    return a + b

def multiplica(a, b):
    """Return the product a * b."""
    return a * b

def subtrai(a, b):
    """Return the difference a - b."""
    return a - b
def executa_operacao_vetor(operacao, vetor_1, vetor_2):
    """Apply *operacao* element-wise over two equally sized vectors.

    Raises Exception when the vectors differ in length.
    """
    if len(vetor_1) != len(vetor_2):
        raise Exception("Os vetores passados são de tamanho diferentes")
    # zip is safe here because the lengths were just checked.
    return [operacao(a, b) for a, b in zip(vetor_1, vetor_2)]
# +
#testes! Lembre-se de testar todas as funções e imprimir o resultado
vetor_1 = [4,2,-1,10]
vetor_2 = [10,2,3,5]
# Element-wise sum, difference and product of the two vectors.
print(executa_operacao_vetor(soma, vetor_1, vetor_2))
print(executa_operacao_vetor(subtrai, vetor_1, vetor_2))
print(executa_operacao_vetor(multiplica, vetor_1, vetor_2))
# +
#Lançamento de exceção
print(executa_operacao_vetor(soma, [1,2], []))
# -
|
python_bas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Import MNIST data
# +
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
# -
# ## 2. Create computation graph
import tensorflow as tf
import numpy as np
print(tf.__version__)
# ### 2.1 Define the neural network structure
n_inputs = 28*28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# ### 2.2. Create placeholders for training data
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="InputImages")
y = tf.placeholder(tf.int64, shape=(None), name="TrueLabel")
# ### 2.3 Create neural network with two hidden layers and one output layer
hidden1 = tf.layers.dense(X, n_hidden1, name='hidden1', activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name='hidden2', activation=tf.nn.relu)
output = tf.layers.dense(hidden2, n_outputs, name='output')
# ### 2.4. Define cost function
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=output)
cost = tf.reduce_mean(cross_entropy)
# ### 2.5. Define optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
training = optimizer.minimize(cost)
# ### 2.5. Define how to calculate prediction accuracy
compare = tf.nn.in_top_k(output,y,1)
accuracy = tf.reduce_mean(tf.cast(compare,tf.float32))
# ## 3. Execute the computation graph
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Define mini-batch size.
n_epochs = 40
batch_size = 50
n_batches = int(np.ceil(mnist.train.num_examples/batch_size))
# Train the network with mini-batch gradient descent, reporting accuracy
# after every epoch, then persist the final parameters with the Saver.
with tf.Session() as sess:
    sess.run(init)
    for epoch in np.arange(n_epochs):
        for batch_index in np.arange(n_batches):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training,feed_dict={X:X_batch, y:y_batch})
        # NOTE(review): train accuracy is measured on the *last* mini-batch
        # only, not on the full training set — confirm this is intended.
        accuracy_train = sess.run(accuracy,feed_dict={X:X_batch, y:y_batch})
        accuracy_test = sess.run(accuracy,feed_dict={X:mnist.test.images, y:mnist.test.labels})
        print(epoch, "Train accuracy:", accuracy_train, "Test accuracy:", accuracy_test)
    save_patch = saver.save(sess,'./my_final_model.ckpt')
# ## 4. Make predictions
with tf.Session() as sess:
saver.restore(sess, "./my_final_model.ckpt") # or better, use save_path
X_new_scaled = mnist.test.images[:20]
Z = output.eval(feed_dict={X: X_new_scaled})
y_pred = np.argmax(Z, axis=1)
print("Predicted classes:", y_pred)
print("Actual classes: ", mnist.test.labels[:20])
# ## 5. Visualize the computation graph
# +
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def.

    Copies every node into a fresh GraphDef; constant tensors whose raw
    content exceeds `max_const_size` bytes are replaced by a short marker.
    """
    stripped = tf.GraphDef()
    for node in graph_def.node:
        node_copy = stripped.node.add()
        node_copy.MergeFrom(node)
        if node_copy.op != 'Const':
            continue
        tensor = node_copy.attr['value'].tensor
        size = len(tensor.tensor_content)
        if size > max_const_size:
            tensor.tensor_content = b"<stripped %d bytes>"%size
    return stripped
def show_graph(graph_def, max_const_size=32):
    """Visualize TensorFlow graph."""
    # Accept a tf.Graph as well as an already-serialized GraphDef proto.
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    # Shrink large constants so the serialized graph embeds cheaply in HTML.
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    # Random suffix keeps element ids unique across repeated notebook calls.
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # NOTE(review): replacing '"' with '"' is a no-op — this looks like a
    # mangled '&quot;' escape from the original source; verify upstream.
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '"'))
    display(HTML(iframe))
# -
show_graph(tf.get_default_graph())
|
Lab9/TF_DNN_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def train_LSTM(model, input_, target, epochs = 100, optimizer = 'Adam', loss_fn = 'MSE', lr = 0.001):
    """Train `model` on the (input_, target) sequence pairs on CUDA.

    Parameters:
        model     - LSTM module exposing `hidden` and `init_hidden()`
        input_    - sequence of input samples (indexable)
        target    - sequence of target samples aligned with `input_`
        epochs    - number of full passes over the data
        optimizer - 'Adam' or 'RMSProp'
        loss_fn   - only 'MSE' is supported
        lr        - learning rate
    Returns:
        (train_loss_list, model): per-epoch mean RMSE and the trained model,
        or None when an unknown loss/optimizer name is given.
    """
    # NOTE(review): the docstring comment mentioned RMSE as an option, but only
    # 'MSE' is implemented; per-sample sqrt below already yields an RMSE metric.
    if loss_fn == 'MSE':
        criterion = nn.MSELoss()
    else:
        print("Warning : Wrong loss Function")
        return
    if optimizer == "Adam":
        optim_fn = optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "RMSProp":
        optim_fn = optim.RMSprop(model.parameters(), lr=lr)
    else:
        print("warning : wrong optimizer")
        return
    train_loss_list = []
    for i in range(epochs):
        train_loss = 0.0
        model.train()
        for j in range(len(input_)):
            # BUG FIX: the original indexed the *globals* `x` and `y` here
            # (while averaging by len(input_)), ignoring the parameters.
            seq, labels = input_[j], target[j]
            seq = torch.tensor(seq[np.newaxis], dtype=torch.float32, device='cuda')
            labels = torch.tensor(labels[np.newaxis], dtype=torch.float32, device='cuda')
            model.zero_grad()
            optim_fn.zero_grad()
            # Re-initialize the recurrent state for every sample.
            model.hidden = [hidden.to('cuda') for hidden in model.init_hidden()]
            y_pred = model(seq)
            loss = criterion(y_pred, labels)
            loss.backward()
            optim_fn.step()
            # Accumulate RMSE per sample (loss is an MSE).
            train_loss += np.sqrt(loss.item())
        train_loss = train_loss / len(input_)
        train_loss_list.append(train_loss)
        if i % 10 == 0.0:
            print("{}번째 epoch의 train loss =".format(i), train_loss)
    return train_loss_list, model
|
cs224w/one3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing libraries and Dataset
# +
import pandas as pd
import numpy as np
import os, time, gc
import lightgbm as lgb
from sklearn.metrics import r2_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import ensemble, tree, svm, naive_bayes, neighbors,multiclass, linear_model,calibration , gaussian_process, neural_network , semi_supervised , discriminant_analysis
from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from lightgbm import LGBMModel,LGBMClassifier
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from IPython.display import display
from IPython.core.display import HTML
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
# -
train = pd.read_csv('data/train_data.csv')
test = pd.read_csv('data/test_data.csv')
train_dic = pd.read_csv('data/train_data_dictionary.csv')
display(HTML('<h3>Dataset Description</h3>'))
display(train_dic)
display(HTML('<h3>Sample Data From Training Dataset</h3>'))
train.sample(5)
display(HTML('<h3>Sample Data From Testing Dataset</h3>'))
test.sample(5)
display(HTML('<h3>Duplicate Rows in Datasets</h3>'))
display(HTML(f'<b>Duplicate Rows in Training Dataset</b> : {train.shape[0] - train.drop_duplicates(keep="first", inplace=False).shape[0]} Rows'))
display(HTML(f'<b>Duplicate Rows in Testing Dataset</b> : {test.shape[0] - test.drop_duplicates(keep="first", inplace=False).shape[0]} Rows'))
# # Data Exploration
display(HTML(f'<b>Training Dataset Shape</b> : {train.shape[0]} Rows * {train.shape[1]} Columns'))
display(HTML(f'<b>Testing Dataset Shape</b> : {test.shape[0]} Rows * {test.shape[1]} Columns'))
display(HTML('<h3>Colums Description for Training Dataset</h3>'))
display(HTML('<ul><li>City_Code_Patient & Bed Grade Have Null Values.</li>\
<li>Dataset is highly Skewed that can Effect the Predictions.</li></ul>'))
# Per-column summary of the training set: cardinality, missing-value
# percentage, share of the most frequent category, and dtype.
stats = []
for f in train.columns:
    stats.append((f ,train[f].nunique() , train[f].isnull().sum()*100/train.shape[0] ,
                  train[f].value_counts(normalize = True , dropna = True).values[0]*100 ,
                  train[f].dtype))
stats_df = pd.DataFrame(stats , columns= ['Columns', 'Number of Unique Values' , 'Percentage of Data missing in train' ,
                                          'Percentage of Biggest Category' ,
                                          'Columns DataType'])
display(stats_df)
# display(stats_df.sort_values('Percentage of Data missing in train', ascending=False))
display(HTML('<h3>Correlation Matrix for Training Dataset</h3>'))
display(HTML('<ul><li>No Correlation was found in Dataset.</li></ul>'))
correlations = train.corr(method='pearson')
fig, axs = plt.subplots(figsize=(16, 16))
sns.heatmap(correlations)
display(HTML('<h3>Dataset of Most Occuring Patient ID</h3>'))
display(HTML('\
<ul><li>Severity is noted as Extreme</li>\
<li>Patient have gone through different Hospital_code, Ward_Type, Ward_Facility_Code and other Departments</li>\
</ul>'))
train[train['patientid'] == 66714].sample(5)
# ## Data Visualizations
display(HTML('<h3>Value Count Plot</h3><i>Dataframe show highly Skewed Data(Imbalanced)</i>'))
categorical_col = train.select_dtypes(include=['object']).columns
plt.style.use('ggplot')
for column in categorical_col:
plt.figure(figsize=(20,4))
plt.subplot(121)
train[column].value_counts().plot(kind='bar')
plt.title(column)
# # Data Preprocessing
display(HTML('<h3>Combining Both Datasets</h3>'))
train['is_train'] = True
test['is_train'] = False
data = pd.concat([train, test] , axis = 0)
data.sample(10)
display(HTML('<h3>Handling Missing Values With Forward Fill</h3>'))
data['Bed Grade']=data['Bed Grade'].fillna(method="ffill",axis=0)
data['City_Code_Patient']=data['City_Code_Patient'].fillna(method="ffill",axis=0)
data[['Bed Grade' , 'City_Code_Patient']].isnull().sum()
data['Avg_Bill_per_patient'] = data.groupby('patientid')['Admission_Deposit'].transform('sum')
data['Mean_Bill_per_patient'] = data.groupby('patientid')['Admission_Deposit'].transform('mean')
data['Avg_Rooms_per_hospital_code'] = data.groupby('Hospital_code')['Available Extra Rooms in Hospital'].transform('mean')
data['Max_Rooms_per_hospital_code'] = data.groupby('Hospital_code')['Available Extra Rooms in Hospital'].transform('max')
data['Avg_Rooms_per_hospital_type_code'] = data.groupby('Hospital_type_code')['Available Extra Rooms in Hospital'].transform('mean')
data.sample(10)
# +
le = LabelEncoder()
mm_scaler = MinMaxScaler()
cols = list(data.select_dtypes(include='object').columns)
cols.remove('Stay')
for i in cols:
data[i] = le.fit_transform(data[i])
data[['Admission_Deposit']] = mm_scaler.fit_transform(data[['Admission_Deposit']])
data[['Avg_Bill_per_patient']] = mm_scaler.fit_transform(data[['Avg_Bill_per_patient']])
data[['Avg_Rooms_per_hospital_code']] = mm_scaler.fit_transform(data[['Avg_Rooms_per_hospital_code']])
data[['Max_Rooms_per_hospital_code']] = mm_scaler.fit_transform(data[['Max_Rooms_per_hospital_code']])
data[['Avg_Rooms_per_hospital_type_code']] = mm_scaler.fit_transform(data[['Avg_Rooms_per_hospital_type_code']])
data[['Mean_Bill_per_patient']] = mm_scaler.fit_transform(data[['Mean_Bill_per_patient']])
# -
data = data.sample(frac = 1.0)
train = data[data['is_train'] == True]
test = data[data['is_train'] == False]
Y_train = train['Stay']
to_drop = ['case_id' , 'patientid', 'Stay', 'is_train']
train.drop(columns = to_drop , inplace = True)
test = test[train.columns]
display(HTML('<h3>Correlation Matrix for Training Dataset</h3>'))
display(HTML('<ul><li>No Correlation was found in Dataset.</li></ul>'))
correlations = train.corr(method='pearson')
fig, axs = plt.subplots(figsize=(16, 16))
sns.heatmap(correlations)
x_train, x_test, y_train, y_test = train_test_split(train , Y_train , test_size = 0.1 , shuffle = 2020)
MLA = [
# neighbors.KNeighborsClassifier(n_jobs = -1), #Take too much time Too much
# semi_supervised.LabelPropagation(), # RAM Run Out
# svm.SVC(probability=True), # Take too much time
# svm.NuSVC(), # specified nu is infeasible
# linear_model.LogisticRegressionCV(multi_class='multinomial'), # Take too much time Too much
# gaussian_process.GaussianProcessClassifier(n_jobs = -1), # require too much RAM
# calibration.CalibratedClassifierCV(),# take too much time #8min 8s
# ensemble.BaggingClassifier(n_jobs = -1),
# neural_network.MLPClassifier(), # Take too much time
# ensemble.ExtraTreesClassifier(n_jobs = -1),
# svm.LinearSVC(multi_class='crammer_singer'),
neighbors.NearestCentroid(),
tree.DecisionTreeClassifier(),
tree.ExtraTreeClassifier(),
naive_bayes.GaussianNB(),
naive_bayes.BernoulliNB(),
discriminant_analysis.LinearDiscriminantAnalysis(),
discriminant_analysis.QuadraticDiscriminantAnalysis(),
linear_model.RidgeClassifier(),
linear_model.RidgeClassifierCV(),
linear_model.Perceptron(),
linear_model.PassiveAggressiveClassifier(),
linear_model.SGDClassifier(),
ensemble.RandomForestClassifier(n_jobs = -1),
ensemble.AdaBoostClassifier(), # take too much time
ensemble.GradientBoostingClassifier(),
]
# %%time
# Fit every candidate estimator in MLA and record its name, accuracy on the
# hold-out split, and training time in a summary DataFrame.
col = []
algorithms = pd.DataFrame(columns = col)
ind = 0
for a in MLA:
    gc.collect()
    print(a.__class__.__name__)
    # BUG FIX: the clock was started once *before* the loop, so each row
    # stored the cumulative time of all models so far; time each model
    # separately by resetting the start time per iteration.
    time_now = time.time()
    a.fit(x_train , y_train)
    pred = a.predict(x_test)
    acc = accuracy_score(y_test , pred)
    Alg = a.__class__.__name__
    algorithms.loc[ind , 'Algorithm'] = Alg
    algorithms.loc[ind , 'Accuracy'] = round(acc * 100 , 2)
    algorithms.loc[ind , 'Time_Taken_to_train'] = round(time.time() - time_now , 5)
    ind += 1
algorithms.sort_values(by=['Accuracy'] , ascending = False , inplace = True)
# Offset timings by 100 so they can share an axis with the accuracy bars.
algorithms['Time_Taken_to_train_100'] = algorithms['Time_Taken_to_train']*1 + 100
algorithms
plt.figure(figsize=(24,24))
plt.rcParams.update({'font.size': 20})
g = sns.barplot('Accuracy' , 'Algorithm' , data = algorithms)
g.set_xlabel('CV_score and Time')
g.set_title('Algo Score vs Algorithm with Time scale')
g.plot( 'Time_Taken_to_train_100','Algorithm', data=algorithms, linestyle='--', marker='o')
plt.savefig('Main.jpg' , quality = 95)
# # Using Light Gradient Boosting Machine
# +
clf = LGBMClassifier( n_estimators=1000,
#objective ='multiclass',
feature_name ='Stay',max_depth=20,
eval_metric='multiclass',
)
clf.fit(x_train, y_train,
eval_set=[(x_test,y_test)],
verbose=50,early_stopping_rounds=50
)
# +
p_train_lgbm = clf.predict(x_train)
print("train score :",accuracy_score(y_train, p_train_lgbm))
p_train_lgbm_val = clf.predict(x_test)
print("Validation score :",accuracy_score(y_test, p_train_lgbm_val))
# -
# Plotting Most Important Features in Gradient Boosting Machine Model
plt.rcParams.update({'font.size': 8})
feat_importances = pd.Series(clf.feature_importances_, index=x_train.columns)
feat_importances.nlargest(20).plot(kind='barh')
|
Assignment 1/Healthcare.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# +
def read_dataset():
    """Load the sonar CSV; return (X, Y) with Y one-hot encoded.

    X holds the first 60 numeric feature columns; the last column is the
    class label, integer-encoded then one-hot encoded.
    """
    df = pd.read_csv("Naval Mine/dataset_40_sonar.csv")
    features = df[df.columns[0:60]].values
    raw_labels = df[df.columns[60]]
    encoder = LabelEncoder()
    encoder.fit(raw_labels)
    int_labels = encoder.transform(raw_labels)
    return (features, one_hot_encode(int_labels))
def one_hot_encode(labels):
    """Return a (n_samples, n_classes) one-hot matrix for integer labels.

    Assumes labels are integers in 0..n_classes-1 (they index the columns).
    """
    n_samples = len(labels)
    n_classes = len(np.unique(labels))
    encoded = np.zeros((n_samples, n_classes))
    encoded[np.arange(n_samples), labels] = 1
    return encoded
X, Y=read_dataset()
X, Y=shuffle(X, Y,random_state=1)
train_x,test_x,train_y,test_y = train_test_split(X, Y,test_size=0.20,random_state=415)
print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)
print(Y.shape)
# +
# Important parameter and variables to work with tensors
learning_rate=0.3
training_epochs=1000
cost_history = np.empty(shape=[1], dtype=float)
n_dim = X.shape[1]
print("n_dim= ",n_dim)
n_class = 2
model_path = "Naval Mine/NMI"
# Define the number of layers and number of neurons for each layer
n_hidden_1 = 60
n_hidden_2 = 60
n_hidden_3 = 60
n_hidden_4 = 60
x = tf.placeholder(tf.float32,[None,n_dim])
y_ = tf.placeholder(tf.float32,[None,n_class])
w = tf.Variable(tf.zeros([n_dim,n_class]))
b = tf.Variable(tf.zeros(n_class))
weigths = {
'h1': tf.Variable(tf.truncated_normal([n_dim, n_hidden_1])),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
'h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
'out': tf.Variable(tf.truncated_normal([n_hidden_4, n_class]))
}
biases = {
'b1': tf.Variable(tf.truncated_normal([n_hidden_1])),
'b2': tf.Variable(tf.truncated_normal([n_hidden_2])),
'b3': tf.Variable(tf.truncated_normal([n_hidden_3])),
'b4': tf.Variable(tf.truncated_normal([n_hidden_4])),
'out': tf.Variable(tf.truncated_normal([n_class]))
}
#define our model
#define our model
def multilayer_perceptron(x, weigths, biases):
    """Forward pass: three sigmoid hidden layers, one ReLU hidden layer,
    then a linear output layer (logits)."""
    layer_specs = (
        ('h1', 'b1', tf.nn.sigmoid),
        ('h2', 'b2', tf.nn.sigmoid),
        ('h3', 'b3', tf.nn.sigmoid),
        ('h4', 'b4', tf.nn.relu),
    )
    activation = x
    for w_key, b_key, act in layer_specs:
        activation = act(tf.add(tf.matmul(activation, weigths[w_key]), biases[b_key]))
    # Linear output layer — no activation, raw logits.
    return tf.matmul(activation, weigths['out']) + biases['out']
init = tf.global_variables_initializer()
saver = tf.train.Saver()
y = multilayer_perceptron(x,weigths,biases)
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y,labels=y_))
training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
sess = tf.Session()
sess.run(init)
mse_history = []
accuracy_history = []
# Full-batch training loop: one gradient step per epoch over the whole
# training set, tracking cost, train accuracy and test-set MSE per epoch.
for epoch in range(training_epochs):
    sess.run(training_step, feed_dict = {x:train_x, y_:train_y})
    cost = sess.run(cost_function, feed_dict = {x:train_x, y_:train_y})
    cost_history = np.append(cost_history, cost)
    # NOTE(review): these ops are rebuilt inside the loop on every epoch,
    # growing the graph; tf.argmax is the non-deprecated spelling — confirm.
    correct_prediction = tf.equal(tf.arg_max(y,1), tf.arg_max(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    accuracy = sess.run(accuracy, feed_dict = {x: train_x, y_:train_y})
    accuracy_history.append(accuracy)
    # Mean squared error of raw logits against one-hot targets on the test set.
    pred_y = sess.run(y, feed_dict={x:test_x})
    mse = tf.reduce_mean(tf.square(pred_y - test_y))
    mse_ = sess.run(mse)
    mse_history.append(mse_)
    print('epoch= ',epoch,' cost=',cost,' mse=',mse_,' Train_accuracy=',accuracy)
# +
save_path = saver.save(sess,model_path)
print('Model Saved in file : ',save_path)
plt.plot(mse_history)
plt.show()
plt.plot(accuracy_history)
plt.show()
# +
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
print('Test Accuracy= ',(sess.run(accuracy, feed_dict={x: test_x, y_:test_y})))
pred_y = sess.run(y, feed_dict={x:test_x})
mse = tf.reduce_mean(tf.square(pred_y - test_y))
print('MSE= ',sess.run(mse))
# -
|
3(Training). Naval Mine Identifier (Multilayer Perceptron) NN Tensorflow .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Programming exercises##
#
# +
# 1. Write a program that finds all the numbers that are divisible by 7
#    but are not multiples of 5 between 2000 and 3200 (inclusive).
# BUG FIX: the original summed the matching numbers into an accumulator,
# which does not "find all the numbers" as the exercise asks — collect and
# print them instead.
result = [x for x in range(2000, 3200 + 1) if x % 7 == 0 and x % 5 != 0]
print(result)
# +
# 2. Read a number and report whether it is prime.
n = int(input("enter a number: "))
if n >= 1:
    # BUG FIX: the original initialized flag to True unconditionally, so 1
    # was reported as prime; numbers below 2 are not prime by definition.
    flag = n >= 2
    for i in range(2, n):
        if (n % i) == 0:
            flag = False
            break
    if flag:
        print('n is prime')
    else:
        print('n is not prime')
#Another method
# divisores = 0
# for i in range(1, n + 1):
# if n % i == 0:
# divisores += 1
# if divisores == 2:
# print('n is prime')
# else:
# print('n is not prime')
# +
# 3. Count how many numbers in a list are multiples of 2
#    (also collect the even numbers themselves in m).
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
m = [x for x in l if x % 2 == 0]
count = len(m)
print(count)
# +
# 4. Read a text string and print it reversed.
name = input('chain text: ')
# A negative-step slice walks the string back to front.
inverted = name[::-1]
print(inverted)
# +
# 5. """ An example program that illustrates the use of docstrings """
def nand(bool1, bool2):
    """Take two Boolean values bool1 and bool2
    and return their logical NAND: False only when both are True."""
    return not (bool1 and bool2)
# +
#6. Calculate the number of days since you have born
"""One way in which to determine the number of
days in a month is to subtract the first of
the given month from the first of the next month.
The result should be the number of days in the given month."""
import datetime
def days_in_month(year, month):
    """
    Inputs:
      year - an integer between datetime.MINYEAR and datetime.MAXYEAR
             representing the year
      month - an integer between 1 and 12 representing the month
    Returns:
      The number of days in the input month, or False for invalid input.
    """
    if not (datetime.MINYEAR <= year <= datetime.MAXYEAR) or not (1 <= month <= 12):
        return False
    first = datetime.date(year, month, 1)
    # Days in the month = first of the next month minus first of this month.
    # December rolls over to January of the following year.
    if month == 12:
        next_first = datetime.date(year + 1, 1, 1)
    else:
        next_first = datetime.date(year, month + 1, 1)
    return (next_first - first).days

def is_valid_date(year, month, day):
    """
    Inputs:
      year - an integer representing the year
      month - an integer representing the month
      day - an integer representing the day
    Returns:
      True if year-month-day is a valid date and False otherwise
    """
    days = days_in_month(year, month)
    # days is False for an out-of-range year/month, which also fails the
    # day-range check below.
    return bool(days) and (0 < day <= days)

def days_between(year1, month1, day1, year2, month2, day2):
    """
    Inputs:
      year1, month1, day1 - integers representing the first date
      year2, month2, day2 - integers representing the second date
    Returns:
      The number of days (an int) from the first date to the second date.
      Returns 0 if either date is invalid or the second date is not after
      the first date.
    """
    if not (is_valid_date(year1, month1, day1) and is_valid_date(year2, month2, day2)):
        return 0
    date1 = datetime.date(year1, month1, day1)
    date2 = datetime.date(year2, month2, day2)
    if date2 > date1:
        # BUG FIX: the original returned the raw datetime.timedelta object,
        # while the docstring promises "the number of days" — return .days.
        return (date2 - date1).days
    return 0

def age_in_days(year, month, day):
    """
    Inputs:
      year, month, day - integers representing the birthday
    Returns:
      The age of a person with the input birthday as of today, in days.
      Returns 0 if the input date is invalid or in the future.
    """
    if not is_valid_date(year, month, day):
        return 0
    today = datetime.date.today()
    birthday = datetime.date(year, month, day)
    if birthday >= today:
        return 0
    return days_between(year, month, day, today.year, today.month, today.day)
print(age_in_days(1995, 2, 18))
# +
#7. Format
mood1 = "happy"
mood2 = "Exited"
sentence1 = "I feel {1}, do you feel {0}? Or are you {0}? I'm not sure if we should be {1}.".format(mood1, mood2)
print(sentence1)
# +
#8. Format
name1 = "Pierre"
age1 = 7
name2 = "May"
age2 = 13
line1 = "{0:^7} {1:>3}".format(name1, age1)
line2 = "{0:^7} {1:>3}".format(name2, age2)
print(line1)
print(line2)
|
Problems.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BLU03 - Learning Notebook - Part 3 of 3 - Web scraping
#
#
# ## 1. Introduction
#
# In the context of data wrangling, we've already talked about three data sources: files, databases and public APIs.
# Now it's time to delve into the Web!
#
# As we all know, there is a huge amount of data in the Web. Whenever we search something on Google, it shows us thousands of web pages full of answers.
#
# However, there is a problem here: in most of the cases, the web pages show us the data in a beautiful but unstructured way. This makes sense, since the purpose of a web page is to be read by a human and not to have its content analysed by some computer program.
#
# So we are left with the boring task of copying and pasting the data we want into csv files or excel tables, possibly thousands of times, before feeding it to some data model...
#
# But worry no more!
#
# <img src="media/web_scraping_to_the_rescue.png" width=350/>
# ## 2. What is web scraping
#
# [Web scraping](https://en.wikipedia.org/wiki/Web_scraping) is the name given to the process of extracting data from web pages in an automated way.
# There are many [techniques](https://en.wikipedia.org/wiki/Web_scraping#Techniques) that can be used to do web scraping and the one we're going to explore here is HTML parsing.
#
# A web page is an HTML document, so HTML parsing means to split the contents of a web page into several small pieces and select the parts we find interesting. This technique is useful when we want to extract data from many web pages that share a common template.
# ## 3. Understanding the HTML code of a web page
#
# Before jumping to the part where we actually do web scraping, let's first understand the structure and code of a web page.
#
# Usually, a web page has 3 different types of code:
# * **HTML**: used to display the content of the web page
# * **CSS**: used to apply styles to the web page, it's what makes the page pretty
# * **JavaScript**: this is what makes the page dynamic, like triggering an action when a button is clicked.
#
# We'll focus now on the HTML part, since it's the one that is related what we want, which is data.
#
# In the file **../web_pages/nationalmuseum.html** you can see an example of an HTML document that represents a web page. Let's see the code.
# use ! type for Windows (use full path)
# ! cat web_pages/nationalmuseum.html
# And this is how the page looks in a browser.
#
# 
# As you can see above, an HTML page is a collection of HTML elements, where an element has the form:
# ```<tagname> content </tagname>```.
#
# HTML elements can be nested with other HTML elements, meaning that the content between the start and end tags can be a set of elements.
#
# An HTML element can also have no content. In that case, it's simply a tagname, like this:
# ```<tagname>```.
#
# Let's go through the elements in this page:
# - the ```<!DOCTYPE html>``` says that this document is an HTML document
# - the ```<html>``` element is the root element of an HTML page
# - the ```<body>``` element has the page content
# - the ```<h1>``` element is a large heading
# - the ```<h3>``` element is a smaller heading
# - the ```<p>``` element is a paragraph
# - the ```<br>``` element is a line break, which is an example of an element without content
# ## 4. How to scrape the web
#
# Now let's go to the fun part!
#
# Going back to our movies database, you can see that there are some characters for which we're missing the character_name.
# You can try to query the database to find which are these characters, but in the meanwhile, we gathered them in file **../data/missing_character_names.csv**.
# +
import pandas as pd
import requests
# Import some helper functions to print shorter outputs
import utils
from bs4 import BeautifulSoup
# -
missing_character_names = pd.read_csv('data/missing_character_names.csv')
missing_character_names.head()
# Can you think of a good way to get this missing data? An internet movie database seems like a very good candidate! Fortunately, the LDSA has got you covered.
#
# As an exercise, let's try to find one actor's character name in the movie with ID `tt0116405`. A quick internet search reveals that this movie is called **Getting Away With Murder**.
#
# The first thing to do is to open the web page that has the content we're interested in: **https://s02-infrastructure.s3.eu-west-1.amazonaws.com/ldsa_imdb/index.html#**
#
# It should look like this:
#
# <img src="media/imdb_movie_page.png"/>
#
# Now, let's scroll down to the cast section of the page, since this is what we'll be scraping.
#
# <img src="media/imdb_cast.png"/>
#
# In order to get the page's content, we'll use a GET request.
#
# We can get the content from the response, which will be... a bunch of incomprehensible HTML.
# +
response = requests.get("https://s02-infrastructure.s3.eu-west-1.amazonaws.com/ldsa_imdb/index.html#")
# Printing short output, if you want to see everything, delete the friendly_print function call
utils.friendly_print_string(response.content)
# -
# And here is where **Beautiful Soup** can help us. Beautiful soup is a package for parsing HTML documents. It allows us to break down HTML documents into smaller components, and extract the information we need. You can check its documentation [here](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
#
# First, we need to create an instance of the BeautifulSoup class, passing it the HTML document to parse.
soup = BeautifulSoup(response.content, 'html.parser')
# By calling the **prettify** method, we can see the HTML elements of the document in a pretty and indented way.
# Printing short output, if you want to see everything, delete the friendly_print function call
utils.friendly_print_string(soup.prettify())
# By calling the **children** property of the soup, we can parse it into smaller elements.
#
# We can see that this soup has two top-level elements:
#
# * a Doctype element, with the value 'html'.
# * a Tag element, with tag html.
#
# As we've seen before, the Doctype element simply indicates that our soup corresponds to an html document (a webpage).
#
# We're particularly interested in the `html` Tag element, which is where the actual HTML content is.
# +
soup_children = list(soup.children)
# inspecting the types of the elements in the soup
[type(item) for item in soup_children]
# -
# To get the `html` tag element from the soup, we can just call it by its name.
soup.html
# We can navigate through the tags contained inside the `html` tag, to get to any element in the page.
#
# Let's check out the title of the page. This is contained in the `title` tag.
#
# We can find it two levels below the `html` tag, inside the `head` tag:
soup.html.head.title
# We can see that this tag has no children tags. Its content is simply a string, which we can get by calling the **get_text** method:
soup.html.head.title.get_text()
# By now, you must be thinking that this is a somewhat complicated process, as it requires manually inspecting the HTML document and navigating through thousands of tags in order to find the interesting content in the middle of a big mess. And you're right!
#
# However, there is an easier way to access the interesting content directly.
#
# First, you need to open the **developer tools** of your browser, in the page you want to scrape.
# These are tools that allow you to look in greater detail at the content of the website and at the processes running in the background.
#
# Usually, you just have to right-click the page and select the "Inspect" option.
# If that's not the case, just Google "How to open developer tools in *\<your browser\>*".
# 
# The developer tools will open at the bottom or on the side of the window. We're only interested in the **Inspector** tool, which allows us to look at the HTML elements that correspond to the different parts of the page.
#
# After clicking on the small arrow (circled in red), you can click on any object in the page with your mouse, and you'll see the correspondent HTML element highlighted in the developer tools window. Similarly, if you hover over the HTML code in the Inspector window, the corresponding part of the page will be highlighted.
# 
# By inspection, we can see that all the information about the actors/actresses is inside an element with tag **div** and **class** `actor-list`. The class of an HTML element can be useful to identify what its content might be.
# 
# We can inspect even further and notice that the `actor-list` div has three children. The children have two classes - `actor-info`and `grid-container` - which seem to indicate that each children contains information for a single actor.
#
# Drilling down a bit more, we notice that the `actor-info` div contains two children, with `div` tags and classes `actor-portrait`/`actor-data`.
#
# Finally inside `actor-data`, we can find two children with `p` tags. These elements don't have a class, but have an **attribute** with name `infotype` and value `actor-name`/`character-name`. Attributes can also be used to identify the content of an element.
# 
# So we have arrived at the character names, which is exactly what we set out to discover!
#
# Let's try to replicate this process using our _beautiful soup_.
# First, call the soup's **find_all** method to find the div element with class `actor-list`(and make sure there's only one in this page).
# Pay attention to the underscore after class (class_) in the function's parameters.
# This is because "class" is a Python keyword.
# find_all returns a list of every matching element; here we expect exactly one.
actor_list = soup.find_all('div', class_="actor-list")
print("Number of elements found: ", len(actor_list))
# Cool! Now let's search for all children that have tag `div`, and the `actor-info` class:
# +
# The search is recursive over all descendants of the actor-list div.
actor_info = actor_list[0].find_all('div', class_='actor-info')

# Checking out how many were found:
print(f"Found {len(actor_info)} actors.\n")

# Checking out one of them
print(actor_info[0].prettify())
# -
# Looks correct!
#
# Now, let's focus on the first actor - <NAME> - and discover the name of its character.
#
# For that, we simply have to look for the `<p>` children elements with attribute **infotype**=**character-name**. Since we're looking for a single children, we can use the `find` method:
#
# Since searching for children is recursive (meaning: it searches for immediate children, then children-of-children, and so on), we don't need to find the `actor-data` div first.
# +
# find() returns the first matching descendant (or None if nothing matches).
character_name = actor_info[0].find('p', infotype='character-name')
character_name
# -
# We can extract the text using the `get_text` method:
character_name.get_text()
# We have found <NAME>'s character in **Getting Away With Murder**, which is <NAME>!
#
# And the best part is that it will only take some minutes to get all the other character and actor names. You're invited to do that as an exercise.
# ## 5. Optional
#
# ### 5.1 Scraping and the Law
#
# [This](https://benbernardblog.com/web-scraping-and-crawling-are-perfectly-legal-right/) is an interesting article about the subject, bottom line being: when scraping web pages, don't use a very high request rate, so that the owners of the website don't get angry.
#
# ### 5.2 Scraping and JavaScript
#
# Sometimes, when scraping web pages, you'll need to navigate from one page to the other, click buttons, or take other actions that enter the JavaScript domain. In such cases, Beautiful Soup is not enough to fill your needs. If you find yourself in this position, take a look at [Selenium](https://www.seleniumhq.org/).
#
# ### 5.3 Website changes
#
# One of the biggest difficulties regarding scraping is that if there are changes to the layout of the website you're trying to scrape, you will inevitably need to rewrite part (or all) of your scraping code. This is why, for learning purposes, we are scraping a website hosted by the LDSA. If you are feeling brave, try scraping the same information from the official IMDB movie page!
|
S02 - Data Wrangling/BLU03 - Data Sources/Learning Notebook - Part 3 of 3 - Web scraping.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Rossman store sales example
# data from https://www.kaggle.com/c/rossmann-store-sales/data
# code from https://github.com/vidyasagarv/predict-sales
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import sys; sys.path.append('predict-sales') # in case jupyter run from directory above
from salesdata import process_data
from quilt.data.examples import prophet as pdata
# Load data
store = pdata.rossman_store_sales.store()
train = pdata.rossman_store_sales.train()
print('training data loaded: {} records for {} stores'.format(len(train), len(store)))

# Feature engineering / cleanup is delegated to the project-local helper.
train = process_data(train, store)
X_train = train.drop(['Sales', 'Customers'], axis = 1)
y_train = train.Sales
print('training data processed: {} records'.format(len(train)))

# Fit random forest model
rf = RandomForestRegressor(n_jobs = -1, n_estimators = 15)
rf.fit(X_train, y_train)
print('model fit')

# +
# Load and process test data
test = pdata.rossman_store_sales.test()
test = process_data(test, store)

# Ensure same columns in test data as training (missing ones are zero-filled)
for col in train.columns:
    if col not in test.columns:
        test[col] = np.zeros(test.shape[0])
# NOTE(review): the test frame's columns are sorted alphabetically here while
# the training frame's were not — confirm process_data produces a consistent
# column order, otherwise predict() sees features in a different order than fit().
test = test.sort_index(axis=1).set_index('Id')
print('test data loaded and processed')
# -

# Make predictions
X_test = test.drop(['Sales', 'Customers'], axis=1).values
y_test = rf.predict(X_test)

# Display the predictions indexed by submission Id
result = pd.DataFrame({'Id': test.index.values, 'Sales': y_test}).set_index('Id')
result = result.sort_index()
result[0:10]
|
predict-sales/predict-store-sales.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Predicting Student Admissions with Neural Networks
# In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
# - GRE Scores (Test)
# - GPA Scores (Grades)
# - Class rank (1-4)
#
# The dataset originally came from here: http://www.ats.ucla.edu/
#
# ## Loading the data
# To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://docs.scipy.org/
# +
# Importing pandas and numpy
import pandas as pd
import numpy as np

# Reading the csv file into a pandas DataFrame
# (columns used below: admit, gre, gpa, rank)
data = pd.read_csv('student_data.csv')

# Printing out the first 10 rows of our data
data[:10]
# -
# ## Plotting the data
#
# First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
# +
# Importing matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# Function to help us plot
def plot_points(data):
    """Scatter GRE vs GPA, colouring admitted students cyan and rejected ones red."""
    features = np.array(data[["gre", "gpa"]])
    labels = np.array(data["admit"])
    admitted = features[np.argwhere(labels == 1)]
    rejected = features[np.argwhere(labels == 0)]
    # np.argwhere yields shape (k, 1) index arrays, hence the s[0] unwrapping.
    plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s=25, color='red', edgecolor='k')
    plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s=25, color='cyan', edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
# -
# Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
# +
# Separating the ranks (`rank` takes the discrete values 1-4)
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]

# Plotting the graphs — one GRE/GPA scatter per rank value
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
# -
# This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
#
# ## TODO: One-hot encoding the rank
# Use the `get_dummies` function in pandas in order to one-hot encode the data.
#
# Hint: To drop a column, it's suggested that you use `one_hot_data`[.drop( )](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html).
# +
# TODO (solved): Make dummy variables for rank and concat existing columns.
# The original placeholder `one_hot_data = pass` was a SyntaxError — `pass`
# is a statement, not an expression — so the cell could never run.
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)

# Drop the previous (now encoded) rank column
one_hot_data = one_hot_data.drop('rank', axis=1)

# Print the first 10 rows of our data
one_hot_data[:10]
# -
# ## TODO: Scaling the data
# The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
# +
# Making a copy of our data
processed_data = one_hot_data[:]

# TODO: Scale the columns
# (per the markdown above: divide 'gre' by 800 and 'gpa' by 4.0 so both lie in [0, 1])

# Printing the first 10 rows of our processed data
processed_data[:10]
# -
# ## Splitting the data into Training and Testing
# In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
# +
# Hold out 10% of the rows as a test set; sample row labels without replacement.
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)

print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
# -
# ## Splitting the data into features and targets (labels)
# Now, as a final step before the training, we'll split the data into features (X) and targets (y).
# +
# Split each set into features (X) and the 'admit' target column (y).
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']

print(features[:10])
print(targets[:10])
# -
# ## Training the 2-layer Neural Network
# The following function trains the 2-layer neural network. First, we'll write some helper functions.
# Activation (sigmoid) function
def sigmoid(x):
    """Logistic activation: map any real input into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_prime(x):
    """Derivative of the sigmoid, computed directly as s * (1 - s)."""
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)
def error_formula(y, output):
    """Binary cross-entropy error for a single prediction `output` against label `y`."""
    return -(y * np.log(output) + (1 - y) * np.log(1 - output))
# # TODO: Backpropagate the error
# Now it's your turn to shine. Write the error term. Remember that this is given by the equation $$ (y-\hat{y}) \sigma'(x) $$
# TODO: Write the error term formula
def error_term_formula(x, y, output):
    # Student exercise: implement (y - y_hat) * sigmoid_prime(x), as given in
    # the markdown cell above. Left as `pass` intentionally; train_nn below
    # will fail with a TypeError (None * x) until this is filled in.
    pass
# +
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5

# Training function
def train_nn(features, targets, epochs, learnrate):
    """Train a single-layer network (logistic regression) with batch gradient descent.

    features: DataFrame of input columns; targets: 0/1 labels aligned with it.
    Returns the learned weight vector (one weight per input feature).
    Depends on the student-implemented `error_term_formula` defined above.
    """

    # Use to same seed to make debugging easier
    np.random.seed(42)

    n_records, n_features = features.shape
    last_loss = None

    # Initialize weights
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)

    for e in range(epochs):
        # Accumulate the weight updates over all records (batch gradient descent).
        del_w = np.zeros(weights.shape)
        for x, y in zip(features.values, targets):
            # Loop through all records, x is the input, y is the target

            # Activation of the output unit
            # Notice we multiply the inputs and the weights here
            # rather than storing h as a separate variable
            output = sigmoid(np.dot(x, weights))

            # The error, the target minus the network output
            # NOTE(review): `error` is computed but never used below — the
            # update relies only on `error_term`.
            error = error_formula(y, output)

            # The error term
            error_term = error_term_formula(x, y, output)

            # The gradient descent step, the error times the gradient times the inputs
            del_w += error_term * x

        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records

        # Printing out the mean square error on the training set
        if e % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean((out - targets) ** 2)
            print("Epoch:", e)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            print("=========")
    print("Finished training!")
    return weights

weights = train_nn(features, targets, epochs, learnrate)
# -
# ## Calculating the Accuracy on the Test Data
# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5  # threshold the sigmoid output at 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
|
intro-neural-networks/student-admissions/StudentAdmissions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import nltk

# Fetch all corpora plus the 'punkt' tokenizer models used below.
nltk.download('all-corpora')
nltk.download('punkt')
# +
from nltk.tokenize import word_tokenize

# Word tokenization: split a sentence into individual tokens.
sentence = "Natural language processing (NLP) is a subfield of computer science, information engineering, and artificial intelligence concerned with the interactions between computers and human (natural) languages, in particular how to program computers to process and analyze large amounts of natural language data."
print(word_tokenize(sentence))
# +
from nltk.tokenize import sent_tokenize

# Sentence tokenization: split a paragraph into sentences.
paragraph = "Natural language processing (NLP) is a subfield of computer science, information engineering, and artificial intelligence concerned with the interactions between computers and human (natural) languages, in particular how to program computers to process and analyze large amounts of natural language data. Challenges in natural language processing frequently involve speech recognition, natural language understanding, and natural language generation."
print(sent_tokenize(paragraph))
# -
|
1.NLP_PREP/1.3.1.1.nltk.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming_Assingment13
# ### Question 1:
# Write a program that calculates and prints the value according to the given formula:
# Q = Square root of [(2 * C * D)/H]
# Following are the fixed values of C and H:
# C is 50. H is 30.
# D is the variable whose values should be input to your program in a comma-separated
# sequence.
# Example
# Let us assume the following comma separated input sequence is given to the program:
# 100,150,180
# The output of the program should be:
# 18,22,24
#
#
# +
# Q = sqrt((2 * C * D) / H) with fixed C=50, H=30. D values are read as a
# comma-separated sequence; the rounded results are printed comma-separated
# (e.g. input 100,150,180 -> output 18,22,24).
import math

numbers = input("Provide D in with comma separated: ")
numbers = numbers.split(',')
result_list = []
# (removed the unused `result_string` variable from the original)
for D in numbers:
    Q = round(math.sqrt(2 * 50 * int(D) / 30))
    result_list.append(str(Q))
print(','.join(result_list))
# -
# ### Question 2:
# Write a program which takes 2 digits, X,Y as input and generates a 2-dimensional array. The
# element value in the i-th row and j-th column of the array should be i*j.
# Note: i=0,1,..,X-1; j=0,1,..,Y-1.
# Example
# Suppose the following inputs are given to the program:
# 3,5
# Then, the output of the program should be:
# [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]
#
#
# +
def createMatrix(n, m):
    """Return an n x m matrix (list of lists) whose (i, j) entry equals i * j."""
    print("Enter the element :")  # kept for behavioural parity with the original
    matrix = [[i * j for j in range(m)] for i in range(n)]
    return matrix
# Read the matrix dimensions and build the matrix (the bare expression on the
# last line renders the result in the notebook output).
x = int(input("Enter x : "))
y = int(input("enter y : "))
createMatrix(x,y)
# -
# ### Question 3:
# Write a program that accepts a comma separated sequence of words as input and prints the
# words in a comma-separated sequence after sorting them alphabetically.
# Suppose the following input is supplied to the program:
# without,hello,bag,world
# Then, the output should be:
# bag,hello,without,world
#
#
# Read comma-separated words, sort them alphabetically in place, and print
# them re-joined with commas.
items=[x for x in input('Enter comma seperated words ').split(',')]
items.sort()
print(','.join(items))
# ### Question 4:
# Write a program that accepts a sequence of whitespace separated words as input and prints
# the words after removing all duplicate words and sorting them alphanumerically.
# Suppose the following input is supplied to the program:
# hello world and practice makes perfect and hello world again
# Then, the output should be:
# again and hello makes perfect practice world
#
#
# Read whitespace-separated words, de-duplicate them, sort alphanumerically,
# and print them space-separated.
items = input('Enter space sepeated words ').split(' ')
# set() removes duplicates; sorted() accepts any iterable directly, so the
# identity comprehension and the extra list() wrapper were unnecessary.
print(' '.join(sorted(set(items))))
# ### Question 5:
# Write a program that accepts a sentence and calculate the number of letters and digits.
# Suppose the following input is supplied to the program:
# hello world! 123
# Then, the output should be:
# LETTERS 10
# DIGITS 3
#
#
# Count the letters and digits in an input sentence.
s = input("Input a string : ")
digits = letters = 0
for c in s:
    if c.isdigit():
        digits += 1
    elif c.isalpha():
        letters += 1
    # any other character (spaces, punctuation) is ignored;
    # the original's dead `else: pass` branch has been removed
print("Letters", letters)
print("Digits", digits)
# ### Question 6:
# A website requires the users to input username and password to register. Write a program to
# check the validity of password input by users.
# Following are the criteria for checking the password:
# 1. At least 1 letter between [a-z]
# 2. At least 1 number between [0-9]
# 1. At least 1 letter between [A-Z]
# 3. At least 1 character from [$#@]
# 4. Minimum length of transaction password: 6
# 5. Maximum length of transaction password: 12
# Your program should accept a sequence of comma separated passwords and will check them
# according to the above criteria. Passwords that match the criteria are to be printed, each
# separated by a comma.
# Example
# If the following passwords are given as input to the program:
# ABd1234@1,a F1#,2w3E*,2We3345
# Then, the output of the program should be:
# ABd1234@1
# +
# Validate a password against the assignment's criteria: length 6-12, at least
# one lowercase letter, one digit, one uppercase letter, one of [$#@], and no
# whitespace. NOTE(review): the problem statement asks for a comma-separated
# list of passwords, but this implementation validates a single one.
import re

password = input("Enter your password : ")
x = True
while x:
    if (len(password) < 6 or len(password) > 12):
        break
    elif not re.search("[a-z]", password):
        break
    elif not re.search("[0-9]", password):
        break
    elif not re.search("[A-Z]", password):
        break
    elif not re.search("[$#@]", password):
        break
    elif re.search(r"\s", password):  # raw string: "\s" is an invalid escape sequence
        break
    else:
        print("Valid Password")
        x = False
        break
if x:
    print("Not a Valid Password")
|
Programming_Assingment13.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview of `eolearn.core`
#
# `eolearn.core` is the main subpackage which implements basic building blocks (`EOPatch`, `EOTask` and `EOWorkflow`) and commonly used functionalities.
# ## EOPatch
# EOPatch is common data-object that contains contains multi-temporal remotely sensed data of a single patch (area) of Earth’s surface typically defined by a bounding box in specific coordinate reference system.
#
# There’s no limitation on the amount of data, or the type of data that can be stored. But typically, all of the information is internally stored in form of NumPy arrays as the following features:
#
# - DATA with shape `t x n x m x d`: Time- and position-dependent remote sensing data (e.g. bands) of float type.
# - MASK with shape `t x n x m x d`: Time- and position-dependent mask (e.g. ground truth, cloud/shadow mask, super pixel identifier) of integer or boolean type.
# - SCALAR with shape `t x d`: Time-dependent and position-independent remote sensing data (e.g. weather data,) of float type.
# - LABEL with shape `t x d`: Time-dependent and position-independent label (e.g. ground truth) of integer or boolean type.
# - VECTOR: A collection of time-dependent geometry objects stored as a `geopandas.GeoDataFrame` with geometry and `TIMESTAMP` columns.
# - DATA_TIMELESS with shape `n x m x d`: Time-independent and position-dependent remote sensing data (e.g. elevation model) of float type.
# - MASK_TIMELESS with shape `n x m x d`: Time-independent and position-dependent mask (e.g. ground truth, region of interest mask) of integer or boolean type.
# - SCALAR_TIMELESS with shape `d`: Time-independent and position-independent remote sensing data of float type.
# - LABEL_TIMELESS with shape `d`: Time-independent and position-independent label of integer or boolean type.
# - VECTOR_TIMELESS: A collection of time-dependent geometry objects stored as a `geopandas.GeoDataFrame` with geometry column.
# - META_INFO: A dictionary of additional metadata information (e.g. resolution, time difference).
# - BBOX: A bounding box of the patch which is an instance of `sentinelhub.BBox`. It holds information about coordinates and CRS.
# - TIMESTAMP: A list of dates of size `t` which are instances of `datetime.datetime` or `datetime.date`.
#
# Note: `t` specifies time component, `n` and `m` are spatical components (number of rows and columns), and `d` is an additional component for data with multiple channels.
# Create an empty patch
# +
from eolearn.core import EOPatch
patch = EOPatch()
# -
# Set a feature to EOPatch. Each feature has to belong to one of the feature types listed above.
# +
import numpy as np
from eolearn.core import FeatureType
new_bands = np.zeros((5, 10, 10, 13), dtype=np.float32)
patch[FeatureType.DATA]['bands'] = new_bands
# or patch.data['bands'] = new_bands
# -
# Check current content of `EOPatch` with it's string representation.
patch
# Get all non-empty features of EOPatch
patch.get_features()
# Get a feature from EOPatch
data = patch[FeatureType.DATA]['bands']
# or patch.data['bands']
# Save EOPatch to local folder. In case `EOPatch` would already exist in the specified location we are also giving a permission to overwrite its features.
# +
from eolearn.core import OverwritePermission
patch.save('./example_patch', overwrite_permission=OverwritePermission.OVERWRITE_FEATURES)
# -
# Load EOPatch from the same folder
patch2 = EOPatch.load('./example_patch')
# Compare EOPatches
patch == patch2
# Remove a feature from EOPatch
del patch2[FeatureType.DATA]['bands']
# or del patch.data['bands']
# Make a shallow and deep copy of EOPatch. Shallow copy will copy only a reference to data but not the data itself.
# +
patch1 = patch.__copy__()
patch2 = patch.__deepcopy__()
patch.data['bands'] += 1
patch == patch1, patch == patch2
# -
# Concatenate two EOPatches
# +
patch2[FeatureType.DATA]['bands2'] = new_bands
patch + patch2
# or EOPatch.concatenate(patch, patch2)
# -
# ## EOTask
#
# An EO task is any class the inherits from the abstract `EOTask` class. Each EO task has to implement the execute method; invoking __call__ on a EO task instance invokes the execute method. EO tasks are meant primarily to operate on EO patches (i.e. instances of EOPatch).
# Add a feature using the EOTask
# +
from eolearn.core import AddFeature # AddFeature is a simple EOTask which adds a feature to a given EOPatch
patch = EOPatch()
feature = (FeatureType.DATA, 'bands')
add_feature = AddFeature(feature)
data = np.zeros((5, 100, 100, 13))
patch = add_feature.execute(patch, data)
# or patch = add_feature(patch, data)
patch
# -
# Create a composite task using a multiplication operator (`a * b`) function
# +
from eolearn.core import CopyTask, RenameFeature
copy_task = CopyTask()
rename_feature = RenameFeature((FeatureType.DATA, 'bands', 'the_bands'))
copy_rename_task = rename_feature * copy_task
new_patch = copy_rename_task(patch)
new_patch
# -
# If a task doesn’t exist yet, the user can implement it and easily include it into his/hers workflow. There is very little or almost no overhead in the implementation of a new EOTask as seen from this minimal example
# +
from eolearn.core import EOTask
class FooTask(EOTask):
    """Minimal custom-EOTask skeleton: static parameters go in __init__,
    the per-patch work goes in execute()."""

    def __init__(self, foo_param):
        # Static argument: fixed once, when the task is constructed.
        self.foo_param = foo_param

    def execute(self, eopatch, *, patch_specific_param=None):
        # Dynamic, keyword-only argument: may differ per workflow execution.
        # do what foo does on input eopatch and return it
        return eopatch
# -
# EOTask’s arguments are either static (set when EOTask is initialized; i.e.e foo_param above) or dynamic (set during the execution of the workflow; i.e. patch_specific_param above).
# The list of all EOTasks in the `eolearn.core` subpackage is available here https://eo-learn.readthedocs.io/en/latest/eotasks.html#core
# ## EOWorkflow
#
# A workflow is a directed (acyclic) graph composed of instances of EOTask objects. Each task may take as input the results of other tasks and external arguments. The external arguments are passed anew each time the workflow is executed. The workflow builds the computational graph, performs dependency resolution, and executes the tasks. If the input graph is cyclic, the workflow raises a CyclicDependencyError.
#
# The result of a workflow execution is an immutable mapping from tasks to results. The result contains tasks with zero out-degree (i.e. terminal tasks).
# Create a workflow
# +
from eolearn.core import EOWorkflow, Dependency

# Build a three-node linear graph: add_feature -> copy_task -> rename_feature.
workflow = EOWorkflow([
    Dependency(add_feature, inputs=[]),
    Dependency(copy_task, inputs=[add_feature]),
    Dependency(rename_feature, inputs=[copy_task])
])
# Instead of Dependency class also just a tuple can be used

# External (per-run) arguments are supplied at execution time, keyed by task.
result = workflow.execute({
    add_feature: {'eopatch': patch,
                  'data': new_bands}
})
result
# -
# Display the dependency graph
# +
# %matplotlib inline
workflow.dependency_graph('graph.png')
# -
# For a linear workflow such as previous one you can also use `LinearWorkflow` class
# +
from eolearn.core import LinearWorkflow
workflow = LinearWorkflow(add_feature, copy_task, rename_feature)
result = workflow.execute({
add_feature: {'eopatch': patch,
'data': new_bands}
})
workflow.dependency_graph('graph.png')
# -
# ## EOExecutor
#
# `EOExecutor` handles execution and monitoring of workflows. It enables executing a workflow multiple times and in parallel. It monitors execution times and handles any error that might occur in the process. At the end it generates a report which contains summary of the workflow and process of execution.
#
# Execute previously defined workflow with different arguments
# +
from eolearn.core import EOExecutor

# Each dictionary supplies the external task arguments for one run of the
# workflow: the same patch is processed with three different data inputs.
execution_args = [ # EOWorkflow will be executed for each of these 3 dictionaries:
    {add_feature: {'eopatch': patch,
                   'data': new_bands}},
    {add_feature: {'eopatch': patch,
                   'data': new_bands - 1}},
    {add_feature: {'eopatch': patch,
                   'data': new_bands * 10}},
]

executor = EOExecutor(workflow, execution_args, save_logs=True, logs_folder='.')

executor.run(workers=3)  # The execution will use at most 3 parallel processes
# -
# Make the report
# +
# %matplotlib
executor.make_report()
print('Report was saved to location: {}'.format(executor.get_report_filename()))
|
examples/core/CoreOverview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import subprocess
import os
import pandas as pd
import numpy as np
import requests
from datetime import datetime
from bs4 import BeautifulSoup
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
# +
mpl.rcParams['figure.figsize'] = [16, 9]
pd.set_option('display.max_rows', 500)
sns.set(style="darkgrid")
# -
# 
# # DATA UNDERSTANDING
# RKI, webscrape (webscraping) https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
#
# <NAME> (GITHUB) https://github.com/CSSEGISandData/COVID-19.git
#
# REST API services to retrieve data https://npgeo-corona-npgeo-de.hub.arcgis.com/
# # Git cloning
data_path= '../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw=pd.read_csv(data_path)
pd_raw.tail()
# # WEB SCRAPING
# Scrape the RKI daily case table for the German federal states.
page = requests.get("https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html")

soup = BeautifulSoup(page.content, 'html.parser')

# The page contains a single summary table; grab it and walk its rows.
html_table = soup.find('table')

all_rows = html_table.find_all('tr')

final_data_list = []
for rows in all_rows:  # the unused enumerate() index from the original is removed
    # One <td> cell per column; strip surrounding whitespace.
    col_list = [each_col.get_text(strip=True) for each_col in rows.find_all('td')]
    final_data_list.append(col_list)

# Name the positional columns; fixed the original 'chnages' typo -> 'changes'.
pd_daily_status = pd.DataFrame(final_data_list).dropna().rename(columns={0: 'state',
                                                                         1: 'cases',
                                                                         2: 'changes',
                                                                         3: 'cases_per_100k',
                                                                         4: 'fatal',
                                                                         5: 'comment'})
pd_daily_status.head()
# # REST API
# Query the NPGEO ArcGIS REST endpoint for German federal-state case records.
data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
import json
json_object=json.loads(data.content)
type(json_object)
json_object.keys()
full_list=[]
# Each feature's 'attributes' dict holds the per-state values.
for pos,each_dict in enumerate (json_object['features'][:]):
    full_list.append(each_dict['attributes'])
pd.DataFrame(full_list)
|
notebooks/step1_Data understanding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question and Answer
import logging
import json
from simpletransformers.question_answering import QuestionAnsweringModel, QuestionAnsweringArgs
import os
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
file_dev = "/dataset/tydiqa/tydiqa-goldp-v1.1-dev.json"
file_train = "/dataset/tydiqa/tydiqa-goldp-v1.1-train.json"
# +
with open(file_dev) as f:
data_dev = json.load(f)
with open(file_train) as f:
data_train = json.load(f)
# -
"""
counter = 0
for i in range(len(data_dev["data"])):
#for i in range(5):
if counter > 2:
break
if 'indonesia' in data_dev["data"][i]['paragraphs'][0]['qas'][0]['id']:
counter += 1
print(data_dev["data"][i]['paragraphs'][0]['qas'][0]['id'])
print(data_dev["data"][i])
"""
# Keep only the Indonesian-language articles (question ids contain
# 'indonesia'), flattening each article's paragraphs into a single list.
data_dev_id = []
for i in range(len(data_dev["data"])):
    if 'indonesia' in data_dev["data"][i]['paragraphs'][0]['qas'][0]['id']:
        for paragraph in data_dev["data"][i]['paragraphs']:
            data_dev_id.append(paragraph)

data_train_id = []
for i in range(len(data_train["data"])):
    if 'indonesia' in data_train["data"][i]['paragraphs'][0]['qas'][0]['id']:
        for paragraph in data_train["data"][i]['paragraphs']:
            data_train_id.append(paragraph)

# Sanity checks: dataset sizes and a peek at the first entries.
len(data_dev_id), len(data_train_id)

data_dev_id[:2]

data_train_id[:2]
# Configure the model
model_args = QuestionAnsweringArgs()
model_args.train_batch_size = 16
model_args.evaluate_during_training = True  # evaluate on the dev set during training
model_args.output_dir = '/output/qa/bert-base-indonesian'
model_args.best_model_dir = '/output/qa/bert-base-indonesian/best_model'
model_args.fp16 = False
model_args.num_train_epochs = 5
model_args.overwrite_output_dir = True
model_args.do_lower_case = True  # assumes the checkpoint is uncased — confirm for this model
model_args
# +
model = QuestionAnsweringModel(
"bert", "cahya/bert-base-indonesian-522M", args=model_args
#"roberta", "cahya/roberta-base-indonesian-522M", args=model_args,
#"xlmroberta", "xlm-roberta-base", args=model_args,
)
# -
# Train the model
model.train_model(data_train_id, eval_data=data_dev_id)
# Evaluate the model
result, texts = model.eval_model(data_dev_id)
result
texts
# +
# Make predictions with the model
to_predict = [
[
{
"context": "Presiden Rorowilis adalah Presiden keseratus Indonesia. Beliau wafat dini hari tanggal 1 Agustus 2100. Ketika itu beliau berusia 100 tahun. Presiden Rorowilis dimakamkan di Taman Kopo, Cirebon.",
"qas": [
{
"question": "Kapan Presiden Rorowilis meninggal dunia?",
"id": "0",
},
{
"question": "dimana Presiden Rorowilis dikuburkan?",
"id": "1",
},
{
"question": "Siapakah Presiden Rorowilis?",
"id": "2",
},
{
"question": "Berapa umur Presiden Rorowilis ketika meninggal dunia?",
"id": "3",
}
],
}
],
[
{
"context": "Mikrohidro atau yang dimaksud dengan Pembangkit Listrik Tenaga Mikrohidro (PLTMH), adalah suatu pembangkit listrik skala kecil yang menggunakan tenaga air sebagai tenaga penggeraknya seperti, saluran irigasi, sungai atau air terjun alam dengan cara memanfaatkan tinggi terjunan dan jumlah debit air. Mikrohidro merupakan sebuah istilah yang terdiri dari kata mikro yang berarti kecil dan hidro yang berarti air. Secara teknis, mikrohidro memiliki tiga komponen utama yaitu air, turbin dan generator. Mikrohidro mendapatkan energi dari aliran air yang memiliki perbedaan ketinggian tertentu.",
"qas": [
{
"question": "Apa Mikrohidro itu?",
"id": "0",
},
{
"question": "apa komponen mikrohidro?",
"id": "1",
},
{
"question": "dimana Mikrohidro mendapatkan energi?",
"id": "2",
}
],
}
]
]
# -
# Run QA inference on the chosen context and print each question/answer pair.
context_id = 1
answers, probabilities = model.predict(to_predict[context_id])
print("Context: {}\n".format(to_predict[context_id][0]['context']))
# NOTE(review): the loop variable `id` shadows the builtin of the same name.
for id in range(len(to_predict[context_id][0]['qas'])):
    print("Q: {}".format(to_predict[context_id][0]['qas'][id]['question']))
    # the answer list is ranked; index [0] is the top prediction
    print("A: {}\n".format(answers[id]['answer'][0]))
|
Transformers/Tasks/question-answering-simple-training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ElasticNet Regression with scikit-learn
# This notebook creates and measures an ElasticNet regression model using sklearn.
#
# * Method: ElasticNet Regression
# * Dataset: Big Mart dataset
# ## Imports
# +
import numpy as np
import pandas as pd
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Load the Data
data = pd.read_csv('/home/students/data/bigmart/big_mart_train.csv')
data.dtypes
data.head()
data.describe(include='all')
# ## Data Preprocessing
# Handle missing values.
# FIX: assign the result instead of calling fillna(..., inplace=True) on a
# column selection -- that is chained assignment, which is a silent no-op
# under pandas Copy-on-Write (the default from pandas 3.0).
data['Item_Weight'] = data['Item_Weight'].fillna(data['Item_Weight'].mean())
# Zero visibility is treated as missing and replaced with the column mean.
data['Item_Visibility'] = data['Item_Visibility'].replace(0, np.mean(data['Item_Visibility']))
# Convert the establishment year into an age (years of operation as of 2013).
data['Outlet_Establishment_Year'] = 2013 - data['Outlet_Establishment_Year']
data['Outlet_Size'] = data['Outlet_Size'].fillna('Small')
# Create dummy variables to convert categorical data into numeric values
object_cols = list(data.select_dtypes(include=['object']).columns)
dummies = pd.get_dummies(data[object_cols], prefix=object_cols)
data.drop(object_cols, axis=1, inplace=True)
X = pd.concat([data, dummies], axis=1)
X.head()
X.describe()
# ## Fit an ElasticNet Regression Model
# Splitting into training and cv for cross validation
X = X.drop('Item_Outlet_Sales',1)
X_train, X_test, Y_train, Y_test = \
train_test_split(X, data.Item_Outlet_Sales, test_size=0.3, random_state=42)
# +
'''
Create an instance of a ElasticNet Regression model
alpha = a + b
l1_ratio = a / (a+b)
a and b are the weights assigned to L1 and L2 term respectively
If l1_ratio = 1 then we have Lasso
If l1_ratio = 0 then we have Ridge
If l1_ratio is between 0 and 1 we have a combination
'''
# FIX: `normalize=False` was the default value and the parameter was removed
# in scikit-learn 1.2, so it is simply omitted (behaviour is unchanged).
model = ElasticNet(alpha=1, l1_ratio=0.5)
model.fit(X_train, Y_train)
# -
# **Intercept Coefficient**: represents the mean change in the response variable for one unit of change in the predictor variable while holding everything else constant. It isolates the role of one variable from all others.
# Print the intercept coefficient
print('Estimated intercept coefficient: {}'.format(model.intercept_))
# Create a dataframe with the features and coefficients
fc_df = pd.DataFrame(list(zip(X.columns, model.coef_)), columns=['features', 'coefficients'])
fc_df.head()
# ## Predict a Price
y_pred = model.predict(X_test)
# Create a plot to compare actual sales (Y_test) and the predicted sales (pred_test)
fig = plt.figure(figsize=(20,10))
plt.scatter(Y_test, y_pred)
plt.xlabel("Actual Sales: $Y_i$")
# FIX: raw strings -- "\h" is an invalid escape sequence (SyntaxWarning on
# Python 3.12+); the rendered label text is byte-for-byte unchanged.
plt.ylabel(r"Predicted Sales: $\hat{Y}_i$")
plt.title(r"Actual vs. Predicted Sales: $Y_i$ vs. $\hat{Y}_i$")
plt.show()
# ## Model Evaluation
# ### Mean Squared Error
# Get the Mean Squared Error (MSE) for all predictions
# Comparing training MSE against test MSE reveals over/under-fitting.
mse = mean_squared_error(Y_train, model.predict(X_train))
print("MSE Training Data: {}".format(mse))
# Get the MSE for the test data
print("MSE Test Data: {}".format(mean_squared_error(Y_test, model.predict(X_test))))
# ### Variance (R^2) Score
#
# * Explains how much of the variability of a factor can be caused or explained by its relationship to another factor; how well the model is predicting.
# * A score of 1 means a perfect prediction
# * A score of 0 means the model always predicts the expected value of y, disregarding the input features
print("Variance Score: %.2f" % r2_score(Y_test, y_pred))
# ## Residual Plot
# **Residuals**: the difference between the predictions and the actuals.
#
#
# **Interpretation**: If the model is working well then the data should be randomly scattered around line zero. If there is structure in the data, that means the model is not capturing something, perhaps interaction between two variables or it's time dependent. Check the parameters of your model.
# Create a residual plot
fig = plt.figure(figsize=(20,10))
# Residual = prediction - actual; training data in blue, test data in green.
plt.scatter(model.predict(X_train), model.predict(X_train) - Y_train, c='b', s=40, alpha=0.5)
plt.scatter(model.predict(X_test), model.predict(X_test) - Y_test, c='g', s=40)
# NOTE(review): xmax=50 looks small for sales predictions that reach into the
# thousands -- confirm the intended x-range for the zero reference line.
plt.hlines(y=0, xmin=0, xmax=50)
plt.ylabel("Residuals")
plt.title("Residual Plot Using Training (Blue) and Test (Green) Data")
plt.show()
# **Interpretation**
#
# The funnel shape indicates Heteroskedasticity. The variance of error terms(residuals) is not constant. Generally, non-constant variance arises in the presence of outliers or extreme leverage values. These values get too much weight, thereby disproportionately influencing the model’s performance.
#
# This indicates signs of non linearity in the data which has not been captured by the model.
# ## Different Alpha
# ## Fit a New Model
# Second model with a smaller alpha (weaker overall regularisation).
# FIX: `normalize=False` was the default value and the parameter was removed
# in scikit-learn 1.2, so it is simply omitted (behaviour is unchanged).
model_2 = ElasticNet(alpha=.5, l1_ratio=0.5)
model_2.fit(X_train, Y_train)
print('Estimated intercept coefficient: {}'.format(model_2.intercept_))
# ### Predict a Price
y2_pred = model_2.predict(X_test)
# Create a plot to compare actual sales (Y_test) and the predicted sales (pred_test)
fig = plt.figure(figsize=(20,10))
plt.scatter(Y_test, y2_pred)
plt.xlabel("Actual Sales: $Y_i$")
# FIX: raw strings -- "\h" is an invalid escape sequence (SyntaxWarning on
# Python 3.12+); the rendered label text is byte-for-byte unchanged.
plt.ylabel(r"Predicted Sales: $\hat{Y}_i$")
plt.title(r"Actual vs. Predicted Sales: $Y_i$ vs. $\hat{Y}_i$")
plt.show()
# ## Model Evaluation
# ### Mean Squared Error
# Get the Mean Squared Error (MSE) for all predictions
# Comparing training MSE against test MSE reveals over/under-fitting.
mse = mean_squared_error(Y_train, model_2.predict(X_train))
print("MSE Training Data: {}".format(mse))
# Get the MSE for the test data
print("MSE Test Data: {}".format(mean_squared_error(Y_test, model_2.predict(X_test))))
# ### Variance (R^2) Score
#
# * Explains how much of the variability of a factor can be caused or explained by its relationship to another factor; how well the model is predicting.
# * A score of 1 means a perfect prediction
# * A score of 0 means the model always predicts the expected value of y, disregarding the input features
print("Variance Score: %.2f" % r2_score(Y_test, y2_pred))
# ## Residual Plot
# **Residuals**: the difference between the predictions and the actuals.
#
#
# **Interpretation**: If the model is working well then the data should be randomly scattered around line zero. If there is structure in the data, that means the model is not capturing something, perhaps interaction between two variables or it's time dependent. Check the parameters of your model.
# Create a residual plot
fig = plt.figure(figsize=(20,10))
# Residual = prediction - actual; training data in blue, test data in green.
plt.scatter(model_2.predict(X_train), model_2.predict(X_train) - Y_train, c='b', s=40, alpha=0.5)
plt.scatter(model_2.predict(X_test), model_2.predict(X_test) - Y_test, c='g', s=40)
# NOTE(review): xmax=50 looks small for sales predictions that reach into the
# thousands -- confirm the intended x-range for the zero reference line.
plt.hlines(y=0, xmin=0, xmax=50)
plt.ylabel("Residuals")
plt.title("Residual Plot Using Training (Blue) and Test (Green) Data")
plt.show()
|
code/day_5/7 - ElasticNet Regression with Sklearn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AWS Elastic Kubernetes Service (EKS) Deep MNIST
# In this example we will deploy a tensorflow MNIST model in Amazon Web Services' Elastic Kubernetes Service (EKS).
#
# This tutorial will break down in the following sections:
#
# 1) Train a tensorflow model to predict mnist locally
#
# 2) Containerise the tensorflow model with our docker utility
#
# 3) Send some data to the docker model to test it
#
# 4) Install and configure AWS tools to interact with AWS
#
# 5) Use the AWS tools to create and setup EKS cluster with Seldon
#
# 6) Push and run docker image through the AWS Container Registry
#
# 7) Test our Elastic Kubernetes deployment by sending some data
#
# #### Let's get started! 🚀🔥
#
# ## Dependencies:
#
# * Helm v3.0.0+
# * A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM)
# * kubectl v1.14+
# * EKS CLI v0.1.32
# * AWS Cli v1.16.163
# * Python 3.6+
# * Python DEV requirements
#
# ## 1) Train a tensorflow model to predict mnist locally
# We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels
# +
# Load MNIST (28x28 grayscale digits); labels are one-hot encoded vectors.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
import tensorflow as tf
if __name__ == '__main__':
    # Single-layer softmax classifier built in TensorFlow 1.x graph mode.
    # NOTE(review): uses TF1-only APIs (placeholder, Session,
    # initialize_all_variables) and will not run under TensorFlow 2.x.
    x = tf.placeholder(tf.float32, [None,784], name="x")
    W = tf.Variable(tf.zeros([784,10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x,W) + b, name="y")
    y_ = tf.placeholder(tf.float32, [None, 10])
    # Cross-entropy loss averaged over the batch.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    init = tf.initialize_all_variables()  # deprecated alias of global_variables_initializer
    sess = tf.Session()
    sess.run(init)
    # 1000 SGD steps on mini-batches of 100 images.
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    # Evaluate accuracy on the held-out test split.
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels}))
    # Persist the trained weights for later serving.
    saver = tf.train.Saver()
    saver.save(sess, "model/deep_mnist_model")
# -
# ## 2) Containerise the tensorflow model with our docker utility
# First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content:
# !cat .s2i/environment
# Now we can build a docker image named "deep-mnist" with the tag 0.1
# !s2i build . seldonio/seldon-core-s2i-python36:1.2.3-dev deep-mnist:0.1
# ## 3) Send some data to the docker model to test it
# We first run the docker image we just created as a container called "mnist_predictor"
# !docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1
# Send some random features that conform to the contract
import matplotlib.pyplot as plt
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
# +
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# We now test the REST endpoint expecting the same result
endpoint = "0.0.0.0:5000"  # the locally running docker container
batch = x  # the single MNIST image selected in the previous cell
payload_type = "ndarray"
sc = SeldonClient(microservice_endpoint=endpoint)
# We use the microservice, instead of the "predict" function
client_prediction = sc.microservice(
    data=batch,
    method="predict",
    payload_type=payload_type,
    names=["tfidf"])
# Unpack the protobuf response: the first ndarray row holds the 10 per-class
# probabilities, printed next to their digit labels 0..9.
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
    print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
# -
# !docker rm mnist_predictor --force
# ## 4) Install and configure AWS tools to interact with AWS
# First we install the awscli
# !pip install awscli --upgrade --user
# #### Configure aws so it can talk to your server
# (if you are getting issues, make sure you have the permmissions to create clusters)
# + language="bash"
# # You must make sure that the access key and secret are changed
# aws configure << END_OF_INPUTS
# YOUR_ACCESS_KEY
# YOUR_ACCESS_SECRET
# us-west-2
# json
# END_OF_INPUTS
# -
# #### Install EKCTL
# *IMPORTANT*: These instructions are for linux
# Please follow the official installation of ekctl at: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html
# !curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz" | tar xz
# !chmod 755 ./eksctl
# !./eksctl version
# ## 5) Use the AWS tools to create and setup EKS cluster with Seldon
# In this example we will create a cluster with 2 nodes, with a minimum of 1 and a max of 3. You can tweak this accordingly.
#
# If you want to check the status of the deployment you can go to AWS CloudFormation or to the EKS dashboard.
#
# It will take 10-15 minutes (so feel free to go grab a ☕).
#
# ### IMPORTANT: If you get errors in this step...
# It is most probably IAM role access requirements, which requires you to discuss with your administrator.
# + language="bash"
# ./eksctl create cluster \
# --name demo-eks-cluster \
# --region us-west-2 \
# --nodes 2
# -
# ### Configure local kubectl
# We want to now configure our local Kubectl so we can actually reach the cluster we've just created
# !aws eks --region us-west-2 update-kubeconfig --name demo-eks-cluster
# And we can check if the context has been added to kubectl config (contexts are basically the different k8s cluster connections)
# You should be able to see the context as "...aws:eks:eu-west-1:27...".
# If it's not activated you can activate that context with kubectlt config set-context <CONTEXT_NAME>
# !kubectl config get-contexts
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).
# ## Push docker image
# In order for the EKS seldon deployment to access the image we just built, we need to push it to the Elastic Container Registry (ECR).
#
# If you have any issues please follow the official AWS documentation: https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-basics.html
# ### First we create a registry
# You can run the following command, and then see the result at https://us-west-2.console.aws.amazon.com/ecr/repositories?#
# !aws ecr create-repository --repository-name seldon-repository --region us-west-2
# ### Now prepare docker image
# We need to first tag the docker image before we can push it
# + language="bash"
# export AWS_ACCOUNT_ID=""
# export AWS_REGION="us-west-2"
# if [ -z "$AWS_ACCOUNT_ID" ]; then
# echo "ERROR: Please provide a value for the AWS variables"
# exit 1
# fi
#
# docker tag deep-mnist:0.1 "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository"
# -
# ### We now login to aws through docker so we can access the repository
# !`aws ecr get-login --no-include-email --region us-west-2`
# ### And push the image
# Make sure you add your AWS Account ID
# + language="bash"
# export AWS_ACCOUNT_ID=""
# export AWS_REGION="us-west-2"
# if [ -z "$AWS_ACCOUNT_ID" ]; then
# echo "ERROR: Please provide a value for the AWS variables"
# exit 1
# fi
#
# docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository"
# -
# ## Running the Model
# We will now run the model.
#
# Let's first have a look at the file we'll be using to trigger the model:
# !cat deep_mnist.json
# Now let's trigger seldon to run the model.
#
# We basically have a yaml file, where we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" for the image you pushed
# + language="bash"
# export AWS_ACCOUNT_ID=""
# export AWS_REGION="us-west-2"
# if [ -z "$AWS_ACCOUNT_ID" ]; then
# echo "ERROR: Please provide a value for the AWS variables"
# exit 1
# fi
#
# sed 's|REPLACE_FOR_IMAGE_AND_TAG|'"$AWS_ACCOUNT_ID"'.dkr.ecr.'"$AWS_REGION"'.amazonaws.com/seldon-repository|g' deep_mnist.json | kubectl apply -f -
# -
# And let's check that it's been created.
#
# You should see an image called "deep-mnist-single-model...".
#
# We'll wait until STATUS changes from "ContainerCreating" to "Running"
# !kubectl get pods
# ## Test the model
# Now we can test the model, let's first find out what is the URL that we'll have to use:
# !kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
# We'll use a random example from our dataset
import matplotlib.pyplot as plt
# This is the variable that was initialised at the beginning of the file
i = [0]  # list index keeps the leading batch dimension when slicing
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
# Dot product of [0..9] with the one-hot vector recovers the integer label.
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
# We can now add the URL above to send our request:
# +
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# Host is the Ambassador LoadBalancer hostname printed by the kubectl
# command above; update it for your own cluster.
host = "a68bbac487ca611e988060247f81f4c1-707754258.us-west-2.elb.amazonaws.com"
port = "80" # Make sure you use the port above
batch = x  # the single MNIST image selected above
payload_type = "ndarray"
# Client pointed at the in-cluster Seldon deployment via Ambassador.
sc = SeldonClient(
    gateway="ambassador",
    ambassador_endpoint=host + ":" + port,
    namespace="default",
    oauth_key="oauth-key",
    oauth_secret="oauth-secret")
client_prediction = sc.predict(
    data=batch,
    deployment_name="deep-mnist",
    names=["text"],
    payload_type=payload_type)
print(client_prediction)
# -
# ### Let's visualise the probability for each label
# It seems that it correctly predicted the number 7
# Iterate the 10 class probabilities from the protobuf response alongside
# their digit labels 0..9 and print each as a percentage.
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
    print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
|
examples/models/aws_eks_deep_mnist/aws_eks_deep_mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 48054, "status": "ok", "timestamp": 1573636583219, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="2mU_ZLUH5jz4" outputId="c4cbc445-56dd-49df-de87-c34e18da5f6c"
import tensorflow as tf
import glob
import nibabel as nib
import os
import time
import pandas as pd
import numpy as np
from mricode.utils import log_textfile
from mricode.utils import copy_colab
tf.__version__
# -
tf.test.is_gpu_available()
# + colab={} colab_type="code" id="nH4XzW8C5yhH"
# Paths and hyper-parameters for the MRI intelligence-prediction run.
path_output = './'
path_tfrecords = '/data2/res64/down/'  # downsampled T1/T2 TFRecords
path_csv = '/data2/csv/'               # label CSVs (see filename_* below)
#path_csv = '/content/drive/My Drive/Capstone/05_Data/01_Label/'
# Three label-file variants: residualised scores, raw scores, fluid only.
filename_res = {'train': 'intell_residual_train.csv', 'val': 'intell_residual_valid.csv', 'test': 'intell_residual_test.csv'}
filename_norm = {'train': 'intell_train.csv', 'val': 'intell_valid.csv', 'test': 'intell_test.csv'}
filename_fluid = {'train': 'training_fluid_intelligence_sri.csv', 'val': 'validation_fluid_intelligence_sri.csv', 'test': 'test_fluid_intelligence_sri.csv'}
filename_final = filename_res  # this run uses the residualised labels
sample_size = 'site16_allimages'  # identifier embedded in TFRecord names
batch_size = 8
onlyt1 = False  # False -> use both T1 and T2 channels
# Per-channel normalisation statistics (precomputed by the disabled cell
# further down; hard-coded here so the pass does not have to be re-run).
t1_mean = 0.35196779465675354
t2_mean = 0.5694633522033692
t1_std = 0.8948413240464094
t2_std = 1.2991791534423829
# + colab={} colab_type="code" id="ZzpJsO5Rx_LM"
# + colab={} colab_type="code" id="96CI6bJ26JIo"
def return_iter(path, sample_size, batch_size=8, onlyt1=False):
    """Build train/val/test tf.data pipelines over the T1/T2 TFRecord files.

    Parameters
    ----------
    path : str
        Directory (with trailing separator) holding the
        ``t1t2_{split}_{sample_size}_v4.tfrecords`` files.
    sample_size : str or int
        Identifier embedded in the TFRecord file names.
    batch_size : int
        Batch size applied to all three splits.
    onlyt1 : bool
        If True, records carry only a T1 volume; otherwise T1 (int8) and
        T2 (float32) volumes.

    Returns
    -------
    (train_iter, val_iter, test_iter) : tuple of tf.data.Dataset
        The training pipeline is shuffled; val/test preserve file order.
    """
    # Feature spec depends on whether the records carry a T2 channel.
    if onlyt1:
        read_features = {
            't1': tf.io.FixedLenFeature([], dtype=tf.string),
            'subjectid': tf.io.FixedLenFeature([], dtype=tf.string)
        }
    else:
        read_features = {
            't1': tf.io.FixedLenFeature([], dtype=tf.string),
            't2': tf.io.FixedLenFeature([], dtype=tf.string),
            'subjectid': tf.io.FixedLenFeature([], dtype=tf.string)
        }

    def _parse_(serialized_example, onlyt1=False):
        # Decode one serialized example into channels-last volume tensors.
        example = tf.io.parse_single_example(serialized_example, read_features)
        subjectid = example['subjectid']
        if not(onlyt1):
            t1 = tf.expand_dims(tf.reshape(tf.io.decode_raw(example['t1'], tf.int8), (64,64,64)), axis=-1)
            t2 = tf.expand_dims(tf.reshape(tf.io.decode_raw(example['t2'], tf.float32), (64,64,64)), axis=-1)
            return ({'t1': t1, 't2': t2, 'subjectid': subjectid})
        else:
            # NOTE(review): the T1-only path decodes at 256^3 while the dual
            # path uses 64^3 -- confirm this matches the files under `path`.
            t1 = tf.expand_dims(tf.reshape(tf.io.decode_raw(example['t1'], tf.int8), (256,256,256)), axis=-1)
            return ({'t1': t1, 'subjectid': subjectid})

    train_ds = tf.data.TFRecordDataset(path + 't1t2_train_' + str(sample_size) + '_v4.tfrecords')
    val_ds = tf.data.TFRecordDataset(path + 't1t2_val_' + str(sample_size) + '_v4.tfrecords')
    test_ds = tf.data.TFRecordDataset(path + 't1t2_test_' + str(sample_size) + '_v4.tfrecords')
    # BUG FIX: the original called .shuffle(True); Dataset.shuffle expects a
    # buffer size, and True is cast to 1, which performs no shuffling at all.
    train_iter = train_ds.map(lambda x: _parse_(x, onlyt1=onlyt1)).shuffle(buffer_size=1000).batch(batch_size)
    val_iter = val_ds.map(lambda x: _parse_(x, onlyt1=onlyt1)).batch(batch_size)
    test_iter = test_ds.map(lambda x: _parse_(x, onlyt1=onlyt1)).batch(batch_size)
    return train_iter, val_iter, test_iter
def return_csv(path, filenames=None, fluid=False):
    """Load the train/val/test label CSVs and derive the model targets.

    Parameters
    ----------
    path : str
        Directory (with trailing separator) containing the CSV files.
    filenames : dict, optional
        Keys 'train'/'val'/'test' mapping to CSV file names; defaults to the
        non-residualised intelligence files.
    fluid : bool
        If True the CSVs are the 3-column fluid-intelligence files; otherwise
        the full phenotype files, for which categorical groupings and
        z-score-normalised numeric columns are added.

    Returns
    -------
    (train_df, val_df, test_df, norm)
        norm maps each numeric column to its training-set {'mean', 'std'};
        it is None when fluid=True.
    """
    # Avoid a mutable default argument; this preserves the original default.
    if filenames is None:
        filenames = {'train': 'intell_train.csv', 'val': 'intell_valid.csv', 'test': 'intell_test.csv'}
    train_df = pd.read_csv(path + filenames['train'])
    val_df = pd.read_csv(path + filenames['val'])
    test_df = pd.read_csv(path + filenames['test'])
    norm = None
    if fluid:
        train_df.columns = ['subjectkey', 'fluid_res', 'fluid']
        val_df.columns = ['subjectkey', 'fluid_res', 'fluid']
        test_df.columns = ['subjectkey', 'fluid_res', 'fluid']
    # NDAR subject keys use underscores; strip them to match the image IDs.
    train_df['subjectkey'] = train_df['subjectkey'].str.replace('_', '')
    val_df['subjectkey'] = val_df['subjectkey'].str.replace('_', '')
    test_df['subjectkey'] = test_df['subjectkey'].str.replace('_', '')
    if not(fluid):
        for df in [train_df, val_df, test_df]:
            # Shift 1-based categorical codes to 0-based class indices.
            df['race.ethnicity'] = df['race.ethnicity'] - 1
            df['married'] = df['married'] - 1
            # Bucket years of education: <11 -> 0, 11-12 -> 1, 13 -> 2, then
            # one bucket per year from 14 to 21 (groups 3..10).
            # BUG FIX: the original filtered on train_df['high.educ'] inside
            # this per-frame loop (grouping val/test rows by the *training*
            # frame's values) and never incremented `counter`, collapsing
            # years 14-21 into one bucket -- unlike the parallel income loop.
            df['high.educ_group'] = 0
            df.loc[(df['high.educ'] >= 11) & (df['high.educ'] <= 12), 'high.educ_group'] = 1
            df.loc[df['high.educ'] == 13, 'high.educ_group'] = 2
            counter = 3
            for i in range(14, 22):
                df.loc[df['high.educ'] == i, 'high.educ_group'] = counter
                counter += 1
            # Bucket income: codes 4..10 map to groups 1..7, everything else 0.
            df['income_group'] = 0
            counter = 1
            for i in range(4, 11):
                df.loc[df['income'] == i, 'income_group'] = counter
                counter += 1
        # z-score every numeric column using *training-set* statistics only.
        norm = {}
        for col in ['BMI', 'age', 'vol', 'weight', 'height', 'nihtbx_fluidcomp_uncorrected', 'nihtbx_cryst_uncorrected',
                    'nihtbx_pattern_uncorrected', 'nihtbx_picture_uncorrected',
                    'nihtbx_list_uncorrected', 'nihtbx_flanker_uncorrected',
                    'nihtbx_picvocab_uncorrected', 'nihtbx_cardsort_uncorrected',
                    'nihtbx_totalcomp_uncorrected', 'nihtbx_reading_uncorrected']:
            mean = train_df[col].mean()
            std = train_df[col].std()
            train_df[col + '_norm'] = (train_df[col] - mean) / std
            val_df[col + '_norm'] = (val_df[col] - mean) / std
            test_df[col + '_norm'] = (test_df[col] - mean) / std
            norm[col] = {'mean': mean, 'std': std}
    return train_df, val_df, test_df, norm
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 4520, "status": "ok", "timestamp": 1573636640714, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="B-rSS9vT6TuF" outputId="f5b43cbc-7d59-4c7c-b6d6-78882b1ef3e1"
train_iter, val_iter, test_iter = return_iter(path_tfrecords, sample_size, batch_size, onlyt1=onlyt1)
# + colab={} colab_type="code" id="_WxRhswDxB6a"
a = next(iter(train_iter))
# + colab={} colab_type="code" id="lJ0VnyEzBAes"
# One-off pass estimating per-channel mean/std over the training set;
# disabled because the results are hard-coded in the config cell above.
# NOTE(review): the sums of per-batch statistics are divided by the example
# count `n` rather than the number of batches -- verify before re-enabling.
if False:
    t1_mean = 0.
    t1_std = 0.
    t2_mean = 0.
    t2_std = 0.
    n = 0.
    for b in train_iter:
        t1_mean += np.mean(b['t1'])
        t1_std += np.std(b['t1'])
        t2_mean += np.mean(b['t2'])
        t2_std += np.std(b['t2'])
        n += np.asarray(b['t1']).shape[0]
    t1_mean /= n
    t1_std /= n
    t2_mean /= n
    t2_std /= n
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 2924, "status": "ok", "timestamp": 1573636640716, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="mSwhEqDPM_r8" outputId="14a4ac0b-9587-4776-fc64-1e008f0c5c31"
t1_mean, t1_std, t2_mean, t2_std
# + colab={} colab_type="code" id="PAyV5ktlA6K1"
train_df, val_df, test_df, norm_dict = return_csv(path_csv, filename_final, False)
# + colab={"base_uri": "https://localhost:8080/", "height": 442} colab_type="code" executionInfo={"elapsed": 3147, "status": "ok", "timestamp": 1573636641787, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="0xYy4XUyVBeC" outputId="a6a19d59-31ff-4064-f1dc-952befcce4b3"
norm_dict
# + colab={"base_uri": "https://localhost:8080/", "height": 697} colab_type="code" executionInfo={"elapsed": 2660, "status": "ok", "timestamp": 1573636641788, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="_SVRoaUTgv1O" outputId="7b86ff58-14d7-488a-9b56-4962b095211e"
train_df.max()
# + colab={} colab_type="code" id="FmJMaxLJLIgE"
import tensorflow as tf
from tensorflow.keras.layers import Conv3D
from tensorflow import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.utils import conv_utils
# + colab={} colab_type="code" id="eTk95ptFV5oN"
# Prediction targets: categorical heads (name -> number of classes) and the
# z-score-normalised numeric columns produced by return_csv.
cat_cols = {'female': 2, 'race.ethnicity': 5, 'high.educ_group': 4, 'income_group': 8, 'married': 6}
#cat_cols = {}
num_cols = [x for x in list(val_df.columns) if '_norm' in x]
#num_cols = num_cols[0:3]
# + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" executionInfo={"elapsed": 1134, "status": "ok", "timestamp": 1573636642235, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="gSh5zfycjI7A" outputId="b7a137d4-fb48-413e-9307-fcab56fc28d8"
num_cols
# + colab={} colab_type="code" id="1j3q7l7DDWSq"
from tensorflow.keras import Model
class MyDNN(Model):
    """3D CNN for multi-task prediction from MRI volumes.

    Four Conv3D -> BatchNorm -> ReLU (-> MaxPool) blocks feed a global
    average pooling layer, followed by one small dense head per target: a
    softmax head per categorical column and a single-unit linear head per
    numeric column.

    NOTE(review): first of three notebook iterations of MyDNN in this file;
    when run top to bottom, the last definition is the one actually used.
    """
    def __init__(self, cat_cols, num_cols):
        super(MyDNN, self).__init__()
        self.cat_cols = cat_cols  # head name -> number of classes
        self.num_cols = num_cols  # names of the regression heads
        self.ac = tf.keras.layers.ReLU()
        self.maxpool = tf.keras.layers.MaxPool3D(pool_size=(2, 2, 2), data_format='channels_last')
        # NOTE(review): filters=1 looks like a typo -- the later iterations
        # of this class use 32 for the first conv layer.
        self.conv1 = tf.keras.layers.Conv3D(
            filters = 1,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv3D(
            filters = 16,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv3D(
            filters = 32,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.conv4 = tf.keras.layers.Conv3D(
            filters = 256,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn4 = tf.keras.layers.BatchNormalization()
        # One dense head per output, keyed by column name.
        self.fc = {}
        for k in list(self.cat_cols.keys()):
            self.fc[k] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(self.cat_cols[k], activation='softmax')
            ])
        for i in range(len(self.num_cols)):
            self.fc[self.num_cols[i]] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(1)
            ])
        #self.fc1 = tf.keras.Sequential([
        #    tf.keras.layers.Dense(256, activation='relu'),
        #    tf.keras.layers.BatchNormalization(),
        #    tf.keras.layers.Dense(1)
        #    ])
        #self.fc2 = tf.keras.Sequential([
        #    tf.keras.layers.Dense(256, activation='relu'),
        #    tf.keras.layers.BatchNormalization(),
        #    tf.keras.layers.Dense(1)
        #    ])
        #self.fc3 = tf.keras.Sequential([
        #    tf.keras.layers.Dense(256, activation='relu'),
        #    tf.keras.layers.BatchNormalization(),
        #    tf.keras.layers.Dense(1)
        #    ])
    def call(self, x):
        """Forward pass: conv trunk -> GAP -> dict of per-head outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.ac(x)
        x = tf.keras.layers.GlobalAveragePooling3D()(x)
        out = {}
        for k in list(self.fc.keys()):
            out[k] = self.fc[k](x)
        #['age_norm', 'vol_norm', 'weight_norm']
        #out['age_norm'] = self.fc1(x)
        #out['vol_norm'] = self.fc2(x)
        #out['weight_norm'] = self.fc3(x)
        return out
# + colab={} colab_type="code" id="pwzdY28gkCdN"
from tensorflow.keras import Model
class MyDNN(Model):
    """3D CNN for multi-task prediction from MRI volumes (second iteration).

    Widens the conv trunk of the first iteration to 32/64/128/256 filters
    and declares the expected input shape (64,64,64,2): two channels for
    the stacked T1/T2 volumes. Redefined once more below; the last
    definition in the file is the one actually used.
    """
    def __init__(self, cat_cols, num_cols):
        super(MyDNN, self).__init__()
        self.cat_cols = cat_cols  # head name -> number of classes
        self.num_cols = num_cols  # names of the regression heads
        self.ac = tf.keras.layers.ReLU()
        self.maxpool = tf.keras.layers.MaxPool3D(pool_size=(2, 2, 2), data_format='channels_last')
        self.conv1 = tf.keras.layers.Conv3D(
            filters = 32,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last',
            input_shape = (64,64,64,2)
        )
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv3D(
            filters = 64,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv3D(
            filters = 128,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.conv4 = tf.keras.layers.Conv3D(
            filters = 256,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn4 = tf.keras.layers.BatchNormalization()
        # One dense head per output, keyed by column name.
        self.fc = {}
        for k in list(self.cat_cols.keys()):
            self.fc[k] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(self.cat_cols[k], activation='softmax')
            ])
        for i in range(len(self.num_cols)):
            self.fc[self.num_cols[i]] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(1)
            ])
    def call(self, x):
        """Forward pass: conv trunk -> GAP -> dict of per-head outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.ac(x)
        x = tf.keras.layers.GlobalAveragePooling3D()(x)
        out = {}
        for k in list(self.fc.keys()):
            out[k] = self.fc[k](x)
        return out
# + colab={} colab_type="code" id="q9PZkxi_k5b-"
from tensorflow.keras import Model
class MyDNN(Model):
    """3D CNN for multi-task prediction from MRI volumes (final iteration).

    Identical to the previous definition except that the last conv layer
    and the categorical output layers are given explicit names
    ('lastconv_1', 'output_<col>') so they can be located by name later
    (e.g. for inspection or feature extraction). This definition shadows
    the two above when the file runs top to bottom.
    """
    def __init__(self, cat_cols, num_cols):
        super(MyDNN, self).__init__()
        self.cat_cols = cat_cols  # head name -> number of classes
        self.num_cols = num_cols  # names of the regression heads
        self.ac = tf.keras.layers.ReLU()
        self.maxpool = tf.keras.layers.MaxPool3D(pool_size=(2, 2, 2), data_format='channels_last')
        self.conv1 = tf.keras.layers.Conv3D(
            filters = 32,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last',
            input_shape = (64,64,64,2)
        )
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv3D(
            filters = 64,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv3D(
            filters = 128,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.conv4 = tf.keras.layers.Conv3D(
            filters = 256,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last',
            name='lastconv_1'
        )
        self.bn4 = tf.keras.layers.BatchNormalization()
        # One dense head per output, keyed by column name.
        self.fc = {}
        for k in list(self.cat_cols.keys()):
            self.fc[k] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(self.cat_cols[k], activation='softmax', name='output_' + k)
            ])
        for i in range(len(self.num_cols)):
            self.fc[self.num_cols[i]] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(1)
            ])
    def call(self, x):
        """Forward pass: conv trunk -> GAP -> dict of per-head outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.ac(x)
        x = tf.keras.layers.GlobalAveragePooling3D()(x)
        out = {}
        for k in list(self.fc.keys()):
            out[k] = self.fc[k](x)
        return out
# + colab={} colab_type="code" id="tpab_A9TDYeK"
# Loss for the categorical heads and the optimiser used for training.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
# FIX: `lr` is a deprecated alias that newer Keras releases removed; the
# supported keyword since TF 2.0 is `learning_rate` (same value, 0.001).
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# + colab={} colab_type="code" id="hROMApYiDagm"
def calc_loss_acc(out_loss, out_acc, y_true, y_pred, cat_cols, num_cols, norm_dict):
    """Fold one batch into the running per-target loss/metric sums.

    For numerical targets, `out_loss` accumulates summed squared error on the
    normalized scale and `out_acc` the same error rescaled to original units
    via the stored std. For categorical targets, `out_loss` accumulates sparse
    cross-entropy and `out_acc` the count of correct predictions.

    Args:
        out_loss: dict of running loss sums; updated and returned.
        out_acc: dict of running metric sums; updated and returned.
        y_true: dict target name -> ground-truth array for this batch.
        y_pred: dict target name -> prediction tensor for this batch.
        cat_cols: dict of categorical target names -> class counts.
        num_cols: numerical target names (suffixed '_norm' per norm_dict keys).
        norm_dict: normalization stats keyed by the target name sans '_norm'.

    Returns:
        Tuple (out_loss, out_acc) with this batch folded in.
    """
    for col in num_cols:
        tmp_col = col
        # Std of the un-normalized target, to report error in original units.
        tmp_std = norm_dict[tmp_col.replace('_norm','')]['std']
        tmp_y_true = tf.cast(y_true[col], tf.float32).numpy()
        tmp_y_pred = np.squeeze(y_pred[col].numpy())
        # Initialize on first batch, accumulate afterwards.
        if not(tmp_col in out_loss):
            out_loss[tmp_col] = np.sum(np.square(tmp_y_true-tmp_y_pred))
        else:
            out_loss[tmp_col] += np.sum(np.square(tmp_y_true-tmp_y_pred))
        if not(tmp_col in out_acc):
            out_acc[tmp_col] = np.sum(np.square((tmp_y_true-tmp_y_pred)*tmp_std))
        else:
            out_acc[tmp_col] += np.sum(np.square((tmp_y_true-tmp_y_pred)*tmp_std))
    for col in list(cat_cols.keys()):
        tmp_col = col
        # NOTE(review): SparseCategoricalCrossentropy averages over the batch,
        # so this sum mixes per-batch means — dividing by the sample count
        # later underestimates the per-sample loss; confirm that is intended.
        if not(tmp_col in out_loss):
            out_loss[tmp_col] = tf.keras.losses.SparseCategoricalCrossentropy()(tf.squeeze(y_true[col]), tf.squeeze(y_pred[col])).numpy()
        else:
            out_loss[tmp_col] += tf.keras.losses.SparseCategoricalCrossentropy()(tf.squeeze(y_true[col]), tf.squeeze(y_pred[col])).numpy()
        # Count of correct argmax predictions for this batch.
        if not(tmp_col in out_acc):
            out_acc[tmp_col] = tf.reduce_sum(tf.dtypes.cast((y_true[col] == tf.argmax(y_pred[col], axis=-1)), tf.float32)).numpy()
        else:
            out_acc[tmp_col] += tf.reduce_sum(tf.dtypes.cast((y_true[col] == tf.argmax(y_pred[col], axis=-1)), tf.float32)).numpy()
    return(out_loss, out_acc)
def format_output(out_loss, out_acc, n, cols, print_bl=False):
    """Average the accumulated per-target sums and summarize them.

    Args:
        out_loss: dict target name -> accumulated loss sum.
        out_acc: dict target name -> accumulated metric sum.
        n: divisor (sample count) applied to every accumulated value.
        cols: target names to include, in output-row order.
        print_bl: when True, print the summary DataFrame.

    Returns:
        Tuple (total_loss, total_acc, df) where the totals sum the per-target
        averages and df has columns ['name', 'loss', 'acc'].
    """
    rows = [[name, out_loss[name] / n, out_acc[name] / n] for name in cols]
    df = pd.DataFrame(rows)
    df.columns = ['name', 'loss', 'acc']
    total_loss = sum(row[1] for row in rows)
    total_acc = sum(row[2] for row in rows)
    if print_bl:
        print(df)
    return (total_loss, total_acc, df)
@tf.function
def train_step(X, y, model, optimizer, cat_cols, num_cols):
    """One optimization step: summed MSE + cross-entropy across all heads.

    Args:
        X: input batch tensor.
        y: dict target name -> label array for this batch.
        model: multitask model returning a dict of head outputs.
        optimizer: optimizer applied to all trainable variables.
        cat_cols: dict of categorical targets -> class counts.
        num_cols: numerical target names; assumed non-empty (the first one
            seeds the loss accumulator).

    Returns:
        Tuple (y, predictions, loss).
    """
    with tf.GradientTape() as tape:
        predictions = model(X)
        i = 0
        # Seed the total with the first numerical head's MSE, then add the rest.
        loss = tf.keras.losses.MSE(tf.cast(y[num_cols[i]], tf.float32), tf.squeeze(predictions[num_cols[i]]))
        for i in range(1,len(num_cols)):
            loss += tf.keras.losses.MSE(tf.cast(y[num_cols[i]], tf.float32), tf.squeeze(predictions[num_cols[i]]))
        # Unweighted sum: every head contributes equally to the joint loss.
        for col in list(cat_cols.keys()):
            loss += tf.keras.losses.SparseCategoricalCrossentropy()(tf.squeeze(y[col]), tf.squeeze(predictions[col]))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return(y, predictions, loss)
@tf.function
def test_step(X, y, model):
    """Evaluation-only forward pass: return ground truth and predictions."""
    preds = model(X)
    return (y, preds)
def epoch(data_iter, df, model, optimizer, cat_cols, num_cols, norm_dict):
    """Run one full pass over `data_iter`, training when an optimizer is given.

    Relies on module-level globals: t1_mean/t1_std/t2_mean/t2_std for input
    normalization and `decoder` to turn byte subject IDs into strings.

    Args:
        data_iter: iterable of batches with 't1', 't2', 'subjectid' keys.
        df: DataFrame used by get_labels() to look up targets per subject.
        model: multitask model.
        optimizer: optimizer for training, or None for evaluation-only.
        cat_cols: dict of categorical targets -> class counts.
        num_cols: numerical target names.
        norm_dict: per-target normalization stats (passed to calc_loss_acc).

    Returns:
        Tuple (out_loss, out_acc, n, total_time_model, total_time_dataload)
        where n is the number of samples seen.
    """
    out_loss = {}
    out_acc = {}
    n = 0.
    n_batch = 0.
    total_time_dataload = 0.
    total_time_model = 0.
    start_time = time.time()
    for batch in data_iter:
        # Time spent waiting on the input pipeline vs. running the model.
        total_time_dataload += time.time() - start_time
        start_time = time.time()
        # Standardize each modality with its precomputed global mean/std.
        t1 = (tf.cast(batch['t1'], tf.float32)-t1_mean)/t1_std
        t2 = (batch['t2']-t2_mean)/t2_std
        subjectid = decoder(batch['subjectid'])
        y = get_labels(df, subjectid, list(cat_cols.keys())+num_cols)
        # Stack T1/T2 as two channels on the last axis.
        X = tf.concat([t1, t2], axis=4)
        if optimizer != None:
            y_true, y_pred, loss = train_step(X, y, model, optimizer, cat_cols, num_cols)
        else:
            y_true, y_pred = test_step(X, y, model)
        out_loss, out_acc = calc_loss_acc(out_loss, out_acc, y_true, y_pred, cat_cols, num_cols, norm_dict)
        n += X.shape[0]
        n_batch += 1
        # Lightweight progress indicator every 10 batches.
        if (n_batch % 10) == 0:
            print(n_batch)
        total_time_model += time.time() - start_time
        start_time = time.time()
    return (out_loss, out_acc, n, total_time_model, total_time_dataload)
def get_labels(df, subjectid, cols=('nihtbx_fluidcomp_uncorrected_norm',)):
    """Look up target values for a batch of subject IDs, preserving batch order.

    Args:
        df: DataFrame with a 'subjectkey' column plus one column per target.
        subjectid: iterable of subject IDs, in batch order.
        cols: target column names to extract. Default is a tuple rather than
            the original mutable list default (shared-mutable-default pitfall);
            iteration behavior is identical.

    Returns:
        dict mapping each column name to a np.ndarray aligned with `subjectid`
        (unknown IDs yield NaN via the left merge).
    """
    subjects_df = pd.DataFrame(subjectid)
    # Left merge keeps the batch ordering of `subjectid`.
    result_df = pd.merge(subjects_df, df, left_on=0, right_on='subjectkey', how='left')
    return {col: np.asarray(result_df[col].values) for col in cols}
def best_val(df_best, df_val, df_test):
    """Track best-so-far validation metrics and their matching test metrics.

    Loss is "lower is better" for every target. The 'acc' column means
    accuracy (higher is better) for the categorical targets and a squared
    error (lower is better) for the numerical ones, so the comparison
    direction flips between the two groups.

    Args:
        df_best: running best table with columns name, best_loss_test,
            best_acc_test, best_loss_val, best_acc_val.
        df_val: current-epoch validation metrics (name, cur_loss_val, cur_acc_val).
        df_test: current-epoch test metrics (name, cur_loss_test, cur_acc_test).

    Returns:
        Updated best table with the cur_* columns dropped.
    """
    cat_names = ['female', 'race.ethnicity', 'high.educ_group', 'income_group', 'married']
    merged = pd.merge(df_best, df_val, how='left', left_on='name', right_on='name')
    merged = pd.merge(merged, df_test, how='left', left_on='name', right_on='name')
    # Validation loss improved (or tied): take the new val/test values.
    loss_better = merged['best_loss_val'] >= merged['cur_loss_val']
    merged.loc[loss_better, 'best_loss_test'] = merged.loc[loss_better, 'cur_loss_test']
    merged.loc[loss_better, 'best_loss_val'] = merged.loc[loss_better, 'cur_loss_val']
    is_cat = merged['name'].isin(cat_names)
    # Categorical targets: higher accuracy wins.
    acc_up = (merged['best_acc_val'] <= merged['cur_acc_val']) & is_cat
    merged.loc[acc_up, 'best_acc_test'] = merged.loc[acc_up, 'cur_acc_test']
    merged.loc[acc_up, 'best_acc_val'] = merged.loc[acc_up, 'cur_acc_val']
    # Numerical targets: lower error wins.
    acc_down = (merged['best_acc_val'] >= merged['cur_acc_val']) & ~is_cat
    merged.loc[acc_down, 'best_acc_test'] = merged.loc[acc_down, 'cur_acc_test']
    merged.loc[acc_down, 'best_acc_val'] = merged.loc[acc_down, 'cur_acc_val']
    return merged.drop(['cur_loss_val', 'cur_acc_val', 'cur_loss_test', 'cur_acc_test'], axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" executionInfo={"elapsed": 415, "status": "ok", "timestamp": 1573636653401, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="8iQKsZftjsPR" outputId="22d8df05-3f92-4c31-d831-e9308b0db4c9"
cat_cols, num_cols
# + colab={} colab_type="code" id="XK13wNFtoIC4"
modelname = 'site16_downsample_t1t2_test_'

# + training-loop cell
# Converts byte-string subject IDs coming out of the input pipeline.
decoder = np.vectorize(lambda x: x.decode('UTF-8'))
template = 'Epoch {0}, Loss: {1:.3f}, Accuracy: {2:.3f}, Val Loss: {3:.3f}, Val Accuracy: {4:.3f}, Time Model: {5:.3f}, Time Data: {6:.3f}'
logfile = path_output + modelname + 'multitask_test' + '.log'
log_textfile(logfile, cat_cols)
log_textfile(logfile, num_cols)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(lr = 0.001)
model = MyDNN(cat_cols, num_cols)
df_best = None
all_cols = list(cat_cols.keys()) + num_cols
# (Dropped the do-nothing `for col in [0]:` wrapper and the tf.Variable
# initializers that were immediately overwritten.)
for e in range(10):
    log_textfile(logfile, 'Epochs: ' + str(e))
    # BUG FIX: the original reused a single variable `n` for all three
    # epoch() calls, so train and val averages were divided by the *test*
    # sample count. Keep one count per split.
    train_out_loss, train_out_acc, n_train, time_model, time_data = epoch(
        train_iter, train_df, model, optimizer, cat_cols, num_cols, norm_dict)
    val_out_loss, val_out_acc, n_val, _, _ = epoch(
        val_iter, val_df, model, None, cat_cols, num_cols, norm_dict)
    test_out_loss, test_out_acc, n_test, _, _ = epoch(
        test_iter, test_df, model, None, cat_cols, num_cols, norm_dict)
    loss, acc, _ = format_output(train_out_loss, train_out_acc, n_train, all_cols)
    val_loss, val_acc, df_val = format_output(val_out_loss, val_out_acc, n_val, all_cols, print_bl=False)
    test_loss, test_acc, df_test = format_output(test_out_loss, test_out_acc, n_test, all_cols, print_bl=False)
    df_val.columns = ['name', 'cur_loss_val', 'cur_acc_val']
    df_test.columns = ['name', 'cur_loss_test', 'cur_acc_test']
    if e == 0:
        # Seed the best-metric table from the first epoch's results.
        df_best = pd.merge(df_test, df_val, how='left', left_on='name', right_on='name')
        df_best.columns = ['name', 'best_loss_test', 'best_acc_test', 'best_loss_val', 'best_acc_val']
    df_best = best_val(df_best, df_val, df_test)
    print(df_best[['name', 'best_loss_test', 'best_acc_test']])
    print(df_best[['name', 'best_loss_val', 'best_acc_val']])
    log_textfile(logfile, template.format(e, loss, acc, val_loss, val_acc, time_model, time_data))
    df_best.to_csv(path_output + modelname + 'multitask_test' + '.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" executionInfo={"elapsed": 52176, "status": "ok", "timestamp": 1573636708782, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="ENiEqcTM2o5b" outputId="7661277a-f2b1-485e-fbfb-85fc5e7d9c94"
# !pip install tf-explain
# + colab={"base_uri": "https://localhost:8080/", "height": 190} colab_type="code" executionInfo={"elapsed": 57697, "status": "ok", "timestamp": 1573636714920, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="je8ZbBq3tZys" outputId="3c420738-3d47-44ed-9f75-25a268651cf3"
# !pip install nilearn
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 56450, "status": "ok", "timestamp": 1573636715232, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="cv99Rx9vtUBF" outputId="ad64ddb7-7353-4df0-fe1e-f3b4ac938005"
import glob
from nilearn import plotting
import matplotlib.pyplot as plt
import nilearn
import numpy as np
import nibabel as nib
from nilearn.image import crop_img
import os
import tqdm
# %matplotlib inline
# + colab={} colab_type="code" id="tyWmz3FstC8G"
# Pull one test batch and normalize each modality with the global stats.
a = next(iter(test_iter))
t1 = (tf.cast(a['t1'], tf.float32)-t1_mean)/t1_std
t2 = (a['t2']-t2_mean)/t2_std
X = tf.concat([t1, t2], axis=4)
# +
# Functional wrapper exposing only the 'female' head, so tf-explain tools
# that expect a single-output Model can be applied.
inputs = tf.keras.Input(shape=(64,64,64,2), name='inputlayer123')
a = model(inputs)['female']
mm = Model(inputs=inputs, outputs=a)
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 54985, "status": "ok", "timestamp": 1573636716755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="1yvswMaT2lKD" outputId="bcc1195f-8702-4a2b-c41d-f264fb24143c"
mm.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1327, "status": "ok", "timestamp": 1573636746552, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="4rORUysK2cM3" outputId="03452a7a-6db7-4f87-b1be-f05fbdb00b2a"
mm.get_layer('my_dnn').get_layer('lastconv_1').output
# + colab={} colab_type="code" id="wf3u-dJ1PIT_"
from tf_explain.core.smoothgrad import SmoothGrad
explainer = SmoothGrad()
grid = explainer.explain((X[0:1], _), mm, 0, 20, 1.)
# + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" executionInfo={"elapsed": 4327, "status": "ok", "timestamp": 1573636753055, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="SsBZysWOs34f" outputId="fe3a9a63-539e-4e88-a10b-57738791cd7f"
ppp = '/content/drive/My Drive/Capstone/05_Data/02_Sample_MRI/downsampled_resize/T2/sub-NDARINVFJJPAA2A_T2.nii.gz'
org = nib.load(ppp)
plotting.plot_anat(nilearn.image.new_img_like(ppp, grid, affine=None, copy_header=False))
plotting.show()
# + colab={} colab_type="code" id="-G7cjveLADUG"
from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
# + colab={} colab_type="code" id="nhXfjGsxADeD"
explainer = OcclusionSensitivity()
# + colab={} colab_type="code" id="_gGRXRJSADkG"
?? OcclusionSensitivity
# + colab={"base_uri": "https://localhost:8080/", "height": 351} colab_type="code" executionInfo={"elapsed": 4225, "status": "error", "timestamp": 1573636765486, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="Q1GJABrBBDOx" outputId="8b8bc278-991b-4a4a-dbb0-dea4c55ffeee"
sensitivity_maps = np.array(
[
explainer.get_sensitivity_map(mm, X[0], 0, 4)
]
)
# + colab={} colab_type="code" id="o3SM73D7BVZS"
image = X[0]
patch_size = 4
class_index=0
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1766, "status": "ok", "timestamp": 1573637561679, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="mOYf3Oh0G42B" outputId="1c2d487b-7d54-4afa-efef-9b2242d897c3"
image.shape
# + colab={} colab_type="code" id="yCP9mb_bC6SR"
def apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size):
    """Return a copy of `image` with a cubic patch zeroed out in channel 0.

    Args:
        image (numpy.ndarray): Input volume of shape (X, Y, Z, channels).
        top_left_x (int): Patch start index along the first axis.
        top_left_y (int): Patch start index along the second axis.
        top_left_z (int): Patch start index along the third axis.
        patch_size (int): Edge length of the cubic patch.

    Returns:
        numpy.ndarray: New array; the input is left untouched.
    """
    occluded = np.array(image, copy=True)
    x_sl = slice(top_left_x, top_left_x + patch_size)
    y_sl = slice(top_left_y, top_left_y + patch_size)
    z_sl = slice(top_left_z, top_left_z + patch_size)
    # Only channel 0 is occluded, matching the original implementation.
    occluded[x_sl, y_sl, z_sl, 0] = 0
    return occluded
import math
# One sensitivity cell per patch_size-sized cube along each spatial axis.
sensitivity_map = np.zeros((
    math.ceil(image.shape[0] / patch_size),
    math.ceil(image.shape[1] / patch_size),
    math.ceil(image.shape[2] / patch_size),
))
# + colab={"base_uri": "https://localhost:8080/", "height": 289} colab_type="code" executionInfo={"elapsed": 41009, "status": "ok", "timestamp": 1573637656803, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="urOT4dZqFzvY" outputId="2476c3b8-e514-40a0-b2a7-0e70bad97ad0"
# Occlusion sensitivity: for every z-slab, occlude each (x, y) patch in turn,
# re-run the model, and record how much the class confidence drops.
for index_z, top_left_z in enumerate(range(0, image.shape[2], patch_size)):
    print(index_z, top_left_z)
    patches = [
        apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size)
        for index_x, top_left_x in enumerate(range(0, image.shape[0], patch_size))
        for index_y, top_left_y in enumerate(range(0, image.shape[1], patch_size))
    ]
    coordinates = [
        (index_y, index_x)
        for index_x, _ in enumerate(range(0, image.shape[0], patch_size))
        for index_y, _ in enumerate(range(0, image.shape[1], patch_size))
    ]
    # NOTE(review): the patch is applied at [x, y, z] but its score is stored
    # at sensitivity_map[index_y, index_x, index_z] — the x/y axes look
    # transposed relative to the occlusion; confirm this is intended
    # (the pattern is inherited from tf-explain's 2-D implementation).
    predictions = mm.predict(np.array(patches), batch_size=1)
    target_class_predictions = [prediction[class_index] for prediction in predictions]
    # Higher sensitivity = confidence fell more when this region was hidden.
    for (index_y, index_x), confidence in zip(coordinates, target_class_predictions):
        sensitivity_map[index_y, index_x, index_z] = 1 - confidence
# + colab={} colab_type="code" id="TJNEmMOrG0Hi"
from skimage.transform import resize
# +
# Upsample the coarse per-patch sensitivity map back to the volume resolution.
sm = resize(sensitivity_map, (64,64,64))
# +
# Min-max normalize to [0, 1] for display.
heatmap = (sm - np.min(sm)) / (sm.max() - sm.min())
# +
import cv2
# Show every `step`-th axial slice of the heatmap in a single row.
step = 4
n_slices = int(64/4)
i = 0
n = 0
data = (heatmap * 255).astype("uint8")
# NOTE(review): `slice` shadows the built-in; harmless here but worth renaming.
slice = 0
fig, ax = plt.subplots(1, n_slices, figsize=[18, 1.2*1])
for _ in range(n_slices):
    #tmp_data = cv2.applyColorMap(cv2.cvtColor(data[:,:,slice], cv2.COLOR_GRAY2BGR), colormap)
    ax[n].imshow(data[:,:,slice])
    ax[n].set_xticks([])
    ax[n].set_yticks([])
    # `i` is always 0 here, so every panel gets a red slice-index title.
    if i == 0:
        ax[n].set_title(str(slice), color='r')
    else:
        ax[n].set_title('', color='r')
    n += 1
    slice += step
# + colab={} colab_type="code" id="AvHj5mgfBTsU"
# NOTE(review): dead cell — the stray '' before `def` makes this whole cell a
# SyntaxError; it is an earlier occlusion attempt superseded by the working
# per-slab loop earlier in the file. Kept byte-for-byte for reference.
''def apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size):
    """
    Replace a part of the image with a grey patch.
    Args:
        image (numpy.ndarray): Input image
        top_left_x (int): Top Left X position of the applied box
        top_left_y (int): Top Left Y position of the applied box
        patch_size (int): Size of patch to apply
    Returns:
        numpy.ndarray: Patched image
    """
    patched_image = np.array(image, copy=True)
    patched_image[
        top_left_y : top_left_y + patch_size, top_left_x : top_left_x + patch_size, top_left_z : top_left_z + patch_size, :
    ] = 127.5
    return patched_image
import math
sensitivity_map = np.zeros((
    math.ceil(image.shape[0] / patch_size),
    math.ceil(image.shape[1] / patch_size),
    math.ceil(image.shape[2] / patch_size),
))
patches = [
    apply_grey_patch(image, top_left_x, top_left_y, top_left_z, patch_size)
    for index_x, top_left_x in enumerate(range(0, image.shape[0], patch_size))
    for index_y, top_left_y in enumerate(range(0, image.shape[1], patch_size))
    for index_z, top_left_z in enumerate(range(0, image.shape[2], patch_size))
]
coordinates = [
    (index_y, index_x, index_z)
    for index_x, _ in enumerate(range(0, image.shape[0], patch_size))
    for index_y, _ in enumerate(range(0, image.shape[1], patch_size))
]
# + colab={} colab_type="code" id="5tykySZbBhd6"
predictions = mm.predict(np.array(patches), batch_size=1)
target_class_predictions = [
prediction[class_index] for prediction in predictions
]
for (index_y, index_x), confidence in zip(coordinates, target_class_predictions):
sensitivity_map[index_y, index_x] = 1 - confidence
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 24896, "status": "ok", "timestamp": 1573636310007, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="T7CP5XZmB5zk" outputId="51b20ba5-c4a1-437f-c380-1a091287d046"
sensitivity_map.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 307} colab_type="code" executionInfo={"elapsed": 2668, "status": "error", "timestamp": 1573635968909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="YSWQ9aLgADhm" outputId="2064a784-4ba7-48a1-fe7b-14423fb33eff"
grid = explainer.explain((X[0:1], _), mm, 0, 20, 1.)
# + colab={} colab_type="code" id="GsN40IBZADbd"
# + colab={} colab_type="code" id="<KEY>"
from tf_explain.core.grad_cam import GradCAM
explainer = GradCAM()
# + colab={} colab_type="code" id="JleqbVNFt61o"
??GradCAM
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 928, "status": "ok", "timestamp": 1573635431699, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="y9rvvybc68Zz" outputId="d733eeb0-9623-4abe-d37c-808b55b6abf5"
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1155, "status": "ok", "timestamp": 1573635511676, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="Klr1s868--Pd" outputId="91ae4930-a29b-40e7-e6ba-b54f339f9de5"
mm.get_layer('my_dnn_2').get_layer('sequential_40').output
# + colab={} colab_type="code" id="A98V_pwSQRHe"
grad_model = tf.keras.models.Model(
[mm.inputs], [mm.get_layer('my_dnn_2').get_layer('lastconv_1').output, mm.get_layer('my_dnn_2').get_layer('sequential_40').output]
)
# + colab={} colab_type="code" id="ZtMPV1LRu-M9"
import cv2
def image_to_uint_255(image):
    """Normalize a float image to uint8 in [0, 255].

    Accepts arrays already in uint8 (returned unchanged), in [0, 1], or in
    [-1, 1] — the latter detected by a negative minimum and shifted first.

    Args:
        image (numpy.ndarray): Input image.

    Returns:
        numpy.ndarray: uint8 image.
    """
    if image.dtype == np.uint8:
        return image
    scaled = image
    if scaled.min() < 0:
        # Map [-1, 1] to [0, 1] before the 255 scaling.
        scaled = (scaled + 1.0) / 2.0
    return (scaled * 255).astype("uint8")
def heatmap_display(heatmap, original_image, colormap=cv2.COLORMAP_VIRIDIS):
    """Overlay a heatmap on top of an original image.

    Args:
        heatmap (numpy.ndarray): 2-D array with the raw heatmap values.
        original_image (numpy.ndarray): RGB image the heatmap is drawn over.
        colormap (int): OpenCV colormap used to colorize the heatmap.

    Returns:
        np.ndarray: RGB image with the heatmap blended in.
    """
    # BUG FIX: cv2.resize expects dsize as a (width, height) tuple; the
    # original passed the single int original_image.shape[0], which raises.
    resized = cv2.resize(heatmap, (original_image.shape[1], original_image.shape[0]))
    image = image_to_uint_255(original_image)
    # Min-max normalize so the colormap spans the full value range.
    resized = (resized - np.min(resized)) / (resized.max() - resized.min())
    colored = cv2.applyColorMap(
        cv2.cvtColor((resized * 255).astype("uint8"), cv2.COLOR_GRAY2BGR), colormap
    )
    output = cv2.addWeighted(cv2.cvtColor(image, cv2.COLOR_RGB2BGR), 0.7, colored, 1, 0)
    return cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
# + colab={} colab_type="code" id="0x7n6xNlVuCE"
colormap=cv2.COLORMAP_VIRIDIS
with tf.GradientTape() as tape:
X_in = tf.cast(X[0:1], tf.float32)
conv_outputs, predictions = grad_model(X_in)
loss = predictions[:, 0]
tape.watch(loss)
tape.watch(conv_outputs)
grads = tape.gradient(loss, conv_outputs)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1109, "status": "ok", "timestamp": 1573635531488, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="zkzfoc8651jB" outputId="57b5fff2-116a-4c75-cbf0-e4526ee4cf32"
grads.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1231, "status": "ok", "timestamp": 1573635624114, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="yEUMgSwp_fvl" outputId="507004b5-d2bb-4890-fcc8-1b64f6bbf34d"
conv_outputs.shape
# + colab={} colab_type="code" id="NbK4gsGD5z8G"
guided_grads = (
tf.cast(conv_outputs > 0, "float32") * tf.cast(grads > 0, "float32") * grads
)
cams = GradCAM.generate_ponderated_output(conv_outputs, guided_grads)
#heatmaps = np.array([
# heatmap_display(cam.numpy(), image, colormap)
# for cam, image in zip(cams, X[0:1])
# ]
#)
# + colab={} colab_type="code" id="GQRFygoO_lhl"
?? GradCAM
# + colab={} colab_type="code" id="KJQ3A6D1vOga"
cam = cams[0].numpy()
original_image = X[0].numpy()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 553, "status": "ok", "timestamp": 1573635677057, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="jq6WwRHM_suQ" outputId="1260086f-78c7-4e90-83f5-29fbd499bd52"
cam.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 994, "status": "ok", "timestamp": 1573635684025, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="8-vBiYBL_uHv" outputId="38340d19-c523-4e35-c8a0-c02b2ed97298"
original_image.shape
# + colab={} colab_type="code" id="Lf1EpM0huWkN"
image = image_to_uint_255(original_image)
# + colab={} colab_type="code" id="RfWTQbouwPr6"
cam = (cam - np.min(cam)) / (cam.max() - cam.min())
# + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" executionInfo={"elapsed": 3100, "status": "ok", "timestamp": 1573635695548, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="Vc3-eSm_Wdtp" outputId="628281a8-8722-4854-fb6f-907dea010039"
ppp = '/content/drive/My Drive/Capstone/05_Data/02_Sample_MRI/downsampled_resize/T2/sub-NDARINVFJJPAA2A_T2.nii.gz'
org = nib.load(ppp)
plotting.plot_anat(nilearn.image.new_img_like(ppp, cam, affine=None, copy_header=False))
plotting.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 103} colab_type="code" executionInfo={"elapsed": 2499, "status": "ok", "timestamp": 1573632489896, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="Zon4lQxqxy_g" outputId="e27ca556-4b0e-419d-f080-948504cda843"
# Render every `step`-th axial slice of the Grad-CAM map, colormapped, in a row.
step = 4
n_slices = int(64/4)
i = 0
n = 0
data = (cam * 255).astype("uint8")
# NOTE(review): `slice` shadows the built-in; harmless here but worth renaming.
slice = 0
fig, ax = plt.subplots(1, n_slices, figsize=[18, 1.2*1])
for _ in range(n_slices):
    # Colorize the grayscale slice with the chosen OpenCV colormap.
    tmp_data = cv2.applyColorMap(cv2.cvtColor(data[:,:,slice], cv2.COLOR_GRAY2BGR), colormap)
    ax[n].imshow(tmp_data)
    ax[n].set_xticks([])
    ax[n].set_yticks([])
    # `i` is always 0, so each panel is titled with its slice index.
    if i == 0:
        ax[n].set_title(str(slice), color='r')
    else:
        ax[n].set_title('', color='r')
    n += 1
    slice += step
# + colab={"base_uri": "https://localhost:8080/", "height": 850} colab_type="code" executionInfo={"elapsed": 988, "status": "ok", "timestamp": 1573632429787, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="CswyaFSSzUT1" outputId="e23d5f93-8b96-493c-943a-220806dd2676"
image[:,:,slice]
# + colab={"base_uri": "https://localhost:8080/", "height": 850} colab_type="code" executionInfo={"elapsed": 1103, "status": "ok", "timestamp": 1573632359963, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="sHi5Wa4OxzHv" outputId="f75dcc53-bb56-4004-c563-0732b9b00f95"
cv2.applyColorMap(cv2.cvtColor(data[:,:,slice], cv2.COLOR_GRAY2BGR), colormap)
# + colab={} colab_type="code" id="6j0saosJV0nl"
grads
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 1724, "status": "ok", "timestamp": 1573573601574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="9ll5ZhBYS0CC" outputId="da8f25d9-8618-4d94-a49c-6d12f26e452c"
# + colab={"base_uri": "https://localhost:8080/", "height": 190} colab_type="code" executionInfo={"elapsed": 5602, "status": "ok", "timestamp": 1573573592894, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="2l2aRBO0S0lH" outputId="8eb4d09c-aa88-4223-dc2d-872ff68cab0b"
# + colab={} colab_type="code" id="fmCx2QDeTgV5"
ppp = '/content/drive/My Drive/Capstone/05_Data/02_Sample_MRI/downsampled_resize/T2/sub-NDARINVFJJPAA2A_T2.nii.gz'
org = nib.load(ppp)
# + colab={} colab_type="code" id="3aOZOBl7UAYf"
# + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" executionInfo={"elapsed": 2719, "status": "ok", "timestamp": 1573573909666, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="x9nuB0SiS3lG" outputId="da89a83a-9b5d-4286-c265-f9b6d614177e"
# + colab={} colab_type="code" id="dSeow2ZMTSM4"
nilearn.image.
# + colab={"base_uri": "https://localhost:8080/", "height": 307} colab_type="code" executionInfo={"elapsed": 1068, "status": "error", "timestamp": 1573570989091, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="aUUkRh73Ikou" outputId="9280f05b-2745-4b4f-eb3c-22a9bb5c2ee3"
import tensorflow as tf
mm = tf.keras.Model(inputs=inputs, output=inputs)
# + colab={} colab_type="code" id="5iG2jty7ItI6"
aaa = model.fc['female']
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1101, "status": "ok", "timestamp": 1573572176029, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="rBkxNGXJMm3f" outputId="8f9443a0-ce63-4b6b-cfd6-5bdcb191d7f8"
aaa.layers[-1]
# + colab={"base_uri": "https://localhost:8080/", "height": 164} colab_type="code" executionInfo={"elapsed": 1594, "status": "error", "timestamp": 1573571926766, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="R7pvBAzEMgIb" outputId="b674a27f-9b52-49c7-d0d6-bda6bebe1508"
list(model.fc['female'].children())
# + colab={} colab_type="code" id="aQ6iuJbGEym7"
from tensorflow.keras import Model
class WrapperMyDNN(Model):
    """Thin wrapper exposing only the 'female' head of a multitask model."""

    def __init__(self, model):
        super(WrapperMyDNN, self).__init__()
        # Keep a reference to the full multitask model.
        self.model = model

    def call(self, x):
        # Run the wrapped model and return just the 'female' output tensor.
        outputs = self.model(x)
        return outputs['female']
# + colab={} colab_type="code" id="S4Qim0gN0Lua"
?? Model
# + colab={} colab_type="code" id="IbySX5WM0F4G"
from tensorflow.keras import Model
class MyDNN2(Model):
    """Near-duplicate of MyDNN that forwards **kwargs to the Model base.

    Differences from MyDNN: accepts extra Keras Model kwargs, and omits the
    explicit 'lastconv_1' / 'output_<target>' layer names (layers get Keras
    auto-names here).
    """
    def __init__(self, cat_cols, num_cols, **kwargs):
        """Build the 3-D CNN backbone plus one dense head per target.

        Args:
            cat_cols: dict mapping categorical target name -> class count.
            num_cols: list of numerical (regression) target names.
            **kwargs: forwarded to tf.keras.Model.
        """
        super(MyDNN2, self).__init__(**kwargs)
        self.cat_cols = cat_cols
        self.num_cols = num_cols
        # Stateless activation/pooling shared across all conv stages.
        self.ac = tf.keras.layers.ReLU()
        self.maxpool = tf.keras.layers.MaxPool3D(pool_size=(2, 2, 2), data_format='channels_last')
        # Backbone: four Conv3D stages, 32 -> 64 -> 128 -> 256 filters.
        self.conv1 = tf.keras.layers.Conv3D(
            filters = 32,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last',
            input_shape = (64,64,64,2)
        )
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv3D(
            filters = 64,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv3D(
            filters = 128,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.conv4 = tf.keras.layers.Conv3D(
            filters = 256,
            kernel_size = 3,
            padding='valid',
            data_format='channels_last'
        )
        self.bn4 = tf.keras.layers.BatchNormalization()
        # One MLP head per target, keyed by target name.
        self.fc = {}
        for k in list(self.cat_cols.keys()):
            self.fc[k] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(self.cat_cols[k], activation='softmax')
            ])
        for i in range(len(self.num_cols)):
            self.fc[self.num_cols[i]] = tf.keras.Sequential([
                tf.keras.layers.Dense(256, activation='relu'),
                tf.keras.layers.BatchNormalization(),
                tf.keras.layers.Dense(1)
            ])
    def call(self, x):
        """Forward pass: conv backbone, global average pool, then every head.

        Returns:
            dict mapping each target name to that head's output tensor.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.ac(x)
        x = self.maxpool(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.ac(x)
        x = self.maxpool(x)
        # Final stage has no pooling before the global average pool.
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.ac(x)
        x = tf.keras.layers.GlobalAveragePooling3D()(x)
        out = {}
        for k in list(self.fc.keys()):
            out[k] = self.fc[k](x)
        return out
# + colab={} colab_type="code" id="DytC-LQsPZSd"
model2 = WrapperMyDNN(model)
a = next(iter(test_iter))
t1 = (tf.cast(a['t1'], tf.float32)-t1_mean)/t1_std
t2 = (a['t2']-t2_mean)/t2_std
X = tf.concat([t1, t2], axis=4)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1130, "status": "error", "timestamp": 1573572569092, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="-FnN0VNHOuHC" outputId="e39d17e4-1629-4f90-d4e3-e8341a78c956"
model2.output
# + colab={"base_uri": "https://localhost:8080/", "height": 477} colab_type="code" executionInfo={"elapsed": 4623, "status": "error", "timestamp": 1573572511655, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="1HQqLXqWWxXJ" outputId="1422a528-c7a4-4e68-db58-1485075ac412"
from tf_explain.core.smoothgrad import SmoothGrad
explainer = SmoothGrad()
grid = explainer.explain((X, None), model2, 1, 20, 1.)
# + colab={} colab_type="code" id="cP7thtjAqunC"
# Baseline scores against which the model is compared: predict the
# column mean for every numeric target and the modal category for every
# categorical target, then record the resulting error/accuracy.
output = [['colname', 'mse']]
for col in num_cols:
    # Mean predictor -> the column's variance is its MSE.
    col_mean = test_df[col].mean()
    output.append([col, np.mean(np.square(test_df[col] - col_mean))])
for col in cat_cols:
    # Majority-class predictor -> fraction of rows equal to the mode
    # (an accuracy, despite being stored under the "mse" header).
    mode = test_df[col].value_counts().idxmax()
    output.append([col, np.mean(test_df[col] == mode)])
# + colab={} colab_type="code" id="Rw_KzTw6Q4wr"
a = next(iter(test_iter))
# + colab={} colab_type="code" id="ben3zkRr32sD"
t1 = (tf.cast(a['t1'], tf.float32)-t1_mean)/t1_std
t2 = (a['t2']-t2_mean)/t2_std
X = tf.concat([t1, t2], axis=4)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1117, "status": "ok", "timestamp": 1573490357380, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="622IROlaVWVu" outputId="9b50cdce-6128-4fc7-9757-80d1fdce306f"
X.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 1122, "status": "ok", "timestamp": 1573490251609, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="ynswVv2MUwZi" outputId="276f09a5-4318-4fa2-a029-34a2a8c4dd3d"
model2.model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 477} colab_type="code" executionInfo={"elapsed": 5540, "status": "error", "timestamp": 1573490614301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="2RwCwYigS0UO" outputId="7f1ede39-9d2b-40f1-eed3-f1b72b4573f9"
from tf_explain.core.smoothgrad import SmoothGrad
explainer = SmoothGrad()
grid = explainer.explain((X, None), model2, 1, 20, 1.)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 3698, "status": "ok", "timestamp": 1573490096976, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="b033WUfMTy_S" outputId="e71254e5-e55f-488b-a2ce-5ce5f7117b5d"
# !ls /content/drive/My\ Drive/Capstone/
# + colab={"base_uri": "https://localhost:8080/", "height": 935} colab_type="code" executionInfo={"elapsed": 8973, "status": "ok", "timestamp": 1573490110703, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="-RohbTtqS4qd" outputId="497f7261-9a93-4151-a55e-d449ecfaa40b"
IMAGE_PATH = '/content/drive/My Drive/Capstone/cat.jpg'
model_res = tf.keras.applications.vgg16.VGG16(weights='imagenet', include_top=True)
img = tf.keras.preprocessing.image.load_img(IMAGE_PATH, target_size=(224, 224))
img = tf.keras.preprocessing.image.img_to_array(img)
model_res.summary()
data = ([img], None)
tabby_cat_class_index = 281
explainer = SmoothGrad()
# Compute SmoothGrad on VGG16
grid = explainer.explain(data, model_res, tabby_cat_class_index, 20, 1.)
explainer.save(grid, '.', 'smoothgrad.png')
# + colab={} colab_type="code" id="8kJ4t1IgT_FF"
from google.colab import files
files.download('./smoothgrad.png')
# + colab={} colab_type="code" id="zDC5mlR9UpWo"
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1594, "status": "ok", "timestamp": 1573489069509, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="-dvYng1iQBUQ" outputId="b4ddf708-f070-43a7-938b-12bc92419372"
model2(X)
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" executionInfo={"elapsed": 1585, "status": "ok", "timestamp": 1573488955519, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": -60} id="7JMy-Zmg3_8t" outputId="a0d77624-e846-451d-c8f8-84f587118f0b"
# Fix: the cell had a stray leading quote ('model.call(X)[...) which made
# it a syntax error. Inspect the 'female' head's predictions for batch X.
model.call(X)['female']
# + colab={} colab_type="code" id="mIYd0IsfiKER"
pd.DataFrame(output).to_csv(path_output + 'baseline.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 811, "status": "ok", "timestamp": 1572985942408, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": 300} id="j3kFZEWAQ5XU" outputId="c9769171-4ee2-4e40-d803-fcde7a266188"
val_df.groupby(['married']).count() / val_df.shape[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 514} colab_type="code" executionInfo={"elapsed": 1403, "status": "ok", "timestamp": 1572884554781, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": 300} id="6ivDV4iphO9A" outputId="43969338-33ff-44f8-e207-2b03cf78d9a2"
pd.merge(pd.DataFrame(output), df, left_on=0, right_on=0)
# + colab={} colab_type="code" id="3TEP3PWnc4-g"
cols = ['nihtbx_fluidcomp_uncorrected', 'nihtbx_cryst_uncorrected',
'nihtbx_pattern_uncorrected', 'nihtbx_picture_uncorrected',
'nihtbx_list_uncorrected', 'nihtbx_flanker_uncorrected',
'nihtbx_picvocab_uncorrected', 'nihtbx_cardsort_uncorrected',
'nihtbx_totalcomp_uncorrected', 'nihtbx_reading_uncorrected']
# + colab={} colab_type="code" id="GG6ZMe11hqLf"
# Baseline for each NIH Toolbox score: predict the validation-set mean
# for every row, so the recorded "mse" is simply the column's variance.
output = []
output.append(['colname', 'mse'])
for col in cols:
    mean = val_df[col].mean()
    mse_norm = np.mean(np.square(val_df[col]-mean))
    output.append([col, mse_norm])
# + colab={"base_uri": "https://localhost:8080/", "height": 390} colab_type="code" executionInfo={"elapsed": 793, "status": "ok", "timestamp": 1572621420115, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDViwxKXBjVKZfXMWGuXrQ48D62bye6HNutAOX0=s64", "userId": "06737229821528734971"}, "user_tz": 240} id="6YJAmfWtil8q" outputId="1cf07203-3ec7-4448-e8dd-1e7be5ab8636"
pd.DataFrame(output)
# + colab={} colab_type="code" id="OViFTzBKioI2"
# + colab={} colab_type="code" id="AExg0jq563dS"
model.get_layer('lastconv')
|
mirimages-master/06_SimpleDL_MultiTask_Valid.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # KMeans Clustering Example
# A data set that identifies different types of iris's is used to demonstrate KMeans in SAP HANA.
# ## Iris Data Set
# The data set used is from University of California, Irvine (https://archive.ics.uci.edu/ml/datasets/iris, for tutorials use only). This data set contains attributes of a plant iris. There are three species of Iris plants.
# <table>
# <tr><td>Iris Setosa</td><td><img src="images/Iris_setosa.jpg" title="Iris Sertosa" style="float:left;" width="300" height="50" /></td>
# <td>Iris Versicolor</td><td><img src="images/Iris_versicolor.jpg" title="Iris Versicolor" style="float:left;" width="300" height="50" /></td>
# <td>Iris Virginica</td><td><img src="images/Iris_virginica.jpg" title="Iris Virginica" style="float:left;" width="300" height="50" /></td></tr>
# </table>
#
# The data contains the following attributes for various flowers:
# <table align="left"><tr><td>
# <li align="top">sepal length in cm</li>
# <li align="left">sepal width in cm</li>
# <li align="left">petal length in cm</li>
# <li align="left">petal width in cm</li>
# </td><td><img src="images/sepal_petal.jpg" style="float:left;" width="200" height="40" /></td></tr></table>
#
# Although the flower is identified in the data set, we will cluster the data set into 3 clusters since we know there are three different flowers. The hope is that the cluster will correspond to each of the flowers.
#
# A different notebook will use a classification algorithm to predict the type of flower based on the sepal and petal dimensions.
from hana_ml import dataframe
from hana_ml.algorithms.pal import clustering
import numpy as np
import pandas as pd
import logging
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
# ## Load data
# The data is loaded into 4 tables - full set, test set, training set, and the validation set:
# <li>IRIS_DATA_FULL_TBL</li>
# <li>IRIS_DATA_TRAIN_TBL</li>
# <li>IRIS_DATA_TEST_TBL</li>
# <li>IRIS_DATA_VALIDATION_TBL</li>
#
# To do that, a connection is created and passed to the loader.
#
# There is a config file, <b>config/e2edata.ini</b> that controls the connection parameters and whether or not to reload the data from scratch. In case the data is already loaded, there would be no need to load the data. A sample section is below. If the config parameter, reload_data is true then the tables for test, training, and validation are (re-)created and data inserted into them.
#
# Although this ini file has other sections, please do not modify them. Only the [hana] section should be modified.
# #########################<br>
# [hana]<br>
# url=host.sjc.sap.corp<br>
# user=username<br>
# passwd=<PASSWORD><br>
# port=3xx15<br>
# <br>
# #########################<br>
from data_load_utils import DataSets, Settings
url, port, user, pwd = Settings.load_config("../../config/e2edata.ini")
connection_context = dataframe.ConnectionContext(url, port, user, pwd)
full_tbl, training_tbl, validation_tbl, test_tbl = DataSets.load_iris_data(connection_context)
# # Create Data Frames
# Create the data frames for the full set since this is an unsupervised learning example.
#
# Let us also do some data exploration.
# ## Define Dataset
# Data frames are used keep references to data so computation on large data sets in HANA can happen in HANA. Trying to bring the entire data set into the client will likely result in out of memory exceptions.
full_set = connection_context.table(full_tbl)
# ## Simple Exploration
# Let us look at the number of rows in the data set
print('Number of rows in full set: {}'.format(full_set.count()))
# ### Let's look at the columns
print(full_set.columns)
# ### Let us look at some rows
full_set.head(5).collect()
# ### Let's look at the data types
full_set.dtypes()
# ### Let's check how many SPECIES are in the data set.
full_set.distinct("SPECIES").collect()
# # Create Model
# The lines below show the ease with which clustering can be done.
# Set up the features and labels for the model and create the model
# Cluster on the four measurements only; SPECIES is held back so the
# resulting clusters can be compared with the known flowers afterwards.
features = ['SEPALLENGTHCM','SEPALWIDTHCM','PETALLENGTHCM','PETALWIDTHCM']
label = ['SPECIES']
# n_clusters=3 because the data set is known to contain three species;
# min_max normalization keeps all four features on a comparable scale.
kmeans = clustering.KMeans(thread_ratio=0.2, n_clusters=3, distance_level='euclidean',
                           max_iter=100, tol=1.0E-6, category_weights=0.5, normalization='min_max')
# Fit/predict run inside HANA; collect() pulls the assignments to the client.
predictions = kmeans.fit_predict(full_set, 'ID', features).collect()
print(predictions)
# # Plot the data
def plot_kmeans_results(data_set, features, predictions):
    """Visualize KMeans cluster assignments for the iris data.

    Draws a 2-D scatter plot for every pair of features and a 3-D
    scatter plot for every triple, coloring each point by its assigned
    cluster.

    Args:
        data_set: HANA dataframe holding the feature columns
            (``collect()`` is called per column to fetch the values).
        features: list of feature column names to plot.
        predictions: pandas DataFrame with a CLUSTER_ID column, aligned
            row-for-row with ``data_set``.
    """
    # use this to estimate what each cluster_id represents in terms of flowers
    # ideal would be 50-50-50 for each flower, so we can see there are some mis clusterings
    class_colors = {0: 'r', 1: 'b', 2: 'k'}
    predictions_colors = [class_colors[p] for p in predictions['CLUSTER_ID'].values]
    # Proxy artists for the legend — one per presumed species.
    red = plt.Line2D(range(1), range(1), c='w', marker='o', markerfacecolor='r', label='Iris-virginica', markersize=10, alpha=0.9)
    blue = plt.Line2D(range(1), range(1), c='w', marker='o', markerfacecolor='b', label='Iris-versicolor', markersize=10, alpha=0.9)
    black = plt.Line2D(range(1), range(1), c='w', marker='o', markerfacecolor='k', label='Iris-setosa', markersize=10, alpha=0.9)
    # 2-D scatter plot for every pair of features.
    for x, y in itertools.combinations(features, 2):
        plt.figure(figsize=(10,5))
        # Bug fix: the x-axis previously read the global full_set instead
        # of the data_set parameter; use the parameter for both axes.
        plt.scatter(data_set[[x]].collect(), data_set[[y]].collect(), c=predictions_colors, alpha=0.6, s=70)
        plt.grid()
        plt.xlabel(x, fontsize=15)
        plt.ylabel(y, fontsize=15)
        plt.tick_params(labelsize=15)
        plt.legend(handles=[red, blue, black])
        plt.show()
    # %matplotlib notebook
    #above allows interactive 3d plot
    sizes=10
    # 3-D scatter plot for every triple of features.
    for x, y, z in itertools.combinations(features, 3):
        fig = plt.figure(figsize=(8,5))
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter3D(data_set[[x]].collect(), data_set[[y]].collect(), data_set[[z]].collect(), c=predictions_colors, s=70)
        plt.grid()
        ax.set_xlabel(x, labelpad=sizes, fontsize=sizes)
        ax.set_ylabel(y, labelpad=sizes, fontsize=sizes)
        ax.set_zlabel(z, labelpad=sizes, fontsize=sizes)
        ax.tick_params(labelsize=sizes)
        plt.legend(handles=[red, blue, black])
        plt.show()
print(pd.concat([predictions, full_set[['SPECIES']].collect()], axis=1).groupby(['SPECIES','CLUSTER_ID']).size())
# %matplotlib inline
plot_kmeans_results(full_set, features, predictions)
|
Python-API/pal/notebooks/irisFlowerClustering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="77gENRVX40S7"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="d8jyt37T42Vf"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab={} colab_type="code" id="aPxHdjwW5P2j"
#@title MIT License
#
# Copyright (c) 2017 <NAME> # IGNORE_COPYRIGHT: cleared by OSS licensing
#
# Permission is hereby granted, free of charge, to any person obtaining a
# # copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="hRTa3Ee15WsJ"
# # Transfer Learning Using Pretrained ConvNets
# + [markdown] colab_type="text" id="dQHMcypT3vDT"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/images/transfer_learning"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="2X4KyhORdSeO"
# In this tutorial we will discuss how to classify cats vs dogs images by using transfer learning from a pre-trained network. This will allow us to get higher accuracies than we saw by training our network from scratch.
#
# A **pre-trained model** is a saved network that was previously trained on a large dataset, typically on a large-scale image-classification task. We can either use the pretrained model as it is or do transfer learning using the pretrained convnets. The intuition behind **transfer learning** is that if this model was trained on a large and general enough dataset, this model will effectively serve as a generic model of the visual world. We can leverage these learned feature maps without having to train a large model on a large dataset by using these models as the basis of our own model specific to our task. There are 2 scenarios of transfer learning using a pretrained model:
#
# 1. **Feature Extraction** - use the representations learned by a previous network to extract meaningful features from new samples. We simply add a new classifier, which will be trained from scratch, on top of the pretrained model so that we can repurpose the feature maps learned previously for our dataset. **Do we use the entire pretrained model or just the convolutional base?** - We use the feature extraction portion of these pretrained convnets (convolutional base) since they are likely to be generic features and learned concepts over a picture. However, the classification part of the pretrained model is often specific to the original classification task, and subsequently specific to the set of classes on which the model was trained.
# 2. **Fine-Tuning** - unfreezing a few of the top layers of a frozen model base used for feature extraction, and jointly training both the newly added classifier layers as well as the last layers of the frozen model. This allows us to "fine tune" the higher order feature representations in addition to our final classifier in order to make them more relevant for the specific task involved.
#
# **We will follow the general machine learning workflow:**
#
# 1. Examine and understand data
# 2. Build an input pipeline - using Keras ImageDataGenerator as we did in the image classification tutorial
# 3. Compose our model
# * Load in our pretrained model (and pretrained weights)
# * Stack our classification layers on top
# 4. Train our model
# 5. Evaluate model
#
# We will see an example of using the pre-trained convnet as the feature extraction and then fine-tune to train the last few layers of the base model.
#
# **Audience:** This post is geared towards beginners with some Keras API and ML background. To get the most out of this post, you should have some basic ML background, know what CNNs are, and be familiar with the Keras Sequential API.
#
# **Time Estimated**: 30 minutes
# + colab={} colab_type="code" id="iBMcobPHdD8O"
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
print("TensorFlow version is ", tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# + [markdown] colab_type="text" id="v77rlkCKW0IJ"
# ## Data preprocessing
# + [markdown] colab_type="text" id="aXzwKdouXf1h"
# ### Download data - cats_and_dogs_filtered.zip
# We will download a filtered version of Kaggle's [Dogs vs Cats](https://www.kaggle.com/c/dogs-vs-cats/data) dataset. Then store the downloaded zip file to the "/tmp/" directory.
# + colab={} colab_type="code" id="nRnO59Kr6enO"
zip_file = tf.keras.utils.get_file(origin="https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip",
fname="cats_and_dogs_filtered.zip", extract=True)
base_dir, _ = os.path.splitext(zip_file)
# + [markdown] colab_type="text" id="9_6h-c5EXN91"
# ### Prepare training and validation cats and dogs datasets
# Create the training and validation directories for cats datasets and dog datasets.
# + colab={} colab_type="code" id="RWcldM4TXLen"
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
print ('Total training cat images:', len(os.listdir(train_cats_dir)))
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
print ('Total training dog images:', len(os.listdir(train_dogs_dir)))
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
print ('Total validation cat images:', len(os.listdir(validation_cats_dir)))
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
print ('Total validation dog images:', len(os.listdir(validation_dogs_dir)))
# + [markdown] colab_type="text" id="wvidPx6jeFzf"
# ### Create Image Data Generator with Image Augmentation
#
# We will use ImageDataGenerator to rescale the images.
#
# To create the train generator, specify where the train dataset directory, image size, batch size and binary classification mode.
#
# The validation generator is created the same way.
# + colab={} colab_type="code" id="y3PM6GVHcC31"
image_size = 160 # All images will be resized to 160x160
batch_size = 32
# Rescale all images by 1./255 and apply image augmentation
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale=1./255)
validation_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # Source directory for the training images
target_size=(image_size, image_size),
batch_size=batch_size,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = validation_datagen.flow_from_directory(
validation_dir, # Source directory for the validation images
target_size=(image_size, image_size),
batch_size=batch_size,
class_mode='binary')
# + [markdown] colab_type="text" id="OkH-kazQecHB"
# ## Create the base model from the pre-trained convnets
# We will create the base model from the **MobileNet V2** model developed at Google, and pre-trained on the ImageNet dataset, a large dataset of 1.4M images and 1000 classes of web images. This is a powerful model. Let's see what the features that it has learned can do for our cat vs. dog problem.
#
# First, we need to pick which intermediate layer of MobileNet V2 we will use for feature extraction. A common practice is to use the output of the very last layer before the flatten operation, the so-called "bottleneck layer". The reasoning here is that the following fully-connected layers will be too specialized to the task the network was trained on, and thus the features learned by these layers won't be very useful for a new task. The bottleneck features, however, retain much generality.
#
# Let's instantiate an MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the **include_top=False** argument, we load a network that doesn't include the classification layers at the top, which is ideal for feature extraction.
# + colab={} colab_type="code" id="19IQ2gqneqmS"
IMG_SHAPE = (image_size, image_size, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
# + [markdown] colab_type="text" id="rlx56nQtfe8Y"
# ## Feature extraction
# We will freeze the convolutional base created from the previous step and use that as a feature extractor, add a classifier on top of it and train the top-level classifier.
# + [markdown] colab_type="text" id="CnMLieHBCwil"
# ### Freeze the convolutional base
# It's important to freeze the convolutional based before we compile and train the model. By freezing (or setting `layer.trainable = False`), we prevent the weights in these layers from being updated during training.
# + colab={} colab_type="code" id="OTCJH4bphOeo"
base_model.trainable = False
# + colab={} colab_type="code" id="KpbzSmPkDa-N"
# Let's take a look at the base model architecture
base_model.summary()
# + [markdown] colab_type="text" id="wdMRM8YModbk"
# #### Add a classification head
# + [markdown] colab_type="text" id="0iqnBeZrfoIc"
# Now let's add a few layers on top of the base model:
# + colab={} colab_type="code" id="eApvroIyn1K0"
model = tf.keras.Sequential([
base_model,
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dense(1, activation='sigmoid')
])
# + [markdown] colab_type="text" id="g0ylJXE_kRLi"
# ### Compile the model
#
# You must compile the model before training it.
# + colab={} colab_type="code" id="RpR8HdyMhukJ"
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.0001),
loss='binary_crossentropy',
metrics=['accuracy'])
# + colab={} colab_type="code" id="I8ARiyMFsgbH"
model.summary()
# + [markdown] colab_type="text" id="lxOcmVr0ydFZ"
# These 1.2K trainable parameters are divided among 2 TensorFlow `Variable` objects, the weights and biases of the two dense layers:
# + colab={} colab_type="code" id="krvBumovycVA"
len(model.trainable_variables)
# + [markdown] colab_type="text" id="RxvgOYTDSWTx"
# ### Train the model
#
# After training for 10 epochs, we are able to get ~94% accuracy.
#
# If you have more time, train it to convergence (50 epochs, ~96% accuracy)
#
# + colab={} colab_type="code" id="Om4O3EESkab1"
# Train only the newly added classification head; the MobileNetV2 base
# was frozen above (base_model.trainable = False).
epochs = 10
# Whole batches per pass over each split (remainder batch is dropped).
steps_per_epoch = train_generator.n // batch_size
validation_steps = validation_generator.n // batch_size
history = model.fit_generator(train_generator,
                              steps_per_epoch = steps_per_epoch,
                              epochs=epochs,
                              workers=4,
                              validation_data=validation_generator,
                              validation_steps=validation_steps)
# + [markdown] colab_type="text" id="Hd94CKImf8vi"
# ### Learning curves
#
# Let's take a look at the learning curves of the training and validation accuracy / loss, when using the MobileNet V2 base model as a fixed feature extractor.
# + [markdown] colab_type="text" id="l7HOsQTPNgO9"
# If you train to convergence (`epochs=50`) the resulting graph should look like this:
#
# 
# + colab={} colab_type="code" id="53OTCh3jnbwV"
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,max(plt.ylim())])
plt.title('Training and Validation Loss')
plt.show()
# + [markdown] colab_type="text" id="CqwV-CRdS6Nv"
# ## Fine tuning
# In our feature extraction experiment, we were only training a few layers on top of an MobileNet V2 base model. The weights of the pre-trained network were **not** updated during training. One way to increase performance even further is to "fine-tune" the weights of the top layers of the pre-trained model alongside the training of the top-level classifier. The training process will force the weights to be tuned from generic features maps to features associated specifically to our dataset.
#
# Note: this should only be attempted after you have trained the top-level classifier with the pre-trained model set to non-trainable. If you add a randomly initialized classifier on top of a pre-trained model and attempt to train all layers jointly, the magnitude of the gradient updates will be too large (due to the random weights from the classifier) and your pre-trained model will just forget everything it has learned.
#
# Additionally, the reasoning behind fine-tuning the top layers of the pre-trained model rather than all layers of the pre-trained model is the following: in a convnet, the higher up a layer is, the more specialized it is. The first few layers in a convnet learned very simple and generic features, which generalize to almost all types of images. But as you go higher up, the features are increasingly more specific to the dataset that the model was trained on. The goal of fine-tuning is to adapt these specialized features to work with the new dataset.
# + [markdown] colab_type="text" id="CPXnzUK0QonF"
# ### Un-freeze the top layers of the model
#
# + [markdown] colab_type="text" id="rfxv_ifotQak"
# All we need to do is unfreeze the `base_model`, and set the bottom layers be un-trainable. Then, recompile the model (necessary for these changes to take effect), and resume training.
# + colab={} colab_type="code" id="4nzcagVitLQm"
base_model.trainable = True
# + colab={} colab_type="code" id="-4HgVAacRs5v"
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
# + [markdown] colab_type="text" id="4Uk1dgsxT0IS"
# ### Compile the model
#
# Compile the model using a much-lower training rate.
# + colab={} colab_type="code" id="NtUnaz0WUDva"
model.compile(optimizer = tf.keras.optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['accuracy'])
# + colab={} colab_type="code" id="WwBWy7J2kZvA"
model.summary()
# + colab={} colab_type="code" id="bNXelbMQtonr"
len(model.trainable_variables)
# + [markdown] colab_type="text" id="4G5O4jd6TuAG"
# ### Continue Train the model
# + [markdown] colab_type="text" id="0foWUN-yDLo_"
# If you trained to convergence earlier, this will get you a few percent more accuracy.
# + colab={} colab_type="code" id="ECQLkAsFTlun"
history_fine = model.fit_generator(train_generator,
steps_per_epoch = steps_per_epoch,
epochs=epochs,
workers=4,
validation_data=validation_generator,
validation_steps=validation_steps)
# + [markdown] colab_type="text" id="TfXEmsxQf6eP"
# ### Learning curves
#
# Let's take a look at the learning curves of the training and validation accuracy / loss, when fine tuning the last few layers of the MobileNet V2 base model, as well as the classifier on top of it. Note the validation loss is much higher than the training loss, which means there may be some overfitting.
#
# **Note**: the training dataset is fairly small, and is similar to the original datasets that MobileNet V2 was trained on, so fine-tuning may result in overfitting.
#
# + [markdown] colab_type="text" id="DNtfNZKlInGT"
# If you train to convergence (`epochs=50`) the resulting graph should look like this:
#
# 
# + colab={} colab_type="code" id="PpA8PlpQKygw"
acc += history_fine.history['acc']
val_acc += history_fine.history['val_acc']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
# + colab={} colab_type="code" id="chW103JUItdk"
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.9, 1])
plt.plot([epochs-1,epochs-1], plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 0.2])
plt.plot([epochs-1,epochs-1], plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# + [markdown] colab_type="text" id="_TZTwG7nhm0C"
# # Key takeaways
# In summary here is what we covered in this tutorial on how to do transfer learning using a pre-trained model to improve accuracy:
# * Using a pre-trained model for **feature extraction** - when working with a small dataset, it is common to leverage the features learned by a model trained on a larger dataset in the same domain. This is done by instantiating the pre-trained model and adding a fully connected classifier on top. The pre-trained model is "frozen" and only the weights of the classifier are updated during training.
# In this case, the convolutional base extracts all the features associated with each image and we train a classifier that determines, given these set of features to which class it belongs.
# * **Fine-tuning** a pre-trained model - to further improve performance, one might want to repurpose the top-level layers of the pre-trained models to the new dataset via fine-tuning.
# In this case, we tune our weights such that we learn highly specialized and high level features specific to our dataset. This only makes sense when the training dataset is large and very similar to the original dataset that the pre-trained model was trained on.
#
|
site/en/tutorials/images/transfer_learning.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.5
# language: julia
# name: julia-0.4
# ---
using Polynomials, PyPlot, Interact

# Example 4x4 matrix whose characteristic polynomial we visualise.
A = [ 0.325 -0.075 0.075 -0.075
      0.025 0.225 -0.025 -0.275
      0.15 -0.05 0.25 -0.05
      -0.1 -0.1 0.1 0.4 ]
λ = eigvals(A)

# Sample det(A - λI) over [0, 0.6]; its roots are the eigenvalues, marked below.
x = linspace(0, 0.6, 100)
plot(x, [det(A - λ*I) for λ in x], "r-")
plot(x, zeros(x), "k--")
plot(λ, zeros(λ), "bo")
xlabel(L"\lambda")
# Fix: "\det(...)" in a plain string literal is an invalid escape sequence in
# Julia (syntax error); use a LaTeXString, as xlabel already does.
ylabel(L"\det(A - \lambda I)")
title("Characteristic Polynomial")
# Second example: eigen-decomposition of a small 2x2 matrix.
A = [
1 1
-2 4
]
eigvals(A)
# Julia 0.4's eig returns (eigenvalues, matrix with eigenvectors as columns).
λ, X = eig(A)
λ
X
# ### Exercises on eigenvalus and eigenvectors
#
# ref: https://ocw.mit.edu/courses/mathematics/18-06sc-linear-algebra-fall-2011/least-squares-determinants-and-eigenvalues/eigenvalues-and-eigenvectors/MIT18_06SCF11_Ses2.8prob.pdf
#
# **Problem one**
#
# A three-by-three matrix B is known to have eigenvalues 0, 1 and 2. This information is enough to find three of these:
#
# - The rank of B
# - The determinant of $B^TB$
# - The eigenvalues of $B^TB$
# - The eigenvalues of $(B^2 + I)^{-1}$
# (a) B has exactly one zero eigenvalue, so rank(B) equals 2
#
# (b) det($B^TB$) = det($B^T$)det(B) = 0
#
# (d) $Ax=\lambda x$ then $A^{-1}x=A^{-1}\frac{Ax}{\lambda}=\frac{x}{\lambda}$
|
eigenvalues.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from model import *
from data import *
# ## Train your Unet with membrane data
# membrane data is in folder membrane/, it is a binary classification task.
#
# The input shape of image and mask are the same :(batch_size,rows,cols,channel = 1)
# ### Train with data generator
# Augmentation settings forwarded to Keras' ImageDataGenerator: small affine
# jitter plus horizontal flips; 'nearest' fills pixels exposed by the shifts.
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')
# Generator yielding (image, mask) batches of size 2 from the train folder.
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)
model = unet()
# Checkpoint the weights with the lowest training loss seen so far.
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=2000,epochs=5,callbacks=[model_checkpoint])
# ### Train with npy file
# +
#imgs_train,imgs_mask_train = geneTrainNpy("data/membrane/train/aug/","data/membrane/train/aug/")
#model.fit(imgs_train, imgs_mask_train, batch_size=2, nb_epoch=10, verbose=1,validation_split=0.2, shuffle=True, callbacks=[model_checkpoint])
# -
# ### test your model and save predicted results
testGene = testGenerator("data/membrane/test")
# Rebuild the architecture and load the checkpointed weights for inference.
model = unet()
model.load_weights("unet_membrane.hdf5")
# NOTE(review): predict_generator is told to expect 30 test images — confirm
# that data/membrane/test actually contains 30 files.
results = model.predict_generator(testGene,30,verbose=1)
saveResult("data/membrane/test",results)
|
trainUnet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sistemas Inteligentes
#
# ## Exercício Computacional 5 - Otimização
# ### Identificação do Aluno
# #### Nome Completo
# <NAME>
# #### RA
# 11090115
# ## Instruções
# 1. Escolha um problema de otimização, identifique-o e explique-o.
#
# 2. Escolha ao menos uma técnica de otimização, identifique-a e explique-a.
#
# 3. Utilize células intermediárias de tipo _Markdown_ para explicar o que é feito em cada célula de código. Mas não deixe de utilizar comentários breves e pertinentes dentro do próprio código. Isto significa que o desenvolvimento NÃO deve ser feito em uma única célula.
#
# 4. Sempre que for cabível, exiba as figuras, os gráficos, os valores (ao menos parte deles) etc., mas procure sempre manter um capricho em todas as saídas.
#
# 5. Ao final, comente da forma mais completa possível os resultados obtidos, sempre sugerindo o que poderia ser feito para melhorá-los e fornecendo elementos que contribuam para a sua compreensão.
#
# 6. Respeite as regras gramaticais e procure manter coesão, coerência e fluidez em seus textos.
#
# 7. Apesar de a análise dos resultados ser mais importante do que o código em si, serão analisados critérios como organização e clareza do código, então evite códigos "poluídos" e confusos.
#
# 8. Caso seja utilizada alguma fonte de consulta ou inspiração para o exercício, lembre-se de citá-la apropriadamente ao fim.
# ### Problema a ser trabalhado
# #### Identificação do Problema
# ESCREVA AQUI
# #### Explicação do Problema
# ESCREVA AQUI
# ### Técnica
# #### Identificação da Técnica
# ESCREVA AQUI
# #### Explicação da Técnica
# ESCREVA AQUI
# ## Desenvolvimento
# +
### CÓDIGO ###
# -
# ## Discussão sobre os resultados
# ESCREVA AQUI
# ## Fontes
# ESCREVA AQUI
|
limitadas/sistemas-inteligentes/RICARDO SUYAMA com LUNEQUE JUNIOR e TITO CACO/q3-2018/ec5/.ipynb_checkpoints/EC_5-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import math
import numpy as np
import matplotlib
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# from https://grafana.icecube.wisc.edu/grafana/d/45z6Oy5Gk/glidein-sites?orgId=1
sites = {}
for row in """US-NPX: 46.05 Mil
other: 16.48 Mil
DE-DESY: 11.05 Mil
US-MSU: 10.05 Mil
CA-SFU: 9.21 Mil
US-OSG-UCSD: 3.70 Mil
US-XSEDE-PSC: 2.83 Mil
US-LCC-TACC: 1.82 Mil
US-OSG-Syracuse: 1.23 Mil
US-XSEDE-SDSC: 1.14 Mil
DE-Mainz: 914.13 K
US-GZK: 873.50 K
US-UMD: 809.25 K
US-OSG-Crane: 731.15 K
BE-IIHE: 608.60 K
DE-Dortmund: 517.12 K
US-XSEDE-TACC: 478.52 K
US-Marquette: 453.19 K
CA-Alberta: 416.41 K
US-OSG-FNAL: 384.63 K
US-OSG-BNL-ATLAS: 316.14 K
DE-Aachen: 281.38 K
US-CHTC: 255.49 K
UK-Manchester: 168.31 K
US-OSG-SPRACE: 159.44 K
US-OSG-MWT2: 144.66 K
US-OSG-Colorado: 142.58 K
US-OSG-UCSDT2: 141.05 K
DK-NBI: 122.27 K
DE-Munich: 76.82 K
US-OSG-UIUC: 49.16 K
US-OSG-Clemson: 34.97 K
US-OSG-NMSU: 33.95 K
US-OSG-WSU: 20.31 K
US-OSG-UConn: 6.52 K
US-OSG-Caltech-HEP: 3.59 K
CA-McGill: 54.00""".split('\n'):
parts = row.split(':')
val = parts[1]
if 'Mil' in val:
val = float(val.split()[0])*1000000
elif 'K' in val:
val = float(val.split()[0])*1000
else:
val = float(val)
sites[parts[0]] = val
sites
# +
sites = {'CA-Alberta': {'count': 416410, 'lat': 53.5461, 'lon': -113.4938},
'US-NPX': {'count': 46050000.0, 'lat': 43.0766, 'lon': -89.4125},
'DE-DESY': {'count': 11050000.0, 'lat': 52.3477, 'lon': 13.6208},
'US-MSU': {'count': 10050000.0, 'lat': 42.7018, 'lon': -84.4822},
'CA-SFU': {'count': 9210000.0, 'lat': 49.2768, 'lon': -122.9180},
'US-OSG-UCSD': {'count': 3700000.0, 'lat': 32.8801, 'lon': -117.2340},
'US-XSEDE-PSC': {'count': 2830000.0, 'lat': 40.4406, 'lon': -79.9959},
'US-LCC-TACC': {'count': 1820000.0, 'lat': 30.3877, 'lon': -97.7280},
'US-OSG-Syracuse': {'count': 1230000.0, 'lat': 43.0481, 'lon': -76.1474},
'US-XSEDE-SDSC': {'count': 1140000.0, 'lat': 32.8801, 'lon': -117.2340},
'DE-Mainz': {'count': 914130.0, 'lat': 49.9929, 'lon': 8.2473},
'US-GZK': {'count': 873500.0, 'lat': 43.0766, 'lon': -89.4125},
'US-UMD': {'count': 809250.0, 'lat': 38.9869, 'lon': -76.9426},
'US-OSG-Crane': {'count': 731150.0, 'lat': 40.8202, 'lon': -96.7005},
'BE-IIHE': {'count': 608600.0, 'lat': 50.8503, 'lon': 4.3517},
'DE-Dortmund': {'count': 517120.0, 'lat': 51.5136, 'lon': 7.4653},
'US-XSEDE-TACC': {'count': 478520.0, 'lat': 30.3877, 'lon': -97.7280},
'US-Marquette': {'count': 453190.0, 'lat': 43.0388, 'lon': -87.9286},
'US-OSG-FNAL': {'count': 384630.0, 'lat': 41.8407, 'lon': -88.2792},
'US-OSG-BNL-ATLAS': {'count': 316140.0, 'lat': 40.8643, 'lon': -72.8752},
'DE-Aachen': {'count': 281380.0, 'lat': 50.7753, 'lon': 6.0839},
'US-CHTC': {'count': 255490.0, 'lat': 43.0766, 'lon': -89.4125},
'UK-Manchester': {'count': 168310.0, 'lat': 53.4668, 'lon': -2.2339},
'US-OSG-SPRACE': {'count': 159440.0, 'lat': -23.511344, 'lon': -46.66769},
'US-OSG-MWT2': {'count': 144660.0, 'lat': 39.7671, 'lon': -86.1581},
'US-OSG-Colorado': {'count': 142580.0, 'lat': 40.0076, 'lon': -105.2659},
'US-OSG-UCSDT2': {'count': 141050.0, 'lat': 32.8801, 'lon': -117.2340},
'DK-NBI': {'count': 122270.0, 'lat': 55.676098, 'lon': 12.568337},
'DE-Munich': {'count': 76820.0, 'lat': 48.1351, 'lon': 11.5820},
'US-OSG-UIUC': {'count': 49160.0, 'lat': 40.1020, 'lon': -88.2272},
'US-OSG-Clemson': {'count': 34970.0, 'lat': 34.6834, 'lon': -82.8374},
'US-OSG-NMSU': {'count': 33950.0, 'lat': 32.2788, 'lon': -106.7479},
'US-OSG-WSU': {'count': 20310.0, 'lat': 46.7319, 'lon': -117.1542},
'US-OSG-UConn': {'count': 6520.0, 'lat': 41.8077, 'lon': -72.2540},
'US-OSG-Caltech-HEP': {'count': 3590.0, 'lat': 34.1377, 'lon': -118.1253},
'CA-McGill': {'count': 54.0, 'lat': 45.5048, 'lon': -73.5772},
'US-NERSC': {'count': 17500, 'lat': 37.8758, 'lon': -122.2528}
}
for row in """us-east-1;Virginia;38.13;-78.45
us-east-2;Ohio;39.96;-83
us-west-1;California;37.35;-121.96
us-west-2;Oregon;46.15;-123.88
eu-west-1;Ireland;53;-8
eu-west-2;London;51;-0.1
eu-west-3;Paris;48.86;2.35
eu-central-1;Frankfurt;50;8
sa-east-1;Sao Paulo;-23.34;-46.38
ap-southeast-1;Singapore;1.37;103.8
ap-southeast-2;Sydney;-33.86;151.2
ap-northeast-1;Tokyo;35.41;139.42
ap-northeast-2;Seoul;37.56;126.98
ap-south-1;Mumbai;19.08;72.88
ca-central-1;Canada Central;45.5;-73.6""".split('\n'):
if ';' not in row:
continue
parts = row.strip().split(';')
sites['AWS-'+parts[0]] = {'count':1, 'lat': float(parts[2]), 'lon': float(parts[3])}
for i,row in enumerate("""40.758701, -111.876183
36.114647, -115.172813
41.2619, -95.8608
33.1960, -80.0131
60.5693, 27.1878
34.6937, 135.5023
-6.2088, 106.8456
-35.2809, 149.1300
53.3498, 6.2603
""".split('\n')):
if ',' not in row:
continue
parts = row.strip().split(',')
sites[f'GOOGLE-{i}'] = {'count': 1, 'lat': float(parts[0]), 'lon': float(parts[1].strip())}
pairs = {}
for site in sites:
latlon = (sites[site]['lat'], sites[site]['lon'])
if latlon not in pairs:
pairs[latlon] = {site: sites[site]['count']}
else:
pairs[latlon][site] = sites[site]['count']
# -
def iscloud(names):
    """Return True if any site name belongs to a commercial cloud provider."""
    cloud_prefixes = ('AWS', 'GOOGLE', 'AZURE')
    return any(name.startswith(cloud_prefixes) for name in names)
# +
fig = plt.figure(figsize=(16,12))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()
#ax.coastlines()
uw_lat = 43.0766
uw_lon = -89.4125
totalcount = sum(sum(s.values()) for s in pairs.values())
for latlon in pairs:
weight = min(10,max(int(math.log(sum(pairs[latlon].values())/totalcount*800*len(pairs))), 1))
color = 'green' if iscloud(pairs[latlon]) else 'blue'
plt.plot([uw_lon, latlon[1]], [uw_lat, latlon[0]],
color=color, linestyle='--', marker='o', linewidth=1, markersize=4,
transform=ccrs.Geodetic(),
)
#plt.text(ny_lon - 3, ny_lat - 12, 'New York',
# horizontalalignment='right',
# transform=ccrs.Geodetic())
plt.show()
# +
totalcount = sum(sum(s.values()) for s in pairs.values())
for latlon in pairs:
weight = min(10,max(int(math.log(sum(pairs[latlon].values())/totalcount*800*len(pairs))), 1))
print(list(pairs[latlon].keys())[0], 'count:', sum(pairs[latlon].values()), 'weight:', weight)
print('total', totalcount)
# -
|
pyglidein world plot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (tensorflow)
# language: python
# name: tensorflow
# ---
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_5_python_functional.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # T81-558: Applications of Deep Neural Networks
# **Module 1: Python Preliminaries**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 1 Material
#
# * Part 1.1: Course Overview [[Video]](https://www.youtube.com/watch?v=taxS7a-goNs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_1_overview.ipynb)
# * Part 1.2: Introduction to Python [[Video]](https://www.youtube.com/watch?v=czq5d53vKvo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_2_intro_python.ipynb)
# * Part 1.3: Python Lists, Dictionaries, Sets and JSON [[Video]](https://www.youtube.com/watch?v=kcGx2I5akSs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_3_python_collections.ipynb)
# * Part 1.4: File Handling [[Video]](https://www.youtube.com/watch?v=FSuSLCMgCZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_4_python_files.ipynb)
# * **Part 1.5: Functions, Lambdas, and Map/Reduce** [[Video]](https://www.youtube.com/watch?v=jQH1ZCSj6Ng&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_5_python_functional.ipynb)
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
try:
from google.colab import drive
# %tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
# # Part 1.5: Functions, Lambdas, and Map/Reduce
#
# Functions, **lambdas**, and **map/reduce** can allow you to process your data in advanced ways. We will introduce these techniques here and expand on them in the next module, which will discuss Pandas.
#
# Function parameters can be named or unnamed in Python. Default values can also be used. Consider the following function.
# +
def say_hello(speaker, person_to_greet, greeting = "Hello"):
    """Print a one-line greeting addressed to *person_to_greet* from *speaker*."""
    line = f'{greeting} {person_to_greet}, this is {speaker}.'
    print(line)

say_hello('Jeff', "John")
say_hello('Jeff', "John", "Goodbye")
say_hello(speaker='Jeff', person_to_greet="John", greeting = "Goodbye")
# -
# A function is a way to capture code that is commonly executed. Consider the following function that can be used to trim white space from a string and capitalize the first letter.
def process_string(str):
    """Strip surrounding whitespace and upper-case the first character.

    Args:
        str: the input string (parameter name kept for compatibility,
            although it shadows the builtin ``str``).

    Returns:
        The trimmed string with its first letter upper-cased, or an empty
        string for empty / whitespace-only input (the original raised
        IndexError on ``t[0]`` in that case).
    """
    t = str.strip()
    if not t:
        return t
    return t[0].upper() + t[1:]
# This function can now be called quite easily.
str = process_string(" hello ")
print(f'"{str}"')
# Python's **map** is a very useful function that is provided in many different programming languages. The **map** function takes a **list** and applies a function to each member of the **list** and returns a second **list** that is the same size as the first.
l = [' apple ', 'pear ', 'orange', 'pine apple ']
list(map(process_string, l))
# ### Map
# The **map** function is very similar to the Python **comprehension** that we previously explored. The following **comprehension** accomplishes the same task as the previous call to **map**.
l = [' apple ', 'pear ', 'orange', 'pine apple ']
l2 = [process_string(x) for x in l]
print(l2)
# The choice of using a **map** function or **comprehension** is up to the programmer. I tend to prefer **map** since it is so common in other programming languages.
# ### Filter
# While a **map function** always creates a new **list** of the same size as the original, the **filter** function creates a potentially smaller **list**.
# +
def greater_than_five(x):
    """Return True when *x* is strictly greater than 5."""
    return 5 < x
l = [ 1, 10, 20, 3, -2, 0]
l2 = list(filter(greater_than_five, l))
print(l2)
# -
# ### Lambda
# It might seem somewhat tedious to have to create an entire function just to check to see if a value is greater than 5. A **lambda** saves you this effort. A lambda is essentially an unnamed function.
l = [ 1, 10, 20, 3, -2, 0]
l2 = list(filter(lambda x: x>5, l))
print(l2)
# ### Reduce
#
# Finally, we will make use of **reduce**. Like **filter** and **map** the **reduce** function also works on a **list**. However, the result of the **reduce** is a single value. Consider if you wanted to sum the **values** of a **list**. The sum is implemented by a **lambda**.
# +
from functools import reduce
l = [ 1, 10, 20, 3, -2, 0]
result = reduce(lambda x,y: x+y,l)
print(result)
|
t81_558_class_01_5_python_functional.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Engineering
# * **Description**: COMP4103(Big Data)--Group Project
# * **Author**: Aaron
# * **Version**: 0.1
# ## 1. load packages
# Apache Spark
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.window import Window
# ## 2. Create a Spark Session
# +
# Start a SparkSession
spark = SparkSession \
.builder \
.master("local[*]") \
.appName("data preprocessing") \
.getOrCreate()
sc = spark.sparkContext
# -
# ## 3. Data combination
# +
# DataSet
bitcoin_data = "bitcoin_10y_1min_interpolate.csv"
blockChain_data = "blockChain_10y_1min_interpolate.csv"
df = spark.read.format("csv") \
.option("inferSchema",'True') \
.option("header",True) \
.load(bitcoin_data) \
.withColumn("id", F.row_number().over(Window.orderBy(F.monotonically_increasing_id()))-1)
blockChain_df = spark.read.format("csv") \
.option("inferSchema",'True') \
.option("header",True) \
.load(blockChain_data) \
.withColumn("id", F.row_number().over(Window.orderBy(F.monotonically_increasing_id()))-1)
# join data
df = df.join(blockChain_df, on=['id','Timestamp'], how='inner')
# -
# ## 4. Generate the label column
# Add a NEXT_BTC_CLOSE represent next step bitcoin price as the label column
# https://sparkbyexamples.com/pyspark/pyspark-window-functions/
df = df.withColumn("NEXT_BTC_CLOSE", F.lag("Close", offset=-1) \
.over(Window.orderBy("id"))) \
.dropna()
# ## 5. Generate financial indicators
# +
# Generate additional valuable features
# Rate of Change allows investors to spot security momentum and other trends
# Typically a 12-day Rate-of-Change is used but for simplicity, I used it for every 30-min interval
df = df.withColumn("Rate_of_Change", (F.col("NEXT_BTC_CLOSE") / F.col("Previous_close") - 1) * 100)
# computing Simple Moving Averages
# Adapted from: https://stackoverflow.com/questions/45806194/pyspark-rolling-average-using-timeseries-data
def simple_moving_average(df, period, col="NEXT_BTC_CLOSE", orderby="id"):
    """Append a trailing simple-moving-average column named SMA_<period>.

    The average of *col* is taken over the current row and the *period*
    preceding rows, ordered by *orderby*.
    """
    window_spec = Window.orderBy(orderby).rowsBetween(-period, 0)
    sma_column = F.avg(col).over(window_spec)
    return df.withColumn(f"SMA_{period}", sma_column)
#MA number 5/7/10/20/50/100/200 days;
MA5 = 60 * 24 * 5
MA7 = 60 * 24 * 7
MA10 = 60 * 24 * 10
MA20 = 60 * 24 * 20
MA50 = 60 * 24 * 50
MA100 = 60 * 24 * 100
# periods selected based on this article:
# https://www.investopedia.com/ask/answers/122414/what-are-most-common-periods-used-creating-moving-average-
# ma-lines.asp#:~:text=Traders%20and%20market%20analysts%20commonly,averages%20are%20the%20most%20common.
# to analyze short-term trends
df = simple_moving_average(df, MA5) # these might have to be 240 - 1 actually
df = simple_moving_average(df, MA7)
df = simple_moving_average(df, MA10)
df = simple_moving_average(df, MA20)
df = simple_moving_average(df, MA50)
# to analyze long-term trends
df = simple_moving_average(df, MA100)
# -
# Save the complete data to a CSV file
df.write.option("header",True).csv("complete_10y_1min_interpolate.csv")
|
feature_engineering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#API request to get data
import requests as req
url = 'http://api.tvmaze.com/schedule?country=US'
resp = req.get(url)
response = resp.text
type(response)
#Convert str datatype of response to list since json_normalize function from pandas
#requires JSON array i.e. list of records
import json
data = json.loads(response)
type(data)
#Module to load data as pandas dataframe using json_normalize function from pandas
import pandas as pd
from pandas.io.json import json_normalize
df = json_normalize(data)
df.head()
#get shape of generated data frame
df.shape
#List all column names to select relevant ones for further data processing
df.columns
#Get relevant columns from dataframe on which knn algorithm can be applied(feature selection)
df_relevant = df[['name','id','show.name','show.genres','show.rating.average','show.type']]
#Print newly created data frame
df_relevant.head()
'''The reason to merge name column with show.name column was because
some of the shows had repeated episodes. For example:- Abby Hatcher show had two entries with
different episode names viz Afraid of Cats and <NAME>. I needed unique names
to identify nieghbours of an episode correctly which is why I merged the columns to
generate unique names column'''
df_relevant["full_name"] = df_relevant["show.name"].map(str) +"-"+ df_relevant["name"]
df_relevant.head()
#Dropping the source columns from which full_name was generated
df_relevant = df_relevant.drop(['name','show.name'], axis=1)
#rename column names of dataframe for ease of understanding
renamed_columns_dictionary = {'show.genres': 'genres',
'show.language': 'language',
'show.rating.average':'rating',
'show.type':'type'
}
df_relevant.rename(columns=renamed_columns_dictionary, inplace=True)
df_relevant
'''There were many entries which had genres columns empty. We have the choice to
substitute the empty values or drop them entirely'''
df_relevant[df_relevant['genres'].str.len() == 0].shape
#Dropping all columns whose genres column is empty for now
#Adding relevant genres based on show type can be a better option later
df_relevant = df_relevant.drop(df_relevant[df_relevant['genres'].str.len() == 0].index)
#Considerable number of rows were removed
df_relevant.shape
#Creating a new copy for ease of use and keeping a backup just in case anything messes up ahead
df = df_relevant.copy()
df.head()
#Check for all null values in dataframe
df.isnull().sum(axis = 0)
#Ratings column had some null values
#Check datatypes of all values
df.dtypes
#Fill null values in rating column with median of all values in rating column
df["rating"].fillna(df["rating"].median(), inplace = True)
df.head()
# I had to do label encoding for genres column. But pd.get_dummies function in
# pandas does not take list as row values due to which I had to convert them into
# string of comma separated values
df['genres'] = df['genres'].apply(lambda x: ",".join(x))
df.head()
# Now we can get label encoded values from genres column using get_dummies fxn from pandas
df["genres"].str.get_dummies(sep=',')
# We are creating a new data frame to be used for generating model using knn. We are neglecting name and id column since they are not useful for finding neighbours.
# "Genres" and "type" column is label encoded while rating column is taken as it is
tv_show_features = pd.concat([df["genres"].str.get_dummies(sep=","),
pd.get_dummies(df[["type"]]),
df[["rating"]]],axis=1)
tv_show_features
# Since the ratings column has values from 0 to 10 while other columns have values from
# 0 to 1 this can bias the distance metric in KNN because features containing bigger numbers will be weighted heavily while the other features will be discounted.
# So I ended up using MinMaxScaler from scikit-learn as it scales the values from 0–1.
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
tv_show_features = min_max_scaler.fit_transform(tv_show_features)
import numpy as np
np.round(tv_show_features,2)
# Then we fit the KNN model from scikit learn to the data and calculate
# the nearest neighbors for each distances.
from sklearn.neighbors import NearestNeighbors
nbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(tv_show_features)
distances, indices = nbrs.kneighbors(tv_show_features)
distances
#Helper fucntions to get relevant predictions
def get_index_from_name(name):
    """Return the index label of the first row whose full_name equals *name*."""
    matching_rows = df[df["full_name"] == name]
    return matching_rows.index.tolist()[0]
all_show_names = list(df.full_name.values)
def get_id_from_partial_name(partial):
    """Print every show whose full name contains *partial*, with its position.

    Fixes the original, which called ``all_show_names.index(name)`` inside
    the loop: a repeated O(n) scan that also reports the FIRST occurrence's
    position for any duplicated name. ``enumerate`` yields the true position
    of each match in a single pass.
    """
    for position, name in enumerate(all_show_names):
        if partial in name:
            print(name, position)
def print_similar_tvshows(query=None, id=None):
    """Print the full names of the nearest-neighbour shows.

    Lookup is either by row *id* or by exact full-name *query*; the first
    neighbour (the show itself) is skipped.

    NOTE(review): a falsy id (0 or None) is ignored by the truthiness test,
    so row 0 can only be looked up via *query* — same as the original.
    """
    if id:
        for neighbour in indices[id][1:]:
            print(df.iloc[neighbour]["full_name"])
    if query:
        query_row = get_index_from_name(query)
        for neighbour in indices[query_row][1:]:
            print(df.iloc[neighbour]["full_name"])
df
#Change values below as per full_name column values
get_id_from_partial_name("Days of Our Lives-Ep. #13507")
get_index_from_name("Days of Our Lives-Ep. #13507")
print_similar_tvshows("Gotham-Trespassers")
df[df["full_name"]=="Gotham-Trespassers"]
# +
showrecs=[
"Fam-Pilot",
"The Big Bang Theory-The Propagation Proposition",
"Mom-Hacky Sack and a Beautiful Experience",
"Young Sheldon-A Tummy Ache and a Whale of a Metaphor",
"The Orville-Home"
]
df[df['full_name'].isin(showrecs)]
# -
|
jupyternotebook/RecommendationUsingKNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import operator
import math
from functools import reduce
import json
from os.path import join, isfile, isdir
import tensorflow as tf
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from expdir_monitor.expdir_monitor import ExpdirMonitor
from data_providers.utils import get_data_provider_by_name
from models.utils import get_model_by_name
# +
def get_model(id, net_pool_path):
    """Rebuild a trained model object from its experiment directory.

    Finished runs live under '#<id>' below *net_pool_path*; runs that have
    not been renamed yet live under '#Running_<id>'.

    NOTE(review): *id* shadows the builtin ``id``; the dataset is hard-coded
    to 'C10+'.
    """
    if isdir(f'{net_pool_path}/#{id}'):
        em = ExpdirMonitor(f'{net_pool_path}/#{id}')
    else:
        em = ExpdirMonitor(f'{net_pool_path}/#Running_{id}')
    pure = True  # print_info=(not pure) below, i.e. suppress info printing
    init = em.load_init()
    run_config = em.load_run_config(print_info=(not pure), dataset='C10+')
    run_config.renew_logs = False  # keep existing logs rather than restarting them
    data_provider = get_data_provider_by_name(run_config.dataset, run_config.get_config())
    net_config, model_name = em.load_net_config(init, print_info=(not pure))
    # Instantiate the concrete model class registered under model_name.
    return get_model_by_name(model_name)(em.expdir, data_provider, run_config, net_config, pure=pure)
def get_num_params(id, net_pool_path):
    """Count trainable parameters of net *id* by instantiating its TF1 graph.

    Heavier than get_num_params_efficient, which reads the saved arrays
    directly instead of building the model.
    """
    model = get_model(id, net_pool_path)
    with model.graph.as_default():
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            # Multiply out every dimension of this variable's static shape.
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        return total_parameters
def get_num_params_efficient_helper(path):
    """Count a saved net's parameters by summing its stored weight shapes.

    Loads the init snapshot from *path* and multiplies out the shape of every
    stored array, skipping batch-norm moving statistics ('moving_*'), which
    are not trainable parameters.

    Raises:
        ValueError: if no init snapshot exists at *path*. (The original code
        printed unrelated globals — the builtin ``id`` and a module-level
        ``net_pool_path`` — and then crashed with a TypeError when
        subscripting ``None`` on the next line.)
    """
    em = ExpdirMonitor(path)
    init = em.load_init()
    if init is None:
        raise ValueError(f'no saved init found under {path}')
    total_params = 0
    for layer in init['layer_cascade']['layers']:
        if layer is None:
            continue
        for k, v in layer.items():
            if 'moving_' not in k:
                total_params += reduce(operator.mul, v.shape)
    return total_params
def get_num_params_efficient(id, net_pool_path):
    """Resolve the expdir for net *id* (finished or still running) and count its params."""
    finished_dir = f'{net_pool_path}/#{id}'
    expdir = finished_dir if isdir(finished_dir) else f'{net_pool_path}/#Running_{id}'
    return get_num_params_efficient_helper(expdir)
# +
# RECOVER net.id2params FILE IF NECESSARY
idpath2val = {}
idpath2str = {}
net_pool_path = '../net_pool_params1/Convnet/C10+/Conv_C10+_rl_small'
id2val = json.load(open(join(net_pool_path, 'net.id2val')))
for id in id2val:
idpath2val[(id, net_pool_path)] = id2val[id]
idpath2params = {
idpath: get_num_params_efficient(*idpath)
for idpath in tqdm(idpath2val)
}
id2params = {
idpath[0]: params
for idpath, params in idpath2params.items()
}
json.dump(id2params, open('net.id2params', 'w'), indent=4)
# -
def get_df(exp_name):
    """Load one architecture-search experiment into a tidy DataFrame.

    Joins three on-disk sources for *exp_name*: validation accuracy per net
    (net.id2val), parameter counts (net.id2params) and the epoch each net id
    appeared in (net.log). Returns a DataFrame with columns id, epoch,
    params, vals and a derived 'reward' column, tan(vals * pi/2).
    """
    net_pool_path = f'../net_pool_{exp_name}/Convnet/C10+/Conv_C10+_rl_small'
    arch_search_path = f'../arch_search_{exp_name}/Convnet/C10+/Conv_C10+_rl_small'
    id2val = json.load(open(join(net_pool_path, 'net.id2val')))
    id2params = json.load(open(join(net_pool_path, 'net.id2params')))
    with open(join(arch_search_path, 'net.log'), 'r') as f:
        lines = f.readlines()
    # net.log lines look like '<epoch>:\t<...>\t<id1>\t<id2>...'; map each net
    # id to its epoch (if an id appears twice, the later line wins).
    id2epoch = {
        id: int(line[:-1].split('\t')[0][:-1])
        for line in lines
        for id in line[:-1].split('\t')[2:]
    }
    ids = list(id2epoch.keys())
    epochs = [id2epoch[id] for id in ids]
    params = [id2params[id] for id in ids]
    vals = [id2val[id] for id in ids]
    df = pd.DataFrame({
        'id': ids,
        'epoch': epochs,
        'params': params,
        'vals': vals,
    })
    # Reward used by the search controller: diverges as accuracy approaches 1.
    df['reward'] = np.tan(df.vals * np.pi/2)
    return df
# # Params1
df_base1 = get_df('base1')
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
get_df('random').vals.max()
# Scatter / correlation analysis of params vs reward for the 'base1' run.
df = get_df('base1')
# BUG FIX: get_df produces a 'vals' column (validation accuracy) — there is
# no 'acc' column, so the original `df.acc` raised AttributeError. (get_df
# already computes 'reward' the same way; this line just recomputes it.)
df['reward'] = np.tan(df.vals * np.pi/2)
df.plot.scatter(x='params', y='reward')
df[['params', 'reward']].corr()
df['logparams'] = np.log10(df.params)
df.plot.scatter(x='logparams', y='vals')
df[['logparams', 'vals']].corr()
df['logparams'] = np.log10(df.params)
ax = df.plot.scatter(x='logparams', y='reward')
df[['logparams', 'reward']].corr()
# Eyeballed linear fit reward ≈ 1.4*log10(params) − 3.8 overlaid on the scatter.
x = [3.5, 4.75]
y = [1.4*x0-3.8 for x0 in x]
ax.plot(x,y)
# Residual distribution of that fit (shifted by +5, matching the search reward).
(df.reward - 1.4 * df.logparams + 5).hist()
df_base1 = get_df('base1')
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
df_random = get_df('random')
sns.scatterplot(data=df_random, x='params', y='vals', hue='epoch')
df_params1 = get_df('params1')
sns.scatterplot(data=df_params1, x='params', y='vals', hue='epoch')
# +
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
sns.scatterplot(data=df_params1, x='params', y='vals', hue='epoch', ax=ax, palette='Blues')
# rewards.append(5 + np.tan(net_val * np.pi / 2) - 1.4 * np.log10(params_list[i]))
x = np.linspace(800, 50000, 200)
y = np.arctan(1.3 - 5 + 1.4 * np.log10(x)) * 2 / np.pi
ax.plot(x,y, color='r')
# -
get_num_params_efficient_helper('../start_nets/start_net_convnet_small_C10+')
df_params1.params.sort_values()
# # Param2
df_base1 = get_df('base1')
ax = sns.scatterplot(data=df_base1, x='params', y='reward', hue='epoch')
x= [10000, 50000]
y= [x0/10000/5 + 1.7 for x0 in x]
ax.plot(x,y)
(df_base1.reward - df_base1.params/(5e4)).hist()
df_params2 = get_df('params2')
ax = sns.scatterplot(data=df_params2, x='params', y='vals', hue='epoch', palette='Oranges')
sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch', ax=ax)
# +
df_params2['reward2'] = df_params2.reward - df_params2.params/5e4
df_base1['reward2'] = df_base1.reward - df_base1.params/5e4
ax = sns.scatterplot(data=df_params2, x='params', y='reward2', hue='epoch', palette='Oranges')
sns.scatterplot(data=df_base1, x='params', y='reward2', hue='epoch', ax=ax)
ax.set(ylabel='new_reward')
# -
df_params2.reward2.max()
df_base1[df_base1.epoch == 14].reward2.hist()
df_params2[df_params2.epoch == 14].reward2.hist()
# # Iterative 1 and 2
df_base1 = get_df('base1')
df_iterative1 = get_df('iterative1')
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
sns.scatterplot(data=df_iterative1, x='params', y='vals', hue='epoch', ax=ax, palette='Oranges')
df_base1 = get_df('base1')
df_iterative2 = get_df('iterative2')
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
sns.scatterplot(data=df_iterative2, x='params', y='vals', hue='epoch', ax=ax, palette='Oranges')
df_iterative2 = get_df('iterative2')
ax = sns.scatterplot(data=df_iterative2, x='params', y='vals', hue='epoch', palette='Oranges')
df_iterative2.vals.max()
# # Iterative Resource-Aware 1
df_iterativeparams1 = get_df('iterativeparams1')
sns.scatterplot(data=df_iterativeparams1, x='params', y='vals', hue='epoch', palette='Oranges')
df_base1 = get_df('base1')
df_iterativeparams1 = get_df('iterativeparams1')
ax = sns.scatterplot(data=df_base1, x='params', y='vals', hue='epoch')
sns.scatterplot(data=df_iterativeparams1, x='params', y='vals', hue='epoch', ax=ax, palette='Oranges')
# +
df_iterativeparams1['reward2'] = df_iterativeparams1.reward - df_iterativeparams1.params/5e4
df_base1['reward2'] = df_base1.reward - df_base1.params/5e4
ax = sns.scatterplot(data=df_iterativeparams1, x='params', y='reward2', hue='epoch')
sns.scatterplot(data=df_base1, x='params', y='reward2', hue='epoch', ax=ax, palette='Oranges')
# +
ax = sns.scatterplot(data=df_iterativeparams1, x='params', y='reward2', hue='epoch', palette='Oranges')
ax.set(ylabel='new_reward')
# -
|
code/analyze_nets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
path = os.path.join('/home/santiago/Documents/dev/reservoirpy')
import sys
sys.path.insert(0,path)
from reservoirpy.simulationpy import sim
from reservoirpy.pvtpy import black_oil as bl
from reservoirpy.wellpy import path as ph
from datetime import date
import pandas as pd
import numpy as np
import pyvista as pv
import vtk
from shapely.geometry import Point
import math
import matplotlib.pyplot as plt
# # Grid
# Build a 10 x 10 x 5 cartesian grid of 100 x 100 x 50 cells with homogeneous
# petrophysical properties (units follow reservoirpy conventions — TODO confirm).
grid=sim.grid(
    grid_type='cartesian',
    nx = 10,
    ny = 10,
    nz = 5,
    dx = 100,
    dy = 100,
    dz= 50,
    origin = Point(100,100,-5000),  # origin point; negative z is depth
    petrophysics = {'PORO':0.18,'PERMX':200, 'PERMY':300,'PERMZ':100,'RT':0},
    azimuth=0,
    dip=0
)
# # Phases
phase = ['water']
# # Pvt
water = bl.water(formation='fm_1',pb=2000,salinity=13500, temp=60)
water.pvt_from_correlations()
# # Wells
# +
# Create the first well: tops, a single perforated interval, and a
# time-dependent constraint schedule.
name1 = 'well-1'
rte1 = 200 # Rotary table Elevation
surf_coord1 = [255,741]#Point(1000100,1000000,520)
crs1 = 'EPSG:3117'
tops1 = ph.tops({'formation':['fm1'],'md_top':[5200],'md_bottom':[5450]})
perf1 = ph.perforations({'md_top':[5300],'md_bottom':[5310]})
# Schedule: water-rate control (qw=100) from 2020-08-21, then switch to
# bottom-hole-pressure control (bhp=250) on 2021-01-05.
cons1 = {
    'date':np.array([np.datetime64('2020-08-21'),np.datetime64('2021-01-05')]),
    'constrain':['qw','bhp'],
    'value':[100,250]
}
td1 = 6000
w1 = ph.well(name=name1,
             rte=rte1,
             surf_coord=surf_coord1,
             td = td1,
             tops=tops1,
             perforations = perf1,
             constrains=cons1,
             crs=crs1)
# Create the second well: same formation/perforation setup, but BHP-controlled
# at both schedule dates.
name2 = 'well-2'
rte2 = 200 # Rotary table Elevation
surf_coord2 = [658,478]#Point(1000100,1000000,520)
crs2 = 'EPSG:3117'
tops2 = ph.tops({'formation':['fm1'],'md_top':[5200],'md_bottom':[5450]})
perf2 = ph.perforations({'md_top':[5300],'md_bottom':[5310]})
cons2 = {
    'date':np.array([np.datetime64('2020-08-21'),np.datetime64('2021-01-05')]),
    'constrain':['bhp','bhp'],
    'value':[100,250]
}
td2 = 6000
w2 = ph.well(name=name2,
             rte=rte2,
             surf_coord=surf_coord2,
             td = td2,
             tops=tops2,
             perforations = perf2,
             constrains=cons2,
             crs=crs2)
# Group both wells into a single container for the simulation model.
wells = ph.wells_group(w1,w2)
wells.wells['well-1'].constrains
# # Numerical
# +
start_date = date(2020,8,21)
end_date = date(2022,8,21)
# Monthly time steps over the two-year simulation window.
dates = pd.date_range(start_date,end_date,freq='M')
numerical = sim.numerical(
    relaxation = 1,
    max_iter=25,
    date_range = dates.values
)
# -
# # Initial Conditions
# Initial reservoir pressure and water-oil contact depth; capillary-pressure
# initialisation disabled.
init_conds = sim.initial_conditions(
    pi = 2500,
    woc = -5200,
    cap_press_init = False
)
# # Model
# Assemble grid, phases, PVT, wells and numerics into the simulation model.
sim_model = sim.model(
    grid = grid,
    phase = phase,
    pvt = {'water':water},
    wells = wells,
    numerical = numerical,
    initial_conditions = init_conds
)
sim_model
|
examples/simulation/Build Simulation Model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv_playground
# language: python
# name: venv_playground
# ---
import numpy as np
import matplotlib.pyplot as plt
# +
# One-second window [Ti, Tf) sampled at N points (sample spacing 1/N).
Ti = 0
Tf = 1
N = 100000
# Frequencies (Hz) of the three cosine test tones.
f01 = 100
f02 = 200
f03 = 800
t = np.arange(Ti, Tf, 1/N)
# Build all three tones in one pass over the frequency tuple.
x1, x2, x3 = (np.cos(2 * np.pi * f * t) for f in (f01, f02, f03))
# -
# ### **Linearity**
# #### **f(x1+x2+x3)**
# +
# Sum the three tones first, then transform: f(x1 + x2 + x3).
x = x1+x2+x3
plt.figure(figsize=(20,4))
plt.title("Sinal de Input")
plt.plot(t, x)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlim(0,0.1)
plt.show()
# +
# DFT of the summed signal; with a 1 s window, bin k corresponds to k Hz,
# so peaks appear at +/-100, +/-200 and +/-800 Hz.
X = np.fft.fft(x)
freqs = np.fft.fftfreq(N,1/N)
plt.figure(figsize=(20,4))
plt.stem(freqs,np.abs(X))
plt.xlim(-1000,1000)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
# -
# #### **f(x1) + f(x2) + f(x3)**
# +
# Transform each tone separately, then sum the spectra: f(x1) + f(x2) + f(x3).
# By linearity of the DFT this should match the spectrum of the summed signal.
plt.figure(figsize=(20,4))
plt.title("Sinal de Input")
plt.plot(t,x1)
plt.plot(t,x2)
plt.plot(t,x3)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlim(0,0.01)
plt.show()
# +
X1 = np.fft.fft(x1)
X2 = np.fft.fft(x2)
X3 = np.fft.fft(x3)
freqs = np.fft.fftfreq(N,1/N)
plt.figure(figsize=(20,4))
# Plot the magnitude of the summed spectra for comparison with f(x1+x2+x3).
plt.stem(freqs, np.abs(X1+X2+X3))
plt.xlim(-1000,1000)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
|
5_dft_properties.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 0.1 Course overview
# ### 0.1.1 Course character and objectives
#
# https://youtu.be/OM3iEZGC2-k
#
# **Parallel Objectives:**
#
# **Introduce the probabilistic way of thinking, involving understanding**
#
# - the nature of probabilistic models
# - the key concepts
# - the mathematical language that go with them
# - some of the main types of models that tend to arise in applications
#
# **Introduce the basic tools of probability theory expressed in the language of mathematics**
#
# - Develop a fair number of mathematical skills
# - Indirectly, advance your ability to think with precision and to express your thinking in a mathematical language
#
# **Intuition**
#
# - This is not a math class; instead, we will emphasize the interpretation of basic concepts and related facts at an intuitive level, always aiming to complement mathematical arguments with intuitive explanations.
#
# **Acquire working knowledge of the subject**
#
# - bring you to a level where you are ready to apply what you have learned to real-world problems
#
# **How do we do it:**
#
# - Cover more material than usual
# - Calculus and mental concentration is all you need
# - Capitalize on effective organization of the material
# ## 0.1.2 Why study probability?
#
# https://youtu.be/cvVmmboMiyY This video is important. Watch it!!
#
# **Why study probability?**
#
# - Traditional scientific literacy: calculus, physics, chemistry, and computers to help you make sense of the world
# - Uncertainty attached to pretty much every phenomenon, thus probability is also part of scientific literacy.
#
# **What caused this shift?**
#
# Two main factors:
#
# - deal with more complex systems as science and engineering move forward. You cannot expect to have a perfect model of each component or to know the exact state of every piece of the system. So, the uncertainty is at the foreground and needs to be modelled.
# - We’re living in an information society. The data and information are only useful when they can tell us something we did not know. Their reason for existence is to reduce uncertainty. To deal with or fight against uncertainty, you’d better understand its nature and have the tools to describe it and analyse it. This is why probability and its children – statistics and inference – are a must.
#
# **Where is probability useful?**
#
# Any scientific field, other than the motion of the planets, everything else involves uncertainty and calls for probabilistic models.
#
# - physics (quantum mechanics taught us nature is inherently uncertain)
# - biologic evolution which progresses through the accumulation of many random effects, like mutations within an uncertainty environment; the haystack of biological data that we are accumulating and that needs to be sifted using statistical tools in order to make progress in the biological science
# - communications & signal processing (fight against the noise, an effort to clean signals from the noise that nature has added)
# - management (customer demand is random and you want to model it and predict it)
# - finance (markets are uncertain and whoever has the best methods to analyse financial data has an advantage)
# - transportation (random disruptions due to weather or accidents are a major concern)
# - trends in social networks (spread epidemics but in ways that are hard to predict)
# - · · · · · ·
#
# Most phenomenon involve significant randomness and the only reason we collect and manipulate data is because we want to fight this randomness as much as we can. And the first step in fighting an enemy like randomness is to study and understand your enemy.
# ### 0.1.3 Course contents
#
# https://youtu.be/yXbKcmgpv58
#
# Unit 1-5,8 cover undergraduate level probability class
#
# <img src="images/0.1.3 Course contents.png">
# ## 0.2 Course Introduction, objectives and study guide
# ### 0.2.1 Introduction
#
# The course covers all the basic probability concepts, including:
#
# - multiple discrete or continuous random variables, expectations, and conditional distributions
# - laws of large numbers
# - the main tools of Bayesian inference methods
# - an introduction to random processes (Poisson processes and Markov chains)
# ### 0.2.2 Course objectives
#
# Upon successful completion of this course, you will:
#
# At a conceptual level:
#
# - Master the basic concepts associated with probability models
# - Be able to translate models described in words to mathematical ones.
# - Understand the main concepts and assumptions underlying Bayesian and classical inference.
# - Obtain some familiarity with the range of applications of inference methods.
#
# At a more technical level:
#
# - Become familiar with basic and common probability distributions.
# - Learn how to use conditioning to simplify the analysis of complicated models.
# - Have facility manipulating probability mass functions, densities, and expectations.
# - Develop a solid understanding of the concept of conditional expectation and its role in inference.
# - Understand the power of laws of large numbers and be able to use them when appropriate.
# - Become familiar with the basic inference methodologies (for both estimation and hypothesis testing) and be able to apply them.
# - Acquire a good understanding of two basic stochastic processes (Bernoulli and Poisson) and their use in modelling.
# - Learn how to formulate simple dynamical models as Markov chains and analyse them.
# ### 0.2.3 Study guide
#
# - Start with the overview of a unit and at the end of a unit, watch the unit summary to consolidate your understanding of the “big picture" and of the relation between different concepts.
# - Watch the lecture videos.
# - Do the exercises!
# - Solved problems and additional materials. In most of the units, we are providing you with many problems that are solved by members of our staff. it is important that you get exposed to a large number of problems.
# - The textbook.
# - Problem sets. One can really master the subject only by solving problems – a large number of them.
# - Exams. The midterm exams are designed so that in an on-campus version, learners would be given two hours. The final exam is designed so that in an on-campus version, learners would be given three hours.
# - Time management. In a typical week, there will be 2 hours of lecture clips, but it might take you 4-5 hours when you add the time spent on exercises. Plan to spend another 3-4 hours watching solved problems and additional materials, and on textbook readings. Finally, expect about 4 hours spent on the weekly problem sets.
# - Additional practice problems at the end of each chapter of the print edition of the book, whose solutions are available online.
#
# **Q&A:**
#
# **What courses would you consider direct follow-on courses to this course. e.g how does this basic course in probability "Branch out”?**
#
# - Answer 1: Well, it sorts of depends on what you wish to learn. On the one hand, this course is part of a data analytics track on EdX, and you can consider the stats course that comes up after this one. On the other hand, if you wish to pursue the maths route, you can study analysis and measure theory. There aren't any MOOCs for those, as far as I know, but you can find complete lectures on YouTube. Of course, if you have the luxury of being at uni with good teachers, you should consult them. :)
#
# - Answer 2: I agree it depends a lot on what you want to study. A natural follow on from here would be 6.437 Inference and Information which covers several of the topics here, such as Bayesian inference, decision theory, and probability theory in more depth, and then goes through all the material again from the lenses of information theory. Depending on what you want to use it for, you might look for an advanced course in [statistics](https://ocw.mit.edu/courses/mathematics/18-650-statistics-for-applications-fall-2016/), [information theory](https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-441-information-theory-spring-2010/), [measure theory](https://ocw.mit.edu/courses/mathematics/18-125-measure-and-integration-fall-2003/index.htm), [stochastic processes](https://ocw.mit.edu/courses/mathematics/18-445-introduction-to-stochastic-processes-spring-2015/), [causal inference](http://bayes.cs.ucla.edu/BOOK-2K/), [machine learning](https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-867-machine-learning-fall-2006/), or the [probabilistic method](http://www.math.wisc.edu/~roch/mdp/index.html). Whatever field you might want to apply this to will also have its collection of what topics and methods are important, whether that be biology, cognitive science, algorithms, systems engineering, operations research, or finance. So, finding a course on probability or uncertainty in the topic you’re interested in may be an even better option.
|
Course 1_Probability - The Science of Uncertainty and Data/Unit 0 Overview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Nested clustering of cells in the HLCA, and cluster differential expression analysis (DEA)
# In this notebook we perform multi-level clustering of the cells in the HLCA (including final HLCA and benchmarking integrated HLCA subsets). We start with coarse-resolution clustering, and then re-cluster the resulting clusters by first re-calculating the nearest-neighbor graph and then clustering each cluster, with a finer resolution. For the final HLCA, we also calculate marker genes for every cluster. These will be used during manual annotation of the HLCA.
# ### Import modules, set paths, select atlas version to cluster:
# +
import numpy as np
import pandas as pd
import scanpy as sc
import scanpy.external as sce
import sys
import os
sys.path.append("../../scripts/")
import nested_clustering
# -
# For pretty coding, not necessary to run code:
# %load_ext lab_black
# Set paths:
# +
dir_benchmarking_res = (
"../../results/integration_benchmarking/benchmarking_results/integration/"
)
dir_benchmarking_cluster_output = "../../results/integration_benchmarking/clustering/"
path_HLCA = "../../data/HLCA_core_h5ads/HLCA_v1_intermediates/LCA_Bano_Barb_Jain_Kras_Lafy_Meye_Mish_MishBud_Nawi_Seib_Teic_log1p.h5ad"
dir_HLCA_cluster_output = "../../results/DEAs/leiden/"
# -
# Select data to work with. We clustered the final HLCA using this notebook, but also other atlas integration generated during our integration benchmarking. We will use the clusters of the benchmarking output later to study rare cell identification with different integration methods. Choose one of the 4 lines below:
# dataset_name = "HLCA" # final, full HLCA
# dataset_name = "harmony" # benchmark
# dataset_name = "seuratrpca" # benchmark
dataset_name = "scanvi" # benchmark
# Set number of cluster levels (we used 3 for benchmark methods and for final atlas, adding level 4 clustering for some clusters in the atlas that required finer resolution based on manual inspection)
number_of_clust_levels = 3
# ### Load data and perform clustering:
# Load the optimal integration for each of the benchmarking methods included, or the final atlas:
print("Dataset name:", dataset_name)
# Load the chosen integration output and record which embedding to use for the
# neighbor graph, plus whether PCA must be recomputed on subsets.
if dataset_name == "scanvi":
    # load dataset
    adata = sc.read(
        os.path.join(dir_benchmarking_res, "unscaled/hvg/scanvi.h5ad")
    )
    # specify which obsm to use for calculating neighbor graph
    use_rep = "X_emb"
    # specify whether or not to re-calculate the PCA for subsets of the object
    redo_pca = False
elif dataset_name == "seuratrpca":
    adata = sc.read(
        os.path.join(dir_benchmarking_res, "unscaled/hvg/R/seuratrpca.h5ad")
    )
    sc.tl.pca(adata)
    use_rep = "X_pca"
    redo_pca = True
elif dataset_name == "harmony":
    # Fix: was `dir_bencmarking_res` (misspelled, undefined name -> NameError).
    adata = sc.read(
        os.path.join(dir_benchmarking_res, "scaled/hvg/R/harmony.h5ad")
    )
    adata.obsm["X_pca"] = adata.obsm["X_emb"]
    use_rep = "X_emb"
    redo_pca = False
elif dataset_name == "HLCA":
    adata = sc.read(path_HLCA)
    use_rep = "X_scanvi_emb"
    redo_pca = False
# visualize if a umap is available:
if "X_umap" in adata.obsm.keys():
if "scanvi_labels" in adata.obs.columns:
sc.pl.umap(adata, color="scanvi_labels")
elif "scgen_labels" in adata.obs.columns:
sc.pl.umap(adata, color="scgen_labels")
# Perform multi-level clustering:
# Multi-level clustering: level 1 is a plain coarse Leiden clustering; deeper
# levels re-cluster each cluster with progressively smaller k / cluster sizes.
for clustering_level in range(1, number_of_clust_levels + 1):
    print("clustering level:", clustering_level, "...")
    if clustering_level == 1:
        # skip for re-run
        cluster_name = "leiden_1"
        # first clustering is not nested, so use normal function:
        sc.pp.neighbors(adata, n_neighbors=30, use_rep=use_rep)
        sc.tl.leiden(adata, resolution=0.01, key_added=cluster_name)
    else:
        previous_clustering = "leiden_" + str(clustering_level - 1)
        cluster_name = "leiden_" + str(clustering_level)
        # perform nested clustering
        # set parameters: finer levels use fewer neighbors and allow smaller clusters
        res = 0.2
        if clustering_level == 2:
            k = 30
            min_cluster_size = 50
        elif clustering_level == 3:
            k = 15
            min_cluster_size = 30
        elif clustering_level == 4:
            k = 10
            min_cluster_size = 10
        adata = nested_clustering.add_nested_clustering_blind(
            adata,
            previous_clustering,
            cluster_name,
            use_rep=use_rep,
            cluster_alg="leiden",
            cluster_k=k,
            cluster_res=res,
            min_cluster_size=min_cluster_size,
            redo_pca=redo_pca,  # SET THIS TO FALSE FOR SCANVI!!! OR OTHER EMBEDDING-OUTPUT METHODS!!!!!
        )
    # plot
    if "X_umap" in adata.obsm.keys():
        sc.pl.umap(adata, color=cluster_name)
    # store clustering:
    cluster_df = pd.DataFrame(adata.obs[cluster_name], index=adata.obs.index)
    # write to csv for benchmarking data:
    if dataset_name in ["harmony","scanvi","seuratrpca"]:
        cluster_df.to_csv(
            os.path.join(dir_benchmarking_cluster_output, f"{dataset_name}/{dataset_name}_{cluster_name}_cluster_assignment.csv")
        )
    # If wanted/needed, for final HLCA:
    if dataset_name == "HLCA":
        # store cluster assignments:
        cluster_df.to_csv(
            os.path.join(dir_HLCA_cluster_output, f"LCA_{cluster_name}_cluster_assignment.csv")
        )
        # calculate marker genes with respect to all other clusters, and with
        # respect to sister clusters (i.e. other clusters from the same parent):
        for marker_ref in ["sisters", "all"]:
            marker_gene_df = nested_clustering.get_cluster_markers(
                adata=adata,
                cluster_label=cluster_name,
                marker_ref=marker_ref,
                ngenes=100,
            )
            # and store:
            marker_gene_df.to_csv(
                os.path.join(dir_HLCA_cluster_output, f"LCA_{cluster_name}_marker_genes_versus_{marker_ref}.csv")
            )
# #### manual level 5 clustering where needed:
# Cluster 1.2.1 in the HLCA (version 1), contains two different types of DCs, hence we re-cluster this level four cluster to get level 5 clusters for this specific case.
# Manual level-5 re-clustering of cluster 1.2.1.2, which mixes two DC types.
if dataset_name == "HLCA":
    cl_to_recluster = "1.2.1.2"
    subadata = adata[adata.obs.leiden_4 == cl_to_recluster, :].copy()
    sc.pp.neighbors(subadata, n_neighbors=10, use_rep="X_scanvi_emb")
    sc.tl.umap(subadata)
    sc.tl.leiden(subadata, resolution=0.2, key_added="leiden_5")
    # plot and check which cluster is which:
    sc.pl.umap(subadata, color=["CLEC9A", "CCR7", "leiden_4", "leiden_5"])
if dataset_name == "HLCA":
    # add clustering info to main object; non-re-clustered cells stay NaN
    adata.obs["leiden_5"] = np.nan
    adata.obs.loc[subadata.obs.index, "leiden_5"] = [
        f"{l4}.{l5}" for l4, l5 in zip(subadata.obs.leiden_4, subadata.obs.leiden_5)
    ]
    # some plots for final checks:
    sc.pl.umap(adata, color="leiden_5")
    sc.pl.umap(
        adata,
        color=["leiden_4"],
        groups=["1.2.1.0", "1.2.1.1", "1.2.1.2", "1.2.1.3"],
        ncols=1,
    )
# For HLCA: correct color glitches for cellxgene proper functioning, and store final adata
if dataset_name == "HLCA":
    # make leiden 2 strings:
    adata.obs['leiden_2'] = pd.Categorical(adata.obs.leiden_2.astype(str))
    # and to add colors for leiden_3:
    sc.pl.umap(adata, color='leiden_3', palette='nipy_spectral')
    # store (overwrites the input file at path_HLCA):
    adata.write(path_HLCA)
|
notebooks/1_building_and_annotating_the_atlas_core/06_nested_clustering_and_cluster_DEA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lists in Python
#
# <p>
# In most languages a collection of homogeneous (all of the same type)
# entities is called an array. The size of the array is fixed at the
# time of creation, however, the contents of the array can be changed
# during the course of the execution of the program. Higher dimensional
# arrays are also possible, where each element of an array is an array.
# </p>
#
# <p>
# The analogue of an array in Python is a <i>list</i>. Even though a
# list defines a collection of things it has different properties from
# an array. A list could be a collection of heterogeneous (different
# types) items. The size of a list is dynamic. It is not specified at
# the time of creation and can grow or shrink as needed. A list could
# have duplicate items. The order of the items in a list is important
# and not their uniqueness. Python also provides built-in functions to
# manipulate a list and its contents. A higher dimensional list has
# elements that are themselves lists. Given the flexibility and the
# associated functions, a Python list is a more powerful data structure
# than an array.
# </p>
#
# <h3> List Creation </h3>
# <p>
# There are several ways in which to create a list. You can enumerate
# all the elements of a list or create an empty list and then append
# or insert items into the list. When you append an item to a list, that
# item is added to the end of the list. To insert an item into a list
# you must specify its position and then all the elements to the right
# or below it are shifted to make space for it.
# Enumerate the items
a = [1, 2, 3]
a
# +
# Create an empty list and append or insert
a = []
print(a)
a.append(1) # a = [1]
print(a)
a.append(2) # a = [1, 2]
print(a)
a.insert(1, 3) # a = [1, 3, 2]
print(a)
# +
# Create a two dimensional list
b = [ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]
b
# -
# Note that the positions of items in a list start at an index value of 0.
# You can also create a list by concatenating two or more lists together.
# You can initialize a list with a predefined value.
#
# +
a = [1, 2]
b = [4, 5]
c = a + b # c = [1, 2, 4, 5]
print(c)
d = [0] * 5 # d = [0, 0, 0, 0, 0]
print(d)
# -
# ## Basic List Manipulations
# To obtain the length of a list you can use the <i>len()</i> function.
#
a = [1, 2, 3]
length = len (a) # length = 3
length
# #### Indexing
# The items in a list are indexed starting at 0 and ending at index
# <i>length - 1</i>. You can also use negative indices to access elements
# in a list. For example a[-1] returns the last item on the list and
# a[-length] returns the first. Unlike a string, a list is mutable, i.e.
# its contents can be changed like so:
#
# +
a = [1, 2, 3]
a[1] = 4 # a = [1, 4, 3]
a
# -
# To access or change an element in a 2-dimensional list specify the row
# first and then the column.
# +
b = [ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]
print(b)
d = b[1][2] # d = 6
print(d)
b[2][1] = b[1][2]*2
print(b)
# -
# Note that the positions of items in a list start at an index value of 0.
# You can also create a list by concatenating two or more lists together.
# You can initialize a list with a predefined value.
# +
a = [1, 2]
b = [4, 5]
c = a + b # c = [1, 2, 4, 5]
print(c)
d = [0] * 5 # d = [0, 0, 0, 0, 0]
d
# -
# #### List Traversal
#
# <p>
# One of the most important operations that you can do with a list is to
# traverse it, i.e. visit each and every element in the list in order.
# There are several ways in which to do so:
# <pre>
#
# </pre>
# </p>
#
# +
a = [9, 2, 6, 4, 7]
print(a)
# Visit every element in order, printing them on one line.
for item in a:
    print (item, end = " ") # 9 2 6 4 7
# Doubles each item in the list, writing back through the index.
for i, value in enumerate(a):
    a[i] = value * 2
# -
#
# <a href = "https://docs.python.org/3/tutorial/datastructures.html">
# Other List Functions </a>
#
#
# <table border = "1" width = "75%">
# <tr>
# <th> Function </th><th> Meaning </th>
# </tr>
# <tr>
# <td> list.sort() </td>
# <td> Sorts a list in ascending order </td>
# </tr>
# <tr>
# <td> list.reverse() </td>
# <td> Reverses the elements in a list </td>
# </tr>
# <tr>
# <td> <i>value</i> in list </td>
# <td> Returns True if the <i>value</i> is in the list and False otherwise</td>
# </tr>
# <tr>
# <td> list.index(x) </td>
# <td> Returns the index of the first occurence of x. Use with the above
# function to check if <i>x</i> is in the list before determining its position.
# </td>
# </tr>
# <tr>
# <td> list.count(x) </td>
# <td> Returns the number of occurences of x in the list </td>
# </tr>
# <tr>
# <td> list.remove(x) </td>
# <td> Deletes the first occurence of x in list </td>
# </tr>
# <tr>
# <td> list.pop(i) </td>
# <td> Deletes the ith element in the list and returns its value </td>
# </tr>
# </table>
a = [9, 2, 6, 4, 7]
a.sort()
a
a = [9, 2, 6, 4, 7]
a.reverse()
a
for value in [9, 2, 6, 4, 7]:
print(value)
# +
#index
a = [9, 2, 6, 4, 7]
a.index(6)
# +
# count()
a.count(6)
# -
# remove
a = [9, 2, 6, 4, 7]
a.remove(2)
a
# +
# pop
a = [9, 2, 6, 4, 7]
b = a.pop(2)
print(b)
a
# -
# # List Comprehensions
#
# https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions
#
#
# +
# For example, we want to create a list p
a = [1, 2, 3, 4, 5, 6]
b = [1, -2, 3, -6]
p = []
for item in b:
    # keep the item when it also appears in a, otherwise use the sentinel 1000
    p.append(item if item in a else 1000)
print(p)
# +
# instead of the above we can write in python the following.
p = [item if item in a else 1000 for item in b]
print(p)
# -
|
Notebooks/Example-004-Python-Lists.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright (c) Microsoft Corporation.<br>
# Licensed under the MIT License.
#
# # 2. Upload Data to the Azure ML Workspace and Label
#
# In this notebook we will:
# - Upload the image data previously collected to the Azure ML Workspace default Blob Storage
#
# ## Prerequisites
# - Azure ML Workspace - [Create in Azure Portal](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-workspace?tabs=azure-portal) (the `config.json` will be needed as well which may be downloaded from the Portal)
# - Data from a camera on the Percept DK (residing in the `data` folder on the local machine or wherever the notebooks are being run) - how-to in `1.Collect_Image_Data_from_PerceptDK.ipynb` notebook
# ## Imports
from azureml.core import Workspace
from azureml.core import VERSION
print(VERSION)
# ## Connect to the Azure ML Workspace
#
# This step automatically looks for the `config.json` file base directory. You may download your `config.json` from the Azure Portal Azure ML Workspace resource - in the Overview pane as shown below.
#
# 
#
# Then you may drag and drop the `config.json` from your local file system/machine into the file explorer to the left in JupyterLab .
#
# The first time you run this cell it will ask you to perform interactive login to Azure in another browser window.
ws = Workspace.from_config()
# ## Upload data to Azure
#
# Based on [this](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-bring-data#upload) guide, let's connect our Azure ML Workspace to our notebook. The `target_path` refers to a path in the Azure ML default DataStore.
datastore = ws.get_default_datastore()
datastore.upload(src_dir='./data/office_items_A/office_supplies',
target_path='office_supplies',
overwrite=False)
# ## Label data in Azure ML Studio
#
# At this point visit your Workspace in the Azure Portal and open up Azure ML Studio (or go to https://ml.azure.com and select your workspace). Start a new labeling project (or pick up from where you left off). For a good how-to check out [this guide](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-labeling-projects).
#
# 
#
# Plan to take some time to label, depending on how many images you collected. Here is the labeling tool, visually:
#
# 
#
# Once labeling is done and there are no more tasks, export the labels as COCO-format.
#
# 
#
#
# ## Next steps
#
# - Train an SSD MobileNet V2 model with the TensorFlow Object Detection API using Azure ML
|
machine-learning-notebooks/transfer-learning-custom-azureml/2.Upload_and_Label_with_AzureML.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Udacity DEND Project-3: AWS Set-up
# ### IaC set-up for Project-3 AWS Redshift
import pandas as pd
import boto3
import json
# + [markdown] toc-hr-collapsed=true
# # STEP 0: Make sure you have an AWS secret and access key
#
# - Create a new IAM user in your AWS account
# - Give it `AdministratorAccess`, From `Attach existing policies directly` Tab
# - Take note of the access key and secret
# - Edit the file `dwh.cfg` in the same folder as this notebook and fill
# <font color='red'>
# <BR>
# [AWS]<BR>
# KEY= YOUR_AWS_KEY<BR>
# SECRET= YOUR_AWS_SECRET<BR>
# <font/>
#
# -
# # Load DWH Params from a file
# +
import configparser
# Read the DWH settings from dwh.cfg.
# Fix: use a context manager so the file handle is closed — the original
# `config.read_file(open('dwh.cfg'))` leaked the open file.
config = configparser.ConfigParser()
with open('dwh.cfg') as cfg_file:
    config.read_file(cfg_file)
# AWS credentials (keep these out of version control).
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
# Redshift cluster sizing and identity.
DWH_CLUSTER_TYPE = config.get("DWH","DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("DWH","DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("DWH","DWH_NODE_TYPE")
DWH_CLUSTER_IDENTIFIER = config.get("DWH","DWH_CLUSTER_IDENTIFIER")
# Database connection parameters.
DWH_DB = config.get("DWH","DWH_DB")
DWH_DB_USER = config.get("DWH","DWH_DB_USER")
DWH_DB_PASSWORD = config.get("DWH","DWH_DB_PASSWORD")
DWH_PORT = config.get("DWH","DWH_PORT")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
(DWH_DB_USER, DWH_DB_PASSWORD, DWH_DB)
# NOTE: Un-comment this to print the result.
#pd.DataFrame({"Param":
#                  ["DWH_CLUSTER_TYPE", "DWH_NUM_NODES", "DWH_NODE_TYPE", "DWH_CLUSTER_IDENTIFIER", "DWH_DB", "DWH_DB_USER", "DWH_DB_PASSWORD", "DWH_PORT", "DWH_IAM_ROLE_NAME"],
#              "Value":
#                  [DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]
#              })
# -
# ## Create clients for EC2, S3, IAM, and Redshift
# +
import boto3
# All resources live in the same region; define the literal once instead of
# repeating it in every client constructor.
AWS_REGION = "us-west-2"
# EC2 (VPC / security groups) and S3 as high-level resources.
ec2 = boto3.resource('ec2',
                     region_name=AWS_REGION,
                     aws_access_key_id=KEY,
                     aws_secret_access_key=SECRET)
s3 = boto3.resource('s3',
                    region_name=AWS_REGION,
                    aws_access_key_id=KEY,
                    aws_secret_access_key=SECRET)
# IAM and Redshift as low-level clients.
iam = boto3.client('iam',
                   region_name=AWS_REGION,
                   aws_access_key_id=KEY,
                   aws_secret_access_key=SECRET)
redshift = boto3.client('redshift',
                        region_name=AWS_REGION,
                        aws_access_key_id=KEY,
                        aws_secret_access_key=SECRET)
# -
# ## Check out the sample data sources on S3
# +
# List and count the objects under the log_data prefix.
LOG_DATA = config.get("S3", "BUCKET")
logDataBucket = s3.Bucket(LOG_DATA)
count = 0
# Iterate over log_data bucket objects and print.
# Fix: renamed loop variable from `object`, which shadowed the builtin.
for obj in logDataBucket.objects.filter(Prefix='log_data'):
    count += 1
    print(obj)
print("COUNT: " + str(count))
# => COUNT: 31
# -
# Count the log_data objects without printing each one.
# Fix: a Bucket resource has no `.Bucket` attribute (raised AttributeError);
# filter the bucket's object collection directly.
size = sum(1 for _ in logDataBucket.objects.filter(Prefix='log_data'))
print(size)
# +
# List and count the objects under the song_data prefix.
SONG_DATA = config.get("S3", "BUCKET")
songDataBucket = s3.Bucket(SONG_DATA)
count = 0
# Iterate over song_data bucket objects and print.
# Fix: iterate the song bucket that was just created (the original iterated
# logDataBucket, a copy-paste slip); also renamed `object` (builtin shadowing).
for obj in songDataBucket.objects.filter(Prefix='song_data'):
    count += 1
    print(obj)
print("COUNT: " + str(count))
# +
# Same count for the log_json prefix (the JSONPath file for COPY).
SONG_DATA = config.get("S3", "BUCKET")
songDataBucket = s3.Bucket(SONG_DATA)
count = 0
for obj in songDataBucket.objects.filter(Prefix='log_json'):
    count += 1
    print(obj)
print("COUNT: " + str(count))
# -
LOG_DATA = config.get("S3", "BUCKET")
LOCAL_PATH = config.get("S3", "LOCAL_PATH")
SONGS_JSONPATH = config.get("S3", "SONGS_JSONPATH")
print(LOG_DATA)
print(SONGS_JSONPATH)
# + [markdown] toc-hr-collapsed=true
# ## STEP 1: IAM ROLE
# - Create an IAM Role that makes Redshift able to access S3 bucket (ReadOnly)
# +
# Create the IAM role (if not exists).
# The trust policy lets the Redshift service assume this role.
# The broad except is deliberate best-effort: the call raises
# EntityAlreadyExists if the role was created in a previous run.
try:
    print('1.1 Creating a new IAM Role')
    dwhRole = iam.create_role(
        Path='/',
        RoleName=DWH_IAM_ROLE_NAME,
        Description="Allow Redshift clusters to call AWS services on your behalf.",
        AssumeRolePolicyDocument=json.dumps(
            {'Statement': [{'Action': 'sts:AssumeRole',
                            'Effect': 'Allow',
                            'Principal': {'Service': 'redshift.amazonaws.com'}}],
             'Version': '2012-10-17'})
    )
except Exception as e:
    print(e)
# +
# Attach Policy
print('1.2 Attaching Policy')
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)['ResponseMetadata']['HTTPStatusCode']
# -
# Get and print the IAM role ARN
print('1.3 Get the IAM role ARN')
iam_role = iam.get_role(
RoleName=DWH_IAM_ROLE_NAME
)
roleArn = iam_role['Role']['Arn']
# NOTE: Un-comment this to print the result.
#print(roleArn)
# ## STEP 2: Redshift Cluster
#
# - Create a RedShift Cluster
# - For complete arguments to `create_cluster`, see [docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html#Redshift.Client.create_cluster)
try:
response = redshift.create_cluster(
ClusterType=DWH_CLUSTER_TYPE,
NodeType=DWH_NODE_TYPE,
NumberOfNodes=int(DWH_NUM_NODES),
DBName=DWH_DB,
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
MasterUsername=DWH_DB_USER,
MasterUserPassword=<PASSWORD>,
IamRoles=[roleArn]
)
except Exception as e:
print(e)
# + [markdown] toc-hr-collapsed=true
# ## 2.1 *Describe* the cluster to see its status
# - run this block several times until the cluster status becomes `Available`
# +
def prettyRedshiftProps(props):
    """Return a two-column (Key, Value) DataFrame of selected cluster properties.

    Args:
        props: dict of Redshift cluster properties, as returned by
            redshift.describe_clusters(...)['Clusters'][0].

    Returns:
        pandas.DataFrame with "Key" and "Value" columns, one row per property
        of interest found in `props`.
    """
    # BUG FIX: -1 is deprecated (and removed in pandas >= 1.0); None removes
    # the column-width truncation limit.
    pd.set_option('display.max_colwidth', None)
    keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername", "DBName", "Endpoint", "NumberOfNodes", 'VpcId']
    x = [(k, v) for k, v in props.items() if k in keysToShow]
    return pd.DataFrame(data=x, columns=["Key", "Value"])
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
# NOTE: Un-comment this to print the result.
#prettyRedshiftProps(myClusterProps)
# + [markdown] toc-hr-collapsed=true
# <h2> 2.2 Take note of the cluster <font color='red'> endpoint and role ARN </font> </h2>
# -
# <font color='red'>DO NOT RUN THIS unless the cluster status becomes "Available" </font>
# +
DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
print("DWH_ENDPOINT :: ", DWH_ENDPOINT)
print("DWH_ROLE_ARN :: ", DWH_ROLE_ARN)
# -
# ## STEP 3: Open an incoming TCP port to access the cluster endpoint
try:
vpc = ec2.Vpc(id=myClusterProps['VpcId'])
defaultSg = list(vpc.security_groups.all())[0]
print(defaultSg)
defaultSg.authorize_ingress(
GroupName= defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DWH_PORT),
ToPort=int(DWH_PORT)
)
except Exception as e:
print(e)
# ## STEP 4: Make sure you can connect to the cluster
# %load_ext sql
conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)
# NOTE: Un-comment this to print the result.
#print(conn_string)
# %sql $conn_string
# ## STEP 5: Test COPIED and INSERTED data
# ### 5.1: Query staging tables
# +
# #%load_ext sql
# -
# Number of items in staging_events table
# %%time
# %%sql
SELECT COUNT(*)
FROM staging_events;
# Number of items in staging_songs table
# %%time
# %%sql
SELECT COUNT(*)
FROM staging_songs;
# ### 5.2 Query Analysis tables
# Number of items in users table
# %%time
# %%sql
SELECT COUNT(*)
FROM users;
# Number of items in songs table
# %%time
# %%sql
SELECT COUNT(*)
FROM songs;
# Number of items in artists table
# %%time
# %%sql
SELECT COUNT(*)
FROM artists;
# Number of items in time table
# %%time
# %%sql
SELECT COUNT(*)
FROM time;
# Number of items in songplay table
# %%time
# %%sql
SELECT COUNT(*)
FROM songplays;
# Query to answer a question: Who played which song and when.
# %%time
# %%sql
SELECT sp.songplay_id,
u.user_id,
u.last_name,
u.first_name,
sp.start_time,
a.name,
s.title
FROM songplays AS sp
JOIN users AS u ON (u.user_id = sp.user_id)
JOIN songs AS s ON (s.song_id = sp.song_id)
JOIN artists AS a ON (a.artist_id = sp.artist_id)
JOIN time AS t ON (t.start_time = sp.start_time)
ORDER BY (u.last_name)
LIMIT 100;
# ## STEP 6: Clean up your resources
# <b><font color='red'>DO NOT RUN THIS UNLESS YOU ARE SURE <br/>
# We will be using these resources in the next exercises</span></b>
# +
#### CAREFUL!!
#-- Uncomment & run to delete the created resources
#redshift.delete_cluster( ClusterIdentifier=DWH_CLUSTER_IDENTIFIER, SkipFinalClusterSnapshot=True)
#### CAREFUL!!
# -
# - run this block several times until the cluster really deleted
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
# NOTE: Un-comment this to print the result.
#prettyRedshiftProps(myClusterProps)
# +
#### CAREFUL!!
#-- Uncomment & run to delete the created resources
#iam.detach_role_policy(RoleName=DWH_IAM_ROLE_NAME, PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
#iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)
#### CAREFUL!!
|
Udacity-DEND-Project-3-AWS-Setup.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp datasets.CombinedGenoPheno
# -
# # Combining Genotype and Phenotype files
#
# > API details.
#hide
from nbdev.showdoc import *
import numpy as np
from corradin_ovp_utils.catalog import test_data_catalog, conf_test_data_catalog, get_catalog
from corradin_ovp_utils.datasets.genetic_file import triplicate_converter
from corradin_ovp_utils.odds_ratio import get_geno_combination_df
from fastcore.test import ExceptionExpected
import nbdev
#export
from typing import Any, Dict, List, Optional, Literal, Union
from pydantic import BaseModel
import pandas as pd
import corradin_ovp_utils
from fastcore.basics import typed, basic_repr
from fastcore.dispatch import typedispatch
from corradin_ovp_utils.datasets import OVPDataset
from corradin_ovp_utils.datasets.genetic_file import GeneticFileFormat
from corradin_ovp_utils.datasets.schemas import SingleFilePathSchema, MultipleFilePathSchema
import copy
# +
#export
class CombinedGenoPheno(BaseModel):
    """Pairs genotype data with phenotype/sample tables for OVP analyses.

    Holds the genotype-per-sample matrix, the genotype-only table, the loaded
    sample dataframes keyed by subset name (e.g. "case"/"control"), and the
    genetic-file handles they were built from.
    """
    #genetic_file_df: pd.DataFrame
    #sample_file_df: pd.DataFrame
    # Rows are sample ids, columns are SNPs.
    all_samples_geno_df: pd.DataFrame
    # Genotype-level information (not per-sample).
    all_geno_df: pd.DataFrame
    # Loaded sample tables, keyed by subset name.
    sample_dict: Dict[str, pd.DataFrame]
    # Genetic-file handles, keyed by subset name.
    genetic_files_dict: Dict[str, GeneticFileFormat]
    __repr__ = basic_repr("num_snps,num_samples")
    @property
    def num_snps(self):
        """Number of SNP columns in `all_samples_geno_df`."""
        return len(self.all_samples_geno_df.columns)
    @property
    def num_samples(self):
        """Mapping of subset name -> number of samples in that subset."""
        return {key: value.shape[0] for key, value in self.sample_dict.items()}
    def get_geno_each_sample_subset(self, key):
        """Return the genotype rows for the samples belonging to subset `key`."""
        subset_df = self.all_samples_geno_df.loc[self.sample_dict[key].index]
        return subset_df
    @property
    def sample_subsets(self):
        """Names of the available sample subsets (keys of `sample_dict`)."""
        return list(self.sample_dict.keys())
    @classmethod
    def init_from_OVPDataset(cls,
                             genetic_dataset: OVPDataset.OVPDataset,
                             sample_dataset: OVPDataset.OVPDataset,
                             rsid_dict: Dict[int, List[str]],
                             id_col_list=None,
                             batch_size: int = 1_000,
                             excluded_sample_ids: Optional[List[str]] = None
                             ):
        """Build a CombinedGenoPheno from genotype and sample OVP datasets.

        Args:
            genetic_dataset: OVP dataset of genotype files.
            sample_dataset: OVP dataset of sample/phenotype files.
            rsid_dict: chromosome -> list of SNP ids (or positions) to extract.
            id_col_list: columns used to match `rsid_dict` entries
                (defaults to ["rsid"]).
            batch_size: rows per batch when reading genotypes.
            excluded_sample_ids: sample ids to drop from every subset.
        """
        # BUG FIX: mutable default arguments (["rsid"], []) are shared across
        # calls; use None sentinels instead (behavior unchanged for callers).
        if id_col_list is None:
            id_col_list = ["rsid"]
        if excluded_sample_ids is None:
            excluded_sample_ids = []
        genetic_files_dict, sample_dict_loaded = cls.process_datasets(genetic_dataset, sample_dataset, excluded_sample_ids=excluded_sample_ids)
        # BUG FIX: the `batch_size` parameter was ignored (hard-coded 1_000 in
        # the call below); forward it instead.
        all_samples_geno_df, all_geno_df, *extra_info = zip(*[genetic_file.get_geno_each_sample(rsid_dict, id_col_list=id_col_list, batch_size=batch_size, excluded_sample_ids=excluded_sample_ids) for key, genetic_file in genetic_files_dict.items()])
        return CombinedGenoPheno(all_samples_geno_df=pd.concat(all_samples_geno_df), sample_dict=sample_dict_loaded, genetic_files_dict=genetic_files_dict, all_geno_df=all_geno_df[0])
    @classmethod
    def process_datasets(cls, genetic_dataset: OVPDataset.OVPDataset, sample_dataset: OVPDataset.OVPDataset, excluded_sample_ids: Optional[List[str]] = None):
        """Pair each genotype file with its sample file and load the sample tables."""
        # BUG FIX: avoid a mutable default argument for `excluded_sample_ids`.
        if excluded_sample_ids is None:
            excluded_sample_ids = []
        combine_genetic_sample_func = cls._process_file_type(genetic_dataset._file_path, sample_dataset._file_path)
        genetic_dict, sample_dict_loaded = combine_genetic_sample_func(genetic_dataset, sample_dataset, excluded_sample_ids=excluded_sample_ids)
        return genetic_dict, sample_dict_loaded
    # @typedispatch
    # @classmethod
    # def _process_file_type(cls, genetic_file_schema: SingleFilePathSchema, sample_file_schema: SingleFilePathSchema):
    #     return lambda genetic, sample: genetic
    # @typedispatch
    @classmethod
    def _process_file_type(cls, genetic_file_schema: MultipleFilePathSchema,
                           sample_file_schema: MultipleFilePathSchema):
        """Return a combiner function for multi-file genotype + sample datasets."""
        assert genetic_file_schema.__class__ == sample_file_schema.__class__
        def combine_genetic_sample_multiple(genetic_dataset, sample_file_dataset, excluded_sample_ids: List[str]):
            # Deep-copy so the mutations below do not leak into the inputs.
            genetic_dict = copy.deepcopy(vars(genetic_dataset.files))
            sample_dict = copy.deepcopy(vars(sample_file_dataset.files))
            shared_keys = set(genetic_dict.keys()) & set(sample_dict.keys())
            sample_dict_loaded = {}
            #make sure the two datasets only have shared keys
            assert set(genetic_dict.keys()) == set(sample_dict.keys())
            for key in shared_keys:
                sample_file_loaded = sample_dict[key].load(with_missing_samples = True)
                genetic_dict[key].sample_ids = list(sample_file_loaded.index)
                genetic_dict[key].sample_file = sample_dict[key].file_path.get_full_file_path()
                sample_dict_loaded[key] = sample_dict[key].load(with_missing_samples = False).query("index not in @excluded_sample_ids")
            return genetic_dict, sample_dict_loaded
        return combine_genetic_sample_multiple
    class Config:
        # Allow non-pydantic field types (pandas DataFrames, file handles).
        arbitrary_types_allowed = True
# -
genetic_file = test_data_catalog.load("genetic_file")
sample_file = test_data_catalog.load("sample_file")
vars(genetic_file.files)
vars(sample_file.files)
geno_dict, sample_dict = CombinedGenoPheno.process_datasets(genetic_file, sample_file)
test_file = geno_dict["case"]
test_file
test_file_chrom22 = test_file.load(chrom=22)
test_file_chrom22
test_file_chrom22.load_df()
with ExceptionExpected(ex=MemoryError, regex = "is too big, input limit is 10K."): test_file_chrom22.load_df(size_limit=10_000)
test = CombinedGenoPheno.init_from_OVPDataset(genetic_file, sample_file, rsid_dict = {22: ["rs77948203", "rs9610458", "rs134490", "rs5756405"]})#["case"]
test
test.genetic_files_dict
test.genetic_files_dict
test.all_geno_df
test.all_samples_geno_df
test.get_geno_each_sample_subset("case")
test.get_geno_each_sample_subset("control")
test.sample_dict
get_geno_combination_df(test.all_samples_geno_df, rsid_list=["rs9610458", "rs134490"]).df
get_geno_combination_df(test.all_samples_geno_df, rsid_list=["rs9610458", "rs134490", "rs5756405"]).df
geno_dict, sample_dict= CombinedGenoPheno.process_datasets(genetic_file, sample_file)
test_file = geno_dict["case"]
# ---
# ### Testing genetic file split by chromosome
genetic_file_split_by_chrom = test_data_catalog.load("genetic_file_split_by_chrom")
genetic_file_split_by_chrom
test_split_by_chrom = CombinedGenoPheno.init_from_OVPDataset(genetic_file_split_by_chrom, sample_file, rsid_dict = {22: ["rs77948203", "rs9610458"]})#["case"]
# Test case where we query from a chromosome that we have no file for
# BUG FIX: `nbdev_test` was never imported (NameError); `ExceptionExpected`
# is imported from fastcore.test at the top of this notebook.
with ExceptionExpected(ex=AttributeError, regex="'NoneType' object has no attribute 'get_geno_each_sample'"): CombinedGenoPheno.init_from_OVPDataset(genetic_file_split_by_chrom, sample_file, rsid_dict = {21: ["rs77948203", "rs9610458"]})
test_split_by_chrom
# ---
genetic_file_single = test_data_catalog.load("genetic_file_single")
sample_file_single = test_data_catalog.load("sample_file_single")
CombinedGenoPheno.process_datasets(genetic_file_single, sample_file_single)
test_single_file = CombinedGenoPheno.init_from_OVPDataset(genetic_file_single, sample_file_single, rsid_dict = {22: ["rs77948203", "rs9610458", "rs134490", "rs5756405"]}, )
test_single_file
test_single_file.all_samples_geno_df
test_single_file.get_geno_each_sample_subset("single_file")
# ### Testing a mix of rsids and position
test_position_and_rsid = CombinedGenoPheno.init_from_OVPDataset(genetic_file, sample_file,
rsid_dict = {22: ["rs77948203", "21461017"]},
id_col_list = ["rsid", "position"] )#["case"]
test_position_and_rsid
test_position_and_rsid.all_samples_geno_df
nbdev.test_eq(test_position_and_rsid.all_samples_geno_df.shape[1],2)
# ---
# ### Testing with BGEN
bgen_catalog = get_catalog(env="cluster", patterns = ['catalog*', 'catalog*/*/','catalog*/*/*'])
bgen_catalog = bgen_catalog.reload()
bgen_catalog.list()
genetic_file_bgen = bgen_catalog.load("genetic_file_bgen")
sample_file_bgen = bgen_catalog.load("test_UKB_sample_file_with_pheno_col")
genetic_file_bgen.files
sample_file_bgen.files
genetic_file_bgen.files.single_file
sample_file_bgen.files.single_file
test_combine_geno_pheno_bgen = CombinedGenoPheno.init_from_OVPDataset(genetic_file_bgen, sample_file_bgen, rsid_dict = {22: ["rs77948203", "rs9610458", "rs134490", "rs5756405"]})
test_combine_geno_pheno_bgen.sample_dict
|
05_datasets_CombinedGenoPheno.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# <hr>
# +
# QuantBook Analysis Tool
# For more information see [https://www.quantconnect.com/docs/research/overview]
qb = QuantBook()
spy = qb.AddEquity("SPY")
history = qb.History(qb.Securities.Keys, 360, Resolution.Daily)
# Indicator Analysis
bbdf = qb.Indicator(BollingerBands(30, 2), spy.Symbol, 360, Resolution.Daily)
bbdf.drop('standarddeviation', 1).plot()
# -
class MySectorWeightingPortfolioConstructionModel(EqualWeightingPortfolioConstructionModel):
    """Equal-weighting portfolio construction that tracks universe symbols per
    Morningstar sector code."""
    def __init__(self, rebalance = Resolution.Daily):
        super().__init__(rebalance)
        # Maps sector code -> list of symbols currently in the universe.
        self.symbolBySectorCode = dict()
    def OnSecuritiesChanged(self, algorithm, changes):
        for security in changes.AddedSecurities:
            #1. When new assets are added to the universe, save the Morningstar sector code
            #   for each security to the variable sectorCode
            # NOTE(review): assumes the security exposes its sector as `.sector`
            # (as the original code did) — confirm against the data model in use.
            sectorCode = security.sector
            # 2. Append the security's symbol to the list keyed by sectorCode,
            #    creating the list the first time the sector is seen.
            # BUG FIX: dict_keys has no `.contains()` (AttributeError), and the
            # original appended the sector code itself instead of the symbol.
            if sectorCode not in self.symbolBySectorCode:
                self.symbolBySectorCode[sectorCode] = []
            self.symbolBySectorCode[sectorCode].append(security.Symbol)
        for security in changes.RemovedSecurities:
            #3. For securities that are removed, save their Morningstar sector code to sectorCode
            # BUG FIX: `sectorCode` was never assigned in this loop, so it was
            # either undefined or stale from the previous loop.
            sectorCode = security.sector
            #4. If the sectorCode is in the self.symbolBySectorCode dictionary,
            #   remove the corresponding symbol from that sector's list.
            if sectorCode in self.symbolBySectorCode:
                symbol = security.Symbol
                if symbol in self.symbolBySectorCode[sectorCode]:
                    self.symbolBySectorCode[sectorCode].remove(symbol)
        # We use the super() function to avoid using the base class name explicitly
        super().OnSecuritiesChanged(algorithm, changes)
class MyUniverseSelectionModel(FundamentalUniverseSelectionModel):
    """Universe selection: top-100 dollar-volume coarse filter, then the largest
    companies by market cap in three Morningstar sectors."""
    def __init__(self):
        super().__init__(True, None, None)
    def SelectCoarse(self, algorithm, coarse):
        """Keep priced securities with fundamental data; return the 100 highest by dollar volume."""
        with_fundamentals = (c for c in coarse if c.HasFundamentalData and c.Price > 0)
        ranked = sorted(with_fundamentals, key=lambda c: c.DollarVolume, reverse=True)
        return [c.Symbol for c in ranked][:100]
    def SelectFine(self, algorithm, fine):
        """Pick by market cap: 3 technology, 2 financial services, 1 consumer defensive."""
        def top_by_market_cap(sector_code, count):
            # Largest `count` fundamentals belonging to the given sector.
            in_sector = [f for f in fine if f.AssetClassification.MorningstarSectorCode == sector_code]
            return sorted(in_sector, key=lambda f: f.MarketCap, reverse=True)[:count]
        self.technology = top_by_market_cap(MorningstarSectorCode.Technology, 3)
        self.financialServices = top_by_market_cap(MorningstarSectorCode.FinancialServices, 2)
        self.consumerDefensive = top_by_market_cap(MorningstarSectorCode.ConsumerDefensive, 1)
        return [x.Symbol for x in self.technology + self.financialServices + self.consumerDefensive]
qb.SetStartDate(2016, 12, 28)
qb.SetEndDate(2017, 3, 1)
qb.SetCash(100000)
qb.UniverseSettings.Resolution = Resolution.Hour
#qb.SetUniverseSelection(MyUniverseSelectionModel())
qb.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(1), 0.025, None))
qb.SetPortfolioConstruction(MySectorWeightingPortfolioConstructionModel())
qb.SetExecution(ImmediateExecutionModel())
for x in dir(spy):
if(x.find('Fun')>0):
print(x)
f = {"rat": "1", "cat": "2"}
if "rate" in f:
print("fart")
|
Launcher/CustomCode/SkyLight/Calibrated quantum circuit/research.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plot Power Function
# $ y=x^b $
# ## Import matplotlib and numpy
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# -
# ## Define the argument as a numpy array
# +
np_arange_args={'start':-10,'stop':10,'step':0.01}
# !!! start assign jupyter notebook parameter(s) !!!
np_arange_args = {'start': -20, 'stop': 20, 'step': 0.1}
# !!! end assign jupyter notebook parameter(s) !!!
# -
x=np.arange(**np_arange_args)
# ## Define the exponent
# +
exponent=2
# !!! start assign jupyter notebook parameter(s) !!!
exponent = 3
# !!! end assign jupyter notebook parameter(s) !!!
# -
# ## Define the function
#
y=x**exponent
# ## Plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('$y=x^{'+str(exponent)+'}$',fontsize=20)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.plot(x,y)
plt.show()
|
example/_run_jnb/Power_function-output (2).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mario corpus stats
#
# Stats computed on Super Mario Bros. and Super Mario Bros. 2 (Japan).
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
# ## Appearance matrix
def at_least_one_of_A_is_in_B(A:list, B:list):
    """Return True if any element of A also occurs in B."""
    for candidate in A:
        if candidate in B:
            return True
    return False
with open('./corpuses/mario_corpus.txt', 'r') as f:
levels = [level for level in f.read().rstrip(')').split(')')]
columns = []
for level in levels:
columns.extend(level.strip().split('\n'))
appearances = np.zeros((len(columns), 5))
for i, col in enumerate(columns):
for j, tile_group in enumerate([['Q', 'q'], ['o'], ['E'], ['<', '>', '[', ']'], ['B', 'b']]):
if at_least_one_of_A_is_in_B(A=tile_group, B=list(col)):
appearances[i, j] = 1
# ## Special-tile combination frequency
at_least_one = appearances[np.sum(appearances, axis=-1) >= 1]
np.bincount(np.sum(at_least_one, axis=-1).astype(int)) / len(at_least_one) * 100
# ## Co-occurrence matrix
two = appearances[np.sum(appearances, axis=-1) == 2]
# +
coo = np.zeros((5, 5))
for obs in two:
indices = np.argwhere(obs == 1).flatten()
if len(indices) == 1:
ix = indices[0]
coo[ix, ix] += 1
elif len(indices) == 2:
ix1, ix2 = indices
coo[ix1, ix2] += 1
# -
mask = np.tri(coo.shape[0], k=0)
coo = np.ma.array(coo, mask=mask)
normalized_coo = coo / np.sum(coo) * 100
# + code_folding=[]
# Plot the normalized co-occurrence matrix as a heatmap with value labels.
# figsize and font-size
plt.figure(figsize=(7, 7))
matplotlib.rcParams.update({'font.size': 15})
# matshow
cmap = cm.get_cmap('coolwarm')
cmap.set_bad('w')
# BUG FIX: the original referenced an undefined name `normalized_c_matrix`;
# the matrix computed above is `normalized_coo`.
plt.matshow(normalized_coo, fignum=1, cmap=cmap)
# add labels indicating exact values
for (i, j), z in np.ndenumerate(normalized_coo):
    if mask[i, j] != 1:
        plt.text(j, i, f'{z:0.2f}%', ha='center', va='center',
                 bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
        # bbox: https://matplotlib.org/3.2.2/api/_as_gen/matplotlib.patches.FancyBboxPatch.html#matplotlib.patches.FancyBboxPatch
# set tick labels and title
plt.gca().set_xticklabels([None, 'Quest.', 'Coin', 'Enemy', 'Pipe', 'Cannon'])
plt.gca().set_yticklabels([None, 'Quest.', 'Coin', 'Enemy', 'Pipe', 'Cannon'])
plt.title('Co-occurrence table', y=1.08)
plt.show()
# -
|
notebooks/mario_corpus_stats.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Testing Notebook
import pandas as pd
import numpy as np
import os.path as osp
# +
video = "video_3"
datasheet = pd.read_csv("./data/milestone3/{}/yolov3_outputs/00000_persons_data.csv".format(video))
query = datasheet.replace(to_replace=r'/scratch/minoda/git/DLAV/project/', value= './data/', regex= True)
query = query.replace(to_replace=r'yolov3-outputs', value= 'yolov3_outputs', regex= True)
query.to_csv("data/milestone3/{}/yolov3_outputs/query_list.csv".format(video))
query['file_path'][0]
# +
file = './data/milestone3/video_3/yolov3_outputs/pedestrian_images/image_0000_pedestrian_0.png'
query['pid'] = -1
query.loc[query['file_path'] == file, 'pid'] = 1
# -
gallery = pd.DataFrame(columns = ["image_id", "x", "y", "w", "h", "confidence", "filepath", "pid"])
gallery.to_csv("data/milestone3/video_1/yolov3_outputs/gallery.csv")
# ### 1-Idea
# Have an empty gallery, and add more
#
# ### 2-Idea
# Assign random pid to gallery, and
distmat = np.asarray(
[[ 5.9604645e-08, 9.4048572e-01, 1.3632776e+00, 1.4863836e+00, 2.4861825e-01],
[ 9.4048572e-01, 2.2053719e-06, 8.3109367e-01, 1.1044974e+00, 9.3526816e-01],
[ 1.3632776e+00, 8.3109367e-01, 1.3113022e-06, 1.0313121e+00, 1.4344358e+00],
[ 1.4863836e+00, 1.1044974e+00, 1.0313121e+00, 7.4505806e-07, 1.4191651e+00],
[ 2.4861842e-01, 9.3526834e-01, 1.4344363e+00, 1.4191661e+00, -1.0132790e-06]])
candidates = np.argwhere(distmat < distmat.mean(axis = 0)/2)
#candidates.sort()
print(candidates)
print(np.unique(candidates, axis= 0))
# +
pid_dict = {}
processed_images = set()
# Group candidate image pairs (indices where the distance matrix fell below the
# threshold above) into per-person image lists keyed by a "pid" — the index of
# the person's diagonal (base) image.
for cand in candidates:
    if (cand[0] > cand[1]) or (cand[0] in processed_images):
        # This means we are under-diagonal, will create duplicates
        continue
    if cand[0] == cand[1]:
        # Diagonal entry: start a new person whose pid is the base image index.
        pid = cand[0]
        pid_dict[pid] = [pid]
        print("Base image of person: ", pid)
    elif pid == cand[0]:
        # NOTE(review): `pid` is only bound in the diagonal branch above — if
        # the first surviving candidate is off-diagonal this raises NameError,
        # and otherwise `pid` may be stale from an earlier person. Confirm
        # this ordering assumption holds for `candidates`.
        print("Another image of person ", pid)
        pid_dict[pid].append(cand[1])
        processed_images.add(cand[1])
# -
pid_dict
# Inspect the set of images already assigned to a person.
# BUG FIX: the set defined above is `processed_images`; `images_processed`
# was undefined (NameError).
processed_images
|
test_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Most examples work across multiple plotting backends, this example is also available for:
#
# * [Matplotlib area_chart](../matplotlib/area_chart.ipynb)
import numpy as np
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
# ## Declaring data
# +
# create some example data
python=np.array([2, 3, 7, 5, 26, 221, 44, 233, 254, 265, 266, 267, 120, 111])
pypy=np.array([12, 33, 47, 15, 126, 121, 144, 233, 254, 225, 226, 267, 110, 130])
jython=np.array([22, 43, 10, 25, 26, 101, 114, 203, 194, 215, 201, 227, 139, 160])
dims = dict(kdims='time', vdims='memory')
python = hv.Area(python, label='python', **dims)
pypy = hv.Area(pypy, label='pypy', **dims)
jython = hv.Area(jython, label='jython', **dims)
# -
# ## Plot
opts.defaults(opts.Area(fill_alpha=0.5))
overlay = (python * pypy * jython)
overlay.relabel("Area Chart") + hv.Area.stack(overlay).relabel("Stacked Area Chart")
|
examples/gallery/demos/bokeh/area_chart.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="PgDXXup9-RSG"
import numpy as np
# + [markdown] id="rytLawcv9Hpo"
# # Data Preprocessing
# + colab={"base_uri": "https://localhost:8080/"} id="VNiJ-aqa7V-v" outputId="2bfec64d-c7e7-47e7-990c-18d69fe45611"
# char level
tokens = list(set(''.join(train_set)))
print (f'The number of unique characters is {len(tokens)} \n A tokens list sample: {tokens[700:710]}')
# + id="ag191wd37-UP"
# turn tokend into digits
token_to_id = {token: idx for idx, token in enumerate(tokens)}
# + colab={"base_uri": "https://localhost:8080/"} id="m3c-vRD99ZQE" outputId="0c481254-849a-4aab-a74c-ba8817840930"
# building the matrices
def vectorize(dataset, token_to_id, dtype='int32'):
    """Encode a list of strings as a padded matrix of token ids.

    Rows are strings, columns are character positions; shorter strings are
    right-padded with the id of the space character.
    """
    pad_id = token_to_id[' ']
    width = max(map(len, dataset))
    matrix = np.full((len(dataset), width), pad_id, dtype=dtype)
    for row, text in enumerate(dataset):
        ids = [token_to_id[ch] for ch in text]
        matrix[row, :len(ids)] = ids
    return matrix
print('\n'.join(train_set[::3000]), '\n', vectorize(train_set[::3000], token_to_id))
|
Preprocessing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Find duplicate samples
# The bits of interactive data verification below are done on some intermediate meta data, which looks the same as the published meta_data.csv but has some more info in it ("end", "peak" and "type", which is the manually labeled class).
import datetime as dt
import pandas as pd
df = pd.read_csv('C:/Users/<NAME>/Desktop/D4/neu/train.csv', sep=";", parse_dates=["start", "end", "peak"], index_col="id")
timesteps = pd.TimedeltaIndex([dt.timedelta(minutes=d) for d in [0, 7*60, 10*60+30, 11*60+50]])
all_image_times = []
for t in timesteps:
new_df = df[['start', 'noaa_num']].copy()
new_df['start'] = new_df['start'] + t
all_image_times.append(new_df)
all_image_times = pd.concat(all_image_times)
res = all_image_times.groupby(by=['noaa_num', 'start']).size().reset_index(name='counts')
print(f"{len(res[res['counts'] > 1])} of {len(all_image_times)} images have a duplicate")
res[res['counts'] > 1]
# Not too wild, but AR 11635 looks suspicious:
df['peak_after_end'] = df['peak'] - df['end']
df[df['noaa_num'] == 11635].sort_values(by=['start'])
# Two successive row groups jump into my view...
df[(df['noaa_num'] == 11635) & (df['start'] == pd.Timestamp('2012-12-21 11:31:00'))].sort_values(by=['start'])
df[(df['noaa_num'] == 11635) & (df['start'].dt.day == 25)].sort_values(by=['start'])
# Seems like the first sample is for two separate C1.9 flares, but cut the same way. Maybe there are flares before and after this range so that there aren't any other cutting options. Imho not worth of further investigation.
# The second group's ranges are overlapping heavily, even though their peaks differ. Same issue as before it seems. One might ask, are there many such overlapping samples in the dataset?
gp = df.sort_values(by=['start']).groupby(['noaa_num'])
for g in gp:
df.loc[g[1].index, 'delta'] = (g[1]['start']-g[1]['start'].shift())
threshold = 30
overlapping_samples = df[df['delta'] < pd.Timedelta(minutes=threshold)]
print(f"{len(overlapping_samples)} of {len(df)} samples have a sample start that is followed directly by another sample start (same AR, max {threshold} minutes afterwards)")
# Tbh, 583 strongly overlapping samples are a few more than I expected. It's nothing too bad and their values seem correct; still, it might be worth investigating how to reduce those overlaps at some point.
# The 26 (almost) duplicate samples aren't too wild, and by taking care of the 583 overlaps those 26 would be taken care of too.
|
notebooks/rawdata_verification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %load_ext Cython
# # Twitter Sentiment
# ##### <NAME>
# +
### Prepare for training tweets with mood
#df = pd.read_csv("Data/SocialMediaData/training.1600000.processed.noemoticon.csv",header=None)
#df = df.rename(columns = {0:'motion',5:"tweet"})
#df = df[df["motion"]==4]["tweet"]
# save as txt
#df.to_csv(r'positive.txt', header=None, index=None, sep=' ', mode='a')
#df = df[df["motion"]==0]["tweet"]
# save as txt
#df.to_csv(r'negative.txt', header=None, index=None, sep=' ', mode='a')
# -
# ## Training models
# Training tweets download from http://help.sentiment140.com/for-students/
# +
# %%time
# %%cython
from gensim.models import doc2vec
import nltk
import re
## postive model
def split_sentence (sentence):
return re.split('\W+',sentence)
class MyDocs(object):
def __iter__(self):
for i, text in enumerate(open('train_set/positive.txt')): # doesn't use brand name
yield doc2vec.LabeledSentence(words=split_sentence(text), tags=['%s' % i])
# Train the doc2vec model
cdef pos = MyDocs()
model = doc2vec.Doc2Vec(pos, size = 200, window = 8, min_count = 5, workers = 4)
model.save('positive_tweets.model')# save the model
#model = doc2vec.Doc2Vec.load('cab_tweets.model')
## negative model
def split_sentence (sentence):
return re.split('\W+',sentence)
class MyDocs(object):
def __iter__(self):
for i, text in enumerate(open('train_set/negative.txt')): # doesn't use brand name
yield doc2vec.LabeledSentence(words=split_sentence(text), tags=['%s' % i])
# Train the doc2vec model
cdef neg = MyDocs()
model = doc2vec.Doc2Vec(neg, size = 200, window = 8, min_count = 5, workers = 4)
model.save('negative_tweets.model')# save the model
# -
#
# ## Build models
# ### 1. CAB
# load models
model1 = doc2vec.Doc2Vec.load('positive_tweets.model')
model2 = doc2vec.Doc2Vec.load('negative_tweets.model')
# Calculate the vector of search term according to our model
import numpy as np
cab_pos = np.zeros((17800, 200))
for i,text in enumerate(open("train_set/cab_tweets.txt")): # input search terms
cab_pos[i]=model1.infer_vector(split_sentence(text))
cab_pos
cab_neg = np.zeros((17800, 200))
for i,text in enumerate(open("train_set/cab_tweets.txt")): # input search terms
cab_neg[i]=model2.infer_vector(split_sentence(text))
cab_neg
# ### 2.DKS
import numpy as np
dks_pos = np.zeros((26480, 200))
for i,text in enumerate(open("train_set/dks_tweets.txt")): # input search terms
dks_pos[i]=model1.infer_vector(split_sentence(text))
dks_pos
import numpy as np
dks_neg = np.zeros((26480, 200))
for i,text in enumerate(open("train_set/dks_tweets.txt")): # input search terms
dks_neg[i]=model1.infer_vector(split_sentence(text))
dks_neg
# ## Calculate the cosin distance to check the change of moods
# +
## calculate the cosin distance between 2 vectors to generate the 3rd features
# Compute the cosine similarity values between the input text and all archived reviews
# cossims_with_input = map(lambda v: cossim(input_vec, v), model.docvecs)
# need to change the code to work row-wise: calculate the cosine distance between the vectors in the same rows
# Calculate the cosine similarity between two vecotrs
def cossim(v1, v2):
    """Cosine similarity between two vectors (dot product over norms)."""
    norm1 = np.sqrt(np.dot(v1, v1))
    norm2 = np.sqrt(np.dot(v2, v2))
    return np.dot(v1, v2) / norm1 / norm2
cossims_cab=np.zeros((17800,1))
for i in range(17800):
cossims_cab[i] = cossim(cab_pos[i], cab_neg[i])
np.savetxt("cab_sentiment.csv",cossims_cab, delimiter=",")
# -
cossims_dks=np.zeros((26480,1))
for i in range(26480):
cossims_dks[i] = cossim(dks_pos[i], dks_neg[i])
np.savetxt("dks_sentiment.csv",cossims_dks, delimiter=",")
|
TwitterSentiment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #Fire up graphlab create
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
sales = graphlab.SFrame('home_data.gl/')
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'],validation_set=None)
# #Evaluate the simple model
print test_data['price'].mean()
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library. You can install it with:
#
# 'pip install matplotlib'
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Overlay actual prices (dots) with the fitted line from the 1-feature model.
plt.plot(test_data['sqft_living'],test_data['price'],'.',
        test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above: blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
sales[my_features].show()
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features,validation_set=None)
print my_features
# ##Comparing the results of the simple model with adding more features
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
house1 = sales[sales['id']=='5309101200']
house1
# <img src="http://info.kingcounty.gov/Assessor/eRealProperty/MediaHandler.aspx?Media=2916871">
print house1['price']
print sqft_model.predict(house1)
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
house2 = sales[sales['id']=='1925069082']
house2
# <img src="https://ssl.cdn-redfin.com/photo/1/bigphoto/302/734302_0.jpg">
print sqft_model.predict(house2)
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# Hand-built single-row record (list-valued dict -> 1-row SFrame below).
bill_gates = {'bedrooms':[8],
              'bathrooms':[25],
              'sqft_living':[50000],
              'sqft_lot':[225000],
              'floors':[4],
              'zipcode':['98039'],
              'condition':[10],
              'grade':[10],
              'waterfront':[1],
              'view':[4],
              'sqft_above':[37500],
              'sqft_basement':[12500],
              'yr_built':[1994],
              'yr_renovated':[2010],
              'lat':[47.627606],
              'long':[-122.242054],
              'sqft_living15':[5000],
              'sqft_lot15':[40000]}
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d9/Bill_gates%27_house.jpg/2560px-Bill_gates%27_house.jpg">
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
print sqft_model.predict(graphlab.SFrame(bill_gates))
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features,validation_set=None)
# NOTE(review): likely meant sales.column_names() — without parentheses this
# just displays the bound method, not the column list.
sales.column_names
all_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above',
'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat', 'long']
all_features_model = graphlab.linear_regression.create(sales, target='price', features=all_features, validation_set=None)
print all_features_model.evaluate(test_data)
print(all_features_model.predict(graphlab.SFrame(bill_gates)))
print("All Features Model RMSE: {:.4f}".format(all_features_model.evaluate(test_data)['rmse']))
print("Limited Features Model RMSE: {:.4f}".format(my_features_model.evaluate(test_data)['rmse']))
# ## Assignment
# ### 1. Selection and Summary Statistics
import graphlab.aggregate as agg
# Mean sale price per zipcode, then sort descending to find the priciest zip.
mean_price_by_zip = sales.groupby(key_columns='zipcode',
                                  operations={'mean_price': agg.MEAN('price')})
zip_and_price = [(zipcode, mean_price) for zipcode, mean_price in zip(mean_price_by_zip['zipcode'], mean_price_by_zip['mean_price'])]
zip_and_price = sorted(zip_and_price, key=lambda x: x[1], reverse=True)
zip_and_price[0]
most_zip = sales[sales['zipcode'] == '98039']
most_zip['price'].mean()
# ### 2. Filtering Data
# Fraction of houses with 2000 < sqft_living < 4000.
# NOTE(review): bounds are strictly exclusive here — confirm the assignment
# wording if it asks for an inclusive range.
specific_sqft = sales[(sales['sqft_living'] > 2000) & (sales['sqft_living'] < 4000) ]
float(specific_sqft.num_rows()) / sales.num_rows()
# ### 3. Building a Regression Model with More Features
advanced_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
                     'condition', 'grade', 'waterfront', 'view', 'sqft_above', 'sqft_basement', 'yr_built',
                     'yr_renovated', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features, validation_set=None)
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
my_features_model = graphlab.linear_regression.create(train_data, 'price', my_features, validation_set=None)
print(my_features_model.evaluate(test_data))
print(advanced_features_model.evaluate(test_data))
print('Advanced Features Test Set RMSE: {:.4f}'.format(advanced_features_model.evaluate(test_data)['rmse']))
print('Limited Features Test Set RMSE: {:.4f}'.format(my_features_model.evaluate(test_data)['rmse']))
print('Difference between RMSE: {:.4f}'.format(
    my_features_model.evaluate(test_data)['rmse'] - advanced_features_model.evaluate(test_data)['rmse']))
|
Studying Materials/Course 1 ML Foundations/Regression/Predicting house prices.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TDA with Python using the Gudhi Library
#
# # Representing sublevel sets of functions using cubical complexes
# **Author** : <NAME>
import numpy as np
import gudhi as gd
import pickle as pickle
from pylab import *
from sklearn.neighbors import KernelDensity
import seaborn as sns
# %matplotlib inline
# TDA signatures can be extracted from the upper level or sublevel sets of a function $f :\mathbb R^p \mapsto \mathbb R$.
#
# One possible approach for studying the topology of the sublevel sets of $f$ is to define a regular grid on $R^d$ and then a filtered complex with vertices located on this grid, with $f$ as filtration function. Such a grid-based filtered complex is called a **cubical complexes** in Gudhi. See the [documentation](http://gudhi.gforge.inria.fr/python/latest/cubical_complex_user.html) of its constructor.
# ### Crater dataset
# We use the crater dataset to illustrate cubical complex filtrations:
# Load the crater point cloud. Using a context manager guarantees the file
# handle is closed even if unpickling raises (the original relied on a manual
# close() that would be skipped on error).
with open("./datasets/crater_tuto", "rb") as f:
    crater = pickle.load(f)
# The point cloud is in $\mathbb R^2$ and is composed of a central annulus and four clusters at the corners:
plt.scatter(crater[:, 0], crater[:, 1], s = 0.1);
# Instead of directly defining a Vietoris-Rips complex or an alpha complex on the point cloud, we want to study the upper level sets of a density estimator computed on the point cloud.
#
# We can visualize the estimated density with a standard 2d-kernel estimator with the `seaborn` module:
sns.kdeplot(
    x = crater[:, 0],
    y = crater[:, 1],
    shade = True,
    cmap = "PuBu",
    bw_method = .3
);
# ### Cubical complex for the crater dataset
# We first define a regular grid on $[0,10] \times [0,10]$.
xval = np.arange(0, 10, 0.05)
yval = np.arange(0, 10, 0.05)
nx = len(xval)
ny = len(yval)
# Next we fit a $2$-dimensional standard kernel density estimator on the data using `scikit-learn`:
kde = KernelDensity(kernel = 'gaussian', bandwidth = 0.3).fit(crater)
# Grid vertices enumerated x-major, matching dimensions=[nx, ny] below.
positions = np.array([[u, v] for u in xval for v in yval])
# The cubical complex filtration considers the sublevel sets of the filtration fonction. For studying the upper level sets of the density we need to take the opposite of the KDE. We define the filtration value at each vertex as follows:
filt_values = -kde.score_samples(X = positions)
# The range of filtration values is :
print(min(filt_values), max(filt_values))
# Note that the filtration values are positive because `score_samples()` returns the log-density, which is negative wherever the estimated density is below 1 — true everywhere for this spread-out point cloud (log-density is not negative "by definition" in general).
# We are now all set to compute the cubical complex filtration based on the grid `[xval, yval]` using the values stored in `filt_values` as filtration values:
cc_density_crater = gd.CubicalComplex(
    dimensions = [nx ,ny],
    top_dimensional_cells = filt_values
)
# Note that a cubical complex is not a simplex tree object:
type(cc_density_crater)
# However the class `CubicalComplex` has similar methods:
cc_density_crater.dimension()
cc_density_crater.num_simplices()
|
Tuto-GUDHI-cubical-complexes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import necessary packages
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.express as px
import geopandas as gpd
from descartes import PolygonPatch
from tqdm import tqdm
# Load the experiment table; skiprows=6 skips the export's header preamble
# (presumably a NetLogo BehaviorSpace export — confirm against the data file).
df = pd.read_csv("data/full_run_3.zip",skiprows=6)
#remove all the columns that have the same value in all rows.
for c in df.columns:
    if len(df[c].unique()) < 2:
        df = df.drop(columns=c)
# +
#remove all rows with no data, these are usually the rows at the start of a run.
# A row is "empty" when every value past the first 5 metadata columns is identical.
l = []
for r in df.index:
    if len(df.iloc[r,5:].unique())<2:
        l.append(r)
df = df.drop(index=l)
# +
#shape of the imported file vs reduced files shows only step[0] of each run has no results. Great news!
# -
# Build a dict from a flat [key1, val1, key2, val2, ...] list.
def listToDict(lst):
    """Convert a flat alternating key/value list into a dict.

    Keys are the even-indexed elements, values the odd-indexed ones.
    Unlike the original index-based version, a trailing unpaired key is
    dropped instead of raising IndexError.
    """
    # zip pairs each even-indexed element with the following odd-indexed one
    return dict(zip(lst[::2], lst[1::2]))
# Explode each run/step row into a per-neighborhood long table: the first 5
# columns are run metadata (broadcast to 114 neighborhood rows), the rest are
# "[num value num value ...]" strings parsed into per-neighborhood values.
steps = []
n_steps = 20000
for index, row in tqdm(df.iloc[:n_steps,:].iterrows(), total=n_steps):
    l = []
    for column in df.columns[:5]:
        x = row[column]
        # 114 = number of neighborhoods; presumably fixed for this model — verify.
        new_df = pd.DataFrame(index=np.arange(0,114), columns=[column])
        new_df[column] = x
        l.append(new_df)
    for column in df.columns[5:]:
        # Strip NetLogo-style list brackets, then pair up number/value tokens.
        x = row[column].replace("[","").replace("]","").split(" ")
        d = listToDict(x)
        new_df = pd.DataFrame(index=np.arange(0, len(d)), columns=['Neighborhood number', column])
        new_df['Neighborhood number'] = [*d]
        new_df[column] = list(d.values())
        l.append(new_df)
    # empty_df.loc[index, column] = x
    steps.append(pd.concat(l, axis=1))
# Concatenate all steps and drop the duplicated 'Neighborhood number' columns.
full_df = pd.concat(steps).loc[:,~pd.concat(steps).columns.duplicated()]
full_df = full_df.astype({'Neighborhood number' : int,
       'KPI-available_buy_houses' : float, 'KPI-available_part_rent_houses' : float,
       'KPI-available_social_rent_houses' : float, 'KPI-avg_income' : float, 'KPI-avg_price' : float,
       'KPI-avg_utility' : float, 'KPI-citizen-count' : float, 'KPI-p-sc-lower' : float,
       'KPI-p-sc-working' : float, 'KPI-p-sc-middle' : float, 'KPI-p-sc-upper' : float, 'KPI-p-dutch' : float,
       'KPI-p-other-western' : float, 'KPI-p-antilles' : float, 'KPI-p-morocco' : float,
       'KPI-p-suriname' : float, 'KPI-p-turkey' : float, 'KPI-p-indonesian' : float,
       'KPI-p-eastern-eu' : float, 'KPI-p-other-nonwestern' : float})
full_df.describe()
# +
#full_df.to_csv("output/df3.csv")
# -
#The boxplots below give an indication of the spread of results from 256 runs.
for c in full_df.groupby("[run number]").mean().iloc[:,5:]:
    plt.figure()
    sns.boxplot(full_df.groupby("[run number]").mean().iloc[:,5:][c])
    plt.title(c)
    plt.show()
sns.catplot(x="improve-health?", y="KPI-avg_utility",col="improve-safety?", hue="mixed-use-zoning?",kind="box",data=full_df)
f, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(full_df.corr())
plt.title("Correlation Matrix Server 3")
#plt.savefig('output/images/corr_3.png')
#this is for the report~
print(full_df.describe().round(2).T.drop(columns="count").to_latex(longtable=True))
# Compare first step (1) against final step (40) of each run; booleans are
# mapped to 0/1 so policy flags subtract cleanly, then metadata columns are
# restored from the final-step frame (the subtraction zeroed them out).
first_step_df = full_df[full_df["[step]"]==1]
final_step_df = full_df[full_df["[step]"]==40]
transition_df = final_step_df.replace(False,0).replace(True,1).sort_values(["[run number]","Neighborhood number"]).reset_index() - first_step_df.replace(False,0).replace(True,1).sort_values(["[run number]","Neighborhood number"]).reset_index()
transition_df["Neighborhood number"] = final_step_df.replace(False,0).replace(True,1).sort_values(["[run number]","Neighborhood number"]).reset_index()["Neighborhood number"]
transition_df["[run number]"] = final_step_df.replace(False,0).replace(True,1).sort_values(["[run number]","Neighborhood number"]).reset_index()["[run number]"]
transition_df["improve-health?"] = final_step_df.sort_values(["[run number]","Neighborhood number"]).reset_index()["improve-health?"]
transition_df["mixed-use-zoning?"] = final_step_df.sort_values(["[run number]","Neighborhood number"]).reset_index()["mixed-use-zoning?"]
transition_df["improve-safety?"] = final_step_df.sort_values(["[run number]","Neighborhood number"]).reset_index()["improve-safety?"]
transition_df = transition_df.drop(columns="index")
#This df shows the difference (or change) of statistics between the beginning and end of the run.
transition_df
# Long-form frames for the catplots: one row per (neighborhood, KPI).
melted_end_df = pd.melt(final_step_df.groupby("Neighborhood number").mean()[transition_df.columns[13:]].reset_index(), id_vars="Neighborhood number", var_name="KPI", value_name="percentage")
melted_df = pd.melt(transition_df.groupby("Neighborhood number").mean()[transition_df.columns[13:]].reset_index(), id_vars="Neighborhood number", var_name="KPI", value_name="percentage")
sns.set(style="whitegrid")
sns.catplot(x="KPI",y="percentage",data=melted_df,kind="boxen",palette="tab20c")
plt.title("Neighborhood average percentage points change \n between beginning and end of simulation runs")
plt.xticks(rotation=90)
plt.show()
sns.set(style="whitegrid")
sns.catplot(x="KPI",y="percentage",data=melted_end_df,kind="boxen",palette="tab20c")
plt.title("Neighborhood average percentage")
plt.xticks(rotation=90)
plt.show()
|
Python/Results 3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Playing with PUDL
# This notebook is meant to help get you up and running with the PUDL database, so you can play with it!
#
# ### Importing external code.
# We need to import a bunch of outside code to do our work here. Sometimes we import entire packages (like `numpy` and `pandas`) and sometimes we just pull in a couple of pieces we need from a particular part of a large package (like `declarative_base`)
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join('..','..')))
from pudl import pudl, ferc1, eia923, settings, constants
from pudl import models, models_ferc1, models_eia923
import numpy as np
import pandas as pd
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine.url import URL
import matplotlib.pyplot as plt
import datetime as dt
# %matplotlib inline
import matplotlib.ticker as ticker
from matplotlib.ticker import FormatStrFormatter
# ### Connecting to the PUDL database:
pudl_engine = pudl.connect_db()
# # Pull generation_fuel data from PUDL EIA923 tables
# ### Generation_fuel_eia923 gives info on heat rates, fuel consumed, & electricity generated
gf = pd.read_sql('''SELECT plant_id, report_date, fuel_type, aer_fuel_type, aer_fuel_category, \
fuel_consumed_total, fuel_consumed_for_electricity, fuel_mmbtu_per_unit, \
fuel_consumed_total_mmbtu, fuel_consumed_for_electricity_mmbtu, \
net_generation_mwh \
FROM generation_fuel_eia923''', pudl_engine)
# Plant metadata joined on plant_id to bring in state and nameplate capacity.
pi = pd.read_sql('''SELECT plant_id, plant_state, nameplate_capacity_mw FROM plants_eia923''', pudl_engine)
mwh_eia923 = pd.merge(gf, pi, on='plant_id')
mwh_eia923.head(1)
mwh_eia923["report_date"] = pd.to_datetime(mwh_eia923.report_date)
# Split out year and month fields (the annual analysis below uses year;
# month is kept for convenience despite the comment's original wording).
mwh_eia923["year"] = mwh_eia923["report_date"].dt.year
mwh_eia923["month"] = mwh_eia923["report_date"].dt.month
mwh_eia923.year.values
# Look at full data set to see how "date" column is formatted
mwh_eia923.head(1)
# ### EIA923 Heat rate calculations
# Summarize data by year for each plant; this creates a new column that reports the annual sum in each row
mwh_eia923["fuel_consumed_total_mmbtu_sum"] = mwh_eia923.groupby(["plant_id", "year"])["fuel_consumed_total_mmbtu"].transform(sum)
mwh_eia923["net_generation_mwh_sum"] = mwh_eia923.groupby(["plant_id", "year"])["net_generation_mwh"].transform(sum)
# Use sum of fuel_consumed_total_mmbtu_sum and net_generation_mwh_sum to calculate annual heat rate
# for each plant id (MMBtu -> Btu via *1e6, MWh -> kWh via *1e3)
mwh_eia923['calculated_heat_rate_btu_per_kwh'] = \
    ((mwh_eia923['fuel_consumed_total_mmbtu_sum']*1000000)/(mwh_eia923['net_generation_mwh_sum']*1000))
# List of fuel categories we have to work with
mwh_eia923.aer_fuel_category.unique()
# Subset of just coal data
mwh_eia923_coal = mwh_eia923[(mwh_eia923.aer_fuel_category == 'coal')]
# Subset of just natural gas data
mwh_eia923_gas = mwh_eia923[(mwh_eia923.aer_fuel_category == 'gas')]
# Plot heat rate histogram for full data set by fuel type
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.hist(mwh_eia923_coal.calculated_heat_rate_btu_per_kwh, color= 'b', label='coal', range=(0,50000), bins=100)
ax1.legend(loc="upper right", fontsize=16)
ax1.yaxis.set_tick_params(labelsize=13)
ax2.hist(mwh_eia923_gas.calculated_heat_rate_btu_per_kwh, color= 'r', label='gas', range=(0,50000), bins=100)
ax2.legend(loc="upper right", fontsize=16)
plt.suptitle('Heat Rates for coal & gas plants, 2011-2015 (EIA923)', size=22)
ax2.set_xlabel('Heat rate (Btu/kWh)', size=18)
f.text(0.0, 0.5, 'Frequency (# of records)', ha='center', va='center', rotation='vertical', size=18)
f.set_size_inches(9,9)
plt.tick_params(axis='both', which='major', labelsize=13)
plt.show()
# Annual capacity factor per plant: annual net generation / (nameplate * 8760 h)
# (the original heading here mentioned monthly fuel sums, which is computed later)
mwh_eia923["annual_net_generation_mwh"] = mwh_eia923.groupby(["year", "plant_id"])["net_generation_mwh"].transform(sum)
mwh_eia923['calculated_capacity_factor'] = mwh_eia923['annual_net_generation_mwh'] /(mwh_eia923['nameplate_capacity_mw']*8760)
# Plot capacity factor histogram for full data set by fuel type
f, (ax1) = plt.subplots(1, 1, sharex=True)
ax1.hist(mwh_eia923.calculated_capacity_factor, color= 'b', label='coal and gas', range=(0,1.1), bins=100)
ax1.legend(loc="upper right", fontsize=16)
ax1.yaxis.set_tick_params(labelsize=13)
plt.suptitle('Capacity factors for coal & gas plants, 2011-2015 (EIA923)', size=22)
# FIX: this figure has only ax1; the original called ax2.set_xlabel, which
# silently labelled the previous (heat-rate) figure's lower axis instead.
ax1.set_xlabel('Capacity factor ()', size=18)
f.text(0.0, 0.5, 'Frequency (# of records)', ha='center', va='center', rotation='vertical', size=18)
f.set_size_inches(9,9)
plt.tick_params(axis='both', which='major', labelsize=13)
plt.show()
# # Pull fuel_receipts_costs data from PUDL EIA923 tables
#
# ### Fuel_receipts_costs_eia923 gives info on heat content, fuel cost, and fuel purchase types
# Per-delivery fuel receipts & costs. Two fixes vs. the original query string:
#  * a comma was missing after average_sulfur_content, so SQL silently aliased
#    the sulfur column AS "average_ash_content" and the real ash column was
#    never selected;
#  * the backslash continuation after primary_transportation_mode had no space
#    before it, fusing the column name with "FROM" into invalid SQL.
# Newlines inside a triple-quoted string are fine in SQL, so the fragile
# backslash continuations are dropped entirely.
fuel_cost_eia923 = pd.read_sql('''SELECT plant_id, report_date,
energy_source, fuel_group, contract_type, contract_expiration_date, mine_id,
fuel_quantity, average_heat_content, average_sulfur_content,
average_ash_content, average_mercury_content, fuel_cost, primary_transportation_mode
FROM fuel_receipts_costs_eia923 ''', pudl_engine)
# Looking at data at monthly scale, so creating both year and month fields
fuel_cost_eia923["report_date"] = pd.to_datetime(fuel_cost_eia923.report_date)
fuel_cost_eia923["year"] = fuel_cost_eia923["report_date"].dt.year
fuel_cost_eia923["month"] = fuel_cost_eia923["report_date"].dt.month
# Convert fuel costs from cents to dollars
fuel_cost_eia923['fuel_cost'] = fuel_cost_eia923['fuel_cost']/100
# +
# Calculate heat_content_total by multiplying average_heat_content and fuel_quantity
fuel_cost_eia923["heat_content_total"] = (fuel_cost_eia923.average_heat_content*fuel_cost_eia923.fuel_quantity)
# Monthly sums of fuel quantity & heat content by fuel group and contract type
# (transform(sum) broadcasts the group total back onto every member row).
fuel_cost_eia923["monthly_fuel_quantity"] = fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["fuel_quantity"].transform(sum)
fuel_cost_eia923["monthly_heat_content"] = fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["heat_content_total"].transform(sum)
# -
# Create purchase cost field, which is fuel_quantity * fuel_cost
fuel_cost_eia923["purchase_cost"] = (fuel_cost_eia923.fuel_cost*fuel_cost_eia923.fuel_quantity)
# Create data frames by fuel_group for all data
coal = fuel_cost_eia923[(fuel_cost_eia923.fuel_group == 'Coal')]
gas = fuel_cost_eia923[(fuel_cost_eia923.fuel_group == 'Natural Gas')]
petroleum = fuel_cost_eia923[(fuel_cost_eia923.fuel_group == 'Petroleum')]
petcoke = fuel_cost_eia923[(fuel_cost_eia923.fuel_group == 'Petroleum_Coke')]
# Plot average heat content by fuel group
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(coal.average_heat_content, color='b', label='coal', bins=100)
ax1.legend(loc="upper right", fontsize=18)
ax1.set_xlabel('Heat content (MMBtu/short ton)', size=18)
ax1.yaxis.set_tick_params(labelsize=15)
ax1.xaxis.set_tick_params(labelsize=15)
ax2.hist(gas.average_heat_content, color= 'r', label='gas', bins=100)
ax2.set_xlim(0,2)
ax2.legend(loc="upper right", fontsize=18)
ax2.set_xlabel('Heat content (MMBtu/mcf)', size=18)
plt.text(-0.1, 1.2,'Average heat content, 2011-2015 (EIA923)', ha='center',
         va='top', transform=ax2.transAxes, fontsize=24)
plt.tick_params(axis='both', which='major', labelsize=15)
f.subplots_adjust(left=None, bottom=None, right=1.9, top=None, wspace=None, hspace=None)
f.set_size_inches(9,7)
f.text(.01, 0.5, 'Frequency (# of records)', ha='center', va='center', rotation='vertical', fontsize=18)
plt.show()
# ### Fuel cost analysis
# Many records have NA fuel cost value, but have fuel quantity, heat content, etc. data
# Thus we only need to remove the NA values for the cost calculations
# Remove any negative or NA fuel cost values and call this 'positive_fuel'
# (a `> 0` comparison is False for NaN, so NA rows are dropped along with
# zero/negative costs).
positive_fuel = fuel_cost_eia923[fuel_cost_eia923['fuel_cost'] > 0]
# TODO: check to see how many records are being dropped
# Subset fuel_cost dataframes by fuel type
fuel_cost_coal = positive_fuel[(positive_fuel.fuel_group == 'Coal')]
fuel_cost_gas = positive_fuel[(positive_fuel.fuel_group == 'Natural Gas')]
fuel_cost_coal.shape
# Check maximum fuel cost for natural gas (in $)
fuel_cost_gas.fuel_cost.max()
# Natural gas fuel costs have high values that are unrealistic
# Separate the values that are over $50/MMBtu
fuel_cost_gas_high = fuel_cost_gas[fuel_cost_gas['fuel_cost'] >50]
fuel_cost_gas = fuel_cost_gas[fuel_cost_gas['fuel_cost'] <=50]
# How many natural gas records exceed $50/MMBtu?
fuel_cost_gas_high.shape
# +
# Plot fuel cost per MMBtu by fuel group
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(fuel_cost_coal.fuel_cost, color='b', label='coal', bins=100)
ax1.legend(loc="upper right", fontsize=18)
ax1.set_xlabel('Fuel cost ($/MMBtu)', size=18)
ax1.yaxis.set_tick_params(labelsize=15)
ax1.xaxis.set_tick_params(labelsize=15)
ax2.hist(fuel_cost_gas.fuel_cost, color= 'r', label='gas', range=(0,40), bins=100)
ax2.set_xlim(0,30)
ax2.legend(loc="upper right", fontsize=18)
ax2.set_xlabel('Fuel cost ($/MMBtu)', size=18)
plt.text(-0.1, 1.2,'Fuel cost*, 2011-2015 (EIA923)', ha='center',
         va='top', transform=ax2.transAxes, fontsize=24)
plt.tick_params(axis='both', which='major', labelsize=15)
f.subplots_adjust(left=None, bottom=None, right=1.9, top=None, wspace=None, hspace=None)
f.set_size_inches(9,7)
f.text(.01, 0.5, 'Frequency (# of records)', ha='center', va='center', rotation='vertical', fontsize=18)
f.text(1.7, -0.02, '* Gas costs > $50/MMBtu excluded (count: 691/61590)', ha='center', va='center', fontsize=14)
plt.show()
# -
# ### Look at fuel data by contract type
# ### Calculating monthly averages and sums means individual records are lost and other fields should not be used (TODO: remove the non-monthly data columns when monthly values are calculated)
# +
# Monthly averages use variable divided by total fuel quantity for weighted monthly averages by fuel group & contract type
# Other monthly fields (monthly_fuel_quantity & monthly_purchase_cost) are summed by fuel group & contract type
# fuel_cost_eia923 dataframe is used for non-cost-related data
# positve_fuel dataframe is used for cost-related data
fuel_cost_eia923["monthly_average_heat_content"] = ((fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["heat_content_total"].transform(sum))/(fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["fuel_quantity"].transform(sum)))
fuel_cost_eia923["monthly_fuel_quantity"] = ((fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["fuel_quantity"].transform(sum)))
fuel_cost_eia923["monthly_purchase_cost"] = ((fuel_cost_eia923.groupby(["year", "month", "fuel_group", "contract_type"])["purchase_cost"].transform(sum)))
# NOTE(review): assigning onto `positive_fuel`, a filtered view of
# fuel_cost_eia923, can raise pandas' SettingWithCopyWarning — consider .copy().
positive_fuel["monthly_average_fuel_cost"] = ((positive_fuel.groupby(["year", "month", "fuel_group", "contract_type"])["purchase_cost"].transform(sum))/(positive_fuel.groupby(["year", "month", "fuel_group", "contract_type"])["fuel_quantity"].transform(sum)))
positive_fuel["monthly_fuel_quantity"] = ((positive_fuel.groupby(["year", "month", "fuel_group", "contract_type"])["fuel_quantity"].transform(sum)))
positive_fuel["monthly_purchase_cost"] = ((positive_fuel.groupby(["year", "month", "fuel_group", "contract_type"])["purchase_cost"].transform(sum)))
# Drop duplicates of monthly sums/averages, so we lose indvidual purchase records but retain monthly data by fuel group & contract type
fuel_cost_eia923 = fuel_cost_eia923.drop_duplicates(subset=['report_date', 'fuel_group', 'contract_type'])
positive_fuel = positive_fuel.drop_duplicates(subset=['report_date', 'fuel_group', 'contract_type'])
# -
# Monthly records for coal purchased by contract or new contract
contract_buy_coal = fuel_cost_eia923[((fuel_cost_eia923.contract_type == 'C')|(fuel_cost_eia923.contract_type == 'NC')) & (fuel_cost_eia923.fuel_group == 'Coal')]
contract_buy_coal.head(2)
#merge contracts and new contracts into the same category for fields of interest
contract_buy_coal["monthly_fuel_quantity"] = contract_buy_coal.groupby(["report_date"])["monthly_fuel_quantity"].transform(sum)
contract_buy_coal["monthly_heat_content"] = contract_buy_coal.groupby(["report_date"])["monthly_heat_content"].transform(sum)
contract_buy_coal["monthly_average_heat_content"] = ((contract_buy_coal.groupby(["report_date"])["monthly_heat_content"].transform(sum))/(contract_buy_coal.groupby(["report_date"])["fuel_quantity"].transform(sum)))
contract_buy_coal = contract_buy_coal.drop_duplicates(subset=['report_date'])
contract_buy_coal.shape
# Monthly records of costs for coal purchased by contract or new contract
contract_buy_coal_costs = positive_fuel[((positive_fuel.contract_type == 'C')|(positive_fuel.contract_type == 'NC')) & (positive_fuel.fuel_group == 'Coal')]
#merge contracts and new contracts into the same category
contract_buy_coal_costs["monthly_heat_content"] = contract_buy_coal_costs.groupby(["report_date"])["monthly_heat_content"].transform(sum)
contract_buy_coal_costs["monthly_average_fuel_cost"] = ((contract_buy_coal_costs.groupby(["report_date"])["monthly_purchase_cost"].transform(sum))/(contract_buy_coal_costs.groupby(["report_date"])["monthly_fuel_quantity"].transform(sum)))
contract_buy_coal_costs["monthly_fuel_quantity"] = contract_buy_coal_costs.groupby(["report_date"])["monthly_fuel_quantity"].transform(sum)
contract_buy_coal_costs = contract_buy_coal_costs.drop_duplicates(subset=['report_date'])
contract_buy_coal_costs.shape
# Monthly records for natural gas purchased by contract or new contract
contract_buy_gas = fuel_cost_eia923[((fuel_cost_eia923.contract_type == 'C')|(fuel_cost_eia923.contract_type == 'NC')) & (fuel_cost_eia923.fuel_group == 'Natural Gas')]
#merge contracts and new contracts into the same category
contract_buy_gas["monthly_heat_content"] = contract_buy_gas.groupby(["report_date"])["monthly_heat_content"].transform(sum)
contract_buy_gas["monthly_fuel_quantity"] = contract_buy_gas.groupby(["report_date"])["monthly_fuel_quantity"].transform(sum)
contract_buy_gas["monthly_average_heat_content"] = contract_buy_gas.groupby(["report_date"])["monthly_average_heat_content"].transform(sum)
contract_buy_gas = contract_buy_gas.drop_duplicates(subset=['report_date'])
# Monthly records of costs for natural gas purchased by contract or new contract
contract_buy_gas_costs = positive_fuel[((positive_fuel.contract_type == 'C')|(positive_fuel.contract_type == 'NC')) & (positive_fuel.fuel_group == 'Natural Gas')]
#merge contracts and new contracts into the same category
contract_buy_gas_costs["monthly_heat_content"] = contract_buy_gas_costs.groupby(["report_date"])["monthly_heat_content"].transform(sum)
contract_buy_gas_costs["monthly_fuel_quantity"] = contract_buy_gas_costs.groupby(["report_date"])["monthly_fuel_quantity"].transform(sum)
contract_buy_gas_costs["monthly_average_fuel_cost"] = contract_buy_gas_costs.groupby(["report_date"])["monthly_average_fuel_cost"].transform(sum)
contract_buy_gas_costs = contract_buy_gas_costs.drop_duplicates(subset=['report_date'])
contract_buy_gas_costs.shape
# Monthly records for spot purchases of coal
spot_buy_coal = fuel_cost_eia923[(fuel_cost_eia923.contract_type == 'S') & (fuel_cost_eia923.fuel_group == 'Coal')]
spot_buy_coal.shape
# Monthly records of costs for spot purchases of coal
spot_buy_coal_costs = positive_fuel[(positive_fuel.contract_type == 'S') & (positive_fuel.fuel_group == 'Coal')]
# Monthly records for spot purchases of natural gas
spot_buy_gas = fuel_cost_eia923[(fuel_cost_eia923.contract_type == 'S') & (fuel_cost_eia923.fuel_group == 'Natural Gas')]
# Monthly records of costs for spot purchases of natural gas
spot_buy_gas_costs = positive_fuel[(positive_fuel.contract_type == 'S') & (positive_fuel.fuel_group == 'Natural Gas')]
# +
# Plot monthly total heat content for natural gas by contract type
fig = plt.figure(figsize=(13,6), dpi=100)
ax = plt.subplot(111)
p1 = contract_buy_gas.monthly_heat_content.plot(kind='bar', x=contract_buy_gas.report_date, width=1.0, stacked=True, color='green', label='contract')
p2 = spot_buy_gas.monthly_heat_content.plot(kind='bar', x=spot_buy_gas.report_date, width=1.0, stacked=True, bottom=contract_buy_gas.monthly_heat_content, color='yellow', label='spot')
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(contract_buy_gas.index)
# Every 4th ticklable shows the month and/or day
# ticklabels[::4] = [item.strftime('%b %d') for item in contract_buy_coal.report_date[::12]] #shows month and day
# ticklabels[::4] = [item.strftime('%b') for item in contract_buy_coal.report_date[::12]] #shows month
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%b \n%Y') for item in contract_buy_gas.report_date[::12]] #shows month, year
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.set_ylabel('Total heat content (MMBtu)', size=16)
ax.set_xlabel('Date', size=16)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.gcf().autofmt_xdate()
plt.legend()
plt.suptitle('Monthly total heat content of GAS by purchase type, 2009-2015 (EIA923)', size=17)
plt.show()
# +
# Plot monthly total heat content for coal by contract type
fig = plt.figure(figsize=(13,6), dpi=100)
ax = plt.subplot(111)
p1 = contract_buy_coal.monthly_heat_content.plot(kind='bar', x=contract_buy_coal.report_date, width=1.0, stacked=True, colormap='jet', label='contract')
p2 = spot_buy_coal.monthly_heat_content.plot(kind='bar', x=contract_buy_coal.report_date, width=1.0,stacked=True, bottom=contract_buy_coal.monthly_heat_content, color='orange', label='spot')
# NOTE(review): removed stray `contract_buy_coal.heat_content_total.plot` — it was an
# attribute access that was never called, and `heat_content_total` is not a column
# created above (the totals live in `monthly_heat_content`), so it could only raise.
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(contract_buy_coal.index)
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%b \n%Y') for item in contract_buy_coal.report_date[::12]] #shows month, year
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.set_ylabel('Total heat content (MMBtu)', size=16)
ax.set_xlabel('Date', size=16)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.gcf().autofmt_xdate()
plt.legend()
plt.suptitle('Monthly total heat content of COAL by purchase type, 2009-2015 (EIA923)', size=17)
plt.show()
# -
# To calculate proportional purchases, create two new data frames - one each for contract and spot purchases
dfc = pd.concat([contract_buy_coal['monthly_heat_content'], contract_buy_coal['report_date']], axis=1, keys=['contract', 'date'])
dfs = pd.concat([spot_buy_coal['monthly_heat_content'], spot_buy_coal['report_date']], axis=1, keys=['spot', 'date'])
# Create merged dataframe of contract and spot purchase total heat contents.
# BUG FIX(review): renamed from `sum` — the original shadowed the builtin `sum`,
# which other cells in this notebook pass to groupby(...).transform(sum); rerunning
# those cells after this one would have failed.
heat_merged = pd.merge(dfc, dfs, on='date')
# Add contract and spot heat contents together
heat_sum = heat_merged.contract + heat_merged.spot
# Calculate each purchase type's percentage of the monthly total
contract_heat_percent = (heat_merged.contract/heat_sum)*100
spot_heat_percent = (heat_merged.spot/heat_sum)*100
# +
# Plot proportional heat content of coal purchases by contract type
fig = plt.figure(figsize=(13,6), dpi=100)
ax = plt.subplot(111)
p1 = contract_heat_percent.plot(kind='bar', x=contract_buy_coal.report_date, width=1.0, stacked=True, colormap='jet', label='contract')
p2 = spot_heat_percent.plot(kind='bar', x=spot_buy_coal.report_date, width=1.0,stacked=True, bottom=contract_heat_percent, color='orange', label='spot')
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(contract_buy_coal.index)
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%b \n%Y') for item in contract_buy_coal.report_date[::12]] #shows month, year
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.set_ylabel('Heat content (MMBtu) proportion (%)', size=16)
ax.set_xlabel('Date', size=18)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.gcf().autofmt_xdate()
plt.legend(bbox_to_anchor=(1, 0), loc='lower right', ncol=1, fontsize=14)
plt.suptitle('Proportional monthly total heat content of COAL by purchase type, 2009-2015 (EIA923)', size=17)
plt.show()
# +
# Plot total fuel quantity of coal purchases by contract type
fig = plt.figure(figsize=(13,6), dpi=100)
ax = plt.subplot(111)
p1 = contract_buy_coal.monthly_fuel_quantity.plot(kind='bar', width=1.0, x=contract_buy_coal.report_date, stacked=True, colormap='jet', label='contract')
p2 = spot_buy_coal.monthly_fuel_quantity.plot(kind='bar', width=1.0, x=contract_buy_coal.report_date, stacked=True, bottom=contract_buy_coal.monthly_fuel_quantity, color='orange', label='spot')
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(contract_buy_coal.index)
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%b \n%Y') for item in contract_buy_coal.report_date[::12]] #shows month, year
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.set_ylabel('Total fuel quantity (short tons)', size=16)
ax.set_xlabel('Date', size=16)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.gcf().autofmt_xdate()
plt.legend()
plt.suptitle('Monthly total fuel quantity (COAL) by purchase type, 2009-2015 (EIA923)', size=17)
plt.show()
# +
# Plot total fuel quantity of natural gas purchases by contract type
fig = plt.figure(figsize=(13,6), dpi=100)
ax = plt.subplot(111)
p1 = contract_buy_gas.monthly_fuel_quantity.plot(kind='bar', width=1.0, x=contract_buy_gas.report_date, stacked=True, color='green', label='contract')
p2 = spot_buy_gas.monthly_fuel_quantity.plot(kind='bar', width=1.0, x=contract_buy_gas.report_date, stacked=True, bottom=contract_buy_gas.monthly_fuel_quantity, color='yellow', label='spot')
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(contract_buy_coal.index)
# Every 12th ticklabel includes the year
ticklabels[::12] = [item.strftime('%b \n%Y') for item in contract_buy_coal.report_date[::12]] #shows month, year
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.set_ylabel('Total fuel quantity (mcf)', size=16)
ax.set_xlabel('Date', size=16)
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.gcf().autofmt_xdate()
plt.legend()
plt.suptitle('Monthly total fuel quantity (GAS) by purchase type, 2009-2015 (EIA923)', size=17)
plt.show()
# +
# Plot monthly fuel cost per MMBtu for coal by contract type
fig = plt.figure(figsize=(8, 8), dpi=100)
plt.plot_date(x=contract_buy_coal_costs.report_date, y=contract_buy_coal_costs.monthly_average_fuel_cost, linestyle='-', markersize = 0, color='b', label="contract")
plt.ylabel('Average fuel cost ($/MMBtu)', size=16)
plt.xlabel('Date', size=16)
plt.plot_date(x=spot_buy_coal_costs.report_date, y=spot_buy_coal_costs.monthly_average_fuel_cost, linestyle='-', marker='o', markersize=0, color='r', label="spot")
plt.legend()
from matplotlib.ticker import FormatStrFormatter
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.xticks(size=11)
plt.yticks(size=12)
plt.suptitle('Monthly average fuel cost of COAL by purchase type weighted by fuel quantity, 2009-2016 (EIA923)', size=17)
plt.show()
# +
# Plot monthly fuel cost per MMBtu for gas by contract type
fig = plt.figure(figsize=(8, 8), dpi=100)
plt.plot_date(x=contract_buy_gas_costs.report_date, y=contract_buy_gas_costs.monthly_average_fuel_cost, linestyle='-', markersize = 0, color='b', label="contract")
plt.ylabel('Average fuel cost ($/MMBtu)', size=16)
plt.xlabel('Date', size=16)
plt.plot_date(x=spot_buy_gas_costs.report_date, y=spot_buy_gas_costs.monthly_average_fuel_cost, linestyle='-', marker='o', markersize=0, color='r', label="spot")
plt.legend()
from matplotlib.ticker import FormatStrFormatter
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%g'))
plt.xticks(size=11)
plt.yticks(size=12)
plt.suptitle('Monthly average fuel cost of GAS by purchase type weighted by fuel quantity, 2009-2016 (EIA923)', size=17)
plt.show()
|
results/notebooks/eia923/fuel_receipts/EIA923_fuel_exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="mKUSMFVqINo-"
# ## Preprocessing
# + id="Pw19m7YUINpD" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="8edbe27d-6c26-44a1-9d6d-9ed5ed7368f3"
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
# Import and read the charity_data.csv.
import pandas as pd
application_df = pd.read_csv("./drive/MyDrive/charity_data.csv")
application_df.head()
# + id="KunftWJG4O_z" colab={"base_uri": "https://localhost:8080/"} outputId="a76c56a6-ceb0-43cc-ec1e-c520959c8f31"
from google.colab import drive
drive.mount('/content/drive')
# + id="YH9vojtuINpG"
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
# YOUR CODE GOES HERE
# FIX(review): pass axis as a keyword — the positional `axis` argument to
# DataFrame.drop was deprecated in pandas 1.0 and removed in pandas 2.0.
application_df = application_df.drop(["EIN", "NAME"], axis=1)
# + id="3SJsSVs0INpH" colab={"base_uri": "https://localhost:8080/"} outputId="fbf24644-716a-4293-d92a-b7072ad704b7"
# Determine the number of unique values in each column.
# YOUR CODE GOES HERE
application_df.nunique()
# + id="LYOPiae_INpI" colab={"base_uri": "https://localhost:8080/"} outputId="382d8c24-200b-4103-f4d1-fb71b65d8211"
# Look at APPLICATION_TYPE value counts for binning
# YOUR CODE GOES HERE
application_counts = application_df["APPLICATION_TYPE"].value_counts()
application_counts
# + id="Gn_RgSbNJjw9" colab={"base_uri": "https://localhost:8080/"} outputId="d9422bed-37e2-4e7e-ada2-a614a7438bc6"
# Look at APPLICATION_TYPE value counts for binning
# YOUR CODE GOES HERE
application_counts = application_df["APPLICATION_TYPE"].value_counts()
application_counts
# + id="rPBZbHMuJprj"
application_types_to_replace = list(application_counts[application_counts < 500].index)
# + id="F7nFB0waINpI" colab={"base_uri": "https://localhost:8080/"} outputId="4e94f42d-914c-4ae7-fb57-f9ec868851d6"
# Choose a cutoff value and create a list of application types to be replaced
# use the variable name `application_types_to_replace`
# YOUR CODE GOES HERE
application_types_to_replace = list(application_counts[application_counts < 500].index)
# Replace in dataframe
for app in application_types_to_replace:
application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,"Other")
# Check to make sure binning was successful
application_df['APPLICATION_TYPE'].value_counts()
# + id="6I-f3qBVINpJ" colab={"base_uri": "https://localhost:8080/"} outputId="f3ec331d-09b7-4b1b-e75d-6aa44a6a6641"
# Look at CLASSIFICATION value counts for binning
# YOUR CODE GOES HERE
class_counts = application_df["CLASSIFICATION"].value_counts()
class_counts
# + id="HYbcUhWOINpK" colab={"base_uri": "https://localhost:8080/"} outputId="099b1f8e-1480-4f7c-a550-8375f9984ae2"
# You may find it helpful to look at CLASSIFICATION value counts >1
# YOUR CODE GOES HERE
class_counts[class_counts>1]
# + id="pH9rllAiINpL" colab={"base_uri": "https://localhost:8080/"} outputId="72398bed-3af4-4a15-8873-ecdf9053106b"
# Choose a cutoff value and create a list of classifications to be replaced
# use the variable name `classifications_to_replace`
# YOUR CODE GOES HERE
# BUG FIX(review): the original built list(class_counts[class_counts > 1]) — i.e. a
# list of the *count values*, not the classification labels — so nothing was ever
# replaced. Mirror the APPLICATION_TYPE binning above: labels rarer than the cutoff
# are collapsed into "Other". (Cutoff of 1000 is a reasonable choice; tune as needed.)
classifications_to_replace = list(class_counts[class_counts < 1000].index)
# Replace in dataframe
for cls in classifications_to_replace:
    application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,"Other")
# Check to make sure binning was successful
application_df['CLASSIFICATION'].value_counts()
# + id="ExhdwgTPINpM" colab={"base_uri": "https://localhost:8080/", "height": 461} outputId="cf078edd-f956-4d29-8e80-a9c4e52f7424"
# Convert categorical data to numeric with `pd.get_dummies`
# YOUR CODE GOES HERE
application_with_dummies_df = pd.get_dummies(application_df)
application_with_dummies_df
# + id="4-ybsB_iINpM"
# Split our preprocessed data into our features and target arrays
# YOUR CODE GOES HERE
X = application_with_dummies_df.drop(["IS_SUCCESSFUL"], axis="columns").values
y = application_with_dummies_df["IS_SUCCESSFUL"].values
# Split the preprocessed data into a training and testing dataset
# YOUR CODE GOES HERE
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# + id="ilv5cfuOINpM"
# Create a StandardScaler instances
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# + [markdown] id="NeSkPIw6INpN"
# ## Compile, Train and Evaluate the Model
# + id="qHzFka3ZOiFG" colab={"base_uri": "https://localhost:8080/"} outputId="7e0b565a-6719-418a-cbf0-53e427f2b386"
number_input_features = len(X_train[0])
number_input_features
# + id="WKFws-qrINpN" colab={"base_uri": "https://localhost:8080/"} outputId="fe99377e-14f6-4b10-b90a-8a836d6ac515"
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
# YOUR CODE GOES HERE
number_input_features = len(X_train[0])
#number_input_features
hidden_nodes_layer1 = 80
hidden_nodes_layer2 = 30
nn = tf.keras.models.Sequential()
# First hidden layer
# YOUR CODE GOES HERE
nn.add(
tf.keras.layers.Dense(units= hidden_nodes_layer1, input_dim=number_input_features, activation="relu")
)
# Second hidden layer
# YOUR CODE GOES HERE
nn.add(
tf.keras.layers.Dense(units=hidden_nodes_layer2, activation="relu")
)
# Output layer
# YOUR CODE GOES HERE
nn.add(
tf.keras.layers.Dense(units=1, activation="sigmoid")
)
# Check the structure of the model
nn.summary()
# + id="wZFwJdRmINpO"
# Compile the model
# YOUR CODE GOES HERE
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics= ["accuracy"])
# + id="sIJhu0ieINpO" colab={"base_uri": "https://localhost:8080/"} outputId="bcb9cd36-8ead-4c13-c963-31c8ceff8372"
# Train the model
# YOUR CODE GOES HERE
fit_model = nn.fit(X_train_scaled, y_train, epochs=100)
# + id="fqfVJNDGINpP" colab={"base_uri": "https://localhost:8080/"} outputId="50624b8a-0a25-4dd8-9d2d-37d31c0cf912"
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# + id="rvvURFUfINpP"
# Export our model to HDF5 file
# YOUR CODE GOES HERE
nn.save("AlphabetSoupCharity.h5")
|
Deep_Learning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="gaJfbb9U3qru"
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} id="H99qNfLm4Ziz" outputId="ec7ae4a9-9863-40ea-b8a3-8d1ce532288f"
(X_train, y_train), (X_test,y_test) = datasets.cifar10.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="DTYL6CHP5CHS" outputId="d6396a64-0740-4f37-caaa-52c70f01ee83"
X_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="JdXa65Ur5Y5r" outputId="994e926c-f194-471c-8b55-7e6d93f6b81c"
X_test.shape
# + [markdown] id="VZBtyIHAG3TS"
# y_train is a 2D array, for our classification having 1D array is good enough. so we will convert this to now 1D array
# + id="LIOU-U6z7udR"
y_train = y_train.reshape(-1,)
# + colab={"base_uri": "https://localhost:8080/"} id="miYFM_3W8hs7" outputId="ebed46ab-3dea-4feb-d3ec-2ee31749f8f6"
y_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 177} id="t4JVh2VX5lI4" outputId="7f496e67-2aad-47fd-a290-032544b55bcf"
plt.figure(figsize = (15,2))
plt.imshow(X_train[56])
# + colab={"base_uri": "https://localhost:8080/"} id="vFkSMOm75rl_" outputId="aade52f7-a2b3-41e8-a0f6-3374278fc8df"
y_train[56]
# + id="UW60o9yy7keJ"
classes = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="ZVmJY79O7-RH" outputId="5a1eb9a3-a6cf-4aec-d031-c052e47fb430"
classes[y_train[56]]
# + [markdown] id="UWKiZKzo9BRQ"
# Normalize the images to a number from 0 to 1. Image has 3 channels (R,G,B) and each value in the channel can range from 0 to 255. Hence to normalize in 0-->1 range, we need to divide it by 255
#
#
# + [markdown] id="n2k5IsZl9ONW"
# ##Normalize the data
# + id="3oorSFBT8DWz"
X_train = X_train / 255.0
X_test = X_test / 255.0
# + colab={"base_uri": "https://localhost:8080/"} id="16EJXK4g9Itz" outputId="44464abf-2464-4715-e142-eb5d28ff3b29"
X_train
# + [markdown] id="BGc9JWGb92pv"
# ## Build simple artificial neural network for image classification
#
# + id="JLHbZCuA9nzo"
ann=models.Sequential([
layers.Flatten(input_shape=(32,32,3)),
layers.Dense(3000,activation='relu'),
layers.Dense(1000,activation='relu'),
layers.Dense(10,activation='softmax')
])
# + colab={"base_uri": "https://localhost:8080/"} id="XeuHM5ei-1lX" outputId="4f21f56b-cad3-4a43-92da-71f6e001eae7"
ann.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
ann.fit(X_train,y_train,epochs=10)
# + colab={"base_uri": "https://localhost:8080/"} id="IvoSMlLoEX6L" outputId="77312a1d-d16f-4023-d9cf-ea0698cf1212"
ann.summary()
# + [markdown] id="KN9SCPN4F-k8"
# ## By default the batch size is 32
# + colab={"base_uri": "https://localhost:8080/"} id="2zDIteb2FLjg" outputId="6a076452-547a-484b-b785-ebf42b308055"
50000/32
# + [markdown] id="7xIMn30mGONO"
# ## Testing
# + colab={"base_uri": "https://localhost:8080/"} id="B6DXV6lQFm3i" outputId="31522f76-8f75-4118-9ecd-aa4beaf98508"
ann.evaluate(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="OLGPfnVJGKUG" outputId="05cf73d0-5380-4322-e9f9-61c9ba3aab0e"
y_pred = ann.predict(X_test)
y_pred
# + colab={"base_uri": "https://localhost:8080/"} id="oMrwvriLGjN5" outputId="802813af-7017-4522-9f36-f2fd26cd289c"
np.argmax(y_pred[6])
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="xkdHOAcQH_WW" outputId="7b37badc-cddf-46b5-85bb-17a71a7a77b0"
classes[np.argmax(y_pred[6])]
# + id="tM_cgS9FHMPY"
y_test = y_test.reshape(-1,)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="qMwdfvLXHRUa" outputId="e86c132c-b5b2-44ed-92ae-db7c00be941e"
classes[y_test[6]]
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="nhHEFdm0Hid1" outputId="2d041d98-f439-45cb-a566-6c1d26c1360f"
plt.imshow(X_test[6])
# + [markdown] id="TBbrIResIgGy"
# ## Now lets build the CNN model on the same training data sets
# + id="1i2MSV5iHsvi"
cnn = models.Sequential([
layers.Conv2D(filters=32,kernel_size=(3,3),activation='relu',input_shape=(32,32,3)),
layers.MaxPool2D((2,2)),
layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu'),
layers.MaxPool2D((2,2)),
layers.Conv2D(filters=128,kernel_size=(3,3),activation='relu'),
layers.MaxPool2D((2,2)),
layers.Flatten(),
layers.Dense(64,activation='relu'),
layers.Dense(10,activation='softmax')
])
# + id="VC_4YjP6KMLT"
cnn.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="Iw5AoPluKiUz" outputId="ccb851e1-2128-4736-b552-51ead70f85e4"
cnn.fit(X_train,y_train,epochs=10)
# + colab={"base_uri": "https://localhost:8080/"} id="Ern-x2rZLNRI" outputId="267723f9-b101-431d-d6c0-93ca533fcc61"
cnn.evaluate(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="AnbeJdVELSwr" outputId="5d27cc44-c291-485b-b48c-1539313b44ac"
y_pred = cnn.predict(X_test)
y_pred[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="JwlH4_r8MpfK" outputId="816fb85e-bd6f-4400-822c-fcb3837759d8"
y_classes = [np.argmax(element) for element in y_pred]
y_classes[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="VupDdIaUMxci" outputId="9095800e-0c2d-48ff-c301-ab52b4905be9"
y_test[:5]
# + id="It_QNHqKM2Y-"
|
CNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DAY 2-1ST PYTHON ASSIGNMENT
# +
#Question 1
#Try 5 Different functions of the String in Python.
#=>
name = "<NAME>"
# Exercise the same seven zero-argument str methods, driven from a tuple of
# method names instead of seven separate print statements.
for method_name in ("capitalize", "encode", "islower", "isprintable", "istitle", "split", "title"):
    print(getattr(name, method_name)())
# -
#Question 2
#Try 5 Different functions of the List object in Python
#=>
list1 = ['Rishu', 50, 63.2, True, 78, 90.6]
# append() adds a single element at the end.
list1.append('Rajan')
print(list1)
# pop(2) removes and returns the element at index 2 (63.2).
list1.pop(2)
print(list1)
# copy() returns a shallow copy of the current contents.
print(list1.copy())
# reverse() flips the list in place.
list1.reverse()
print(list1)
# remove() deletes the first occurrence of the value 78.
list1.remove(78)
print(list1)
#Question 3
#Experiment with at least 5 default functions of Dictionary
#=>
# FIX(review): renamed the variable from `dict` — it shadowed the builtin `dict`
# type for the rest of the session.
# NOTE(review): 6-3-1999 is integer arithmetic (evaluates to -1996), not a date —
# presumably a date string was intended; left as-is pending confirmation.
record = {'Name': '<NAME>', 'Age': 21, 'Salary': 100000, 'DOB': 6-3-1999}
print(record.get('Name'))
print(record.keys())
print(record.copy())
record.update({'Profession': 'Coder'})
print(record)
print(record.items())
# popitem() removes the most recently inserted pair ('Profession').
record.popitem()
print(record)
|
DAY2/Assignment Day 2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import libraries
import folium
import pandas as pd
# Make a data frame with dots to show on the map.
# BUG FIX(review): the original stored latitudes under 'lon' and longitudes under
# 'lat'; the column names are swapped here so they match their contents
# (Medellín is roughly 6.2° N, -75.6° E). The plotted coordinates are unchanged.
data1 = pd.DataFrame({'lat':[6.23228, 6.23694, 6.214468244],
                      'lon':[-75.59199, -75.56171, -75.58074717],
                      'name':['muerte1', 'muerte2', 'muerte3']})
data1
# Make an empty map
n = folium.Map(location=[6.261834,-75.6190663], tiles="OpenStreetMap", zoom_start=10)
# Add markers one by one (folium.Marker expects [lat, lon] order)
for i in range(len(data1)):
    folium.Marker([data1.iloc[i]['lat'], data1.iloc[i]['lon']], popup=data1.iloc[i]['name']).add_to(n)
# Save it as html
n.save('MUERTES.html')
# -
[data1.iloc[i]['lat'], data1.iloc[i]['lon']]
data1
data1.dtypes
data.dtypes
# +
# import libraries
import folium
import pandas as pd
# Make a data frame with dots to show on the map.
# BUG FIX(review): as in the cell above, the original put longitudes in 'lat' and
# latitudes in 'lon'; names are swapped to match the values
# (e.g. Buenos Aires ≈ -34° lat, -58° lon). Plotted coordinates are unchanged.
data = pd.DataFrame({
    'lon':[-58, 2, 145, 30.32, -4.03, -73.57, 36.82, -38.5],
    'lat':[-34, 49, -38, 59.93, 5.33, 45.52, -1.29, -12.97],
    'name':['Buenos Aires', 'Paris', 'melbourne', 'St Petersbourg', 'Abidjan', 'Montreal', 'Nairobi', 'Salvador']
})
data
# Make an empty map
m = folium.Map(location=[20, 0], tiles="OpenStreetMap", zoom_start=10)
# Add markers one by one ([lat, lon] order for folium)
for i in range(0,len(data)):
    folium.Marker([data.iloc[i]['lat'], data.iloc[i]['lon']], popup=data.iloc[i]['name']).add_to(m)
# Save it as html
m.save('312_markers_on_folium_map1.html')
# -
|
TRABAJO ANALITICA/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append('../..')
# Options for pandas
pd.options.display.max_columns = 20
pd.options.display.max_rows = 10
# Display all cell outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode(connected=True)
# BUG FIX(review): cufflinks must be bound to `cf` — the original plain
# `import cufflinks` left `cf` undefined, so the next two lines raised NameError.
import cufflinks as cf
cf.go_offline(connected=True)
cf.set_config_file(theme='pearl')
# -
df = pd.DataFrame(np.random.randn(1000, 100))
df.shape
corrs = df.corr()
corrs.shape
# +
threshold = -0.1
direction = 'less'
if direction == 'greater':
values_index = np.where(corrs > threshold)
elif direction == 'less':
values_index = np.where(corrs < threshold)
values_index
# +
rows_index = values_index[0]
columns_index = values_index[1]
pairs = list(map(tuple, set([frozenset((x, y)) for x, y in zip(rows_index, columns_index)])))
from collections import Counter
# Counter(pairs)
# -
pairs
subset_df = pd.DataFrame(dict(value=corrs.values[values_index], var1=corrs.index[values_index[0]],
var2=corrs.columns[values_index[1]]))
subset_df.iloc[:int(len(subset_df)/2)]
subset_df.groupby(['var1', 'var2']).size()
subset_df.columns.str.replace('var1', 'variable1')
# +
values = []; indices = []; columns = []
for pair in pairs:
indices.append(corrs.index[pair[0]])
columns.append(corrs.columns[pair[1]])
values.append(corrs.values[pair])
subset_df = pd.DataFrame(dict(value=values, var1=indices, var2=columns))
# -
subset_df
corrs.loc[3, 54]
corrs.loc[54, 3]
corrs.loc[96, 8]
corrs.loc[44, 45]
corrs.values[(3, 54)]
[tuple(x) for x in set(map(frozenset, pairs))]
|
testing-exercises.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ploting different new features
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from ta import *
# -
# Load data
df = pd.read_csv('../data/datas.csv', sep=',')
df = utils.dropna(df)
df.shape
df.head()
# Add all ta features filling nans values
df = add_all_ta_features(df, "Open", "High", "Low", "Close", "Volume_BTC", fillna=True)
df.isnull().sum()
df.shape
# # Ploting some volatility features
# ### Bollinger Bands
plt.plot(df[40700:41000].Close)
plt.plot(df[40700:41000].volatility2, label='High BB')
plt.plot(df[40700:41000].volatility3, label='Low BB')
plt.plot(df[40700:41000].volatility4, label='EMA BB')
plt.title('Bollinger Bands')
plt.legend()
plt.show()
# ### Keltner Channel
plt.plot(df[40500: 41000].Close)
plt.plot(df[40500: 41000].volatility7, label='Central KC')
plt.plot(df[40500: 41000].volatility8, label='High KC')
plt.plot(df[40500: 41000].volatility9, label='Low KC')
plt.title('Keltner Channel')
plt.legend()
plt.show()
# ### Donchian Channel
plt.plot(df[40500: 41000].Close)
plt.plot(df[40500: 41000].volatility12, label='High DC')
plt.plot(df[40500: 41000].volatility13, label='Low DC')
plt.title('Donchian Channel')
plt.legend()
plt.show()
# # Ploting trend features
# ### MACD
plt.plot(df[40500:41000].trend1, label='MACD')
plt.plot(df[40500:41000].trend2, label='MACD Signal')
plt.plot(df[40500:41000].trend3, label='MACD Difference')
plt.title('MACD, MACD Signal and MACD Difference')
plt.legend()
plt.show()
# ### Ichimoku Kinkō Hyō
plt.plot(df[40500:41000].Close)
plt.plot(df[40500:41000].trend18, label='Ichimoku a')
plt.plot(df[40500:41000].trend19, label='Ichimoku b')
plt.title('Ichimoku Kinkō Hyō')
plt.legend()
plt.show()
# # Ploting all features
for col in df.columns:
plt.plot(df[col])
plt.title(col)
plt.show()
|
dev/test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # N-Year Thresholds
# The N-Year thresholds, which are defined by Bulletin 70, describe how much precipitation must fall in a given time frame to be classified as an n-year storm. In this notebook, I take a look at the thresholds themselves
# +
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
from datetime import datetime, timedelta
import operator
import matplotlib.pyplot as plt
from collections import namedtuple
# %matplotlib inline
# -
# N-Year Storm variables
# These define the thresholds laid out by bulletin 70, and transfer mins and days to hours
n_year_threshes = pd.read_csv('../../n-year/notebooks/data/n_year_definitions.csv')
n_year_threshes = n_year_threshes.set_index('Duration')
dur_str_to_hours = {
'5-min':0.0833,
'10-min':0.1667,
'15-min':15/60.0,
'30-min':0.5,
'1-hr':1.0,
'2-hr':2.0,
'3-hr':3.0,
'6-hr':6.0,
'12-hr':12.0,
'18-hr':18.0,
'24-hr':24.0,
'48-hr':48.0,
'72-hr':72.0,
'5-day':5*24.0,
'10-day':10*24.0
}
n_s = [int(x.replace('-year','')) for x in reversed(list(n_year_threshes.columns.values))]
duration_strs = sorted(dur_str_to_hours.items(), key=operator.itemgetter(1), reverse=False)
n_year_threshes = n_year_threshes.iloc[::-1]
n_year_threshes
n_year_threshes.transpose().loc[['1-year', '10-year', '100-year']].plot(kind='bar', title='Rainfall durations for n-year storms')
n_year_threshes[['1-year', '10-year', '100-year']].plot(kind='bar', title='How much rainfaill it takes for n-years given duration')
# Convert durations to hours, so that they make more sense on an axis
n_year_threshes['duration'] = n_year_threshes.index.values
def find_duration_hours(duration_str):
    """Map a Bulletin 70 duration label (e.g. '2-hr', '5-day') to hours."""
    return dur_str_to_hours[duration_str]
n_year_threshes['hours_duration'] = n_year_threshes['duration'].apply(find_duration_hours)
# FIX(review): axis passed by keyword — positional `axis` for DataFrame.drop was
# deprecated in pandas 1.0 and removed in pandas 2.0.
n_year_threshes = n_year_threshes.drop('duration', axis=1)
n_year_threshes = n_year_threshes.set_index('hours_duration')
n_year_threshes.head()
n_year_threshes.plot(kind='line', title='Duration vs Inches for the N-Year Storm')
# ### We know that 2 inches of rainfall in 10 days ain't bad, but in 1 hour is. Let's take a look at that relationship, so that we can ask the question -- if we get n inches of rainfall, let's see how fast it must fall to be considered an n-year storm
# This method takes in a number of inches, and plots various durations and how they're classified as n-year storms
# as a bar chart
def inches_to_storm(inches):
    """Plot, per n-year storm class, how fast `inches` of rain must fall to qualify.

    For each storm class (column of the Bulletin 70 thresholds) the qualifying
    duration is linearly interpolated between the two threshold points that
    bracket `inches`. Classes whose thresholds `inches` always exceeds are
    skipped. Renders the result as a bar chart.
    """
    ret_val = []
    thresholds = n_year_threshes.transpose()
    for storm in list(thresholds.index.values):
        the_storm = thresholds.loc[storm]
        # Skip storm classes whose thresholds never exceed `inches`.
        if the_storm.loc[the_storm > inches].empty:
            continue
        # First (shortest-duration) threshold at or above `inches`.
        at_or_above = the_storm.loc[the_storm >= inches]
        upper_hours = at_or_above.index[0]
        upper_inches = at_or_above.iloc[0]
        below = the_storm.loc[the_storm < inches]
        try:
            # Last (longest-duration) threshold still below `inches`.
            lower_hours = below.index[-1]
            lower_inches = below.iloc[-1]
        except IndexError:
            # `inches` is below every threshold: interpolate from the origin.
            lower_hours = 0
            lower_inches = 0
        percent_across = (inches - lower_inches) / (upper_inches - lower_inches)
        duration = lower_hours + ((upper_hours - lower_hours) * percent_across)
        ret_val.append({'storm': storm, 'hours': duration})
    ret_val = pd.DataFrame(ret_val)
    ret_val = ret_val.set_index('storm')
    ret_val.plot(kind='bar', title='%s Inches over Duration to Classify the N-Year Storms' % str(inches))


inches_to_storm(3.2)
|
n-year/notebooks/N-Year Thresholds.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# # Random Signals and LTI-Systems
#
# *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).*
# -
# ## Auto-Correlation Function
#
# The auto-correlation function (ACF) $\varphi_{yy}[\kappa]$ of the output signal of an LTI system $y[k] = \mathcal{H} \{ x[k] \}$ is derived. It is assumed that the input signal is a wide-sense stationary (WSS) real-valued random process and that the LTI system has a real-valued impulse response $h[k] \in \mathbb{R}$.
#
# Introducing the output relation $y[k] = h[k] * x[k]$ of an LTI system into the definition of the ACF and rearranging terms yields
#
# \begin{equation}
# \begin{split}
# \varphi_{yy}[\kappa] &= E \{ y[k+\kappa] \cdot y[k] \} \\
# &= E \left\{ \sum_{\mu = -\infty}^{\infty} h[\mu] \; x[k+\kappa-\mu] \cdot
# \sum_{\nu = -\infty}^{\infty} h[\nu] \; x[k-\nu] \right\} \\
# &= \underbrace{h[\kappa] * h[-\kappa]}_{\varphi_{hh}[\kappa]} * \varphi_{xx}[\kappa]
# \end{split}
# \end{equation}
#
# where the ACF $\varphi_{hh}[\kappa]$ of the deterministic impulse response $h[k]$ is commonly termed as *filter ACF*. This is related to the [link between ACF and convolution](../random_signals/correlation_functions.ipynb#Definition). The relation above is known as the *Wiener-Lee theorem*. It states that the ACF of the output $\varphi_{yy}[\kappa]$ of an LTI system is given by the convolution of the input signal's ACF $\varphi_{xx}[\kappa]$ with the filter ACF $\varphi_{hh}[\kappa]$. For a system which just attenuates the input signal $y[k] = A \cdot x[k]$ with $A \in \mathbb{R}$, the ACF at the output is given as $\varphi_{yy}[\kappa] = A^2 \cdot \varphi_{xx}[\kappa]$.
# ### Example - System Response to White Noise
#
# Let's assume that the wide-sense ergodic input signal $x[k]$ of an LTI system with impulse response $h[k] = \text{rect}_N[k]$ is normally distributed white noise. Introducing $\varphi_{xx}[\kappa] = N_0\, \delta[\kappa]$ and $h[k]$ into the Wiener-Lee theorem yields
#
# \begin{equation}
# \varphi_{yy}[\kappa] = N_0 \cdot \varphi_{hh}[\kappa] = N_0 \cdot (\text{rect}_N[\kappa] * \text{rect}_N[-\kappa])
# \end{equation}
#
# The example is evaluated numerically for $N_0 = 1$ and $N=5$
# +
import numpy as np
import matplotlib.pyplot as plt

L = 10000  # number of samples
K = 30  # limit for lags in ACF

# generate input signal (white Gaussian noise)
np.random.seed(2)
x = np.random.normal(size=L)
# compute system response for h[k] = rect_5[k]
y = np.convolve(x, [1, 1, 1, 1, 1], mode='full')
# compute and truncate ACF to lags -K .. K-1 (zero lag is at index len(y)-1)
acf = 1/len(y) * np.correlate(y, y, mode='full')
acf = acf[len(y)-K-1:len(y)+K-1]
kappa = np.arange(-K, K)
# plot ACF
plt.figure(figsize=(10, 6))
# The `use_line_collection` keyword was deprecated and removed in Matplotlib 3.8;
# its former `True` value has been the default behavior since 3.3.
plt.stem(kappa, acf)
plt.title('Estimated ACF of output signal $y[k]$')
plt.ylabel(r'$\hat{\varphi}_{yy}[\kappa]$')
plt.xlabel(r'$\kappa$')
plt.axis([-K, K, 1.2*min(acf), 1.1*max(acf)])
plt.grid()
# -
# **Exercise**
#
# * Derive the theoretic result for $\varphi_{yy}[\kappa]$ by calculating the filter-ACF $\varphi_{hh}[\kappa]$.
# * Why is the estimated ACF $\hat{\varphi}_{yy}[\kappa]$ of the output signal not exactly equal to its theoretic result $\varphi_{yy}[\kappa]$?
# * Change the number of samples `L` and rerun the example. What changes?
#
# Solution: The filter-ACF is given by $\varphi_{hh}[\kappa] = \text{rect}_N[\kappa] * \text{rect}_N[-\kappa]$. The convolution of two rectangular signals $\text{rect}_N[\kappa]$ results in a triangular signal. Taking the time reversal into account yields
#
# \begin{equation}
# \varphi_{hh}[\kappa] = \begin{cases}
# N - |\kappa| & \text{for } -N < \kappa \leq N \\
# 0 & \text{otherwise}
# \end{cases}
# \end{equation}
#
# for even $N$. The estimated ACF $\hat{\varphi}_{yy}[\kappa]$ differs from its theoretic value due to the statistical uncertainties when using random signals of finite length. Increasing its length `L` lowers the statistical uncertainties.
# ## Cross-Correlation Function
#
# The cross-correlation functions (CCFs) $\varphi_{xy}[\kappa]$ and $\varphi_{yx}[\kappa]$ between the in- and output signal of an LTI system $y[k] = \mathcal{H} \{ x[k] \}$ are derived. As for the ACF it is assumed that the input signal originates from a wide-sense stationary real-valued random process and that the LTI system's impulse response is real-valued, i.e. $h[k] \in \mathbb{R}$.
#
# Introducing the convolution into the definition of the CCF and rearranging the terms yields
#
# \begin{equation}
# \begin{split}
# \varphi_{xy}[\kappa] &= E \{ x[k+\kappa] \cdot y[k] \} \\
# &= E \left\{ x[k+\kappa] \cdot \sum_{\mu = -\infty}^{\infty} h[\mu] \; x[k-\mu] \right\} \\
# &= \sum_{\mu = -\infty}^{\infty} h[\mu] \cdot E \{ x[k+\kappa] \cdot x[k-\mu] \} \\
# &= h[-\kappa] * \varphi_{xx}[\kappa]
# \end{split}
# \end{equation}
#
# The CCF $\varphi_{xy}[\kappa]$ between in- and output is given as the time-reversed impulse response of the system convolved with the ACF of the input signal.
#
# The CCF between out- and input is yielded by taking the symmetry relations of the CCF and ACF into account
#
# \begin{equation}
# \varphi_{yx}[\kappa] = \varphi_{xy}[-\kappa] = h[\kappa] * \varphi_{xx}[\kappa]
# \end{equation}
#
# The CCF $\varphi_{yx}[\kappa]$ between out- and input is given as the impulse response of the system convolved with the ACF of the input signal.
#
# For a system which just attenuates the input signal $y[k] = A \cdot x[k]$, the CCFs between input and output are given as $\varphi_{xy}[\kappa] = A \cdot \varphi_{xx}[\kappa]$ and $\varphi_{yx}[\kappa] = A \cdot \varphi_{xx}[\kappa]$.
# ## System Identification by Cross-Correlation
#
# The process of determining the impulse response or transfer function of a system is referred to as *system identification*. The CCFs of an LTI system play an important role in the estimation of the impulse response $h[k]$ of an unknown system. This is illustrated in the following.
#
# The basic idea is to use a specific measurement signal as input signal to the system. Let's assume that the unknown LTI system is excited by [white noise](../random_signals/white_noise.ipynb). The ACF of the wide-sense stationary input signal $x[k]$ is then given as $\varphi_{xx}[\kappa] = N_0 \cdot \delta[\kappa]$. According to the relation derived above, the CCF between out- and input for this special choice of the input signal becomes
#
# \begin{equation}
# \varphi_{yx}[\kappa] = h[\kappa] * N_0 \cdot \delta[\kappa] = N_0 \cdot h[\kappa]
# \end{equation}
#
# For white noise as input signal $x[k]$, the impulse response of an LTI system can be estimated by estimating the CCF between its out- and input signals. Using noise as measurement signal instead of a Dirac impulse is beneficial since its [crest factor](https://en.wikipedia.org/wiki/Crest_factor) is limited.
# ### Example
#
# The application of the CCF to the identification of a system is demonstrated. The system is excited by wide-sense ergodic normally distributed white noise with $N_0 = 1$. The ACF of the in- and output, as well as the CCF between out- and input is estimated and plotted.
# +
import scipy.signal as sig

N = 10000  # number of samples for input signal
K = 50  # limit for lags in ACF

# generate input signal
# normally distributed (zero-mean, unit-variance) white noise
np.random.seed(5)
x = np.random.normal(size=N)
# impulse response of the system
# NOTE: `scipy.signal.triang` was removed in SciPy 1.13; the window now lives
# in the `scipy.signal.windows` namespace.
h = np.concatenate((np.zeros(10), sig.windows.triang(10), np.zeros(10)))
# output signal by convolution
y = np.convolve(h, x, mode='full')

# compute correlation functions
acfx = 1/len(x) * np.correlate(x, x, mode='full')
acfy = 1/len(y) * np.correlate(y, y, mode='full')
ccfyx = 1/len(y) * np.correlate(y, x, mode='full')


def plot_correlation_function(cf):
    '''Plot correlation function `cf` as a stem plot over lags -K .. K-1.

    Assumes zero lag sits at index N-1 of `cf`, which holds for `acfx` and
    `ccfyx` (second correlate argument has length N); for `acfy` the center is
    offset by len(h)-1 samples -- NOTE(review): confirm this is intended.
    '''
    cf = cf[N-K-1:N+K-1]
    kappa = np.arange(-len(cf)//2, len(cf)//2)
    # `use_line_collection` kwarg was removed in Matplotlib 3.8 (True is the default).
    plt.stem(kappa, cf)
    plt.xlabel(r'$\kappa$')
    plt.axis([-K, K, -0.2, 1.1*max(cf)])


# plot ACFs and CCF
plt.rc('figure', figsize=(10, 3))

plt.figure()
plot_correlation_function(acfx)
plt.title('Estimated ACF of input signal')
plt.ylabel(r'$\hat{\varphi}_{xx}[\kappa]$')

plt.figure()
plot_correlation_function(acfy)
plt.title('Estimated ACF of output signal')
plt.ylabel(r'$\hat{\varphi}_{yy}[\kappa]$')

plt.figure()
plot_correlation_function(ccfyx)
plt.plot(np.arange(len(h)), h, 'g-')
plt.title('Estimated and true impulse response')
plt.ylabel(r'$\hat{h}[k]$, $h[k]$');
# -
# **Exercise**
#
# * Why is the estimated CCF $\hat{\varphi}_{yx}[k]$ not exactly equal to the true impulse response $h[k]$ of the system?
# * What changes if you change the number of samples `N` of the input signal?
#
# Solution: The derived relations for system identification hold for the case of a wide-sense ergodic input signal of infinite duration. Since we can only numerically simulate signals of finite duration, the observed deviations are a result of the resulting statistical uncertainties. Increasing the length `N` of the input signal improves the estimate of the impulse response.
# + [markdown] nbsphinx="hidden"
# **Copyright**
#
# This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *<NAME>, Digital Signal Processing - Lecture notes featuring computational examples*.
|
random_signals_LTI_systems/correlation_functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="4i71CtFk_RiQ" outputId="8b26e8e7-9edb-4137-a6ba-f30c48c0cb51"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
count=CountVectorizer()
data=pd.read_csv("Train.csv")
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="YwBn18cmAe7p" outputId="2882b702-4b31-4a5b-ac9c-50c01981464f"
# Class balance: pie chart of positive (label==1) vs negative (label==0) rows.
fig = plt.figure(figsize=(5, 5))
colors = ["skyblue", 'pink']
pos = data[data['label'] == 1]  # rows labelled positive
neg = data[data['label'] == 0]  # rows labelled negative
ck = [pos['label'].count(), neg['label'].count()]  # per-class counts
legpie = plt.pie(ck, labels=["Positive", "Negative"],
                 autopct='%1.1f%%',   # show percentage on each wedge
                 shadow=True,
                 colors=colors,
                 startangle=45,
                 explode=(0, 0.1))    # pull the negative wedge out slightly
# + id="kJck4fj7Aid8"
import re
def preprocessor(text):
    """Normalize a review: strip HTML tags, lowercase, and keep emoticons.

    Emoticons such as ':)' or ';-(' are extracted before lowercasing, then
    re-appended (with 'noses' removed) after punctuation is collapsed.
    Raw (r'') patterns are used so the regex backslash escapes are not treated
    as invalid string escapes, which raise a SyntaxWarning on modern Python.
    """
    text = re.sub(r'<[^>]*>', '', text)  # drop HTML markup
    emojis = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text)
    text = re.sub(r'[\W]+', ' ', text.lower()) + ' '.join(emojis).replace('-', '')
    return text
data['text']=data['text'].apply(preprocessor)
# + id="fep5_GYsAmun"
from nltk.stem.porter import PorterStemmer
porter=PorterStemmer()
def tokenizer(text):
    """Whitespace-split *text* into raw tokens."""
    return text.split()


def tokenizer_porter(text):
    """Whitespace-split *text* and Porter-stem every token."""
    return list(map(porter.stem, text.split()))
# + colab={"base_uri": "https://localhost:8080/", "height": 872} id="-NsOtOfOAq0b" outputId="82474ad3-498e-4cf8-84f3-f26adcd33ea4"
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop=stopwords.words('english')
from wordcloud import WordCloud
positivedata = data[ data['label'] == 1]
positivedata =positivedata['text']
negdata = data[data['label'] == 0]
negdata= negdata['text']
def wordcloud_draw(data, color='white'):
    """Render a word cloud of *data*, skipping the corpus-wide words 'movie'/'film'."""
    corpus = ' '.join(data)
    kept_tokens = [
        token for token in corpus.split()
        if token not in ('movie', 'film')
    ]
    cloud = WordCloud(
        stopwords=stop,
        background_color=color,
        width=2500,
        height=2000,
    ).generate(" ".join(kept_tokens))
    plt.figure(1, figsize=(10, 7))
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()
print("Positive words are as follows")
wordcloud_draw(positivedata,'white')
print("Negative words are as follows")
wordcloud_draw(negdata)
# + id="yXmR1QqUAvd0"
from sklearn.feature_extraction.text import TfidfVectorizer
# TF-IDF features over the cleaned text; lowercasing is skipped because
# `preprocessor` already lowercased it, and tokens are Porter-stemmed.
tfidf = TfidfVectorizer(strip_accents=None, lowercase=False, preprocessor=None, tokenizer=tokenizer_porter, use_idf=True, norm='l2', smooth_idf=True)
y = data.label.values
x = tfidf.fit_transform(data.text)
# + id="3i25deGBAylC"
from sklearn.model_selection import train_test_split
# 50/50 split without shuffling (NOTE(review): shuffle=False assumes the CSV rows
# are not ordered by label -- confirm, otherwise the split is skewed).
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.5, shuffle=False)
# + colab={"base_uri": "https://localhost:8080/"} id="6Ke7AjCSA1Ka" outputId="62355d1e-c8b4-4f74-fb1b-788b911144ff"
from sklearn.linear_model import LogisticRegressionCV
# 6-fold cross-validated logistic regression, accuracy-scored, parallel fit.
clf = LogisticRegressionCV(cv=6, scoring='accuracy', random_state=0, n_jobs=-1, verbose=3, max_iter=500).fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
|
Day-28/Sentiment_Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pprint
import tempfile
import urllib
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
import tfx
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.utils.dsl_utils import external_input
import ssl
# %load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip
# -
# %%skip_for_export
# !jupyter nbextension enable --py tensorflow_model_analysis --system
# +
#setup paths
# This is the root directory for your TFX pip package installation.
#_tfx_root = tfx.__path__[0]
# This is the directory containing the TFX Chicago Taxi Pipeline example.
#_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')
# This is the path where your model will be pushed for serving.
#_serving_model_dir = os.path.join(
# tempfile.mkdtemp(), 'serving_model/taxi_simple')
cwd = os.getcwd()
if not os.path.exists('serving_model/iris_simple'):
os.makedirs('serving_model/iris_simple')
_serving_model_dir =os.path.join(cwd,'serving_model/iris_simple')
# Set up logging.
absl.logging.set_verbosity(absl.logging.INFO)
# +
ssl._create_default_https_context = ssl._create_unverified_context
#_data_root = tempfile.mkdtemp(prefix='tfx-data')
if not os.path.exists(cwd+'/data/iris-data'):
os.makedirs(cwd+'/data/iris-data')
#if not os.path.exists(cwd+'dag_out'):
# os.makedirs(cwd+'/dag_out')
#_dag_output = os.path.join(cwd,'dag_out')
_data_root = os.path.join(cwd,'data/iris-data')
# -
#_data_root = tempfile.mkdtemp(prefix='tfx-data')
#DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'
#urllib.request.urlretrieve(DATA_PATH, _data_filepath)
_data_filepath = os.path.join(_data_root, "iris.csv")
# +
# %%skip_for_export
# !head {_data_filepath}
# -
# Create the InteractiveContext
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext. Calls to InteractiveContext are no-ops outside of the
# notebook.
context = InteractiveContext(pipeline_root="./tfx_<EMAIL>")
#context = InteractiveContext()
# Run TFX components interactively
example_gen = CsvExampleGen(input=external_input(_data_root))
context.run(example_gen)
# %%skip_for_export
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
# +
# %%skip_for_export
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
# -
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
# +
# %%skip_for_export
context.show(statistics_gen.outputs['statistics'])
# -
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
context.run(schema_gen)
# +
# %%skip_for_export
context.show(schema_gen.outputs['schema'])
# -
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
# +
# %%skip_for_export
context.show(example_validator.outputs['anomalies'])
# -
# # Transform
_iris_transform_module_file = 'iris_transform.py'
# +
# %%skip_for_export
# %%writefile {_iris_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
def transformed_name(key):
    """Append the '_xf' suffix used to mark post-transform feature names."""
    return key + '_xf'


# Categorical features are assumed to each have a maximum value in the dataset.
# NOTE(review): not referenced anywhere in this module -- possibly vestigial.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]

# Numeric iris measurements that get z-score scaled in preprocessing_fn.
DENSE_FLOAT_FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'
                            ]

# Keys
LABEL_KEY = 'variety'

# Private aliases (NOTE(review): presumably kept for parity with upstream TFX
# example modules -- they simply mirror the public names above).
_DENSE_FLOAT_FEATURE_KEYS = DENSE_FLOAT_FEATURE_KEYS
_transformed_name = transformed_name
_LABEL_KEY = LABEL_KEY


def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.
    Args:
      inputs: map from feature keys to raw not-yet-transformed features.
    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    # z-score every dense float feature after densifying it.
    for key in _DENSE_FLOAT_FEATURE_KEYS:
        outputs[_transformed_name(key)] = tft.scale_to_z_score(_fill_in_missing(inputs[key]))
    # TODO(b/157064428): Support label transformation for Keras.
    # Do not apply label transformation as it will result in wrong evaluation.
    outputs[_transformed_name(_LABEL_KEY)] = _fill_in_missing(inputs[_LABEL_KEY])
    return outputs


def _fill_in_missing(x):
    """Replace missing values in a SparseTensor.
    Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
    Args:
      x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
        in the second dimension.
    Returns:
      A rank 1 tensor where missing values of `x` have been filled in.
    """
    # Use the type-appropriate "empty" value: '' for strings, 0 for numerics.
    default_value = '' if x.dtype == tf.string else 0
    return tf.squeeze(
        tf.sparse.to_dense(
            tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
            default_value),
        axis=1)
# -
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_iris_transform_module_file))
context.run(transform)
# +
# %%skip_for_export
transform.outputs
# +
# %%skip_for_export
# Get the URI of the output artifact representing the transformed examples, which is a directory
train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
# -
_iris_trainer_module_file = 'iris_trainer_k.py'
# +
# %%skip_for_export
# %%writefile {_iris_trainer_module_file}
from typing import List, Text
import os
import absl
import datetime
import tensorflow as tf
import tensorflow_transform as tft
import kerastuner
from tensorflow import keras
from tfx.components.trainer.executor import TrainerFnArgs
def transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [_transformed_name(key) for key in keys]
def _gzip_reader_fn(filenames):
"""Small utility returning a record reader that can read gzip'ed files."""
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
DENSE_FLOAT_FEATURE_KEYS = ['sepal_length','sepal_width','petal_length','petal_width'
]
# Keys
LABEL_KEY = 'variety'
_DENSE_FLOAT_FEATURE_KEYS = DENSE_FLOAT_FEATURE_KEYS
_transformed_name = transformed_name
_LABEL_KEY = LABEL_KEY
# Iris dataset has 150 records, and is divided to train and eval splits in 2:1
# ratio.
_TRAIN_DATA_SIZE = 100
_EVAL_DATA_SIZE = 50
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example and applies TFT."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
transformed_feature_spec = (
tf_transform_output.transformed_feature_spec().copy())
dataset = tf.data.experimental.make_batched_features_dataset(
file_pattern=file_pattern,
batch_size=batch_size,
features=transformed_feature_spec,
reader=_gzip_reader_fn,
label_key=_transformed_name(_LABEL_KEY))
return dataset
def _get_hyperparameters() -> kerastuner.HyperParameters:
"""Returns hyperparameters for building Keras model."""
hp = kerastuner.HyperParameters()
# Defines search space.
hp.Choice('learning_rate', [1e-2, 1e-3], default=1e-2)
hp.Int('num_layers', 1, 3, default=2)
return hp
def _build_keras_model(hparams: kerastuner.HyperParameters) -> tf.keras.Model:
    """Creates a DNN Keras model for classifying iris data.
    Args:
      hparams: Holds HyperParameters for tuning ('num_layers', 'learning_rate').
    Returns:
      A compiled Keras Model.
    """
    # The model below is built with Functional API, please refer to
    # https://www.tensorflow.org/guide/keras/overview for all API options.
    # One scalar input per transformed dense float feature.
    inputs = [
        keras.layers.Input(shape=(1,), name=_transformed_name(f))
        for f in _DENSE_FLOAT_FEATURE_KEYS
    ]
    d = keras.layers.concatenate(inputs)
    # 'num_layers' hidden layers of 8 ReLU units each.
    for _ in range(int(hparams.get('num_layers'))):
        d = keras.layers.Dense(8, activation='relu')(d)
    # 3-way softmax head; labels are sparse integer class indices,
    # hence the sparse categorical loss/metric below.
    outputs = keras.layers.Dense(3, activation='softmax')(d)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
        loss='sparse_categorical_crossentropy',
        metrics=[keras.metrics.SparseCategoricalAccuracy()])
    model.summary(print_fn=absl.logging.info)
    return model
# TFX Trainer will call this function.
def run_fn(fn_args: TrainerFnArgs):
    """Train the model based on given args.
    Args:
      fn_args: Holds args used to train the model as name/value pairs.
    """
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

    train_dataset = _input_fn(fn_args.train_files, tf_transform_output,
                              batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output,
                             batch_size=_EVAL_BATCH_SIZE)

    if fn_args.hyperparameters:
        hparams = kerastuner.HyperParameters.from_config(fn_args.hyperparameters)
    else:
        # Covers the case where the hyperparameters are already decided and the
        # Tuner component has been removed from the pipeline. Users can also
        # inline the hyperparameters directly in _build_keras_model.
        hparams = _get_hyperparameters()
    absl.logging.info('HyperParameters for training: %s' % hparams.get_config())

    # Distribute the model across all visible devices.
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        model = _build_keras_model(hparams)

    # NOTE(review): this is a float (e.g. 100 / 20 == 5.0) while Keras documents
    # `steps_per_epoch` as an int -- confirm it is coerced rather than rejected.
    steps_per_epoch = _TRAIN_DATA_SIZE / _TRAIN_BATCH_SIZE

    try:
        log_dir = fn_args.model_run_dir
    except KeyError:
        # TODO(b/158106209): use ModelRun instead of Model artifact for logging.
        log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')

    # Write logs to path
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir, update_freq='batch')

    model.fit(
        train_dataset,
        epochs=int(fn_args.train_steps / steps_per_epoch),
        steps_per_epoch=steps_per_epoch,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        callbacks=[tensorboard_callback])

    # Export with a serving signature that parses serialized tf.Examples and
    # applies the TFT preprocessing graph before running inference.
    signatures = {
        'serving_default':
            _get_serve_tf_examples_fn(model,
                                      tf_transform_output).get_concrete_function(
                                          tf.TensorSpec(
                                              shape=[None],
                                              dtype=tf.string,
                                              name='examples')),
    }
    model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
# -
trainer = Trainer(
module_file=os.path.abspath(_iris_trainer_module_file),
custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(num_steps=500),
eval_args=trainer_pb2.EvalArgs(num_steps=250))
context.run(trainer)
# %%skip_for_export
model_artifact_dir = trainer.outputs['model'].get()[0].uri
pp.pprint(os.listdir(model_artifact_dir))
model_dir = os.path.join(model_artifact_dir, 'serving_model_dir')
pp.pprint(os.listdir(model_dir))
# +
# %%skip_for_export
log_dir = os.path.join(model_artifact_dir, 'logs')
# %load_ext tensorboard
# %tensorboard --logdir {log_dir}
# %reload_ext tensorboard
# -
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
import tensorflow_transform as tft
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(label_key='variety')],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
class_name='SparseCategoricalAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10})))
])
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
#baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
context.run(evaluator)
# +
# %%skip_for_export
evaluator.outputs
# +
# %%skip_for_export
context.show(evaluator.outputs['evaluation'])
# +
# %%skip_for_export
blessing_uri = evaluator.outputs.blessing.get()[0].uri
# !ls -l {blessing_uri}
# +
# %%skip_for_export
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
print(tfma.load_validation_result(PATH_TO_RESULT))
# +
# %%skip_for_export
# Get the TFMA output result path and load the result.
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
tfma_result = tfma.load_eval_result(PATH_TO_RESULT)
# Show data sliced along feature column petal_width.
#tfma_eval = tfma.view.render_slicing_metrics(tfma_result, slicing_column='petal_width')
#tfma.view.render_plot(tfma_result)
#print(type(tfma_eval))
# -
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
# +
# %%skip_for_export
pusher.outputs
# -
# %%skip_for_export
print(pusher.outputs.model_push.get()[0].uri)
# +
# #!ls tfx_com/Pusher/pushed_model/8
# +
# %%skip_for_export
push_uri = pusher.outputs.model_push.get()[0].uri
model = tf.saved_model.load(push_uri)
for item in model.signatures.items():
pp.pprint(item)
# -
_runner_type = 'airflow' #@param ["beam", "airflow"]
_pipeline_name = 'iris_%s' % _runner_type
# +
#docs_infra: no_execute
# For Colab notebooks only.
# TODO(USER): Fill out the path to this notebook.
#_notebook_filepath = (
# '/content/drive/My Drive/Colab Notebooks/components.ipynb')
# For Jupyter notebooks only.
_notebook_filepath = os.path.join(os.getcwd(),
'iris_example.ipynb')
# TODO(USER): Fill out the paths for the exported pipeline.
#_tfx_root = os.path.join(_dag_output, 'tfx')
_iris_root = os.path.join(cwd, 'iris')
_serving_model_dir = os.path.join(_serving_model_dir, 'serving_model')
_data_root = os.path.join(cwd, 'data', 'simple')
_pipeline_root = os.path.join(cwd, 'pipelines', _pipeline_name)
_metadata_path = os.path.join(cwd, 'metadata', _pipeline_name,
'metadata.db')
# -
#docs_infra: no_execute
# TODO(USER): Specify components to be included in the exported pipeline.
components = [
example_gen, statistics_gen, schema_gen, example_validator, transform,
trainer, evaluator, pusher
]
# +
# %%skip_for_export
#docs_infra: no_execute
#@markdown Run this cell to generate the pipeline files.
# Warn if automagic is on: line magics written without the '%' prefix would
# survive the notebook-to-pipeline export and break the generated script.
if get_ipython().magics_manager.auto_magic:
    print('Warning: %automagic is ON. Line magics specified without the % prefix '
          'will not be scrubbed during export to pipeline.')
_pipeline_export_filepath = 'export_%s.py' % _pipeline_name
context.export_to_pipeline(notebook_filepath=_notebook_filepath,
                           export_filepath=_pipeline_export_filepath,
                           runner_type=_runner_type)
# -
# %%skip_for_export
# One-off environment setup commands, kept commented out for reference.
# #!jupyter nbextension enable --py widgetsnbextension --system
# #!jupyter nbextension install --py --symlink tensorflow_model_analysis --system
# %%skip_for_export
# #!pip freeze -> requirements.txt
# %%skip_for_export
# #!pip3 install tensorflow==2.2.0
# #!pip3 install tensorboard>=2.1,<2.3
# #!pip3 install tensorflow-data-validation==0.22.0
# #!pip3 install tensorflow-metadata==0.22.0
# #!pip3 install tensorflow-model-analysis==0.22.1
# #!pip3 install tensorflow-transform==0.22.0
# #!pip3 install tfx-bsl==0.22.0
# +
# #!pip3 install ipywidgets==7.5.0
# +
# #!pip3 install -U tensorboard-plugin-profile
# +
# #!pip3 install tfx==0.22.0
# -
|
iris_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tools scripts
# The notebook must run in the same directory with "train", "test", "val_seen", "val_unseen", "envdrop" and "joint_train_envdrop"
# +
# Standard library
import copy
import gzip
import itertools
import json
import pickle
import random
import re
import string
import threading
import time
from collections import Counter

# Third-party
import clip
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from clip.simple_tokenizer import SimpleTokenizer as _Tokenizer
from nltk.tokenize import PunktSentenceTokenizer
from numba import jit
from PIL import Image
from tqdm.notebook import tqdm
# Keep the minus sign rendering correctly with non-default matplotlib fonts.
plt.rcParams['axes.unicode_minus'] = False
# One-time downloads / style tweaks, kept commented out for reference.
# nltk.download("punkt")
# nltk.download("averaged_perceptron_tagger")
# sns.set()
# plt.style.use('ggplot')
# -
# # Train NLTK tokenizer
# +
# use R2R
# Before running this cell, the R2R instruction dataset must be downloaded
# into folder "r2r".
# Trains a Punkt sentence tokenizer on all (lower-cased) R2R instructions
# and round-trips it through a pickle file on disk.
import pickle
from nltk.tokenize import PunktSentenceTokenizer
splits = ["train", "val_seen", "val_unseen", "test"]
# Collect the pieces in a list and join once — avoids quadratic `+=` on str.
_pieces = []
for split in splits:
    sub_data_path = "r2r/R2R_%s.json"%(split)
    with open(sub_data_path, "r") as f:
        sub_data = json.loads(f.read())
    for item in tqdm(sub_data):
        # Each R2R episode carries three human-written instructions.
        for i in range(3):
            _pieces.append(item["instructions"][i]+" ")
all_text = "".join(_pieces)
sent_tokenizer = PunktSentenceTokenizer(all_text.lower())
# Persist the trained tokenizer, then reload it (sanity check that the
# pickle round-trips). `with` guarantees the file handles are closed even
# on error, unlike the previous open()/close() pairs.
with open('r2r_sent_tokenizer.pickle', 'wb') as f:
    pickle.dump(sent_tokenizer, f)
with open('r2r_sent_tokenizer.pickle', 'rb') as f:
    sent_tokenizer = pickle.load(f)
# +
# use VLN-CE
# Trains a Punkt sentence tokenizer on all VLN-CE instructions (original
# casing, unlike the R2R variant above) and round-trips it through a
# pickle file on disk.
import pickle
from nltk.tokenize import PunktSentenceTokenizer
splits = ["train", "val_seen", "val_unseen", "test"]
# Collect the pieces in a list and join once — avoids quadratic `+=` on str.
_pieces = []
for split in splits:
    sub_data_path = "%s/%s.json.gz"%(split,split)
    with gzip.open(sub_data_path, "r") as f:
        sub_data = json.loads(f.read())
    for item in tqdm(sub_data["episodes"]):
        _pieces.append(item["instruction"]["instruction_text"]+" ")
all_text = "".join(_pieces)
sent_tokenizer = PunktSentenceTokenizer(all_text)
# Persist then reload the trained tokenizer; `with` guarantees the file
# handles are closed even on error.
with open('r2r_sent_tokenizer.pickle', 'wb') as f:
    pickle.dump(sent_tokenizer, f)
with open('r2r_sent_tokenizer.pickle', 'rb') as f:
    sent_tokenizer = pickle.load(f)
# + [markdown] tags=[]
# # FGR2R's chunking
# https://github.com/YicongHong/Fine-Grained-R2R
#
# Upgrade stanfordnlp to stanza
# +
import copy
import functools
import json
import re
import sys
from collections import Counter
import numpy as np
# #!pip install stanza
import stanza
# stanza.download("en")
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
    """Render a one-line terminal progress bar, overwriting the current line.

    Call repeatedly inside a loop; a newline is emitted once
    ``iteration == total``.

    @params:
        iteration  - Required : current iteration (Int)
        total      - Required : total iterations (Int)
        prefix     - Optional : prefix string (Str)
        suffix     - Optional : suffix string (Str)
        decimals   - Optional : positive number of decimals in percent complete (Int)
        bar_length - Optional : character length of bar (Int)
    """
    fraction = iteration / float(total)
    percents = "{0:.{1}f}".format(100 * fraction, decimals)
    filled = int(round(bar_length * fraction))
    # '_' marks completed progress, '-' marks the remainder.
    bar = "".join(["_" * filled, "-" * (bar_length - filled)])
    # '\r' rewinds to the start of the line so each call overwrites the last.
    sys.stdout.write("\r{} |{}| {}% {}".format(prefix, bar, percents, suffix))
    if iteration == total:
        sys.stdout.write("\n")
    sys.stdout.flush()
def check_lemma(word):
    """Return the lemma of *word*, except 'left'/'your' which keep their
    surface form (their lemmas would lose the intended navigation meaning)."""
    keep_as_is = ("left", "your")
    return word.text if word.text in keep_as_is else word.lemma
class Tokenizer(object):
    """Tokenize a sentence into lowercase word and punctuation tokens.

    Fix: ``split_sentence`` uses ``string.punctuation``, but ``string`` was
    never imported in this notebook, so calling it raised ``NameError``;
    ``import string`` is now part of the top-of-file imports.
    """

    # Split on runs of non-alphanumeric characters; the capture group keeps
    # the separators so punctuation can be emitted as tokens.
    SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')

    def __init__(self):
        pass

    def split_sentence(self, sentence):
        """Break *sentence* into a list of lowercase words and punctuation.

        Punctuation-only tokens (e.g. '!?') are split into single characters,
        except runs of full stops (e.g. '..'), which are kept whole.
        """
        toks = []
        for word in [s.strip().lower() for s in self.SENTENCE_SPLIT_REGEX.split(sentence.strip()) if
                     len(s.strip()) > 0]:
            # Break up any words containing punctuation only, e.g. '!?', unless it is multiple full stops e.g. '..'
            if all(c in string.punctuation for c in word) and not all(c in '.' for c in word):
                toks += list(word)
            else:
                toks.append(word)
        return toks
def create_chunk(doc, nlp):
    """Chunk one instruction into sub-instructions using a stanza parse.

    Follows the FGR2R heuristic: a new chunk starts at every 'root',
    'parataxis', or qualifying 'conj' word; short adverbial pieces glued by
    "and"/"then" are merged into the previous chunk instead.

    NOTE(review): indentation in this block was reconstructed from a
    whitespace-mangled source — verify the nesting against the FGR2R
    reference implementation.

    Params:
        doc: raw instruction string.
        nlp: a stanza Pipeline (produces .sentences/.words with deprel/head/lemma).
    Return:
        list of chunk strings (checked lemmas joined by spaces), the leading
        '<start>' sentinel stripped.
    """
    doc = nlp(doc)
    max_kdx = len(doc.sentences) - 1  # unused; kept from the original
    # Sentinel chunk so `instr_lemma[-1]` is always valid for merges.
    instr_lemma = [['<start>']]
    for kdx, sent in enumerate(doc.sentences):
        # Collect word ids of roots and of conjuncts hanging off a root (or
        # off an earlier conjunct) — these mark chunk boundaries.
        root_sub = []; conj_sub = []
        for word in sent.words:
            if (word.deprel == 'root'):
                root_sub.append(int(word.id))
            elif (word.deprel == 'conj' and word.head == 1) or (word.deprel == 'conj' and (word.head in root_sub or word.head in conj_sub)):
                conj_sub.append(int(word.id))
        max_jdx = len(sent.words) - 1  # unused; kept from the original
        # instr_lemma_sub/instr_depend_sub accumulate the current chunk's
        # lemmas and dependency relations; ti walks conj_sub.
        instr_lemma_sub = []; instr_depend_sub = []; instr_lemma_sub = []; ti = 0
        for jdx, word in enumerate(sent.words):
            # Case 1: a second root — close the current chunk.
            if (word.deprel == 'root') and (('root' in instr_depend_sub) or ('parataxis' in instr_depend_sub)):
                # print('1', word.lemma, instr_depend_sub)
                if len(instr_lemma_sub) >= 2:
                    ''' check for the special cases of turning and the word "and","then" '''
                    # Adverbial-only piece (no object/oblique/nmod): merge into
                    # the previous chunk when glued by "and"/"then".
                    if (('advmod' in instr_depend_sub) or ('xcomp' in instr_depend_sub)) and ('obj' not in instr_depend_sub) and ('obl' not in instr_depend_sub) and ('nmod' not in instr_depend_sub):
                        if (instr_lemma[-1][-1] == 'and') or (instr_lemma_sub[0]=='and') or (instr_lemma[-1][-1] == 'then') or (instr_lemma_sub[0]=='then'):
                            instr_lemma[-1] += instr_lemma_sub
                            instr_lemma_sub = []; instr_depend_sub = []
                        else:
                            'add to the next chunk'
                    else:
                        # print('a', instr_lemma_sub)
                        instr_lemma.append(instr_lemma_sub)
                        instr_lemma_sub = []; instr_depend_sub = []
            # Case 2: a word headed by the next pending conjunct — same
            # close/merge logic, with an extra length<4 guard.
            elif ti <= len(conj_sub)-1:
                if word.head == conj_sub[ti]:
                    ti += 1
                    # print('2', word.lemma, instr_depend_sub)
                    if len(instr_lemma_sub) >= 2:
                        ''' check for the special cases of turning and the word "and","then" '''
                        if (len(instr_lemma_sub) < 4) and (('advmod' in instr_depend_sub) or ('xcomp' in instr_depend_sub)) and ('obj' not in instr_depend_sub) and ('obl' not in instr_depend_sub) and ('nmod' not in instr_depend_sub):
                            if (instr_lemma[-1][-1] == 'and') or (instr_lemma_sub[0]=='and') or (instr_lemma[-1][-1] == 'then') or (instr_lemma_sub[0]=='then'):
                                instr_lemma[-1] += instr_lemma_sub
                                instr_lemma_sub = []; instr_depend_sub = []
                            else:
                                'add to the next chunk'
                        else:
                            # print('b', instr_lemma_sub)
                            instr_lemma.append(instr_lemma_sub)
                            instr_lemma_sub = []; instr_depend_sub = []
            # Case 3: a parataxis after a root/parataxis — close the chunk.
            elif (word.deprel == 'parataxis') and (('root' in instr_depend_sub) or ('parataxis' in instr_depend_sub)):
                if len(instr_lemma_sub) >= 2:
                    ''' check for the special cases of turning and the word "and","then" '''
                    if (('advmod' in instr_depend_sub) or ('xcomp' in instr_depend_sub)) and ('obj' not in instr_depend_sub) and ('obl' not in instr_depend_sub) and ('nmod' not in instr_depend_sub):
                        if (instr_lemma[-1][-1] == 'and') or (instr_lemma_sub[0]=='and') or (instr_lemma[-1][-1] == 'then') or (instr_lemma_sub[0]=='then'):
                            instr_lemma[-1] += instr_lemma_sub
                            instr_lemma_sub = []; instr_depend_sub = []
                        else:
                            'add to the next chunk'
                    else:
                        instr_lemma.append(instr_lemma_sub)
                        instr_lemma_sub = []; instr_depend_sub = []
            # Append the current word (punctuation excluded) to the open chunk.
            if word.deprel not in ['punct']:
                instr_lemma_sub.append(check_lemma(word))
                instr_depend_sub.append(word.deprel)
        # Flush the trailing chunk: short leftovers merge into the last chunk.
        if len(instr_lemma_sub) >= 2:
            instr_lemma.append(instr_lemma_sub)
        else:
            instr_lemma[-1] += instr_lemma_sub
    # Drop the '<start>' sentinel and join each chunk's lemmas.
    instr_lemma = [" ".join(v) for v in instr_lemma[1:]]
    return instr_lemma
# -
# # Sub Cut Tool
def instruction_cut_clip(train_data, append_dot=True, keep_subs=True, refine=False, split_func=None, lower_all=False):
    """
    Cut every episode's instruction into sub-instructions and CLIP-tokenize them.

    Params:
        train_data: a dataset dict
        append_dot: whether to add a "." at the end of sub-instructions
        keep_subs: whether to keep sub-instruction text after processing
        refine: whether to use proposed refine processing
        split_func: the function used to cut instructions, default is `nltk.sent_tokenize`
        lower_all: whether to lower all characters in sub-instructions.
    Return:
        train_data with "sub_instruction_tokens" and "sub_instruction"

    NOTE(review): mutates the "episodes" list of the input dict in place even
    though a deep copy is returned; callers should use the return value only.
    """
    if split_func is None:
        split_func = nltk.sent_tokenize
    print(split_func)
    return_data = copy.deepcopy(train_data)
    # Work on the episodes of the *original* dict (see NOTE above).
    train_data = train_data["episodes"]
    # pre process: strip leading non-letter characters and normalize dots.
    char_pattern = re.compile(r"[a-zA-Z]")
    for i, item in enumerate(tqdm(train_data)):
        inst = item["instruction"]["instruction_text"]
        inst = inst.strip()
        start_idx = 0
        while not char_pattern.search(inst[start_idx]):
            start_idx += 1
        inst = inst[start_idx:]
        if lower_all:
            inst = inst.lower()
        # NOTE(review): the final replace(" ", " ") is a no-op as written —
        # presumably it was meant to collapse double spaces; confirm.
        train_data[i]["instruction"]["instruction_text"] = inst.replace("...", ".").replace("..", ".").replace(".",". ").replace(" ", " ")
    # cut by nltk: split on CRLF first, then sentence-tokenize each piece.
    pattern = re.compile(r"\r\n")
    for i, item in enumerate(tqdm(train_data)):
        inst = item["instruction"]["instruction_text"]
        res = []
        now = pattern.split(inst)
        for v in now:
            res.extend(split_func(v))
        train_data[i]["sub_instruction"] = [piece.strip() for piece in res if piece.strip()]
    # refine: further split sentences at "and"/","/"then" boundaries that are
    # immediately followed by a verb (or by "then").
    if refine:
        punctuation_list = [",", "."]
        char_pattern = re.compile(r"[a-zA-Z]+")
        def judge_verb(word):
            # True if the POS-tagged (token, tag) pair looks like a verb,
            # including a few hard-coded navigation verbs NLTK often mistags.
            const_verbs = ["wait", "turn", "walk", "stop"]
            if "VB" in word[1]:
                return True
            if word[0] in const_verbs:
                return True
            return False
        for i, item in enumerate(tqdm(train_data)):
            new_sub = []
            for k, piece in enumerate(item["sub_instruction"]):
                word_list = nltk.pos_tag(nltk.word_tokenize(piece))
                tmp = ""
                for x, word in enumerate(word_list):
                    # Boundary: connective followed by a verb — flush `tmp`
                    # (only if it contains at least one letter).
                    if (word[0].lower()=="and" or word[0]=="," or word[0].lower()=="then") and (x+1<len(word_list) and judge_verb(word_list[x+1])):
                        if tmp and char_pattern.search(tmp):
                            new_sub.append(tmp)
                        # Keep "and"/"then" as the start of the next chunk.
                        if word[0].lower()=="and" or word[0].lower()=="then":
                            tmp = word[0]
                        else:
                            tmp = ""
                    # Boundary: "and"/"," followed by "then".
                    elif (word[0]=="and" or word[0]==",") and (x+1<len(word_list) and word_list[x+1][0]=="then"):
                        if tmp:
                            new_sub.append(tmp)
                        if word[0].lower()=="and" or word[0].lower()=="then":
                            tmp = word[0]
                        else:
                            tmp = ""
                    else:
                        # Grow the current chunk; no space before punctuation.
                        if not tmp or word[0] in punctuation_list:
                            tmp+=word[0]
                        else:
                            tmp+=(" "+word[0])
                if tmp:
                    new_sub.append(tmp)
            train_data[i]["sub_instruction"] = new_sub
    # post process and generate tokens (CLIP token ids per sub-instruction).
    char_pattern = re.compile(r"[a-zA-Z]")
    max_len = 0  # unused; kept from the original
    instruction_vocab = return_data["instruction_vocab"]
    unk_index = instruction_vocab["UNK_INDEX"]  # unused; kept from the original
    pad_index = instruction_vocab["PAD_INDEX"]
    pad_len = 100       # max length for the full instruction token list
    sub_pad_len = 77    # CLIP context length per sub-instruction
    sub_num = 10        # fixed number of sub-instruction slots per episode
    useless_sub = [pad_index]*sub_pad_len  # all-PAD filler sub-instruction
    sub_split_index = -1  # unused; kept from the original
    for i, item in enumerate(tqdm(train_data)):
        tokens_all = []  # unused; kept from the original
        tokens_split = []
        for k, piece in enumerate(item["sub_instruction"]):
            piece = piece.strip()
            assert piece
            # Strip trailing '.'/',' then optionally re-append a single '.'.
            idx = len(piece)-1
            while idx>=0 and piece[idx] in [".", ","]:
                idx -= 1
            if append_dot:
                piece = piece[0:(idx+1)]+"."
            else:
                piece = piece[0:(idx+1)]
            # Normalize NLTK-style quotes back to plain double quotes.
            piece = piece.replace("``", "\"").replace("''", "\"")
            train_data[i]["sub_instruction"][k] = piece
            piece_tokens = clip.tokenize(piece, truncate=True).squeeze(0).tolist()
            tokens_split.append(piece_tokens)
        # Truncate/pad to exactly `sub_num` sub-instruction token rows.
        if len(tokens_split)>sub_num:
            tokens_split = tokens_split[0:sub_num]
        tokens_split.extend([useless_sub]*(sub_num-len(tokens_split)))
        train_data[i]["instruction"]["instruction_tokens"] = item["instruction"]["instruction_tokens"][0:pad_len]
        train_data[i]["sub_instruction_tokens"] = tokens_split
        if not keep_subs:
            del item["sub_instruction"]
    return_data["episodes"] = train_data
    return return_data
# # Process all data
# NLTK + refine (NRSub)
# Cut every split with the Punkt tokenizer trained above plus the verb-based
# refinement pass (refine=True) and write "<split>/<split>_sub.json.gz".
splits = ["train", "test", "val_seen", "val_unseen", "envdrop", "joint_train_envdrop"]
f = open('r2r_sent_tokenizer.pickle', 'rb')
sent_tokenizer = pickle.load(f)
f.close()
sent_token = sent_tokenizer.tokenize
tic = time.time()
for split in splits:
    raw_data_path = "%s/%s.json.gz"%(split, split)
    sub_data_path = "%s/%s_sub.json.gz"%(split, split)
    with gzip.open(raw_data_path, "r") as f:
        raw_data = json.loads(f.read())
    sub_data = instruction_cut_clip(raw_data, refine=True, append_dot=False, split_func=sent_token)
    with gzip.open(sub_data_path, "w") as f:
        f.write(json.dumps(sub_data).encode("utf-8"))
# Report total processing time in seconds.
print(time.time()-tic)
# NLTK
# Same as the cell above but with NLTK's built-in `sent_tokenize` instead of
# the trained Punkt model (still refine=True).
splits = ["train", "test", "val_seen", "val_unseen"]#, "envdrop", "joint_train_envdrop"]
sent_token = nltk.sent_tokenize
for split in splits:
    raw_data_path = "%s/%s.json.gz"%(split, split)
    sub_data_path = "%s/%s_sub.json.gz"%(split, split)
    with gzip.open(raw_data_path, "r") as f:
        raw_data = json.loads(f.read())
    sub_data = instruction_cut_clip(raw_data, refine=True, append_dot=False, split_func=sent_token)
    with gzip.open(sub_data_path, "w") as f:
        f.write(json.dumps(sub_data).encode("utf-8"))
# NLTK default
# Trained Punkt tokenizer WITHOUT the refinement pass (refine=False).
splits = ["train", "test", "val_seen", "val_unseen"]#, "envdrop", "joint_train_envdrop"]
f = open('r2r_sent_tokenizer.pickle', 'rb')
sent_tokenizer = pickle.load(f)
f.close()
sent_token = sent_tokenizer.tokenize
for split in splits:
    raw_data_path = "%s/%s.json.gz"%(split, split)
    sub_data_path = "%s/%s_sub.json.gz"%(split, split)
    with gzip.open(raw_data_path, "r") as f:
        raw_data = json.loads(f.read())
    sub_data = instruction_cut_clip(raw_data, refine=False, append_dot=False, split_func=sent_token)
    with gzip.open(sub_data_path, "w") as f:
        f.write(json.dumps(sub_data).encode("utf-8"))
# FGR2R
# Chunk with the stanza-based `create_chunk` splitter (refine=False).
# NOTE(review): `nlp = stanza.Pipeline()` is commented out below, so `nlp`
# must already be defined in the session or this cell raises NameError.
splits = ["train", "test", "val_seen", "val_unseen", "envdrop", "joint_train_envdrop"]
# nlp = stanza.Pipeline()
sent_token = functools.partial(create_chunk, nlp=nlp)
for split in splits:
    raw_data_path = "%s/%s.json.gz"%(split, split)
    sub_data_path = "%s/%s_sub.json.gz"%(split, split)
    with gzip.open(raw_data_path, "r") as f:
        raw_data = json.loads(f.read())
    sub_data = instruction_cut_clip(raw_data, refine=False, append_dot=False, split_func=sent_token)
    with gzip.open(sub_data_path, "w") as f:
        f.write(json.dumps(sub_data).encode("utf-8"))
# # Statics
# Collect raw instructions and their sub-instructions over all splits, then
# plot word-count and sub-instruction-count distributions.
raw_instructions = []
sub_instructions = []
splits = ["train", "val_seen", "val_unseen","test"]
for split in splits:
    # /home/raven/codes/vln-ce/data/datasets/R2R_VLNCE_FGR2R/
    sub_data_path = "%s/%s_sub.json.gz"%(split, split)
    with gzip.open(sub_data_path, "r") as f:
        sub_data = json.loads(f.read())
    for item in tqdm(sub_data["episodes"]):
        raw_instructions.append(item["instruction"]["instruction_text"])
    for item in tqdm(sub_data["episodes"]):
        sub_instructions.append(item["sub_instruction"])
# Total instruction count vs total sub-instruction count.
lens = [len(raw_instructions), sum([len(v) for v in sub_instructions])]
# Average number of sub-instructions per instruction.
print(np.mean([len(v) for v in sub_instructions]))
# Word counts per raw sentence vs per cut sub-sentence.
raw_len_list = [len(nltk.word_tokenize(v)) for v in raw_instructions]
sub_len_list = [len(nltk.word_tokenize(v)) for v in list(itertools.chain.from_iterable(sub_instructions))]
fig = plt.figure(figsize=(6.4,4.8))
ax = fig.add_subplot()
sns.histplot({"raw (totally 13436 sentences)":raw_len_list,"cut (totally 54832 sentences)":sub_len_list}, ax=ax, binrange=[0,80], binwidth=2)
ax.set_xlabel("Word number")
ax.set_title("Histogram of word numbers in a sentence")
plt.savefig("tmp4.jpg",dpi=100, bbox_inches = 'tight')
plt.show()
# Distribution of the number of sub-instructions per instruction.
sub_num_list = [len(v) for v in sub_instructions]
sub_num_cnt = Counter(sub_num_list)
fig = plt.figure(figsize=(6.4,4.8),dpi=100)
ax = fig.add_subplot()
sns.barplot(x=list(sub_num_cnt.keys()), y=list(sub_num_cnt.values()))
plt.title("%2.2f%% instructions contain >= 2 subs"%((1-sub_num_cnt[1]/np.sum(list(sub_num_cnt.values())))*100 ))
plt.savefig("tmp3.jpg",dpi=100)
np.sum(list(sub_num_cnt.values()))
# + [markdown] tags=[]
# # Example
# -
# Side-by-side spot check: local cut (`sub_data`) vs the FGR2R reference cut
# (`sub_data1`) for the first 10 training episodes.
split = "train"
sub_data_path = "%s/%s_sub.json.gz"%(split, split)
with gzip.open(sub_data_path, "r") as f:
    sub_data = json.loads(f.read())
split = "train"
sub_data_path = "/home/raven/codes/vln-ce/data/datasets/R2R_VLNCE_FGR2R/%s/%s_sub.json.gz"%(split, split)
with gzip.open(sub_data_path, "r") as f:
    sub_data1 = json.loads(f.read())
for i in range(0,10):
    print(sub_data["episodes"][i]["sub_instruction"])
    print(sub_data1["episodes"][i]["sub_instruction"])
print(sub_data["episodes"][2])
# # Comparison betweeen FGR2R and NLTK+refine
# +
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import gzip
import json
plt.rcParams['font.sans-serif'] = ["Arial"]
plt.rcParams['axes.unicode_minus'] = False
# Gather per-instruction sub-instruction counts for the NRSub cut...
sub_instructions = []
splits = ["train", "val_seen", "val_unseen","test"]
for split in splits:
    # /home/raven/codes/vln-ce/data/datasets/R2R_VLNCE_FGR2R/
    sub_data_path = "NRSub/%s/%s_sub.json.gz"%(split, split)
    with gzip.open(sub_data_path, "r") as f:
        sub_data = json.loads(f.read())
    for item in tqdm(sub_data["episodes"]):
        sub_instructions.append(item["sub_instruction"])
nltkr_lens = [len(v) for v in sub_instructions]
# ...and for the FGR2R (FGSub) cut.
sub_instructions = []
for split in splits:
    # /home/raven/codes/vln-ce/data/datasets/R2R_VLNCE_FGR2R/
    sub_data_path = "FGSub/%s/%s_sub.json.gz"%(split, split)
    with gzip.open(sub_data_path, "r") as f:
        sub_data = json.loads(f.read())
    for item in tqdm(sub_data["episodes"]):
        sub_instructions.append(item["sub_instruction"])
fgr2r_lens = [len(v) for v in sub_instructions]
fgr2r_mean = np.mean(np.array(fgr2r_lens))
nltkr_mean = np.mean(np.array(nltkr_lens))
# -
# Histogram (+KDE) of sub-instruction counts: FGSub vs NLTKSub.
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
c = [plt.cm.Spectral(i/1.0) for i in range(2)]
sns.histplot({"FGSub (62675 sentences)": fgr2r_lens, "NLTKSub (70806 sentences)": nltkr_lens}, ax=ax, binrange=[
    0, 12], binwidth=1, kde=True, kde_kws={"gridsize": 100, "bw_method": 0.4}, line_kws={"linewidth":2}, zorder=1000)
# ax.hist(fgr2r_lens,density=True, bins=10, alpha=0.75)
# ax.hist(nltkr_lens,density=True, bins=10, alpha=0.75)
ax.set_xlabel("Sub-instruction Number", fontsize=13, fontweight='bold')
ax.set_ylabel("Frequency", fontsize=13, fontweight='bold')
ax.set_title("Histogram of Sub-instruction Numbers", fontsize=13, fontweight='bold')
ax.set_xlim([0, 12])
ax.legend(["FGSub (62675 sentences)", "NLTKSub (70806 sentences)"])
plt.xticks(fontsize=12, )
plt.yticks(fontsize=12, )
leg = ax.get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=12, fontweight='bold')
plt.savefig("cmp_sub.pdf", bbox_inches='tight')
plt.show()
# Pure KDE version of the same comparison.
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
sns.kdeplot(fgr2r_lens, shade=True, label="Cyl=4", alpha=.7, gridsize=200, bw_method=0.35)
sns.kdeplot(nltkr_lens, shade=True, label="Cyl=4", alpha=.7, gridsize=200, bw_method=0.35)
ax.set_xlabel("Sub-instruction Number", fontsize=13, fontweight='bold')
ax.set_ylabel("Density Estimation", fontsize=13, fontweight='bold')
ax.set_title("KDE Plot of Sub-instruction Numbers", fontsize=13, fontweight='bold')
ax.set_xlim([0, 12])
ax.legend(["FGSub (62675 sentences)", "NRSub (70806 sentences)"])
plt.xticks(fontsize=12, )
plt.yticks(fontsize=12, )
leg = ax.get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=12, fontweight='bold')
plt.savefig("cmp_sub_kde.pdf", bbox_inches='tight')
plt.show()
|
sub_cut.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# In this notebook, we compute invariant sets for the planar system with univariate input of [Example 1 of [AT19]](https://github.com/janis10/cis2m/blob/master/example2D.m).
# We need to select a semidefinite programming solver to find the invariant set. A complete list of available solvers is [available here](https://www.juliaopt.org/JuMP.jl/stable/installation/#Getting-Solvers-1).
#
# [AT19] <NAME> and <NAME>, "Computing controlled invariant sets in two moves", In 2019 IEEE Conference on Decision and Control (CDC).
using MosekTools
using JuMP
# Mosek as the SDP solver; QUIET suppresses solver logging.
solver = with_optimizer(Mosek.Optimizer, QUIET=true)
# We first define the safe sets and input sets for our system using [Polyhedra](https://github.com/JuliaPolyhedra/Polyhedra.jl).
using Polyhedra
# Safe set in H-representation: { x : G x <= F }.
G =[ 0.9147 -0.5402
0.2005 0.6213
-0.8193 0.9769
-0.4895 -0.8200
0.7171 -0.3581
0.8221 0.0228
0.3993 -0.8788]
F = [0.5566
0.8300
0.7890
0.3178
0.4522
0.7522
0.1099]
safe_set = polyhedron(hrep(G, F), DefaultLibrary{Float64}(solver))
cheby_center, cheby_radius = chebyshevcenter(safe_set, solver)
# Scalar input constrained to the interval [-2, 2].
input_set = polyhedron(convexhull([-2], [2]))
# We now define the dynamic of our system and create it using [MathematicalSystems](https://github.com/JuliaReach/MathematicalSystems.jl/).
using MathematicalSystems
A = [1.5 1.0
0.0 1.0]
B = reshape([0.5, 0.25], 2, 1)
using SwitchOnSafety
# Discrete-time system x+ = A x + B u with x in safe_set, u in input_set.
system = ConstrainedLinearControlDiscreteSystem(A, B, safe_set, input_set)
# We now compute the invariant set by searching for any ellipsoid with a given point in its interior.
# As the system is reformulated into an algebraic system with safe set `safe_set * input_set`, the Chebyshev center is `(cheby_center, 0)` (note that `Polyhedra.chebyshevcenter(safe * input)` is currently incorrect because of https://github.com/JuliaPolyhedra/Polyhedra.jl/issues/125) where `cheby_center` is the Chebyshev center of `safe_set`.
# To avoid having to solve Bilinear Matrix Inequalities, we set the S-procedure scaling to `1.05` (found by a few trials, checking what gives the best `objective_value`).
# NOTE(review): the next two assignments overwrite each other — only the last
# value (1.1884) is used; kept from the original notebook experiments.
S_procedure_scaling = 1.254
S_procedure_scaling = 1.1884
cheby_center, cheby_radius = chebyshevcenter(safe_set, solver)
cheby = [cheby_center; 0.0]
# NOTE(review): `cheby` is immediately overwritten with the origin here.
cheby = zeros(3)
using SwitchOnSafety
variable = Ellipsoid(point = SetProg.InteriorPoint(cheby))
max_vol_ell = invariant_set(system, solver, variable, λ = S_procedure_scaling)
# Instead of maximizing the volume, we can maximize the L1 norm.
using SwitchOnSafety
max_tr_ell = invariant_set(system, solver, variable, λ = 1.181, volume_heuristic = ell -> L1_heuristic(ell, ones(3)))
# We can see that we get a larger ellipsoid for the volume maximization but not for the L1 norm as maximizing the L1 integral over the hypercube centered at the origin is not a very good heuristic. We should instead maximize the L1 integral over the safe set but this is not implemented yet in [SetProg](https://github.com/blegat/SetProg.jl).
using Plots
# Plot the safe set and project both invariant ellipsoids onto the state plane.
plot(safe_set)
plot!(project(max_vol_ell, 1:2), color=:orange)
plot!(project(max_tr_ell, 1:2), color=:green)
|
examples/Controlled_Invariant_Sets/Planar_System_bounded_univariate_control.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter notebook example
# This is an example Jupyter notebook, created as
# part of the living documents and reproducible reports
# workshop. The GitHub repository for this workshop is
# available here: https://github.com/a-paxton/living-documents
# **Created by**: <NAME> (University of Connecticut)
# **Last modified**: 10 July 2020
# ***
# ## "Living documents"
# Jupyter notebooks are an interactive way to weave together
# **text**, **live code**, **output**, and **images** in a single document.
# Jupyter notebooks are made up of a series of sequential **cells**. These
# cells can be *code cells* (for executable code), *markdown cells*
# (for markdown-formattable text, including adding images), or *raw cells*
# (for code that will not be executed or modified by Jupyter directly). More
# on the different kinds of cells is available in their excellent
# [introductory documentation](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#structure-of-a-notebook-document).
# Press the "+" symbol in the top bar to add a new chunk, or double-click
# on an existing chunk to edit it.
# To run a code cell or to render the text or images from a markdown cell,
# click the "play" button in the top bar of the notebook.
# <img src="http://www.marcelpatek.com/nbo/img/jupyter.svg">
# **Figure**. An example image in a Jupyter notebook -- the Jupyter logo.
# As you can see, markdown is compatible with HTML formatting, too.
# ... and here's some code
import numpy as np
# Tiny demo: the mean of [1, 8, 9] is displayed as the cell's output.
y = [1, 8, 9]
np.mean(y)
# ***
# # How do Jupyter notebooks help?
# ## Preserve your own sanity
# For whatever reason, we sometimes find ourselves picking up projects after weeks or months away. By investing a little more time while we work, we can use Jupyter's capabilities to document what we're doing and why in a clear workflow... much to the joy of our future selves.
# ## Improve transparency and promote reproducibility
# Living documents can make our own lives easier, but they can also provide important additional information for other researchers. As shareable and executable supplemental materials for publications and other scholarly works, Jupyter notebooks allow us to clearly document our work and provide important supporting information.
# ## Integrate your workflow
# By providing a single interface to multiple languages, Jupyter notebooks can provide a platform for us to streamline our workflow. As we'll see below, Jupyter notebooks allow us to integrate multiple languages and individual analysis files -- even in the same notebook!
# ***
# # Core features
# ## Use the same format for multiple languages
# You can use the Jupyter notebook format with a variety of languages -- including R, MATLAB, Octave, Julia, Ruby, and Python -- by downloading "kernels" for those languages.
# ## Access other languages in the same Python notebook
# When using a Python kernel, you can incorporate specific cells of other languages. As part of what are called [cell magics](http://ipython.readthedocs.io/en/stable/interactive/magics.html), you can use R magics to add cells of R code to your Python notebook.
# load in the R magic
# %load_ext rpy2.ipython
# Once you've loaded the extension, you simply need to type `%%R` at the beginning of each cell to activate it.
# + language="R"
# # note that any comments MUST happen after the first line...
# r_list = c(0:3)
# mean(r_list)
# -
# ... or the magic won't work
# %%R
# NOTE(review): deliberately broken demo — without the %%R cell magic active,
# `c` and `r_list` are undefined in Python, so this cell fails as the
# tutorial text above explains.
r_list2 = c(1:5)
mean(r_list)
# You can transfer your R-based work into Python with `%Rget`...
# %Rget r_list
# ... or push your Python-based work into R with `%Rpush`.
python_list = list(range(10,16))
python_list
# %Rpush python_list
# + language="R"
# python_list
# -
# ## Call external scripts
# You can also call external scripts from your notebook.
# %run 'test-external-script.py'
# ***
# # Neat features
# ## Make a table of contents using header cells and markdown formatting.
# **Table of Contents**
# * ["Living documents"](#"Living-documents")
# * [How do Jupyter notebooks help?](#How-do-Jupyter-notebooks-help?)
# * [Preserve your own sanity](#Preserve-your-own-sanity)
# * [Improve transparency and promote reproducibility](#Improve-transparency-and-promote-reproducibility)
# * [Integrate your workflow](#Integrate-your-workflow)
# ## Render equations with LaTeX
# $\Sigma_{t} = \alpha^{2} - 5$
# ## Incorporate HTML formatting
# This is particularly useful for images.
# <br>And for single-line breaks.
# <br>And <a href="https://media.giphy.com/media/LXONhtCmN32YU/giphy.gif" target="_blank">to open live links in new tabs</a>.
# <img src="http://www.marcelpatek.com/nbo/img/jupyter.svg" style="height:50px;">
# ***
# # Installing Jupyter and the R kernels
# Instructions for installing Jupyter are available here:
# https://jupyter.readthedocs.io/en/latest/install.html
# After you install Jupyter, you can then install the R kernel (so that you can run a Jupyter notebook of purely R code) through the instructions here:
# https://irkernel.github.io/installation/
|
jupyter-notebooks/jupyter_notebook-basic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# name: python3
# ---
# ## Preprocessing
# + colab={"base_uri": "https://localhost:8080/"} id="bRqNl6G-mPT7" outputId="68b29eb7-ad2e-49f9-af64-ab03ba0add06"
# from google.colab import drive
# drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="H7Swf4P8H5xv" outputId="c6fdfb30-7d8e-4a4f-b28a-927f6cb8e76d"
# # cd drive/MyDrive/NLP_Project
# + id="6135jyy5IQ2B"
# # !pip install pandas matplotlib tqdm seaborn sklearn numpy graphviz
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
import numpy as np
import warnings
warnings.filterwarnings('always')
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import pickle as pk
# + id="wCsgwo7tIvAs"
# Load precomputed feature matrices (.npy) and one-label-per-line label files
# for the train/test/trial splits. `with` closes each label file promptly
# instead of leaking the handle from `open(...).readlines()`.
trainData = np.load('../../../dataFinal/npy_files/noWE_t2_train.npy')
with open('../../../dataFinal/finalTrainLabels.labels', 'r') as f:
    trainLabels = f.readlines()
testData = np.load('../../../dataFinal/npy_files/noWE_t2_test.npy')
with open('../../../dataFinal/finalTestLabels.labels', 'r') as f:
    testLabels = f.readlines()
validationData = np.load('../../../dataFinal/npy_files/noWE_t2_trial.npy')
with open('../../../dataFinal/finalDevLabels.labels', 'r') as f:
    validationLabels = f.readlines()
# + colab={"base_uri": "https://localhost:8080/"} id="1QjcQGoVGDJB" outputId="ff4a6bee-c555-4753-a288-9b5baed2bb0b"
# Convert the newline-terminated label strings to ints in place
# (int() tolerates the trailing whitespace from readlines()).
for i in tqdm(range(len(trainLabels))):
    trainLabels[i] = int(trainLabels[i])
for i in tqdm(range(len(testLabels))):
    testLabels[i] = int(testLabels[i])
for i in tqdm(range(len(validationLabels))):
    validationLabels[i] = int(validationLabels[i])
# + id="A6A_MlQgJlte"
# Turn the label lists into flat numpy arrays and alias the conventional
# X_/y_ names used below. reshape((-1,)) is defensive: the arrays are
# already one-dimensional.
trainLabels = np.array(trainLabels)
testLabels = np.array(testLabels)
validationLabels = np.array(validationLabels)
trainLabels = trainLabels.reshape((-1, ))
testLabels = testLabels.reshape((-1, ))
validationLabels = validationLabels.reshape((-1, ))
X_train, X_test, y_train, y_test, X_val, y_val = trainData, testData, trainLabels, testLabels, validationData, validationLabels
# -
# ## Estimators
# + [markdown] id="yOeyPMrplFYL"
# ### Range 1
#
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="QfFhxz3ElIC9" outputId="ded38644-9827-4d49-a7ee-27e26422afeb"
# Grid over n_estimators: fit AdaBoost on the train split and print accuracy
# plus per-class metrics on the test/train/validation splits for each setting.
# accuracy = []
# predVal = []
itr= [150, 250, 350, 450]
for i in tqdm(range(len(itr))):
    clf = AdaBoostClassifier(algorithm="SAMME.R",n_estimators=itr[i], random_state=0)
    clf.fit(trainData, trainLabels)
    y_pred = clf.predict(testData)
    y_true = testLabels
    print("Test Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    y_pred = clf.predict(trainData)
    y_true = trainLabels
    print("Train Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    y_pred = clf.predict(validationData)
    y_true = validationLabels
    print("Validation Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    #predVal.append(itr[i])
# plt.plot(predVal, accuracy)
# plt.xlabel('Num estimators')
# plt.ylabel('Accuracy')
# plt.title('Accuracy vs Num estimators')
# plt.show()
# -
#ADA boost NoWE
# Accuracy values transcribed from the coarse sweep output above,
# plotted once per split and then combined on a single axis.
itr= [150, 250, 350, 450]
test_accuracy = [27.31,27.31,26.91,26.26]
train_accuracy = [27.43,27.77,27.56,26.92]
trial_accuracy = [27.2,27.14,26.83,26.18]
accuracy= test_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Test Accuracy vs Num estimators')
plt.show()
accuracy= train_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Train Accuracy vs Num estimators')
plt.show()
accuracy= trial_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Trial Accuracy vs Num estimators')
plt.show()
# +
# Combined view of all three splits.
plt.plot(itr, test_accuracy, label = "Test")
plt.plot(itr, train_accuracy, label = "Train")
plt.plot(itr, trial_accuracy, label = "Trial")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Test Accuracy vs Num estimators')
plt.legend()
plt.show()
# -
# ### Range 2
#
# +
# Finer sweep around the best coarse setting (150-225 estimators).
ada_accuracy = []
ada_predVal = []
itr= [150, 175, 200, 225]
for i in tqdm(range(len(itr))):
    clf = AdaBoostClassifier(algorithm="SAMME.R",n_estimators=itr[i], random_state=0)
    clf.fit(trainData, trainLabels)
    # Held-out test split.
    y_pred = clf.predict(testData)
    y_true = testLabels
    print("Test Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    # Training split.
    y_pred = clf.predict(trainData)
    y_true = trainLabels
    print("Train Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
    # Validation split.
    y_pred = clf.predict(validationData)
    y_true = validationLabels
    print("Validation Data:")
    print("Classification report for case: ",itr[i])
    print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
    print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# ada_accuracy.append(round(accuracy_score(y_pred = y_pred, y_true = testLabels) * 100,2))
# ada_predVal.append(itr[i])
# plt.plot(ada_predVal, ada_accuracy)
# plt.xlabel('Num estimators')
# plt.ylabel('ada_Accuracy')
# plt.title('ada_Accuracy vs Num estimators')
# plt.show()
# -
#ADA boost NoWE
# Accuracy values transcribed from the fine sweep output above; test accuracy
# peaks at 175-200 estimators, which motivates the n_estimators=200 choice.
itr= [150, 175, 200, 225]
test_accuracy = [27.31,27.45,27.45,27.33]
train_accuracy = [27.43,27.7,27.86,27.76]
trial_accuracy = [27.2,27.15,27.24,27.22]
accuracy= test_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Test Accuracy vs Num estimators')
plt.show()
accuracy= train_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Train Accuracy vs Num estimators')
plt.show()
accuracy= trial_accuracy
plt.plot(itr, accuracy)
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Trial Accuracy vs Num estimators')
plt.show()
# +
# Combined view of all three splits.
plt.plot(itr, test_accuracy, label = "Test")
plt.plot(itr, train_accuracy, label = "Train")
plt.plot(itr, trial_accuracy, label = "Trial")
plt.xlabel('Num estimators')
plt.ylabel('Accuracy')
plt.title('Test Accuracy vs Num estimators')
plt.legend()
plt.show()
# -
# ## Algorithm
# + [markdown] id="J1BFRUvebBRK"
# ### "SAMME"
# + id="VmgCogIdd4dv" outputId="ee6ad446-3bb3-4ac5-f288-0482fb7b1e4f"
# Single fit with the discrete SAMME boosting variant at the chosen
# n_estimators=200, evaluated on test, train and validation splits.
clf = AdaBoostClassifier(algorithm="SAMME", n_estimators=200, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_true = testLabels
print("Test Data:")
accuracy = round(accuracy_score(y_pred = y_pred, y_true = y_true) * 100,2)
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
y_pred = clf.predict(X_train)
y_true = trainLabels
print("Train Data:")
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
y_pred = clf.predict(X_val)
y_true = validationLabels
print("Validation Data:")
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# -
# Transcribed SAMME accuracies (~22-23%), plotted as single points per split.
itr= [1]
test_accuracy = [22.8]
train_accuracy = [22.65]
trial_accuracy = [22.94]
# +
plt.plot(itr, test_accuracy, label = "Test", marker="o", markersize=10,)
plt.plot(itr, train_accuracy, label = "Train", marker="o", markersize=10,)
plt.plot(itr, trial_accuracy, label = "Trial", marker="o", markersize=10,)
plt.xlim(1, 1)
plt.ylim(22, 23.5)
plt.xlabel('SAMME')
plt.ylabel('Accuracy')
plt.title('Accuracy in SAMME')
plt.legend()
plt.show()
# + [markdown] id="J1BFRUvebBRK"
# ### "SAMME.R" Better
# + id="YOleKFlTd4dw" outputId="540fd6b1-1769-43a8-8e15-433907d2d467"
# Single fit with the real-valued SAMME.R boosting variant at n_estimators=200;
# it outperforms discrete SAMME on this data (~27.45% vs ~22.8% test accuracy).
X_train, X_test, y_train, y_test = trainData, testData, trainLabels, testLabels
clf = AdaBoostClassifier(algorithm="SAMME.R",n_estimators=200, random_state=0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_true = testLabels
print("Test Data:")
accuracy = round(accuracy_score(y_pred = y_pred, y_true = y_true) * 100,2)
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
y_pred = clf.predict(X_train)
y_true = trainLabels
print("Train Data:")
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
y_pred = clf.predict(X_val)
y_true = validationLabels
print("Validation Data:")
print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# -
# Transcribed SAMME.R accuracies, plotted as single points per split.
itr= [1]
test_accuracy = [27.45]
train_accuracy = [27.86]
trial_accuracy = [27.24]
# +
plt.plot(itr, test_accuracy, label = "Test", marker="o", markersize=10,)
plt.plot(itr, train_accuracy, label = "Train", marker="o", markersize=10,)
plt.plot(itr, trial_accuracy, label = "Trial", marker="o", markersize=10,)
plt.xlim(1, 1)
plt.ylim(26, 28)
plt.xlabel('SAMMER')
plt.ylabel('Accuracy')
plt.title('Accuracy in SAMMER')
plt.legend()
plt.show()
# -
# ## AdaBoost Final Results
# ### algorithm="SAMME.R",n_estimators=200
# ### Accuracy: 27.24
# +
# clf = AdaBoostClassifier(algorithm="SAMME.R",n_estimators=200, random_state=0)
# clf.fit(trainData, trainLabels)
# y_pred = clf.predict(X_test)
# y_true = testLabels
# print("Test Data:")
# accuracy = round(accuracy_score(y_pred = y_pred, y_true = y_true) * 100,2)
# print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
# print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# y_pred = clf.predict(X_train)
# y_true = trainLabels
# print("Train Data:")
# print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
# print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# y_pred = clf.predict(X_val)
# y_true = validationLabels
# print("Validation Data:")
# print("Accuracy % : ",round(accuracy_score(y_pred = y_pred, y_true=y_true) * 100,2))
# print(classification_report(y_true,y_pred,labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]))
# -
# Persist the final trained AdaBoost model with pickle (pk alias).
# Use a context manager so the file handle is closed after the dump;
# the original passed an anonymous open() result to pk.dump and leaked it.
filename = 'finalModel_AB_NoWE_2500'
with open(filename, 'wb') as model_file:
    pk.dump(clf, model_file)
|
src/finalModels/AdaBoost/Adaboost_NoWE.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import annoy
# +
def load_data():
    """Read the pickled movie dataset (embedding vectors + names) from disk."""
    with open('movies.pickle', 'rb') as handle:
        return pickle.load(handle)
data = load_data()
data
# -
# ## Annoy
class AnnoyIndex():
    """Thin wrapper around annoy.AnnoyIndex that maps results back to labels.

    NOTE(review): shadows the library class of the same name; the library is
    always accessed via the ``annoy`` module, so behavior is unaffected.
    """

    def __init__(self, vectors, labels):
        # Annoy operates on float32 data; remember the embedding width.
        self.dimention = vectors.shape[1]
        self.vectors = vectors.astype('float32')
        self.labels = labels

    def build(self, number_of_trees=5):
        """Construct the forest of random-projection trees over all vectors."""
        index = annoy.AnnoyIndex(self.dimention)
        for position, embedding in enumerate(self.vectors):
            index.add_item(position, embedding.tolist())
        index.build(number_of_trees)
        self.index = index

    def query(self, vector, k=10):
        """Return the labels of the k stored vectors nearest to ``vector``."""
        neighbour_ids = self.index.get_nns_by_vector(vector.tolist(), k)
        return [self.labels[neighbour] for neighbour in neighbour_ids]
# Build the index over all movie embeddings and run a sample query.
index = AnnoyIndex(data["vector"], data["name"])
index.build()

# Use movie #90 as the query point and list its nearest neighbours.
movie_vector, movie_name = data['vector'][90], data['name'][90]
simlar_movies_names = '\n* '.join(index.query(movie_vector))
print(f"The most similar movies to {movie_name} are:\n* {simlar_movies_names}")
# ## RP Forest
# https://github.com/lyst/rpforest
|
Lectures/search_in_practice-approximate_nearest_neighbors/Trees and Forrests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://machinelearningmastery.com/check-point-deep-learning-models-keras/
# !wget http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data
# +
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np

# Fix the RNG seed for reproducibility.
seed = 7
np.random.seed(seed)
# load pima indians dataset: 8 clinical features, binary diabetes label.
dataset = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
X = dataset[:, 0:8]
Y = dataset[:, 8]
# create model: small fully-connected binary classifier.
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Save a checkpoint each time validation accuracy improves; epoch and val_acc
# are embedded in the filename, so every improvement keeps its own file.
# NOTE(review): the 'val_acc' metric name belongs to older Keras releases;
# newer versions report 'val_accuracy' -- confirm against the installed Keras.
filepath = 'weights-improvement-{epoch:03d}-{val_acc:.2f}.hdf5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
callbacks_list = [checkpoint]
model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10,
          callbacks=callbacks_list, verbose=0)
# -
# ls *.hdf5
# ## Checkpoint Best Neural Network Model Only
# +
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np

# Fix the RNG seed for reproducibility.
seed = 7
np.random.seed(seed)
# load pima indians dataset: 8 clinical features, binary diabetes label.
dataset = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
X = dataset[:, 0:8]
Y = dataset[:, 8]
# create model: same small fully-connected classifier as the previous cell.
model = Sequential()
model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Unlike the previous cell, the filename has no placeholders: the single file
# is overwritten on each improvement and always holds the best model so far.
# NOTE(review): 'val_acc' vs 'val_accuracy' naming depends on the Keras version.
filepath = 'weights.best.hdf5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
callbacks_list = [checkpoint]
model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10,
          callbacks=callbacks_list, verbose=0)
# -
# ## Load weights
# +
# BUG FIX: load_model was called below without being imported, raising a
# NameError at runtime; it is now imported from keras.models.
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np

# Fix the RNG seed so the evaluation run is reproducible.
seed = 7
np.random.seed(seed)

# The checkpoint file was written by ModelCheckpoint saving the full model,
# so it contains the architecture as well as the weights and the whole model
# can be restored directly -- no need to rebuild the Sequential stack and
# call load_weights on it.
model = load_model('weights.best.hdf5')
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Evaluate the restored model on the full Pima Indians diabetes dataset.
dataset = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
X = dataset[:, 0:8]  # 8 clinical input features
Y = dataset[:, 8]    # binary diabetes label
scores = model.evaluate(X, Y, verbose=0)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
|
keras/180102-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to size your bets - The Kelly Rule
# The Kelly rule has a long history in gambling because it provides guidance on how much to stake on each of an (infinite) sequence of bets with varying (but favorable) odds to maximize terminal wealth. It was published as A New Interpretation of the Information Rate in 1956 by <NAME> who was a colleague of <NAME> at Bell Labs. He was intrigued by bets placed on candidates at the new quiz show The $64,000 Question, where a viewer on the west coast used the three-hour delay to obtain insider information about the winners.
#
# Kelly drew a connection to Shannon's information theory to solve for the bet that is optimal for long-term capital growth when the odds are favorable, but uncertainty remains. His rule maximizes logarithmic wealth as a function of the odds of success of each game, and includes implicit bankruptcy protection since log(0) is negative infinity so that a Kelly gambler would naturally avoid losing everything.
# ## Imports
import warnings
warnings.filterwarnings('ignore')
# +
# %matplotlib inline
from pathlib import Path
import numpy as np
from numpy.linalg import inv
from numpy.random import dirichlet
import pandas as pd
from sympy import symbols, solve, log, diff
from scipy.optimize import minimize_scalar, newton, minimize
from scipy.integrate import quad
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
# -
sns.set_style('whitegrid')
np.random.seed(42)
DATA_STORE = Path('..', 'data', 'assets.h5')
# ## The optimal size of a bet
# Kelly began by analyzing games with a binary win-lose outcome. The key variables are:
# - b: The odds define the amount won for a \\$1 bet. Odds = 5/1 implies a \\$5 gain if the bet wins, plus recovery of the \\$1 capital.
# - p: The probability defines the likelihood of a favorable outcome.
# - f: The share of the current capital to bet.
# - V: The value of the capital as a result of betting.
#
# The Kelly rule aims to maximize the value's growth rate, G, of infinitely-repeated bets (see Chapter 5 for background).
# $$G=\lim_{N\rightarrow\infty}=\frac{1}{N}\log\frac{V_N}{V_0}$$
# We can maximize the rate of growth G by maximizing G with respect to f, as illustrated using sympy as follows:
# Symbolically derive the Kelly-optimal betting share: maximize the expected
# log wealth V(share) by solving dV/dshare = 0.
share, odds, probability = symbols('share odds probability')
Value = probability * log(1 + odds * share) + (1 - probability) * log(1 - share)
solve(diff(Value, share), share)
# Special case of even odds (b = 1): the optimum reduces to f* = 2p - 1.
f, p = symbols('f p')
y = p * log(1 + f) + (1 - p) * log(1 - f)
solve(diff(y, f), f)
# ## Get S&P 500 Data
# Load the daily S&P 500 close series from the local HDF5 store.
with pd.HDFStore(DATA_STORE) as store:
    sp500 = store['sp500/stooq'].close
# ### Compute Returns & Standard Deviation
# Annual returns plus a rolling 25-year mean/std, visualized as a
# mean +/- 2*std confidence band.
annual_returns = sp500.resample('A').last().pct_change().dropna().to_frame('sp500')
return_params = annual_returns.sp500.rolling(25).agg(['mean', 'std']).dropna()
return_ci = (return_params[['mean']]
             .assign(lower=return_params['mean'].sub(return_params['std'].mul(2)))
             .assign(upper=return_params['mean'].add(return_params['std'].mul(2))))
return_ci.plot(lw=2, figsize=(14, 8))
plt.tight_layout()
sns.despine();
# ### Kelly Rule for a Single Asset - Index Returns
# In a financial market context, both outcomes and alternatives are more complex, but the Kelly rule logic does still apply. It was made popular by <NAME>, who first applied it profitably to gambling (described in Beat the Dealer) and later started the successful hedge fund Princeton/Newport Partners.
#
# With continuous outcomes, the growth rate of capital is defined by an integral over the probability distribution of the different returns that can be optimized numerically.
# We can solve this expression (see book) for the optimal f* using the `scipy.optimize` module:
def norm_integral(f, mean, std):
    """Return the negative expected log-growth rate of wealth.

    Integrates log(1 + f*s) against a normal return density with the given
    mean and std, truncated at +/- 3 standard deviations.  The sign is
    flipped so the growth-maximizing fraction can be found with a minimizer.

    Args:
        f: fraction of capital bet each period.
        mean: mean of the per-period return distribution.
        std: standard deviation of the per-period return distribution.
    """
    val, er = quad(lambda s: np.log(1 + f * s) * norm.pdf(s, mean, std),
                   mean - 3 * std,
                   mean + 3 * std)
    return -val


def norm_dev_integral(f, mean, std):
    """Return d/df of the expected log-growth rate; its root is f*.

    BUG FIX: the lower integration bound previously read ``m - 3 * std``,
    silently picking up the module-level global ``m`` instead of the
    ``mean`` argument; it now uses ``mean``, consistent with the upper
    bound and with norm_integral.
    """
    val, er = quad(lambda s: (s / (1 + f * s)) * norm.pdf(s, mean, std),
                   mean - 3 * std, mean + 3 * std)
    return val


def get_kelly_share(data):
    """Optimal Kelly fraction for a row with 'mean' and 'std' entries.

    Minimizes the negative expected log growth over f in [0, 2].
    """
    solution = minimize_scalar(norm_integral,
                               args=(data['mean'], data['std']),
                               bounds=[0, 2],
                               method='bounded')
    return solution.x
# Apply the Kelly optimization to every rolling-window (mean, std) pair.
annual_returns['f'] = return_params.apply(get_kelly_share, axis=1)
return_params.plot(subplots=True, lw=2, figsize=(14, 8));
annual_returns.tail()
# ### Performance Evaluation
# Compound the S&P return scaled by the prior year's Kelly fraction and
# compare cumulative growth against the unlevered index.
(annual_returns[['sp500']]
 .assign(kelly=annual_returns.sp500.mul(annual_returns.f.shift()))
 .dropna()
 .loc['1900':]
 .add(1)
 .cumprod()
 .sub(1)
 .plot(lw=2));
annual_returns.f.describe()
return_ci.head()
# ### Compute Kelly Fraction
# Point estimates for the mean/std of annual returns.
m = .058
s = .216
# Option 1: minimize the expectation integral
sol = minimize_scalar(norm_integral, args=(m, s), bounds=[0., 2.], method='bounded')
print('Optimal Kelly fraction: {:.4f}'.format(sol.x))
# Option 2: take the derivative of the expectation and make it null
x0 = newton(norm_dev_integral, .1, args=(m, s))
print('Optimal Kelly fraction: {:.4f}'.format(x0))
# ## Kelly Rule for Multiple Assets
# We will use an example with various equities. [<NAME> (2008)](https://www.amazon.com/Quantitative-Trading-Build-Algorithmic-Business/dp/0470284889) illustrates how to arrive at a multi-asset application of the Kelly Rule, and that the result is equivalent to the (potentially levered) maximum Sharpe ratio portfolio from the mean-variance optimization.
#
# The computation involves the dot product of the precision matrix, which is the inverse of the covariance matrix, and the return matrix:
# Load the S&P 500 constituent list and their adjusted close prices.
with pd.HDFStore(DATA_STORE) as store:
    sp500_stocks = store['sp500/stocks'].index
    prices = store['quandl/wiki/prices'].adj_close.unstack('ticker').filter(sp500_stocks)
prices.info()
# Monthly returns for 1988-2017, dropping tickers with missing history.
monthly_returns = prices.loc['1988':'2017'].resample('M').last().pct_change().dropna(how='all').dropna(axis=1)
stocks = monthly_returns.columns
monthly_returns.info()
# ### Compute Precision Matrix
# Multi-asset Kelly weights: mean returns dotted with the precision
# (inverse covariance) matrix.
cov = monthly_returns.cov()
precision_matrix = pd.DataFrame(inv(cov), index=stocks, columns=stocks)
kelly_allocation = monthly_returns.mean().dot(precision_matrix)
kelly_allocation.describe()
kelly_allocation.sum()
# ### Largest Portfolio Allocation
# The plot shows the tickers that receive an allocation weight > 5x their value:
kelly_allocation[kelly_allocation.abs()>5].sort_values(ascending=False).plot.barh(figsize=(8, 10))
plt.yticks(fontsize=12)
sns.despine()
plt.tight_layout();
# ### Performance vs SP500
# The Kelly rule does really well. But it has also been computed from historical data..
ax = monthly_returns.loc['2010':].mul(kelly_allocation.div(kelly_allocation.sum())).sum(1).to_frame('Kelly').add(1).cumprod().sub(1).plot(figsize=(14,4));
sp500.filter(monthly_returns.loc['2010':].index).pct_change().add(1).cumprod().sub(1).to_frame('SP500').plot(ax=ax, legend=True)
plt.tight_layout()
sns.despine();
|
05_strategy_evaluation/05_kelly_rule.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="ur8xi4C7S06n"
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="DHxMX0JAMELh"
# # **Purchase Prediction with AutoML Tables**
#
# <table align="left">
# <td>
# <a href="https://colab.sandbox.google.com/github/GoogleCloudPlatform/python-docs-samples/blob/master/tables/automl/notebooks/purchase_prediction/purchase_prediction.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/tables/automl/notebooks/purchase_prediction/purchase_prediction.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] colab_type="text" id="tvgnzT1CKxrO"
# ## **Overview**
#
# One of the most common use cases in Marketing is to predict the likelihood of conversion. Conversion could be defined by the marketer as taking a certain action like making a purchase, signing up for a free trial, subscribing to a newsletter, etc. Knowing the likelihood that a marketing lead or prospect will ‘convert’ can enable the marketer to target the lead with the right marketing campaign. This could take the form of remarketing, targeted email campaigns, online offers or other treatments.
#
# Here we demonstrate how you can use BigQuery and AutoML Tables to build a supervised binary classification model for purchase prediction.
# + [markdown] colab_type="text" id="sukxx8RLSjRr"
# ### **Dataset**
# + [markdown] colab_type="text" id="mmn5rn7kScSt"
# The model uses a real dataset from the [Google Merchandise store](https://www.googlemerchandisestore.com/) consisting of Google Analytics web sessions.
#
# The goal here is to predict the likelihood of a web visitor visiting the online Google Merchandise Store making a purchase on the website during that Google Analytics session. Past web interactions of the user on the store website in addition to information like browser details and geography are used to make this prediction.
#
# This is framed as a binary classification model, to label a user during a session as either true (makes a purchase) or false (does not make a purchase). Dataset Details The dataset consists of a set of tables corresponding to Google Analytics sessions being tracked on the Google Merchandise Store. Each table is a single day of GA sessions. More details around the schema can be seen here.
#
# You can access the data on BigQuery [here](https://support.google.com/analytics/answer/3437719?hl=en&ref_topic=3416089).
# + [markdown] colab_type="text" id="SLq3FfRa8E8X"
# ### **Costs**
#
# + [markdown] colab_type="text" id="DzxIfOrB71wl"
# This tutorial uses billable components of Google Cloud Platform (GCP):
#
# * Cloud AI Platform
# * Cloud Storage
# * BigQuery
# * AutoML Tables
#
# Learn about [Cloud AI Platform pricing](https://cloud.google.com/ml-engine/docs/pricing), [Cloud Storage pricing](https://cloud.google.com/storage/pricing), [BigQuery pricing](https://cloud.google.com/bigquery/pricing) and [AutoML Tables pricing](https://cloud.google.com/automl-tables/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.
# + [markdown] colab_type="text" id="ze4-nDLfK4pw"
# ## Set up your local development environment
#
# **If you are using Colab or AI Platform Notebooks**, your environment already meets
# all the requirements to run this notebook. If you are using **AI Platform Notebook**, make sure the machine configuration type is **4 vCPU, 15 GB RAM** or above. You can skip this step.
# + [markdown] colab_type="text" id="gCuSR8GkAgzl"
# **Otherwise**, make sure your environment meets this notebook's requirements.
# You need the following:
#
# * The Google Cloud SDK
# * Git
# * Python 3
# * virtualenv
# * Jupyter notebook running in a virtual environment with Python 3
#
# The Google Cloud guide to [Setting up a Python development
# environment](https://cloud.google.com/python/setup) and the [Jupyter
# installation guide](https://jupyter.org/install) provide detailed instructions
# for meeting these requirements. The following steps provide a condensed set of
# instructions:
#
# 1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
#
# 2. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
#
# 3. [Install
# virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
# and create a virtual environment that uses Python 3.
#
# 4. Activate that environment and run `pip install jupyter` in a shell to install
# Jupyter.
#
# 5. Run `jupyter notebook` in a shell to launch Jupyter.
#
# 6. Open this notebook in the Jupyter Notebook Dashboard.
# + [markdown] colab_type="text" id="BF1j6f9HApxa"
# ## **Set up your GCP project**
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a GCP project.](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the AI Platform APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
#
# 4. [Enable AutoML API.](https://console.cloud.google.com/apis/library/automl.googleapis.com?q=automl)
#
# + [markdown] colab_type="text" id="i7EUnXsZhAGF"
# ## **PIP Install Packages and dependencies**
#
# Install addional dependencies not installed in Notebook environment
# + colab={} colab_type="code" id="n2kLhBBRvdog"
# ! pip install --upgrade --quiet --user google-cloud-automl
# ! pip install --upgrade --quiet --user google-cloud-bigquery
# ! pip install --upgrade --quiet --user google-cloud-storage
# ! pip install --upgrade --quiet --user matplotlib
# ! pip install --upgrade --quiet --user pandas
# ! pip install --upgrade --quiet --user pandas-gbq
# ! pip install --upgrade --quiet --user gcsfs
# + [markdown] colab_type="text" id="kK5JATKPNf3I"
# **Note:** Try installing using `sudo`, if the above command throw any permission errors.
# + [markdown] colab_type="text" id="f-YlNVLTYXXN"
# `Restart` the kernel to allow automl_v1beta1 to be imported for Jupyter Notebooks.
#
# + colab={} colab_type="code" id="C16j_LPrYbZa"
from IPython.core.display import HTML
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
# + [markdown] colab_type="text" id="tPXmVHerC58T"
# ## **Set up your GCP Project Id**
#
# Enter your `Project Id` in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
# + colab={} colab_type="code" id="2hI1ChtyvXa4"
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
COMPUTE_REGION = "us-central1" # Currently only supported region.
# + [markdown] colab_type="text" id="dr--iN2kAylZ"
# ## **Authenticate your GCP account**
#
# **If you are using AI Platform Notebooks**, your environment is already
# authenticated. Skip this step.
# + [markdown] colab_type="text" id="3yyVCJHFSEKG"
# Otherwise, follow these steps:
#
# 1. In the GCP Console, go to the [**Create service account key**
# page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
#
# 2. From the **Service account** drop-down list, select **New service account**.
#
# 3. In the **Service account name** field, enter a name.
#
# 4. From the **Role** drop-down list, select
# **AutoML > AutoML Admin**,
# **Storage > Storage Admin** and **BigQuery > BigQuery Admin**.
#
# 5. Click *Create*. A JSON file that contains your key downloads to your
# local environment.
# + [markdown] colab_type="text" id="Yt6PhVG0UdF1"
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + colab={} colab_type="code" id="q5TeVHKDMOJF"
import sys
# Upload the downloaded JSON file that contains your key.
if 'google.colab' in sys.modules:
from google.colab import files
keyfile_upload = files.upload()
keyfile = list(keyfile_upload.keys())[0]
# %env GOOGLE_APPLICATION_CREDENTIALS $keyfile
# ! gcloud auth activate-service-account --key-file $keyfile
# + [markdown] colab_type="text" id="d1bnPeDVMR5Q"
# ***If you are running the notebook locally***, enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell
# + colab={} colab_type="code" id="fsVNKXESYoeQ"
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
# %env GOOGLE_APPLICATION_CREDENTIALS /path/to/service/account
# ! gcloud auth activate-service-account --key-file '/path/to/service/account'
# + [markdown] colab_type="text" id="zgPO1eR3CYjk"
# ## **Create a Cloud Storage bucket**
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you submit a training job using the Cloud SDK, you upload a Python package
# containing your training code to a Cloud Storage bucket. AI Platform runs
# the code from this package. In this tutorial, AI Platform also saves the
# trained model that results from your job in the same bucket. You can then
# create an AI Platform model version based on this output in order to serve
# online predictions.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
#
# You may also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Make sure to [choose a region where Cloud
# AI Platform services are
# available](https://cloud.google.com/ml-engine/docs/tensorflow/regions). You may
# not use a Multi-Regional Storage bucket for training with AI Platform.
# + cellView="both" colab={} colab_type="code" id="MzGDU7TWdts_"
BUCKET_NAME = "[your-bucket-name]" #@param {type:"string"}
# + [markdown] colab_type="text" id="-EcIXiGsCePi"
# **Only if your bucket doesn't exist**: Run the following cell to create your Cloud Storage bucket. Make sure Storage > Storage Admin role is enabled
# + colab={} colab_type="code" id="NIq7R4HZCfIc"
# ! gsutil mb -p $PROJECT_ID -l $COMPUTE_REGION gs://$BUCKET_NAME
# + [markdown] colab_type="text" id="ucvCsknMCims"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + colab={} colab_type="code" id="vhOb7YnwClBb"
# ! gsutil ls -al gs://$BUCKET_NAME
# + [markdown] colab_type="text" id="XoEqT2Y4DJmf"
# ## **Import libraries and define constants**
# + [markdown] colab_type="text" id="wkJe8sD-EoTE"
# Import relevant packages.
# + colab={} colab_type="code" id="Cj-pbWdxEtZM"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# + colab={} colab_type="code" id="6HT8yR2Cvd0a"
# AutoML library.
from google.cloud import automl_v1beta1 as automl
import google.cloud.automl_v1beta1.proto.data_types_pb2 as data_types
from google.cloud import bigquery
from google.cloud import storage
# + colab={} colab_type="code" id="YPTWUWT0E32J"
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import numpy as np
from sklearn import metrics
# + [markdown] colab_type="text" id="MEqIjz0PFCVO"
# Populate the following cell with the necessary constants and run it to initialize constants.
# + colab={} colab_type="code" id="iXC9vCBrGTKE"
#@title Constants { vertical-output: true }
# A name for the AutoML tables Dataset to create.
DATASET_DISPLAY_NAME = 'purchase_prediction' #@param {type: 'string'}
# A name for the file to hold the nested data.
NESTED_CSV_NAME = 'FULL.csv' #@param {type: 'string'}
# A name for the file to hold the unnested data.
UNNESTED_CSV_NAME = 'FULL_unnested.csv' #@param {type: 'string'}
# A name for the input train data.
TRAINING_CSV = 'training_unnested_balanced_FULL' #@param {type: 'string'}
# A name for the input validation data.
VALIDATION_CSV = 'validation_unnested_FULL' #@param {type: 'string'}
# A name for the AutoML tables model to create.
MODEL_DISPLAY_NAME = 'model_1' #@param {type:'string'}
# Fail fast if any required setting is empty. PROJECT_ID and COMPUTE_REGION
# are defined in the notebook's earlier setup cells (not visible here).
assert all([
    PROJECT_ID,
    COMPUTE_REGION,
    DATASET_DISPLAY_NAME,
    MODEL_DISPLAY_NAME,
])
# + [markdown] colab_type="text" id="X6xxcNmOGjtY"
# Initialize client for AutoML, AutoML Tables, BigQuery and Storage.
# + colab={} colab_type="code" id="0y3EourAGWmf"
# Initialize the clients. These use Application Default Credentials set up
# during authentication; all four are reused throughout the notebook.
automl_client = automl.AutoMlClient()
tables_client = automl.TablesClient(project=PROJECT_ID, region=COMPUTE_REGION)
bq_client = bigquery.Client()
storage_client = storage.Client()
# + [markdown] colab_type="text" id="xdJykMXDozoP"
# ## **Test the set up**
#
# To test whether your project set up and authentication steps were successful, run the following cell to list your datasets in this project.
#
# If no dataset has previously imported into AutoML Tables, you shall expect an empty return.
# + colab={} colab_type="code" id="_dKylOQTpF58"
# List the datasets.
# Maps each dataset's human-readable display name to its full resource name.
list_datasets = tables_client.list_datasets()
datasets = { dataset.display_name: dataset.name for dataset in list_datasets }
datasets
# + [markdown] colab_type="text" id="dleTdOMaplSM"
# You can also print the list of your models by running the following cell.
#
# If no model has previously trained using AutoML Tables, you shall expect an empty return.
#
# + colab={} colab_type="code" id="tMXP6no1pn9p"
# List the models.
# Same display-name -> resource-name mapping, for trained models.
list_models = tables_client.list_models()
models = { model.display_name: model.name for model in list_models }
models
# + [markdown] colab_type="text" id="Z0g-D23HYX9A"
# ##**Transformation and Feature Engineering Functions**
#
# The data cleaning and transformation step was by far the most involved. It includes a few sections that create an AutoML tables dataset, pull the Google merchandise store data from BigQuery, transform the data, and save it multiple times to csv files in google cloud storage.
#
# The dataset that is made viewable in the AutoML Tables UI. It will eventually hold the training data after that training data is cleaned and transformed.
#
# This dataset has only around 1% of its values with a positive label value of True i.e. cases when a transaction was made. This is a class imbalance problem. There are several ways to handle class imbalance. We chose to oversample the positive class by random over sampling. This resulted in an artificial increase in the sessions with the positive label of true transaction value.
#
# There were also many columns with either all missing or all constant values. These columns would not add any signal to our model, so we dropped them.
#
# There were also columns with NaN rather than 0 values. For instance, rather than having a count of 0, a column might have a null value. So we added code to change some of these null values to 0, specifically in our target column, in which null values were not allowed by AutoML Tables. However, AutoML Tables can handle null values for the features.
# + [markdown] colab_type="text" id="5lqd8kOlYeYx"
# **Feature Engineering**
#
# The dataset had rich information on customer location and behavior; however, it can be improved by performing feature engineering. Moreover, there was a concern about data leakage. The decision to do feature engineering, therefore, had two contributing motivations: remove data leakage without too much loss of useful data, and to improve the signal in our data.
#
# **Weekdays**
#
# The date seemed like a useful piece of information to include, as it could capture seasonal effects. Unfortunately, we only had one year of data, so seasonality on an annual scale would be difficult (read impossible) to incorporate. Fortunately, we could try and detect seasonal effects on a micro, with perhaps equally informative results. We ended up creating a new column of weekdays out of dates, to denote which day of the week the session was held on. This new feature turned out to have some useful predictive power, when added as a variable into our model.
#
# **Data Leakage**
#
# The marginal gain from adding a weekday feature, was overshadowed by the concern of data leakage in our training data. In the initial naive models we trained, we got outstanding results. So outstanding that we knew that something must be going on. As it turned out, quite a few features functioned as proxies for the feature we were trying to predict: meaning some of the features we conditioned on to build the model had an almost 1:1 correlation with the target feature. Intuitively, this made sense.
#
# One feature that exhibited this behavior was the number of page views a customer made during a session. By conditioning on page views in a session, we could very reliably predict which customer sessions a purchase would be made in. At first this seems like the golden ticket, we can reliably predict whether or not a purchase is made! The catch: the full page view information can only be collected at the end of the session, by which point we would also have whether or not a transaction was made. Seen from this perspective, collecting page views at the same time as collecting the transaction information would make it pointless to predict the transaction information using the page views information, as we would already have both. One solution was to drop page views as a feature entirely. This would safely stop the data leakage, but we would lose some critically useful information. Another solution, (the one we ended up going with), was to track the page view information of all previous sessions for a given customer, and use it to inform the current session. This way, we could use the page view information, but only the information that we would have before the session even began. So we created a new column called previous_views, and populated it with the total count of all previous page views made by the customer in all previous sessions. We then deleted the page views feature, to stop the data leakage.
#
# Our rationale for this change can be boiled down to the concise heuristic: only use the information that is available to us on the first click of the session. Applying this reasoning, we performed similar data engineering on other features which we found to be proxies for the label feature. We also refined our objective in the process: For a visit to the Google Merchandise store, what is the probability that a customer will make a purchase, and can we calculate this probability the moment the customer arrives? By clarifying the question, we both made the result more powerful/useful, and eliminated the data leakage that threatened to make the predictive power trivial.
# + colab={} colab_type="code" id="BVIYkceJUjCz"
def balanceTable(table):
    """Balance the boolean ``totalTransactionRevenue`` label by random
    over-sampling.

    The minority True class (sessions with a purchase) is re-sampled with
    replacement until it matches the size of the False class.

    Args:
        table: DataFrame with a boolean ``totalTransactionRevenue`` column.

    Returns:
        A new DataFrame containing all False rows plus the over-sampled
        True rows.
    """
    # Count each class explicitly by label. (The original unpacked
    # `value_counts()` positionally, which sorts by frequency and would
    # silently swap the two counts if True ever became the majority class.)
    count_class_false = int((table["totalTransactionRevenue"] == False).sum())
    # divide by class.
    table_class_false = table[table["totalTransactionRevenue"] == False]
    table_class_true = table[table["totalTransactionRevenue"] == True]
    # random over-sampling: draw True rows with replacement until that class
    # is as large as the False class.
    table_class_true_over = table_class_true.sample(
        count_class_false, replace=True)
    return pd.concat([table_class_false, table_class_true_over])
# + colab={} colab_type="code" id="pBMg-NHTUnMU"
def partitionTable(table, dt=20170500):
    """Split rows into (on-or-before, after) the integer date ``dt``.

    Training uses only the earlier partition and validation the later one,
    so the model never trains on future data to predict the past.
    """
    is_on_or_before = table["date"] <= dt
    is_after = table["date"] > dt
    earlier = table[is_on_or_before].copy(deep=False)
    later = table[is_after].copy(deep=False)
    return earlier, later
# + colab={} colab_type="code" id="smziJuelUqbC"
def N_updatePrevCount(table, new_column, old_column):
    """Replace ``old_column`` with ``new_column``: the per-visitor running
    total of ``old_column``, accumulated in chronological order.

    NOTE(review): the cumulative sum includes the current row's value, so a
    "previous_*" column actually means "up to and including this session" --
    confirm downstream code expects that.

    Args:
        table: DataFrame with ``fullVisitorId``, ``date`` and ``old_column``.
        new_column: name of the cumulative column to create.
        old_column: per-session count column to accumulate, then drop.

    Returns:
        The transformed DataFrame, sorted by visitor and date.
    """
    table = table.fillna(0)
    # BUG FIX: sort_values() returns a new frame; the original discarded the
    # result, so the cumulative sum ran in arbitrary row order instead of
    # per-visitor chronological order.
    table = table.sort_values(by=['fullVisitorId', 'date'])
    # cumsum aligns on the (now sorted) index, one running total per visitor.
    table[new_column] = table.groupby(['fullVisitorId'])[old_column].cumsum()
    table.drop([old_column], axis=1, inplace=True)
    return table
# + colab={} colab_type="code" id="vQ4Hlhg2Uu49"
def N_updateDate(table):
    """Parse the integer ``date`` column (YYYYMMDD) into datetimes and add a
    ``weekday`` column (pandas convention: Monday=0 ... Sunday=6)."""
    table['weekday'] = 1  # placeholder, immediately overwritten below
    parsed_dates = pd.to_datetime(table['date'].astype(str), format='%Y%m%d')
    table['date'] = parsed_dates
    table['weekday'] = parsed_dates.dt.dayofweek
    return table
# + colab={} colab_type="code" id="anX4rrFSUxlF"
def change_transaction_values(table):
    """Collapse ``totalTransactionRevenue`` into a boolean label: True when
    any revenue was recorded, False for zero or missing revenue."""
    revenue = table['totalTransactionRevenue'].fillna(0)
    table['totalTransactionRevenue'] = revenue.apply(lambda value: value != 0)
    return table
# + colab={} colab_type="code" id="RRLNtUbfv3pj"
def saveTable(table, csv_file_name, bucket_name):
    # Persist the DataFrame locally as CSV (no index column), then upload
    # the same file to the given Cloud Storage bucket under the same object
    # name. Relies on the module-level `storage_client` initialized earlier.
    table.to_csv(csv_file_name, index=False)
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(csv_file_name)
    blob.upload_from_filename(filename=csv_file_name)
# + [markdown] colab_type="text" id="T1I1dkSAU73g"
# ##**Getting training data**
#
#
# + [markdown] colab_type="text" id="-qfwBGWIB5Nm"
#
# If you are using **Colab** the memory may not be sufficient enough to generate Nested and Unnested data using the queries. In this case, you can directly download the unnested data **FULL_unnested.csv** from [here](https://storage.cloud.google.com/cloud-ml-data/automl-tables/notebooks/trial_for_c4m/FULL_unnested.csv) and upload the file manually to GCS bucket that was created in the previous steps `(BUCKET_NAME)`.
# + [markdown] colab_type="text" id="swgcbjAGLgsl"
# *If* you are using **AI Platform Notebook or Local environment**, run the following code
# + colab={} colab_type="code" id="5CDSXB-Fv3jb"
# Save table.
# Pull one year of Google Analytics sample sessions (366 days ending
# 2017-07-31) from the BigQuery public dataset; several columns (device,
# geoNetwork, totals, trafficSource) arrive as nested records.
query = """
SELECT
  date,
  device,
  geoNetwork,
  totals,
  trafficSource,
  fullVisitorId
FROM
  `bigquery-public-data.google_analytics_sample.ga_sessions_*`
WHERE
  _TABLE_SUFFIX BETWEEN FORMAT_DATE('%Y%m%d',DATE_SUB('2017-08-01', INTERVAL 366 DAY)) AND
  FORMAT_DATE('%Y%m%d',DATE_SUB('2017-08-01', INTERVAL 1 DAY))
"""
df = bq_client.query(query).to_dataframe()
print(df.iloc[:3])
saveTable(df, NESTED_CSV_NAME, BUCKET_NAME)
# + colab={} colab_type="code" id="pTHwOgw8ArcA"
# Unnest the Data.
# Each nested column is stored in the CSV as the string repr of a dict;
# expand each into one flat column per key.
nested_gcs_uri = 'gs://{}/{}'.format(BUCKET_NAME, NESTED_CSV_NAME)
table = pd.read_csv(nested_gcs_uri, low_memory=False)
column_names = ['device', 'geoNetwork','totals', 'trafficSource']
for name in column_names:
    print(name)
    # SECURITY: eval() executes arbitrary code embedded in the CSV cells.
    # Acceptable here only because the data comes from our own BigQuery
    # export; consider ast.literal_eval for untrusted input.
    table[name] = table[name].apply(lambda i: dict(eval(i)))
    temp = table[name].apply(pd.Series)
    table = pd.concat([table, temp], axis=1).drop(name, axis=1)
# need to drop a column.
table.drop(['adwordsClickInfo'], axis=1, inplace=True)
saveTable(table, UNNESTED_CSV_NAME, BUCKET_NAME)
# + [markdown] colab_type="text" id="1UL8YqzdWXeu"
# ### **Run the Transformations**
# + colab={} colab_type="code" id="JJ84Zs68wN3X"
# Run the transformations.
unnested_gcs_uri = 'gs://{}/{}'.format(BUCKET_NAME, UNNESTED_CSV_NAME)
table = pd.read_csv(unnested_gcs_uri, low_memory=False)
# Columns to drop: all-constant/all-missing columns plus leakage-prone
# fields (and `date`/`fullVisitorId`, which are consumed by the feature
# engineering steps below before being removed).
consts = ['transactionRevenue', 'transactions', 'adContent', 'browserSize',
          'campaignCode', 'cityId', 'flashVersion', 'javaEnabled', 'language',
          'latitude', 'longitude', 'mobileDeviceBranding', 'mobileDeviceInfo',
          'mobileDeviceMarketingName','mobileDeviceModel','mobileInputSelector',
          'networkLocation', 'operatingSystemVersion', 'screenColors',
          'screenResolution', 'screenviews', 'sessionQualityDim',
          'timeOnScreen', 'visits', 'uniqueScreenviews', 'browserVersion',
          'referralPath','fullVisitorId', 'date']
# Replace leaky per-session counters with cumulative "previous" features
# available at the start of a session.
table = N_updatePrevCount(table, 'previous_views', 'pageviews')
table = N_updatePrevCount(table, 'previous_hits', 'hits')
table = N_updatePrevCount(table, 'previous_timeOnSite', 'timeOnSite')
table = N_updatePrevCount(table, 'previous_Bounces', 'bounces')
table = change_transaction_values(table)
# + colab={} colab_type="code" id="mTdp0V1wnPer"
# Split chronologically: table1 = first ~9 months (train), table2 = last 3
# months (validation). Only the training split is class-balanced, so the
# validation metrics are not biased by over-sampling.
table1, table2 = partitionTable(table)
table1 = N_updateDate(table1)
table2 = N_updateDate(table2)
table1.drop(consts, axis=1, inplace=True)
table2.drop(consts, axis=1, inplace=True)
saveTable(table2,'{}.csv'.format(VALIDATION_CSV), BUCKET_NAME)
table1 = balanceTable(table1)
# training_unnested_FULL.csv = the first 9 months of data.
saveTable(table1, '{}.csv'.format(TRAINING_CSV), BUCKET_NAME)
# + [markdown] colab_type="text" id="8ZpdDzvPP3Gr"
# ## **Import Training Data**
#
# Select a dataset display name and pass your table source information to create a new dataset.
# + [markdown] colab_type="text" id="SZy-Idpsdn2_"
# #### **Create Dataset**
# + colab={} colab_type="code" id="ZaKxxQTevuV7"
# Create dataset.
# Creates an (empty) AutoML Tables dataset; data is imported below.
dataset = tables_client.create_dataset(
    dataset_display_name=DATASET_DISPLAY_NAME)
dataset_name = dataset.name
dataset
# + [markdown] colab_type="text" id="-6ujokeldxof"
# #### **Import Data**
# + colab={} colab_type="code" id="VDcwd-tswNxn"
# Read the data source from GCS.
dataset_gcs_input_uris = ['gs://{}/{}.csv'.format(BUCKET_NAME, TRAINING_CSV)]
import_data_response = tables_client.import_data(
    dataset=dataset,
    gcs_input_uris=dataset_gcs_input_uris
)
print('Dataset import operation: {}'.format(import_data_response.operation))
# Synchronous check of operation status. Wait until import is done.
# (.result() blocks until the long-running operation completes.)
print('Dataset import response: {}'.format(import_data_response.result()))
# Verify the status by checking the example_count field.
dataset = tables_client.get_dataset(dataset_name=dataset_name)
dataset
# + [markdown] colab_type="text" id="uXpSJ3T-S1xx"
# ## **Review the specs**
# Run the following command to see table specs such as row count.
# + colab={} colab_type="code" id="XQHzt60WwNhI"
# List table specs.
list_table_specs_response = tables_client.list_table_specs(dataset=dataset)
table_specs = [s for s in list_table_specs_response]
# List column specs.
# Map each column's display name to its spec (includes the inferred type).
list_column_specs_response = tables_client.list_column_specs(dataset=dataset)
column_specs = {s.display_name: s for s in list_column_specs_response}
# Print Features and data_type.
features = [(key, data_types.TypeCode.Name(value.data_type.type_code))
            for key, value in column_specs.items()]
print('Feature list:\n')
for feature in features:
    print(feature[0],':', feature[1])
# + colab={} colab_type="code" id="_9AIZL9xTIPV"
# Table schema pie chart.
# Count how many columns were inferred as each AutoML type and plot the
# distribution.
type_counts = {}
for column_spec in column_specs.values():
    type_name = data_types.TypeCode.Name(column_spec.data_type.type_code)
    type_counts[type_name] = type_counts.get(type_name, 0) + 1
plt.pie(x=type_counts.values(), labels=type_counts.keys(), autopct='%1.1f%%')
plt.axis('equal')
plt.show()
# + [markdown] colab_type="text" id="gOeAP21SWrl1"
# ##**Update dataset: assign a label column and enable nullable columns**
# AutoML Tables automatically detects your data column type. Depending on the type of your label column, AutoML Tables chooses to run a classification or regression model. If your label column contains only numerical values, but they represent categories, change your label column type to categorical by updating your schema.
# + [markdown] colab_type="text" id="8g5I3Ua-Sheq"
# ### **Update a column: set to not nullable**
#
# + colab={} colab_type="code" id="pZzF09ogwiu_"
# Update column.
# Mark the label column as non-nullable: AutoML Tables requires every row
# to have a target value.
column_spec_display_name = 'totalTransactionRevenue' #@param {type: 'string'}
update_column_response = tables_client.update_column_spec(
    dataset=dataset,
    column_spec_display_name=column_spec_display_name,
    nullable=False,
)
update_column_response
# + [markdown] colab_type="text" id="KZQftXACy21j"
# **Tip:** You can use kwarg `type_code='CATEGORY'` in the preceding `update_column_spec(..)` call to convert the column data type from `FLOAT64` to `CATEGORY`.
# + [markdown] colab_type="text" id="y1NpM6k7XEDm"
# ###**Update dataset: assign a target column**
# + colab={} colab_type="code" id="714Fydm8winh"
# Assign target column.
# Setting a boolean target makes AutoML Tables train a classification model.
column_spec_display_name = 'totalTransactionRevenue' #@param {type: 'string'}
update_dataset_response = tables_client.set_target_column(
    dataset=dataset,
    column_spec_display_name=column_spec_display_name,
)
update_dataset_response
# + [markdown] colab_type="text" id="9jzfkZGVeZUA"
# ##**Creating a model**
# + [markdown] colab_type="text" id="Cb7KjMuzXRNq"
# ####**Train a model**
#
# To create the datasets for training, validation and testing, we first had to consider what kind of data we were dealing with. The data we had keeps track of all customer sessions with the Google Merchandise store over a year. AutoML tables does its own training and testing, and delivers a quite nice UI to view the results in. For the training and validation dataset then, we simply used the over sampled, balanced dataset created by the transformations described above. But we first partitioned the dataset to include the first 9 months in one table and the last 3 in another. This allowed us to train and validate with an entirely different dataset that what we used to test.
#
# Moreover, we held off on oversampling for the validation dataset, to not bias the data that we would ultimately use to judge the success of our model.
#
# The decision to divide the sessions along time was made to avoid the model training on future data to predict past data. (This can be avoided with a datetime variable in the dataset and by toggling a button in the UI)
#
# Training the model may take one hour or more. The following cell keeps running until the training is done. If your Colab times out, use `client.list_models()` to check whether your model has been created. Then use model name to continue to the next steps. Run the following command to retrieve your model. Replace `model_name` with its actual value.
#
# model = client.get_model(model_name=model_name)
#
# Note that we trained on the first 9 months of data and we validate using the last 3.
#
# For demonstration purpose, the following command sets the budget as 1 node hour `('train_budget_milli_node_hours': 1000)`. You can increase that number up to a maximum of 72 hours `('train_budget_milli_node_hours': 72000)` for the best model performance.
#
# Even with a budget of 1 node hour (the minimum possible budget), training a model can take more than the specified node hours.
#
# You can also select the objective to optimize your model training by setting optimization_objective. This solution optimizes the model by using default optimization objective. Refer [link](https://cloud.google.com/automl-tables/docs/train#opt-obj) for more details.
# + colab={} colab_type="code" id="HB3ZX_BMwiep"
# The number of hours to train the model.
model_train_hours = 1 #@param {type:'integer'}
# Kick off a long-running training job; the budget is expressed in
# milli-node-hours (1000 = 1 node hour).
create_model_response = tables_client.create_model(
    MODEL_DISPLAY_NAME,
    dataset=dataset,
    train_budget_milli_node_hours=model_train_hours*1000,
)
# Keep the operation id so the job can be cancelled later if needed.
operation_id = create_model_response.operation.name
print('Create model operation: {}'.format(create_model_response.operation))
# + colab={} colab_type="code" id="y3J0reWbTsrW"
# Wait until model training is done. (.result() blocks, possibly for hours.)
model = create_model_response.result()
model_name = model.name
model
# + [markdown] colab_type="text" id="s9rUSDDQXse3"
# ##**Make a prediction**
# In this section, we take our validation data prediction results and plot the Precision Recall curve and the ROC curve of both the false and true predictions.
#
# There are two different prediction modes: online and batch. The following cell shows you how to make a batch prediction.
# + cellView="both" colab={} colab_type="code" id="OJ3DPwzkwiOe"
#@title Start batch prediction { vertical-output: true }
# Score the held-out last-3-months validation CSV; results are written to a
# timestamped directory under the bucket prefix.
batch_predict_gcs_input_uris = ['gs://{}/{}.csv'.format(BUCKET_NAME, VALIDATION_CSV)] #@param {type:'string'}
batch_predict_gcs_output_uri_prefix = 'gs://{}'.format(BUCKET_NAME) #@param {type:'string'}
batch_predict_response = tables_client.batch_predict(
    model=model,
    gcs_input_uris=batch_predict_gcs_input_uris,
    gcs_output_uri_prefix=batch_predict_gcs_output_uri_prefix,
)
print('Batch prediction operation: {}'.format(batch_predict_response.operation))
# Wait until batch prediction is done.
batch_predict_result = batch_predict_response.result()
batch_predict_response.metadata
# + [markdown] colab_type="text" id="S4aNtFCPX9Ew"
# ##**Evaluate your prediction**
# The follow cell creates a Precision Recall curve and a ROC curve for both the true and false classifications.
# + colab={} colab_type="code" id="IOeudrAvdreq"
def invert(x):
    """Return the complement of a probability/score, i.e. 1 - x."""
    return 1 - x


def switch_label(x):
    """Logically negate a boolean label."""
    return not x
# + colab={} colab_type="code" id="OdtcQU5kVkem"
# Locate the batch-prediction output directory and load the scored rows.
batch_predict_results_location = batch_predict_response.metadata\
                                                       .batch_predict_details.output_info\
                                                       .gcs_output_directory
table = pd.read_csv('{}/tables_1.csv'.format(batch_predict_results_location))
# True labels plus the model's per-class confidence scores.
y = table["totalTransactionRevenue"]
scores = table["totalTransactionRevenue_True_score"]
scores_invert = table['totalTransactionRevenue_False_score']
# + colab={} colab_type="code" id="_tYEgv_IeL3T"
# code for ROC curve, for true values.
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area=%0.2f)' % roc_auc)
# Diagonal = performance of a random classifier, for reference.
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for True')
plt.legend(loc="lower right")
plt.show()
# + colab={} colab_type="code" id="RAWpzQjReQxk"
# code for ROC curve, for false values: treat "no purchase" as the positive
# class by inverting the labels and using the False-class scores.
plt.figure()
lw = 2
label_invert = y.apply(switch_label)
fpr, tpr, thresholds = metrics.roc_curve(label_invert, scores_invert)
# BUG FIX: recompute the AUC for this inverted curve; the original reused
# `roc_auc` left over from the True-class cell, mislabeling this plot.
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area=%0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for False')
plt.legend(loc="lower right")
plt.show()
# + colab={} colab_type="code" id="dcoUEakxeXKe"
# code for PR curve, for true values.
# Precision/recall trade-off for predicting a purchase (True class).
precision, recall, thresholds = metrics.precision_recall_curve(y, scores)
plt.figure()
lw = 2
plt.plot( recall, precision, color='darkorange',
         lw=lw, label='Precision recall curve for True')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall Curve for True')
plt.legend(loc="lower right")
plt.show()
# + cellView="both" colab={} colab_type="code" id="wx-hFytjwiLJ"
# code for PR curve, for false values: precision/recall trade-off for
# predicting "no purchase", using inverted labels and False-class scores.
precision, recall, thresholds = metrics.precision_recall_curve(
    label_invert, scores_invert)
print(precision.shape)
print(recall.shape)
plt.figure()
lw = 2
# CONSISTENCY FIX: pass the line width like the other plotting cells; `lw`
# was assigned here but never used.
plt.plot(recall, precision, color='darkorange',
         lw=lw, label='Precision recall curve for False')
plt.xlim([0.0, 1.1])
plt.ylim([0.0, 1.1])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall Curve for False')
plt.legend(loc="lower right")
plt.show()
# + [markdown] colab_type="text" id="HAivzUjcVJgT"
# ## **Cleaning up**
#
# To clean up all GCP resources used in this project, you can [delete the GCP
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
# + colab={} colab_type="code" id="sx_vKniMq9ZX"
# Delete model resource.
tables_client.delete_model(model_name=model_name)
# Delete dataset resource.
tables_client.delete_dataset(dataset_name=dataset_name)
# Delete Cloud Storage objects that were created.
# ! gsutil -m rm -r gs://$BUCKET_NAME
# If training model is still running, cancel it.
# NOTE(review): this reaches into a private attribute of the client
# transport (`_operations_client`); it may break across client-library
# versions -- confirm against the installed google-cloud-automl release.
automl_client.transport._operations_client.cancel_operation(operation_id)
|
tables/automl/notebooks/purchase_prediction/purchase_prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datetime import datetime
import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import nn, rnn, utils as gutils
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
from data import load_data
from net import EXAM
from eval import evaluate
# -
# params
# Hyper-parameters and runtime configuration for EXAM training.
ctx = [mx.cpu(0)]  # list of mxnet devices to train on
batch_size = 1200
X_num = 1e5 # num of data, -1 means all
feature_num = 50 # time step or sentence len
hidden_size = 1024
test_num = int(1e4)
epoch = 500
lr = 0.001
opt = 'adam'
if_label_embed = True # use EXAM_alter if True
if_log = True # save result
log_columns = ['loss', 'train_P@1', 'train_P@3', 'train_P@5', 'test_P@1', 'test_P@3', 'test_P@5']
log_path = './log/'
data_base_path = './'
# +
# Load data and build the matching network variant: EXAM_alter uses
# pretrained label embeddings, plain EXAM only needs the label count.
if(if_label_embed):
    train_data_loader, test_data_loader, embed, label_embed = load_data(X_num=X_num,
        feature_num=feature_num, test_num=test_num, batch_size=batch_size,
        data_base_path=data_base_path, if_label_embed=if_label_embed, if_log=if_log,
        ctx=ctx[0])
    net = EXAM.EXAM_alter(feature_num, embed, label_embed, hidden_size=hidden_size)
else:
    train_data_loader, test_data_loader, embed, label_num = load_data(X_num=X_num,
        feature_num=feature_num, test_num=test_num, batch_size=batch_size,
        data_base_path=data_base_path, if_label_embed=if_label_embed, if_log=if_log,
        ctx=ctx[0])
    net = EXAM.EXAM(feature_num, embed, label_num, hidden_size=hidden_size)
# +
# Initialize weights, then overwrite the embedding layers with the
# pretrained vectors loaded above.
net.initialize(init=init.Xavier(), ctx=ctx)
net.embed.weight.set_data(embed.idx_to_vec)
if(if_label_embed):
    net.label_embed.set_data(label_embed)
loss = gluon.loss.SigmoidBCELoss()
trainer = gluon.Trainer(net.collect_params(), opt, {'learning_rate': lr})
# +
# Training loop: one pass per epoch over the training DataLoader, sharding
# each batch across the configured devices, then P@k evaluation on both the
# train and test loaders.
log = pd.DataFrame(columns=log_columns)
train_loss, train_p1, train_p3, train_p5 = 0, 0, 0, 0
for e in tqdm(range(1, epoch + 1), desc='train'):
    train_loss = 0
    for batch_idx, (X_batch, y_batch) in tqdm(enumerate(train_data_loader),
                                              desc='train batch'):
        _batch_size = X_batch.shape[0]
        # Split the batch across devices (uneven splits allowed for the
        # final, smaller batch).
        X_batch = gutils.split_and_load(X_batch, ctx, even_split=False)
        y_batch = gutils.split_and_load(y_batch, ctx, even_split=False)
        with autograd.record():
            ls = [loss(net(_X), _y)
                  for _X, _y in zip(X_batch, y_batch)]
        for l in ls:
            l.backward()
            # Accumulate the scalar loss on CPU for logging.
            train_loss += l.sum().as_in_context(mx.cpu()).asscalar()
        trainer.step(batch_size=_batch_size)
        nd.waitall()  # block until this batch's async ops have completed
    train_p1, train_p3, train_p5 = evaluate(net, train_data_loader, ctx=ctx[-1])
    test_p1, test_p3, test_p5 = evaluate(net, test_data_loader, ctx=ctx[-1])
    print('\n\n\nepoch %d:\tloss %.4f' %(e, train_loss))
    print('train_p@1\t%.3f\t\ttrain_p@3\t%.3f\t\ttrain_p@5\t%.3f' %(train_p1, train_p3, train_p5))
    print('test_p@1\t%.3f\t\ttest_p@3\t%.3f\t\ttest_p@5\t%.3f' %(test_p1, test_p3, test_p5))
    if(if_log):
        _log = [[train_loss, train_p1, train_p3, train_p5, test_p1, test_p3, test_p5]]
        _log = pd.DataFrame(_log,
                            columns=log_columns)
        # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
        # in 2.0; pd.concat is the supported equivalent.
        log = pd.concat([log, _log])
# -
if(if_log):
    # BUG FIX: `glob` was used without ever being imported in this notebook,
    # which raised NameError at runtime.
    import glob
    # Number this run as 1 + number of existing CSV logs.
    # NOTE(review): this globs the *current* directory, not `log_path` --
    # confirm whether the pattern should be log_path + '*.csv'.
    log_count = 1 + len(glob.glob('*.csv'))
    # Record the hyper-parameters alongside the results file.
    with open(log_path + str(log_count) + '_params.txt', 'w') as log_params:
        log_params.write(f'batch_size = {batch_size}\nX_num = {X_num}\nfeature_num = {feature_num}\n' +
                         f'hidden_size = {hidden_size}\ntest_num = {test_num}\nepoch = {epoch}\n' +
                         f'lr = {lr}\nopt = {opt}\nif_label_embed = {if_label_embed}\n')
    if(if_label_embed):
        log.to_csv(log_path + str(log_count) + '_result-alter-' + str(datetime.now()) + '.csv',
                   encoding='utf-8', index=False)
    else:
        log.to_csv(log_path + str(log_count) + '_result-' + str(datetime.now()) + '.csv',
                   encoding='utf-8', index=False)
|
exam.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Helpdesk Hard
import getpass
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
# Prompt for the database password interactively so it is never hard-coded.
pwd = getpass.getpass()
engine = create_engine(
    'postgresql+psycopg2://postgres:%s@localhost/sqlzoo' % (pwd))
pd.set_option('display.max_rows', 100)
# Load every helpdesk table into a DataFrame up front; the exercises below
# operate on these in-memory copies instead of issuing SQL.
shift = pd.read_sql_table('Shift', engine)
staff = pd.read_sql_table('Staff', engine)
issue = pd.read_sql_table('Issue', engine)
shift_type = pd.read_sql_table('Shift_type', engine)
level = pd.read_sql_table('Level', engine)
customer = pd.read_sql_table('Customer', engine)
caller = pd.read_sql_table('Caller', engine)
# ## 11.
# Show the manager and number of calls received for each hour of the day on 2017-08-12
#
# ```
# +---------+---------------+----+
# | Manager | Hr | cc |
# +---------+---------------+----+
# | LB1 | 2017-08-12 08 | 6 |
# | LB1 | 2017-08-12 09 | 16 |
# | LB1 | 2017-08-12 10 | 11 |
# | LB1 | 2017-08-12 11 | 6 |
# | LB1 | 2017-08-12 12 | 8 |
# | LB1 | 2017-08-12 13 | 4 |
# | AE1 | 2017-08-12 14 | 12 |
# | AE1 | 2017-08-12 15 | 8 |
# | AE1 | 2017-08-12 16 | 8 |
# | AE1 | 2017-08-12 17 | 7 |
# | AE1 | 2017-08-12 19 | 5 |
# +---------+---------------+----+
# ```
# Q11: calls received per manager per hour of the day on 2017-08-12.
(issue.assign(date_call=issue['Call_date'].dt.date)              # calendar date of the call
      .assign(hr=issue['Call_date'].dt.strftime('%Y-%m-%d %H'))  # hour-bucket label
      # BUG FIX: pandas >= 2.0 rejects casting to the unit-less 'datetime64'
      # dtype; the nanosecond unit must be explicit.
      .astype({'date_call': 'datetime64[ns]'})
      .query('date_call=="2017-08-12"')
      # join each call to the shift on that date to find the duty manager
      .merge(shift, left_on=['Taken_by', 'date_call'],
             right_on=['Operator', 'Shift_date'])
      [['Manager', 'hr', 'Call_ref']]
      .groupby(['Manager', 'hr'])
      .count().reset_index()
      .sort_values('hr'))
# ## 12.
# **80/20 rule. It is said that 80% of the calls are generated by 20% of the callers. Is this true? What percentage of calls are generated by the most active 20% of callers.**
#
# Note - Andrew has not managed to do this in one query - but he believes it is possible.
#
# ```
# +---------+
# | t20pc |
# +---------+
# | 32.2581 |
# +---------+
# ```
# ## 13.
# **Annoying customers. Customers who call in the last five minutes of a shift are annoying. Find the most active customer who has never been annoying.**
#
# ```
# +--------------+------+
# | Company_name | abna |
# +--------------+------+
# | High and Co. | 20 |
# +--------------+------+
# ```
# ## 14.
# **Maximal usage. If every caller registered with a customer makes a call in one day then that customer has "maximal usage" of the service. List the maximal customers for 2017-08-13.**
#
# ```
# +-------------------+--------------+-------------+
# | company_name | caller_count | issue_count |
# +-------------------+--------------+-------------+
# | Askew Inc. | 2 | 2 |
# | Bai Services | 2 | 2 |
# | Dasher Services | 3 | 3 |
# | High and Co. | 5 | 5 |
# | Lady Retail | 4 | 4 |
# | Packman Shipping | 3 | 3 |
# | Pitiable Shipping | 2 | 2 |
# | Whale Shipping | 2 | 2 |
# +-------------------+--------------+-------------+
# ```
# ## 15.
# **Consecutive calls occur when an operator deals with two callers within 10 minutes. Find the longest sequence of consecutive calls – give the name of the operator and the first and last call date in the sequence.**
#
# ```
# +----------+---------------------+---------------------+-------+
# | taken_by | first_call | last_call | calls |
# +----------+---------------------+---------------------+-------+
# | AB1 | 2017-08-14 09:06:00 | 2017-08-14 10:17:00 | 24 |
# +----------+---------------------+---------------------+-------+
# ```
|
Python/12-3 Helpdesk - Hard.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mpmp]
# language: python
# name: conda-env-mpmp-py
# ---
# ## Plot survival prediction results
# In this notebook, we'll compare the results of survival prediction using [elastic net Cox regression](https://scikit-survival.readthedocs.io/en/stable/api/generated/sksurv.linear_model.CoxnetSurvivalAnalysis.html) for expression and methylation data only.
#
# The files analyzed in this notebook are generated by the `run_survival_prediction.py` script.
#
# Notebook parameters:
# * SIG_ALPHA (float): significance cutoff for pairwise comparisons (after FDR correction)
# +
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mpmp.config as cfg
import mpmp.utilities.analysis_utilities as au
import mpmp.utilities.survival_utilities as su
import mpmp.utilities.plot_utilities as plu
# %load_ext autoreload
# %autoreload 2
# -
# ### Parameters and data directories
# +
# significance cutoff, after FDR correction
SIG_ALPHA = 0.05
# if True, save figures to images directory
SAVE_FIGS = True
images_dir = Path(cfg.images_dirs['survival'])
# set results directories
# TODO: document why these need to be different
me_pancancer_results_dir = Path(cfg.results_dirs['survival'], 'me_baseline_alphas')
all_pancancer_results_dir = Path(cfg.results_dirs['survival'], 'all_baseline_alphas')
me_pancancer_baseline_results_dir = Path(cfg.results_dirs['survival'], 'me_baseline', 'results_baseline')
all_pancancer_baseline_results_dir = Path(cfg.results_dirs['survival'], 'all_baseline', 'results_baseline')
me_cancer_type_results_dir = Path(cfg.results_dirs['survival'], 'me_ridge')
all_cancer_type_results_dir = Path(cfg.results_dirs['survival'], 'all_ridge')
me_cancer_type_baseline_results_dir = Path(cfg.results_dirs['survival'], 'me_ridge_baseline')
all_cancer_type_baseline_results_dir = Path(cfg.results_dirs['survival'], 'all_ridge_baseline')
# set list of PCA component numbers to look for
pancancer_pcs_list = [10, 100, 500, 1000, 5000]
cancer_type_n_pcs = 10
# -
# ### Pan-cancer survival prediction, expression vs. methylation
#
# These are models trained and tested on all data types in TCGA, for all samples that have expression and DNA methylation data profiled. We compare against a clinical covariate-only baseline (grey dotted line).
# order to plot data types in
training_data_map = {
'expression': 'gene expression',
'me_27k': '27k methylation',
'me_450k': '450k methylation',
}
# +
# load pan-cancer survival results for the expression/methylation models,
# one run per PCA dimensionality in pancancer_pcs_list
# (NOTE(review): the previous comment here, "baseline predictor ... non-omics
# covariates only", looked copy-pasted from the baseline cell below)
me_pancancer_df = []
for n_dim in pancancer_pcs_list:
    # load results for this number of PCs into a single dataframe
    me_pcs_dir = Path(me_pancancer_results_dir, 'results_{}_pca'.format(n_dim))
    me_results_df = su.load_survival_results(me_pcs_dir)
    me_results_df.rename(columns={'identifier': 'cancer_type',
                                  'fold_no': 'fold'}, inplace=True)
    me_results_df['n_dim'] = n_dim
    me_pancancer_df.append(me_results_df)
me_pancancer_df = pd.concat(me_pancancer_df)
# keep only the pan-cancer rows; per-cancer-type rows are analyzed later
me_pancancer_df = (me_pancancer_df
    .loc[me_pancancer_df.cancer_type == 'pancancer', :]
    .reset_index(drop=True)
)
# map raw data type names to the human-readable labels used in plots
me_pancancer_df.training_data.replace(to_replace=training_data_map, inplace=True)
print(me_pancancer_df.shape)
print(me_pancancer_df.n_dim.unique())
print(me_pancancer_df.training_data.unique())
me_pancancer_df.head()
# -
# get baseline predictor results, using non-omics covariates only
me_baseline_df = su.load_survival_results(me_pancancer_baseline_results_dir)
me_baseline_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
print(me_baseline_df.shape)
print(me_baseline_df.training_data.unique())
me_baseline_df.head()
# +
sns.set({'figure.figsize': (7, 6)})
sns.set_style('whitegrid')
sns.pointplot(data=me_pancancer_df,
x='n_dim', y='cindex', hue='training_data',
hue_order=training_data_map.values())
plt.xlabel('Number of PCs', size=14)
plt.ylabel('cindex', size=14)
plt.legend(title='Training data', fontsize=13, title_fontsize=13, loc='upper left')
plt.title('Pan-cancer survival performance, expression/methylation', size=14)
plt.ylim(0.5, 1.0)
# plot baseline mean/bootstrapped 95% CI
baseline_vals = (me_baseline_df
[(me_baseline_df.data_type == 'test') &
(me_baseline_df.signal == 'signal') &
(me_baseline_df.cancer_type == 'pancancer')]
).cindex.values
baseline_mean = np.mean(baseline_vals)
plt.gca().axhline(y=baseline_mean, linestyle='--', linewidth=3, color='gray')
baseline_ci = sns.utils.ci(
sns.algorithms.bootstrap(baseline_vals,
func=np.mean,
n_boot=1000,
units=None,
seed=cfg.default_seed)
)
plt.gca().axhspan(baseline_ci[0], baseline_ci[1], facecolor='gray', alpha=0.3)
if SAVE_FIGS:
images_dir.mkdir(exist_ok=True)
plt.savefig(images_dir / 'me_pancan_survival.svg', bbox_inches='tight')
plt.savefig(images_dir / 'me_pancan_survival.png',
dpi=300, bbox_inches='tight')
# -
# ### Pan-cancer survival prediction, all data types
#
# These are models trained and tested on all data types in TCGA, for all samples that have all data types (expression, methylation, RPPA, miRNA, mutational signatures) data profiled. We compare against a clinical covariate-only baseline (grey dotted line).
# order to plot data types in
training_data_map = {
'expression': 'gene expression',
'me_27k': '27k methylation',
'me_450k': '450k methylation',
'rppa': 'RPPA',
'mirna': 'microRNA',
'mut_sigs': 'mutational signatures',
}
# +
# NOTE(review): this cell appears misplaced/duplicated -- it loads the
# per-cancer-type (not pan-cancer) results, and the identical computation is
# repeated later in the "Individual cancer survival prediction" section,
# where its output is actually used. Consider removing one of the two copies.
me_performance_df = []
all_drop_cancer_types = set()
me_pcs_dir = Path(me_cancer_type_results_dir, 'results_{}_pca'.format(cancer_type_n_pcs))
me_results_df = su.load_survival_results(me_pcs_dir)
me_results_df.rename(columns={'identifier': 'cancer_type',
                              'fold_no': 'fold'}, inplace=True)
# keep held-out (test) performance on the true labels only
me_performance_df = me_results_df[
    (me_results_df.data_type == 'test') &
    (me_results_df.signal == 'signal')
].copy()
me_performance_df.drop(columns=['data_type', 'signal'], inplace=True)
me_performance_df.training_data.replace(to_replace=training_data_map, inplace=True)
me_performance_df.head(10)
# +
# load pan-cancer survival results for the models trained on all data types,
# one run per PCA dimensionality in pancancer_pcs_list
# (NOTE(review): the previous comment here, "baseline predictor ... non-omics
# covariates only", matches the next cell, not this one)
all_pancancer_df = []
for n_dim in pancancer_pcs_list:
    # load results for this number of PCs into a single dataframe
    all_pcs_dir = Path(all_pancancer_results_dir, 'results_{}_pca'.format(n_dim))
    all_results_df = su.load_survival_results(all_pcs_dir)
    all_results_df.rename(columns={'identifier': 'cancer_type',
                                   'fold_no': 'fold'}, inplace=True)
    all_results_df['n_dim'] = n_dim
    all_pancancer_df.append(all_results_df)
all_pancancer_df = pd.concat(all_pancancer_df)
# keep only the pan-cancer rows; per-cancer-type rows are analyzed later
all_pancancer_df = (all_pancancer_df
    .loc[all_pancancer_df.cancer_type == 'pancancer', :]
    .reset_index(drop=True)
)
# map raw data type names to the human-readable labels used in plots
all_pancancer_df.training_data.replace(to_replace=training_data_map, inplace=True)
print(all_pancancer_df.shape)
print(all_pancancer_df.n_dim.unique())
print(all_pancancer_df.training_data.unique())
all_pancancer_df.head()
# -
# get baseline predictor results, using non-omics covariates only
all_baseline_df = su.load_survival_results(all_pancancer_baseline_results_dir)
all_baseline_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
print(all_baseline_df.shape)
print(all_baseline_df.training_data.unique())
all_baseline_df.head()
# +
sns.set({'figure.figsize': (10, 5)})
sns.set_style('whitegrid')
sns.pointplot(data=all_pancancer_df,
x='n_dim', y='cindex', hue='training_data',
hue_order=training_data_map.values())
plt.xlabel('Number of PCs', size=14)
plt.ylabel('cindex', size=14)
plt.legend(title='Training data', fontsize=13, title_fontsize=13, loc='upper left', ncol=2)
plt.title('Pan-cancer survival performance, all data types', size=14)
plt.ylim(0.5, 1.0)
# plot baseline mean/bootstrapped 95% CI
baseline_vals = (all_baseline_df
[(all_baseline_df.data_type == 'test') &
(all_baseline_df.signal == 'signal') &
(all_baseline_df.cancer_type == 'pancancer')]
).cindex.values
baseline_mean = np.mean(baseline_vals)
plt.gca().axhline(y=baseline_mean, linestyle='--', linewidth=3, color='gray')
baseline_ci = sns.utils.ci(
sns.algorithms.bootstrap(baseline_vals,
func=np.mean,
n_boot=1000,
units=None,
seed=cfg.default_seed)
)
plt.gca().axhspan(baseline_ci[0], baseline_ci[1], facecolor='gray', alpha=0.3)
if SAVE_FIGS:
plt.savefig(images_dir / 'all_pancan_survival.svg', bbox_inches='tight')
plt.savefig(images_dir / 'all_pancan_survival.png',
dpi=300, bbox_inches='tight')
# -
# ### Individual cancer survival prediction, expression vs. methylation
#
# These are models trained and tested on each individual data type in TCGA, for all samples that have expression and DNA methylation data profiled. Like before, grey dotted lines are clinical covariate-only baselines, for that specific cancer type.
#
# We only ran these experiments using 10 principal components extracted from the -omics datasets, since using more PCs than that led to convergence issues on many cancer types.
# order to plot data types in
training_data_map = {
'expression': 'gene expression',
'me_27k': '27k methylation',
'me_450k': '450k methylation',
}
# +
me_performance_df = []
all_drop_cancer_types = set()
me_pcs_dir = Path(me_cancer_type_results_dir, 'results_{}_pca'.format(cancer_type_n_pcs))
me_results_df = su.load_survival_results(me_pcs_dir)
me_results_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
me_performance_df = me_results_df[
(me_results_df.data_type == 'test') &
(me_results_df.signal == 'signal')
].copy()
me_performance_df.drop(columns=['data_type', 'signal'], inplace=True)
me_performance_df.training_data.replace(to_replace=training_data_map, inplace=True)
me_performance_df.head(10)
# -
group_cancer_types = me_performance_df.groupby(['cancer_type']).count().seed
max_count = group_cancer_types.max()
valid_cancer_types = group_cancer_types[group_cancer_types == max_count].index
print(valid_cancer_types)
cancer_type_avg = (
me_performance_df[me_performance_df.cancer_type.isin(valid_cancer_types)]
.groupby('cancer_type')
.mean()
).cindex
cancer_type_avg.sort_values(ascending=False).head(10)
cancer_type_sd = me_performance_df.groupby('cancer_type').std().cindex
cancer_type_cv = cancer_type_avg / cancer_type_sd
cancer_type_cv.sort_values(ascending=False).head(10)
# get baseline predictor results, using non-omics covariates only
me_baseline_df = su.load_survival_results(me_cancer_type_baseline_results_dir)
me_baseline_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
print(me_baseline_df.shape)
print(me_baseline_df.training_data.unique())
me_baseline_df.head()
# +
sns.set({'figure.figsize': (28, 5)})
sns.set_style('whitegrid')
fig, axarr = plt.subplots(1, 5)
cancer_type_cv = cancer_type_cv[cancer_type_cv.index != 'pancancer']
for ix, cancer_type in enumerate(cancer_type_cv.sort_values(ascending=False).index[:5]):
ax = axarr[ix]
sns.boxplot(data=me_performance_df[me_performance_df.cancer_type == cancer_type],
x='training_data', y='cindex', order=training_data_map.values(), ax=ax)
ax.set_xlabel('Training data type')
ax.set_ylabel('cindex')
ax.set_title('{} survival performance'.format(cancer_type))
ax.set_ylim(0.4, 1.0)
# plot baseline mean/bootstrapped 95% CI
baseline_vals = (me_baseline_df
[(me_baseline_df.data_type == 'test') &
(me_baseline_df.signal == 'signal') &
(me_baseline_df.cancer_type == cancer_type)]
).cindex.values
baseline_mean = np.mean(baseline_vals)
ax.axhline(y=baseline_mean, linestyle='--', linewidth=3, color='gray')
baseline_ci = sns.utils.ci(
sns.algorithms.bootstrap(baseline_vals,
func=np.mean,
n_boot=1000,
units=None,
seed=cfg.default_seed)
)
ax.axhspan(baseline_ci[0], baseline_ci[1], facecolor='gray', alpha=0.3)
if SAVE_FIGS:
plt.savefig(images_dir / 'me_top_cancers_survival.svg', bbox_inches='tight')
plt.savefig(images_dir / 'me_top_cancers_survival.png',
dpi=300, bbox_inches='tight')
# +
# summary boxplot: cindex distribution per training data type, pooled over
# all cancer types (expression/methylation sample set, 10 PCs)
sns.set({'figure.figsize': (8, 6)})
sns.set_style('whitegrid')
sns.boxplot(data=me_performance_df, x='training_data', y='cindex',
            order=training_data_map.values())
plt.xlabel('Training data type')
plt.ylabel('cindex')
# fixed title: the x-axis varies the training data type at a fixed PC count,
# not the PC count (the old title was copied from a different plot)
plt.title('Performance by training data type, across cancer types')
# -
# ### Individual cancer survival prediction, all data types
#
# These are models trained and tested on each individual data type in TCGA, for all samples that have all data types (expression, methylation, RPPA, miRNA, mutational signatures) data profiled. Like before, grey dotted lines are clinical covariate-only baselines, for that specific cancer type.
#
# We only ran these experiments using 10 principal components extracted from the -omics datasets, since using more PCs than that led to convergence issues on many cancer types.
# order to plot data types in
training_data_map = {
'expression': 'gene expression',
'me_27k': '27k methylation',
'me_450k': '450k methylation',
'rppa': 'RPPA',
'mirna': 'microRNA',
'mut_sigs': 'mutational signatures',
}
# +
all_performance_df = []
all_drop_cancer_types = set()
all_pcs_dir = Path(all_cancer_type_results_dir, 'results_{}_pca'.format(cancer_type_n_pcs))
all_results_df = su.load_survival_results(all_pcs_dir)
all_results_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
all_performance_df = all_results_df[
(all_results_df.data_type == 'test') &
(all_results_df.signal == 'signal')
].copy()
all_performance_df.drop(columns=['data_type', 'signal'], inplace=True)
all_performance_df.training_data.replace(to_replace=training_data_map, inplace=True)
all_performance_df.head(10)
# -
group_cancer_types = all_performance_df.groupby(['cancer_type']).count().seed
max_count = group_cancer_types.max()
valid_cancer_types = group_cancer_types[group_cancer_types == max_count].index
print(valid_cancer_types)
cancer_type_avg = (
all_performance_df[all_performance_df.cancer_type.isin(valid_cancer_types)]
.groupby('cancer_type')
.mean()
).cindex
cancer_type_avg.sort_values(ascending=False).head(10)
cancer_type_sd = all_performance_df.groupby('cancer_type').std().cindex
cancer_type_cv = cancer_type_avg / cancer_type_sd
cancer_type_cv.sort_values(ascending=False).head(10)
# get baseline predictor results, using non-omics covariates only
all_baseline_df = su.load_survival_results(all_cancer_type_baseline_results_dir)
all_baseline_df.rename(columns={'identifier': 'cancer_type',
'fold_no': 'fold'}, inplace=True)
print(all_baseline_df.shape)
print(all_baseline_df.training_data.unique())
all_baseline_df.head()
# +
sns.set({'figure.figsize': (28, 5)})
sns.set_style('whitegrid')
fig, axarr = plt.subplots(1, 5)
cancer_type_cv = cancer_type_cv[cancer_type_cv.index != 'pancancer']
for ix, cancer_type in enumerate(cancer_type_cv.sort_values(ascending=False).index[:5]):
ax = axarr[ix]
sns.boxplot(data=all_performance_df[all_performance_df.cancer_type == cancer_type],
x='training_data', y='cindex', order=training_data_map.values(), ax=ax)
ax.set_xlabel('Training data type')
for label in ax.get_xticklabels():
label.set_rotation(65)
ax.set_ylabel('cindex')
ax.set_title('{} survival performance'.format(cancer_type))
ax.set_ylim(0.3, 1.0)
# plot baseline mean/bootstrapped 95% CI
baseline_vals = (all_baseline_df
[(all_baseline_df.data_type == 'test') &
(all_baseline_df.signal == 'signal') &
(all_baseline_df.cancer_type == cancer_type)]
).cindex.values
baseline_mean = np.mean(baseline_vals)
ax.axhline(y=baseline_mean, linestyle='--', linewidth=3, color='gray')
baseline_ci = sns.utils.ci(
sns.algorithms.bootstrap(baseline_vals,
func=np.mean,
n_boot=1000,
units=None,
seed=cfg.default_seed)
)
ax.axhspan(baseline_ci[0], baseline_ci[1], facecolor='gray', alpha=0.3)
if SAVE_FIGS:
plt.savefig(images_dir / 'all_top_cancers_survival.svg', bbox_inches='tight')
plt.savefig(images_dir / 'all_top_cancers_survival.png',
dpi=300, bbox_inches='tight')
# +
# summary boxplot: cindex distribution per training data type, pooled over
# all cancer types (all-data-types sample set, 10 PCs)
sns.set({'figure.figsize': (10, 6)})
sns.set_style('whitegrid')
sns.boxplot(data=all_performance_df, x='training_data', y='cindex',
            order=training_data_map.values())
plt.xlabel('Training data type')
plt.ylabel('cindex')
# fixed title: the x-axis varies the training data type at a fixed PC count,
# not the PC count (the old title was copied from a different plot)
plt.title('Performance by training data type, across cancer types')
|
06_predict_survival/plot_survival_results.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Pandas library for the pandas dataframes
import pandas as pd
import numpy as np
# Import Scikit-Learn library for the classification models
import sklearn
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.metrics import classification_report
# Another statistic model library
import statsmodels.api as sm
import statsmodels.formula.api as smf
import scipy.stats as stats
import scipy
from scipy import interpolate
from scipy.interpolate import interp1d
# Import plotting libraries
import seaborn as sns
import matplotlib
from matplotlib import pyplot as plt
# Set larger fontsize for all plots
matplotlib.rcParams.update({'font.size': 20})
# Command to automatically reload modules before executing cells
# not needed here but might be if you are writing your own library
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
## Import cleaning and splitting functions
from clean_split_data import clean_data
from clean_split_data import split_data
# -
# ### Data
data = pd.read_csv('data.csv')
data = clean_data(data)
X_train, X_test, y_train, y_test = split_data(data)
# ### Classifier
clf = SVC(kernel='rbf', C=10000)
clf.fit(X_train, y_train)
# ### SVM rbf Prediction with all 10 parameters
def predict(test_data):
    """Predict diagnosis labels for *test_data* with the fitted RBF SVM.

    Parameters
    ----------
    test_data : array-like of shape (n_samples, n_features)
        Feature rows matching the columns the classifier was trained on.

    Returns
    -------
    numpy.ndarray
        Predicted class labels, one per input row.
    """
    # `clf` is the module-level SVC(kernel='rbf', C=10000) fitted above;
    # the previously commented-out DataFrame transpose was dead code
    return clf.predict(test_data)
y_pred = predict(X_test)
# +
'''Accuracy and MSE Using ALL 10 features'''
accuracy_1 = accuracy_score(y_test, y_pred)
print("Prediction accuracy MSE: ", mean_squared_error(y_test, y_pred))
print("Accuracy score", accuracy_score(y_test, y_pred))
#print('R-Squared Score',r2_score(y_test,y_pred))
# -
# ### Backward Stepwise selection (MSE and AC Using 1 to 9 features)
# +
'''Load the cleaned original dataset for below use'''
X = data[['radius_mean', 'texture_mean', 'perimeter_mean',
'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean',
'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean']]
y = data.diagnosis
'''A list of for the for-loop'''
n = [1,2,3,4,5,6,7,8,9]
list_MSE=[]
list_AC=[]
list_MSE_1=[]
list_AC_1=[]
selected_features = []
#list_R2=[]
#list_R2_1=[]
for i in n:
'''Only passing the training data set into the Feature Selector'''
'''Any preprocessing method should not be fitted on the whole dataset to prevent carrying information from the train set
to test set'''
sbs_backward = SequentialFeatureSelector(SVC(kernel = "rbf"),
n_features_to_select=i,
direction='backward').fit(X_train, y_train)
selected = sbs_backward.get_support(indices=True)
'''passing the selected features to the orginial dataset for each number of i'''
f_names = np.array(X.columns)
x_manip = X[f_names[selected]]
selected_features.append(f_names[selected])
#print("Selected input features using backward Stepwise Selection", f_names[selected])
#print(x_manip)
'''Spliting the original data set with selected feature for each number of i'''
X_train1, X_test1, y_train1, y_test1 = train_test_split(x_manip, y , test_size=0.2, random_state=42)
model = SVC(kernel='rbf', C=10000)
model.fit(X_train1, y_train1)
y_pred1 = model.predict(X_test1)
y_pred2 = model.predict(X_train1)
list_AC.append(accuracy_score(y_test1, y_pred1))
list_AC_1.append(accuracy_score(y_train1, y_pred2))
list_MSE.append(mean_squared_error(y_test1, y_pred1))
list_MSE_1.append(mean_squared_error(y_train1, y_pred2))
#print("Prediction accuracy MSE: ", mean_squared_error(y_test1, y_pred1))
#print('R-Squared Score',r2_score(y_test1,y_pred1))
#print("Accuracy score", accuracy_score(y_test1, y_pred1))
#list_R2.append(r2_score(y_test1, y_pred1))
#print('MSE:', list_MSE)
#print('R2:', list_R2)
#print('AC:', list_AC)
#list_R2_1.append(r2_score(y_train1, y_pred2))
# +
fig,ax = plt.subplots(2,1,figsize=(10,10))
#ax[0].plot(n, list_R2, linewidth=3, alpha=1,label='test_R2')
#ax[0].plot(n, list_R2_1, linewidth=3, alpha=1,c='FireBrick',label='train_R2')
#ax[0].set_title('Backwardstepwise: R2 VS. Number of feature, model:SVC_rbf', fontsize=15)
#ax[0].set_xlabel('Number of features')
#ax[0].set_ylabel('R2')
#ax[0].legend()
ax[0].plot(n, list_AC, linewidth=3 ,alpha=1,label='test_AC')
ax[0].plot(n, list_AC_1, linewidth=3, alpha=1,c='FireBrick',label='train_AC')
ax[0].set_title('Backwardstepwise: Accuracy VS. Number of feature, model:SVC_rbf', fontsize=15)
ax[0].set_xlabel('Number of features')
ax[0].set_ylabel('Accuracy')
ax[0].legend()
ax[1].plot(n, list_MSE, linewidth=3 ,alpha=1,label='test_MSE')
ax[1].plot(n, list_MSE_1, linewidth=3, alpha=1,c='FireBrick',label='train_MSE')
ax[1].set_title('Backwardstepwise: MSE VS. Number of feature, model:SVC_rbf', fontsize=15)
ax[1].set_xlabel('Number of features')
ax[1].set_ylabel('MSE')
ax[1].legend()
plt.tight_layout()
plt.show
# -
'''If Testing Line is above training line, indicates the presence of high bias in dataset, causing underfitting'''
# ### Forward Stepwise (not recommended because of suppressor effects; included just for demonstration)
# +
n = [1,2,3,4,5,6,7,8,9]
list_MSE1=[]
list_R21=[]
list_AC1=[]
for i in n:
sfs_forward = SequentialFeatureSelector(SVC(kernel = "rbf"),
n_features_to_select=i,
direction='forward').fit(X_train, y_train)
selected = sfs_forward.get_support(indices=True)
f_names = np.array(X.columns)
x_manip1 = X[f_names[selected]]
X_train2, X_test2, y_train2, y_test2 = train_test_split(x_manip1, y, test_size=0.2, random_state=42)
model = SVC(kernel='rbf', C=10000)
model.fit(X_train2, y_train2)
y_pred3 = model.predict(X_test2)
#print("Prediction accuracy MSE: ", mean_squared_error(y_test1, y_pred1))
#print('R-Squared Score',r2_score(y_test1,y_pred1))
#print("Accuracy score", accuracy_score(y_test1, y_pred1))
list_MSE1.append(mean_squared_error(y_test2, y_pred3))
list_AC1.append(accuracy_score(y_test2, y_pred3))
#list_R21.append(r2_score(y_test2, y_pred3))
#print('MSE:', list_MSE)
#print('R2:', list_R2)
#print('AC:', list_AC)
# +
fig,ax = plt.subplots(2,1,figsize=(8,10))
#ax[0].plot(n, list_R2, linewidth=3, alpha=1,label='stepwise backward')
#ax[0].plot(n, list_R21, linewidth=3, alpha=1,c='FireBrick',label='stepwise forward')
#ax[0].set_title('stepwise: R2 VS. Number of feature, model:SVC_rbf', fontsize=15)
#ax[0].set_xlabel('Number of features')
#ax[0].set_ylabel('R2')
#ax[0].legend()
ax[0].plot(n, list_AC, linewidth=3 ,alpha=1,label='stepwise backward')
ax[0].plot(n, list_AC1, linewidth=3, alpha=1,c='FireBrick',label='stepwise forward')
ax[0].set_title('stepwise: Accuracy VS. Number of feature, model:SVC_rbf', fontsize=15)
ax[0].set_xlabel('Number of features')
ax[0].set_ylabel('Accuracy')
ax[0].legend()
ax[1].plot(n, list_MSE, linewidth=3 ,alpha=1,label='stepwise backward')
ax[1].plot(n, list_MSE1, linewidth=3, alpha=1,c='FireBrick',label='stepwise forward')
ax[1].set_title('stepwise: MSE VS. Number of feature, model:SVC_rbf', fontsize=15)
ax[1].set_xlabel('Number of features')
ax[1].set_ylabel('MSE')
ax[1].legend()
plt.tight_layout()
plt.show
# -
# **Based on the graph, you can see that the forward stepwise feature selector has a very unstable trend as the number of features changes**
# ### Sample Train, Test, Split results
def sample_results():
    '''
    Print evaluation metrics and plot the confusion matrix for the
    module-level RBF SVM (`clf`) on the held-out test split.

    Relies on the module-level `clf`, `X_test`, and `y_test` defined above.
    Prints the MSE, mean accuracy, and a classification report, then shows
    the confusion matrix plot. Returns None.
    '''
    y_pred = clf.predict(X_test)
    print("Prediction accuracy MSE: ", mean_squared_error(y_test, y_pred))
    print("Mean accuracy on test set", clf.score(X_test, y_test))
    # fixed message: the previous text said "decision tree", but this
    # classifier is an RBF-kernel SVM
    print("The confusion matrix for the sample dataset using an SVM (rbf kernel) is displayed below: ")
    print(classification_report(y_test, y_pred))
    plot_confusion_matrix(clf, X_test, y_test)
    plt.show()
'''This matrix is only for Non-stepwise SVM only'''
sample_results()
# +
'''This matrix is for Backward Stepwise SVM with 9 features'''
plot_confusion_matrix(model, X_test1, y_test1)
print("Prediction accuracy MSE: ", mean_squared_error(y_test1, y_pred1))
print("Mean accuracy on test set", model.score(X_test1, y_test1))
print("The confusion matrix for the sample dataset using a decision tree is displayed below: ")
print(classification_report(y_test1, y_pred1))
plt.show()
# -
print(list_AC)
print(list_MSE)
# **Based on the above information, the best number of parameters for SVM_rbf is 2**
# ### Optimized SVM_rbf Predictor ###
selected_features[1]
def feature_names():
    """Return the input features of the best backward stepwise selection run.

    Returns
    -------
    list of str
        The two feature names that gave the best test performance.
    """
    best_features = ('texture_mean', 'perimeter_mean')
    return list(best_features)
def predict(test_data):
    '''
    Takes test data and uses classifier to predict boolean output.

    Parameters
    ----------
    test_data : array-like of shape (n_samples, 2)
        Rows containing the two selected features returned by
        ``feature_names()``.

    Returns
    -------
    numpy.ndarray
        Predicted diagnosis labels, one per input row.

    Notes
    -----
    NOTE(review): this function re-trains the SVM from the module-level
    ``data`` dataframe on every call, and it shadows the ``predict``
    defined earlier in the notebook -- consider fitting once and reusing.
    '''
    X = data[feature_names()]
    y = data.diagnosis
    # only the training portion of this split is used; X_test/y_test are unused
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    clf = SVC(kernel='rbf', C=10000)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(test_data)
    return y_pred
|
final_jupyter_notes/svm_rbf_final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
fraud_df = pd.read_csv("FinancialFraudDB.csv", index_col=None)
# -
fraud_df.head()
# +
from collections import Counter
Counter(fraud_df["Class"].values)
# -
# per-transaction business costs (arbitrary currency units)
card_replacement_cost = 5
customer_freeze_cost = 3
# +
import numpy as np

# example-dependent cost matrix, one row per transaction; costcla expects
# columns ordered [C_FP, C_FN, C_TP, C_TN]:
#   col 0: false positive -- cost of freezing a legitimate customer
#   col 1: false negative -- lose the full fraudulent transaction amount
#   col 2: true positive  -- cost of replacing the compromised card
#   col 3: true negative  -- no action, left at zero
# (NOTE(review): column order assumed from costcla's convention -- confirm)
cost_matrix = np.zeros((len(fraud_df.index), 4))
cost_matrix[:, 0] = customer_freeze_cost * np.ones(len(fraud_df.index))
cost_matrix[:, 1] = fraud_df["Amount"].values
cost_matrix[:, 2] = card_replacement_cost * np.ones(len(fraud_df.index))
# -
cost_matrix
y = fraud_df.pop("Class").values
X = fraud_df.values
# +
from sklearn.model_selection import train_test_split
sets = train_test_split(X, y, cost_matrix, test_size=0.25, random_state=11)
X_train, X_test, y_train, y_test, cost_matrix_train, cost_matrix_test = sets
# +
from sklearn import tree
y_pred_test_dt = tree.DecisionTreeClassifier().fit(X_train, y_train).predict(X_test)
# +
from costcla.models import CostSensitiveDecisionTreeClassifier
y_pred_test_csdt = CostSensitiveDecisionTreeClassifier().fit(X_train, y_train, cost_matrix_train).predict(X_test)
# +
from costcla.metrics import savings_score
print(savings_score(y_test, y_pred_test_dt, cost_matrix_test))
print(savings_score(y_test, y_pred_test_csdt, cost_matrix_test))
# -
|
Chapter06/Credit Card Fraud Detection/Credit Card Fraud Detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from tidal_1d import tidal1d
from tidal_maq import tidalmaq
# ### Example 1
# Single aquifer covered by a leaky layer.
# Comparison of one layer solution and multi-layer solution where the leaky layer is simulated with multiple aquifer layers.
# +
# Example 1 parameters: single aquifer (2) covered by a leaky layer (1)
k1 = 0.005  # conductivity of the leaky top layer
k2 = 50.0   # conductivity of the aquifer
H1 = 20.0   # thickness of the leaky layer
H2 = 20.0   # thickness of the aquifer
Sll = 5e-5  # specific storage of the leaky layer
Ss = 5e-5   # specific storage of the aquifer
beta = 0.5
gamma = 1
tau = 0.5 # days
#1D: lump the leaky layer into one resistance/storage on top of a single aquifer
Taq = k2 * H2  # aquifer transmissivity
Saq = Ss * H2  # aquifer storage coefficient (NOTE(review): computed but unused below)
c = H1 / k1    # vertical resistance of the leaky layer
ml1d = tidal1d(Taq, Taq, Ss * H2, Ss * H2, c, c, Sll * H1, Sll * H1, beta, gamma=gamma, tau=tau, hs=1)
# same model with zero leaky layer storage, for comparison
ml1d0 = tidal1d(Taq, Taq, Ss * H2, Ss * H2, c, c, 0, 0, beta, gamma=gamma, tau=tau, hs=1)
#MAQ, leaky layer simulated with 1 aquifer layer
N = 2
kmaq = np.array([k1, k2])
Hmaq = np.array([H1, H2])
Smaq = np.array([Sll * H1, Ss * H2])
zmaq = np.array([0, -H1, -H1 - H2]) # all layers equal thickness
Tmaq = kmaq * Hmaq
cmaq = np.array([H1 / (2 * k1), H1 / (2 * k1)]) # vertical resistance of aquifer not included
betamaq = np.array([gamma, beta])
gammamaq = gamma * np.ones(N)
mlmaq = tidalmaq(Tmaq, Tmaq, Smaq, Smaq, cmaq, cmaq, 0, 0, betamaq, gammamaq, tau, hs=1)
#MAQ, leaky layer simulated with 10 aquifer layers
Nll = 10
kmaq = np.array(Nll * [k1] + [k2])
Hmaq = np.array(Nll * [H1 / Nll] + [H2])
Smaq = np.hstack((Hmaq[:-1] * Sll, Ss * H2))
Tmaq = kmaq * Hmaq
cmaq = np.hstack((H1 / (2 * k1 * Nll), H1 / (k1 * Nll) * np.ones(Nll - 1), H1 / (2 * k1 * Nll))) # vertical resistance of aquifer not included
betamaq = np.hstack((gamma * np.ones(Nll), beta))
gammamaq = gamma * np.ones(Nll + 1)
mlmaq2 = tidalmaq(Tmaq, Tmaq, Smaq, Smaq, cmaq, cmaq, 0, 0, betamaq, gammamaq, tau, hs=1)
# -
plt.figure(figsize=(12, 4))
plt.subplot(121)
x = np.linspace(-800, 800, 100)
a1 = ml1d.amp(x)
a1_0 = ml1d0.amp(x)
a2 = mlmaq.amp(x)[1]
a3 = mlmaq2.amp(x)[Nll]
plt.plot(x, a1, label='1-layer')
plt.plot(x, a2, label='2-layer')
plt.plot(x, a3, 'k--', label='11-layer')
plt.plot(x, a1_0, 'k:', lw=2, label='1-layer, $\sigma=0$')
plt.legend(fontsize=12)
plt.xlim(-800, 800)
plt.ylim(0, 0.6)
plt.yticks(fontsize=12)
plt.xticks(np.arange(-800, 801, 400), fontsize=12)
plt.xlabel('x (m)', fontsize=12)
plt.ylabel('relative amplitude', fontsize=12)
plt.text(0.92, 0.92, '(a)', transform=plt.gca().transAxes, fontsize=12, zorder=100)
#
plt.subplot(122)
x = np.linspace(-800, 800, 100)
f1 = ml1d.phase(x) * 24 * 60
f1_0 = ml1d0.phase(x) * 24 * 60
f2 = mlmaq.phase(x)[1] * 24 * 60
f3 = mlmaq2.phase(x)[Nll] * 24 * 60
plt.plot(x, f1, label='1-layer')
plt.plot(x, f2, label='2-layer')
plt.plot(x, f3, 'k--', label='11-layer')
plt.plot(x, f1_0, 'k:', lw=2, label='1-layer, $\sigma=0$')
plt.legend(fontsize=12, loc='lower right')
plt.xlim(-800, 800)
plt.ylim(-60, 60)
plt.yticks(np.arange(-60, 61, 15), fontsize=12)
plt.xticks(np.arange(-800, 801, 400), fontsize=12)
plt.xlabel('x (m)', fontsize=12)
plt.ylabel('phase shift (minutes)', fontsize=12)
plt.text(0.92, 0.92, '(b)', transform=plt.gca().transAxes, fontsize=12, zorder=100);
#plt.savefig('maqtides_ex1.eps');
# ### Example 2
# Unconfined aquifer modeled as 1 layer or as 80 layers, and unconfined aquifer with 3 thin clay lenses, where each clay lens is a leaky layer (so 77 aquifer layers).
# +
k = 10.0
H = 20.0
Ss = 5e-5
Sphreatic = 0.1 # phreatic storage if a value
aniso = 0.1
cland = 1e12 # confined top
beta = 0.8
gamma = 1
tau = 0.5 # days
#1D
T = k * H
S1 = Ss * H
S2 = Sphreatic
c1 = 0.195 * H / (k * aniso) # vertical aquifer resistance according to Anderson
c2 = cland
ml1d = tidal1d(T, T, S1, S2, c1, c2, 0, 0, beta, 1, tau, hs=1)
#MAQ
N = 80
kmaq = k * np.ones(N)
Hmaq = H / N * np.ones(N) # all layers equal thickness
zmaq0 = -np.cumsum(Hmaq) + 0.5 * Hmaq # all layers equal thickness
Tmaq = kmaq * Hmaq
S1maq = Ss * Hmaq
S2maq = S1maq.copy()
S2maq[0] = Sphreatic
c1maq = Hmaq / (kmaq * aniso)
c1maq[0] = 0.5 * c1maq[0] # first layer is half thickness
c2maq = c1maq.copy()
c2maq[0] = cland
betamaq = beta * np.ones(N)
gammamaq = gamma * np.ones(N)
mlmaq = tidalmaq(Tmaq, Tmaq, S1maq, S2maq, c1maq, c2maq, np.zeros(N), np.zeros(N), betamaq, gammamaq, tau, hs=1)
# +
k = 10.0
H = 20.0
Ss = 5e-5
Sphreatic = 0.1 # phreatic storage if a value
aniso = 0.1
cland = 1e12 # confined top - cannot be inf
beta = 0.8
gamma = 1
tau = 0.5 # days
#MAQ
N = 77
kmaq = k * np.ones(N)
Hmaq = 0.25 * np.ones(N)
Tmaq = kmaq * Hmaq
S1maq = Ss * Hmaq
S2maq = S1maq.copy()
if Sphreatic:
S2maq[0] = Sphreatic
c1maq = Hmaq / (k * aniso) * np.ones(N)
c1maq[[20, 39, 58]] *= 1000
c1maq[0] *= 0.5
c2maq = c1maq.copy()
c2maq[0] = cland
sig1 = np.zeros(N)
sig1[[20, 39, 58]] = 0.25 * Ss
sig2 = sig1.copy()
betamaq = beta * np.ones(N)
gammamaq = gamma * np.ones(N)
mlmaq2 = tidalmaq(Tmaq, Tmaq, S1maq, S2maq, c1maq, c2maq, np.zeros(N), np.zeros(N), betamaq, gammamaq, tau, hs=1)
# +
# figure amplitude
# Figure: tidal-amplitude contours.
# (a) 1-D solution (dashed, 'C1') compared against the multi-layer solution ('C0');
# (b) second multi-layer model with internal boundaries drawn at z = -5, -10, -15 m.
plt.figure(figsize=(12, 5))
plt.subplot(211)
levels = np.arange(0.1, 1, 0.1)
# NOTE(review): x spans only -100..100 here while xlim below is ±300 — confirm intended
x = np.linspace(-100, 100, 201)
a1d = ml1d.amp(x)
a1d = np.vstack((a1d, a1d, a1d))  # replicate the 1-D amplitude at the three depths in z1d
z1d = [0, -0.5 * H, -H]
cs = plt.contour(x, z1d, a1d, levels=levels, colors='C1', linestyles='--')
plt.clabel(cs, fmt='%1.1f')
amaq = mlmaq.amp(x)
# break up contours so that labels are in bottom part
plt.contour(x, zmaq0[:51], amaq[:51], levels=levels, colors='C0')
cs = plt.contour(x, zmaq0[50:71], amaq[50:71], levels=levels, colors='C0')
plt.clabel(cs, fmt='%1.1f')
plt.contour(x, zmaq0[70:], amaq[70:], levels=levels, colors='C0')
#
plt.yticks([-20, 0], fontsize=12)
plt.xticks(np.arange(-300, 301, 100), fontsize=12)
plt.xlim(-300, 300)
plt.ylabel('z (m)', labelpad=-20, fontsize=12)
plt.xlabel('x (m)', fontsize=12)
plt.text(280, -19, '(a)', fontsize=12)
plt.subplot(212)
levels = np.arange(0.1, 1, 0.1)
x = np.linspace(-300, 300, 201)
# Cell-midpoint z coordinates for four units of 0.25 m cells down to -20 m
# (extra 0.25 offset below each internal boundary)
zmaq1 = np.arange(-0.125, -5, -0.25)
zmaq2 = np.arange(-5.125 - 0.25, -10, -0.25)
zmaq3 = np.arange(-10.125 - 0.25, -15, -0.25)
zmaq4 = np.arange(-15.125 - 0.25, -20, -0.25)
zmaq = np.hstack((zmaq1, zmaq2, zmaq3, zmaq4))
amaq = mlmaq2.amp(x)
plt.contour(x, zmaq[:41], amaq[:41], levels=levels, colors='C0')
cs = plt.contour(x, zmaq[40:61], amaq[40:61], levels=levels, colors='C0')
plt.clabel(cs, fmt='%1.1f')
plt.contour(x, zmaq[60:], amaq[60:], levels=levels, colors='C0')
# pink lines mark the internal layer boundaries
plt.plot([-300, 300], [-5, -5], color='pink')
plt.plot([-300, 300], [-10, -10], color='pink')
plt.plot([-300, 300], [-15, -15], color='pink')
plt.yticks([-20, 0], fontsize=12)
plt.xticks(np.arange(-300, 301, 100), fontsize=12)
plt.xlim(-300, 300)
plt.xlabel('x (m)', fontsize=12)
plt.ylabel('z (m)', labelpad=-20, fontsize=12)
plt.text(280, -19, '(b)', fontsize=12);
#plt.savefig('maqtides_ex2.eps', bbox_inches='tight')
# -
# figure
# Figure: tidal-phase contours for (a) the first multi-layer model and
# (b) the second model with internal boundaries at z = -5, -10, -15 m.
plt.figure(figsize=(12, 5))
plt.subplot(211)
levels = np.arange(-15, 180, 15)
x = np.linspace(-300, 300, 201)
fmaq = mlmaq.phase(x) * 24 * 60  # * 24 * 60: presumably phase in days, shown in minutes — confirm
plt.contour(x, zmaq0[:41], fmaq[:41], levels=levels, colors='C0')
cs = plt.contour(x, zmaq0[40:61], fmaq[40:61], levels=levels, colors='C0')
plt.clabel(cs, fmt='%1.0f')
plt.contour(x, zmaq0[60:], fmaq[60:], levels=levels, colors='C0')
plt.yticks([-20, 0], fontsize=12)
plt.xticks(np.arange(-300, 301, 100), fontsize=12)
plt.xlim(-300, 300)
plt.ylabel('z (m)', labelpad=-20, fontsize=12)
plt.xlabel('x (m)', fontsize=12)
plt.text(280, -19, '(a)', fontsize=12)
#
plt.subplot(212)
fmaq = mlmaq2.phase(x) * 24 * 60
levels = np.arange(-15, 180, 15)
x = np.linspace(-300, 300, 201)
# Same cell-midpoint grid as in the amplitude figure
zmaq1 = np.arange(-0.125, -5, -0.25)
zmaq2 = np.arange(-5.125 - 0.25, -10, -0.25)
zmaq3 = np.arange(-10.125 - 0.25, -15, -0.25)
zmaq4 = np.arange(-15.125 - 0.25, -20, -0.25)
zmaq = np.hstack((zmaq1, zmaq2, zmaq3, zmaq4))
amaq = mlmaq2.amp(x)  # NOTE(review): computed but never used in this phase figure — dead code?
plt.contour(x, zmaq[:41], fmaq[:41], levels=levels, colors='C0')
cs = plt.contour(x, zmaq[40:], fmaq[40:], levels=levels, colors='C0')
plt.clabel(cs, fmt='%1.0f')
# pink lines mark the internal layer boundaries
plt.plot([-300, 300], [-5, -5], color='pink')
plt.plot([-300, 300], [-10, -10], color='pink')
plt.plot([-300, 300], [-15, -15], color='pink')
plt.yticks([-20, 0], fontsize=12)
plt.xticks(np.arange(-300, 301, 100), fontsize=12)
plt.xlim(-300, 300)
plt.xlabel('x (m)', fontsize=12)
plt.ylabel('z (m)', labelpad=-20, fontsize=12)
plt.text(280, -19, '(b)', fontsize=12);
#plt.savefig('maqtides_ex2b.eps', bbox_inches='tight')
|
maqtides_examples_paper.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ktFt6IzKJkSn" colab_type="code" colab={}
import pandas as pd
# + id="_JoUdRaiIwo4" colab_type="code" colab={}
# Loading data from the Github repository to colab notebook
filename = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter15/Dataset/crx.data'
# + id="0ZmzTR-CJra-" colab_type="code" outputId="e5de1489-c0b5-448b-ac00-b981f48b5d80" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Loading the data using pandas; '?' entries are treated as missing values (NaN)
credData = pd.read_csv(filename,sep=",",header = None,na_values = "?")
credData.head()
# + id="rXYA47JRKVz-" colab_type="code" outputId="59a1fde9-7564-42d8-a83a-c7ce7b530b26" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Changing the classes in the label column (index 15) from '+'/'-' to 1/0
credData.loc[credData[15] == '+' , 15] = 1
credData.loc[credData[15] == '-' , 15] = 0
credData.head()
# + id="R9-NFhigmokr" colab_type="code" outputId="bdfc4da0-bf84-4420-bc4d-594b15f23f79" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Dropping all the rows with na values
newcred = credData.dropna(axis = 0)
newcred.shape
# + id="vxzQZpXMZZN6" colab_type="code" colab={}
# Separating the categorical variables to make dummy (one-hot) variables
credCat = pd.get_dummies(newcred[[0,3,4,5,6,8,9,11,12]])
# + id="__Gup8InbTmf" colab_type="code" colab={}
# Separating the numerical variables
credNum = newcred[[1,2,7,10,13,14]]
# + id="5S_Icyu1r8YJ" colab_type="code" outputId="2dcd0dd6-1fc8-4107-b1c0-0b11f4eecb37" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Making the X variable which is a concatenation of categorical and numerical data
X = pd.concat([credCat,credNum],axis = 1)
print(X.shape)
# Separating the label as y variable
y = newcred[15]
print(y.shape)
# + id="ZflX7J-5GtY_" colab_type="code" outputId="4cdc53bc-fcca-4a71-f101-fb52e9a83aa4" colab={"base_uri": "https://localhost:8080/", "height": 224}
# Normalising the data sets to the [0, 1] range
# Import library function
from sklearn import preprocessing
# Creating the scaling function
minmaxScaler = preprocessing.MinMaxScaler()
# Transforming with the scaler function
X_tran = pd.DataFrame(minmaxScaler.fit_transform(X))
# Printing the output
X_tran.head()
# + id="iJiHJ6zWJ9y_" colab_type="code" colab={}
# Splitting the data set to train and test sets
from sklearn.model_selection import train_test_split
# Splitting the data into train (70%) and test (30%) sets; random_state fixed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X_tran, y, test_size=0.3, random_state=123)
# + id="t9icJ1EF0LpQ" colab_type="code" outputId="6a846517-98ac-4d7f-8ba6-5f2b56239f5e" colab={"base_uri": "https://localhost:8080/", "height": 156}
from sklearn.linear_model import LogisticRegression
# Defining the LogisticRegression function (default hyperparameters — this is the benchmark model)
benchmarkModel = LogisticRegression()
# Fitting the model
benchmarkModel.fit(X_train, y_train)
# + id="X6dIm5CQ0Tli" colab_type="code" outputId="b7acb3cc-8d4c-48e2-84f1-39451fbeec70" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Prediction and accuracy metrics
pred = benchmarkModel.predict(X_test)
print('Accuracy of Logistic regression model prediction on test set: {:.2f}'.format(benchmarkModel.score(X_test, y_test)))
# + id="rHqHrnOytmxq" colab_type="code" outputId="fd71b44c-16b7-4340-c0bf-f128a2b71e68" colab={"base_uri": "https://localhost:8080/", "height": 204}
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Confusion Matrix for the model
print(confusion_matrix(y_test, pred))
# Classification report (precision/recall/f1 per class) for the model
print(classification_report(y_test, pred))
|
Chapter15/Activity15.01/Activity_15_01_Benchmark_Model_using_Logistic_Regression_v1_0.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="AYV_dMVDxyc2"
# [](https://github.com/lab-ml/nn)
# [](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/capsule_networks/mnist.ipynb)
#
# ## Training a Capsule Network to classify MNIST digits
#
# This is an experiment to train a Capsule Network to classify MNIST digits using PyTorch.
# + [markdown] id="AahG_i2y5tY9"
# Install the `labml-nn` package
# + id="ZCzmCrAIVg0L" colab={"base_uri": "https://localhost:8080/"} outputId="7ab15f72-c99f-4097-ecd2-5740ee9ed61c"
# !pip install labml-nn
# + [markdown] id="SE2VUQ6L5zxI"
# Imports
# + id="0hJXx_g0wS2C"
import torch
from labml import experiment
from labml_nn.capsule_networks.mnist import Configs
# + [markdown] id="Lpggo0wM6qb-"
# Create an experiment
# + id="bFcr9k-l4cAg"
experiment.create(name="capsule_networks")
# + [markdown] id="-OnHLi626tJt"
# Initialize [Capsule Network configurations](https://lab-ml.com/labml_nn/capsule_networks/mnist.html)
# + id="Piz0c5f44hRo"
conf = Configs()
# + [markdown] id="wwMzCqpD6vkL"
# Set experiment configurations and assign a configurations dictionary to override configurations
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="e6hmQhTw4nks" outputId="ebefa8fa-93d2-4131-db95-e27f15aa3aa0"
# Overrides: Adam optimizer, learning rate 1e-3, 5 inner iterations
experiment.configs(conf, {'optimizer.optimizer': 'Adam',
                          'optimizer.learning_rate': 1e-3,
                          'inner_iterations': 5})
# + [markdown] id="EvI7MtgJ61w5"
# Set PyTorch models for loading and saving
# + colab={"base_uri": "https://localhost:8080/", "height": 102} id="GDlt7dp-5ALt" outputId="9701092b-c88a-4687-c90e-b193c369e59e"
experiment.add_pytorch_models({'model': conf.model})
# + [markdown] id="KJZRf8527GxL"
# Start the experiment and run the training loop.
# + colab={"base_uri": "https://localhost:8080/", "height": 646} id="aIAWo7Fw5DR8" outputId="5ddbfce3-91f8-4506-e483-1640cb5a14b3"
with experiment.start():
    conf.run()
# + id="oBXXlP2b7XZO"
|
labml_nn/capsule_networks/mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler, MaxAbsScaler
import category_encoders as ce
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from imblearn.over_sampling import ADASYN
import pandas as pd  # BUG FIX: pandas was never imported in this notebook, so pd.read_pickle raised NameError

# Load the Socar insurance-fraud dataset (pickled DataFrame)
data = pd.read_pickle('socar_origin_ko.p')
# +
# Columns excluded from modelling. The original list contained
# 'insurance_site_aid_YN' twice; deduplicated here — membership checks are unchanged.
no_use_list = ['b2b', 'sharing_type', 'has_previous_accident', 'car_model',
               'accident_ratio', 'socarpass', 'socarsave', 'repair_cnt',
               'insurance_site_aid_YN', 'total_prsn_cnt', 'car_part2']
num_col_list = ['repair_cost', 'insure_cost']          # numeric features to scale
no_cat_list = num_col_list + ['fraud_YN', 'test_set']  # columns kept out of one-hot encoding
# -
# Drop unused columns, one-hot encode the categoricals, robust-scale the numerics
data = data[[col for col in data.columns if col not in no_use_list]]
one_hot_col_list = [col for col in data.columns if col not in no_cat_list]
OHE = ce.OneHotEncoder(cols=one_hot_col_list)
data = OHE.fit_transform(data)
scaler = RobustScaler()
data[num_col_list] = scaler.fit_transform(data[num_col_list])
# Pre-marked train/test split via the 'test_set' indicator column
test_y, train_y = data["fraud_YN"][data['test_set'] == 1], data["fraud_YN"][data['test_set'] == 0]
test_x, train_x = (data[data['test_set'] == 1].drop(["fraud_YN", 'test_set'], axis=1),
                   data[(data['test_set'] == 0)].drop(["fraud_YN", 'test_set'], axis=1))
train_x.shape, test_x.shape, train_y.shape, test_y.shape
# Oversample the minority (fraud) class on the training split only
smote = SMOTE(random_state=13)
# BUG FIX: was fit_resample(x_train, y_train) — those names were never defined;
# the split above produced train_x / train_y
x_train_over, y_train_over = smote.fit_resample(train_x, train_y)
# Candidate classifiers, all seeded (random_state=13) for reproducibility
rf_clf = RandomForestClassifier(random_state=13, n_jobs = -1 , n_estimators=100)
dt_clf = DecisionTreeClassifier(random_state=13, max_depth=4)
lr_clf = LogisticRegression(random_state=13, solver='liblinear')
lgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False)
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score, roc_auc_score)
def get_clf_eval(y_test, pred):
    """Return the tuple (accuracy, precision, recall, f1, roc_auc) for a prediction."""
    return (
        accuracy_score(y_test, pred),
        precision_score(y_test, pred),
        recall_score(y_test, pred),
        f1_score(y_test, pred),
        roc_auc_score(y_test, pred),
    )
from sklearn.metrics import confusion_matrix
def print_clf_eval(y_test, pred):
    """Print the confusion matrix followed by the five summary metrics."""
    acc, pre, re, f1, auc = get_clf_eval(y_test, pred)
    print(confusion_matrix(y_test, pred))
    print(acc, pre)
    print(re, f1, auc)
def get_result(model, x_train, y_train, x_test, y_test):
    """Fit *model* on the training split and return its metric tuple on the test split."""
    model.fit(x_train, y_train)
    return get_clf_eval(y_test, model.predict(x_test))
def get_result_pd(models, model_names, x_train, y_train, x_test, y_test):
    """Evaluate each model and collect the metrics into a DataFrame indexed by model name."""
    col_names = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']
    rows = [get_result(model, x_train, y_train, x_test, y_test) for model in models]
    return pd.DataFrame(rows, columns=col_names, index=model_names)
# Compare the four classifiers on the SMOTE-balanced training data
models = [lr_clf, dt_clf, rf_clf, lgbm_clf]
model_names = ['LinearReg', 'DecisionTree', 'RandomForest', 'LightGBM']
# BUG FIX: was (..., x_test, y_test) — those names are never defined;
# the hold-out split is named test_x / test_y
results = get_result_pd(models, model_names, x_train_over, y_train_over, test_x, test_y)
results
# Preprocessing pipeline: drop the unused columns, robust-scale the numeric ones,
# and pass everything else through unchanged.
pre_process = ColumnTransformer(remainder = 'passthrough',
                 transformers=[('drop_columns', 'drop',no_use_list),
                               ('scaler',scaler,num_col_list)])
# NOTE(review): train_x was built from `data` AFTER the no_use_list columns were removed
# and after one-hot encoding, so the 'drop' transformer targets columns that no longer
# exist — confirm whether this pipeline is meant to receive the original, unfiltered frame.
full_pipeline = Pipeline(steps=[('pre_processing', pre_process), ('random_forest', RandomForestClassifier(max_depth=10,random_state=2))])
full_pipeline.fit(train_x, train_y)
full_pipeline.predict(test_x)
# +
# chat('police_site_aid_YN')
# +
# def scaling(x):
# data_num_tmp=data_num.copy()
# transformer = x()
# x_data = transformer.fit_transform(data_num_tmp)
# data_num_tmp[data_num_tmp.columns]=x_data
# pd.plotting.scatter_matrix(data_num_tmp, alpha=0.2)
# data_num_tmp['fraud']=data['fraud_YN']
# insure_cost_dist=data_num_tmp[data_num_tmp['fraud']==1][['repair_cost','insure_cost']]
# insure_cost_dist.plot.box()
# for i in [RobustScaler, StandardScaler, MinMaxScaler]:
# scaling(i)
# +
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import RobustScaler
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OrdinalEncoder
# from sklearn.preprocessing import OneHotEncoder
# num_col_list=['repair_cost','insure_cost']
# drop_list= num_col_list + ['fraud_YN']
# cat_col_list=[x for x in data.columns if x not in drop_list]
# num_pipeline = Pipeline([
# ('robust_scaler', RobustScaler())
# ])
# full_pipeline = ColumnTransformer([
# ("num", num_pipeline, num_col_list),
# ("cat", OneHotEncoder(),cat_col_list)
# ])
# prepared_data = full_pipeline.fit_transform(x_train)
|
socar/socar.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using the `apply()` method in pandas
#
# Sometimes, creating a calculated column in pandas is as simple as this:
#
# ```python
# df['difference'] = df['first_column'] - df['second_column']
# ```
#
# or this:
#
# ```python
# df['date_fixed'] = pd.to_datetime(df['date'])
# ```
#
# Other times, though, your needs are more complex -- you need to take each row of data in your data frame and do _several things_ to it. That's where [`apply()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.apply.html) comes in.
#
# Given a function, `apply()` will, uh, _apply_ that function to every row in the data frame. A common scenario for doing so would be to create a new column.
#
# An example might make this idea a little more clear. Let's load up a CSV of Texas death row media witnesses.
import pandas as pd
# parse_dates converts the execution_date column into datetime64 values
df = pd.read_csv('../data/tx-death-row-media-list.csv', parse_dates=['execution_date'])
# Now, let's say, we want to create a new column with the _month_ of the execution. [Given what we know about date objects](Date%20and%20time%20data%20types.ipynb), this should be simple, right?
#
# So this might be my first guess:
# NOTE: the next line intentionally fails (a Series has no `.month` attribute) —
# that's the point of the lesson; the fix with apply() follows below.
df['month'] = df['execution_date'].month
# Womp womp. Looks like we need to create a _function_ to do this for us. Then we can _apply_ that function to each row.
#
# 👉 For a refresher on writing your own functions, [check out this notebook](Functions.ipynb).
def get_month(row):
    """Return the month number (1-12) of a row's 'execution_date' value."""
    execution_date = row['execution_date']
    return execution_date.month
# ... and now we can apply it. We also need to specify _how_ it's going to be applied. `axis=0` is the default and attempts to apply the function to each _column_. We want `axis=1`, which applies the function to each _row_ of data.
df['month'] = df.apply(get_month, axis=1)  # axis=1: pass each *row* (not column) to get_month
df.head()
# We could also have dropped in a _lambda expression_ for the function -- in this case, it's simple enough to be readable:
df['month'] = df.apply(lambda x: x['execution_date'].month, axis=1)
|
reference/Using the apply method in pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Prototyping with Python
#
# _This is the manuscript of <NAME>'s keynote
# "Coding Effective Testing Tools Within Minutes" at the TAIC PART 2020 conference._
# + [markdown] slideshow={"slide_type": "subslide"}
# In our [Fuzzing Book](index.ipynb), we use Python to implement automated testing techniques, and also as the language for most of our test subjects. Why Python? The short answer is
#
# > Python made us amazingly _productive_. Most techniques in this book took **2-3 days** to implement. This is about **10-20 times faster** than for "classic" languages like C or Java.
#
# A factor of 10–20 in productivity is enormous, almost ridiculous. Why is that so, and which consequences does this have for research and teaching?
#
# In this essay, we will explore some of the reasons, prototyping a _symbolic test generator_ from scratch. This normally would be considered a very difficult task, taking months to build. Yet, developing the code in this chapter took less than two hours – and explaining it takes less than 20 minutes.
# + slideshow={"slide_type": "skip"}
from bookutils import YouTubeVideo
YouTubeVideo("IAreRIID9lM")  # embed the talk recording for this chapter
# + [markdown] slideshow={"slide_type": "slide"}
# ## Python is Easy
#
# Python is a high-level language that allows one to focus on the actual _algorithms_ rather than how individual bits and bytes are passed around in memory. For this book, this is important: We want to focus on how individual techniques work, and not so much their optimization. Focusing on algorithms allows you to toy and tinker with them, and quickly develop your own. Once you have found out how to do things, you can still port your approach to some other language or specialized setting.
# + [markdown] slideshow={"slide_type": "fragment"}
# As an example, take the (in)famous _triangle_ program, which classifies a triangle of lengths $a$, $b$, $c$ into one of three categories. It reads like pseudocode; yet, we can easily execute it.
# + slideshow={"slide_type": "subslide"}
def triangle(a, b, c):
    """Classify a triangle with side lengths a, b, c.

    Returns 'equilateral' when all sides match, one of three 'isosceles #N'
    labels identifying which pair of sides matched, or 'scalene'.
    """
    if a == b and b == c:
        return 'equilateral'
    if a == b:
        return 'isosceles #1'
    if b == c:
        return 'isosceles #2'
    if a == c:
        return 'isosceles #3'
    return 'scalene'
# + [markdown] slideshow={"slide_type": "fragment"}
# Here's an example of executing the `triangle()` function:
# + slideshow={"slide_type": "subslide"}
triangle(2, 3, 4)
# + [markdown] slideshow={"slide_type": "fragment"}
# For the remainder of this chapter, we will use the `triangle()` function as ongoing example for a program to be tested. Of course, the complexity of `triangle()` is a far cry from large systems, and what we show in this chapter will not apply to, say, an ecosystem of thousands of intertwined microservices. Its point, however, is to show how easy certain techniques can be – if you have the right language and environment.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Fuzzing is as Easy as Always
# + [markdown] slideshow={"slide_type": "fragment"}
# If you want to test `triangle()` with random values, that's fairly easy to do. Just bring along one of the Python random number generators and throw them into `triangle()`.
# + slideshow={"slide_type": "skip"}
from random import randrange
# + slideshow={"slide_type": "subslide"}
# Fuzz triangle() with ten random side-length triples drawn uniformly from [1, 10)
for i in range(10):
    a = randrange(1, 10)
    b = randrange(1, 10)
    c = randrange(1, 10)
    t = triangle(a, b, c)
    print(f"triangle({a}, {b}, {c}) = {repr(t)}")
# + [markdown] slideshow={"slide_type": "subslide"}
# So far, so good – but that's something you can do in pretty much any programming language. What is it that makes Python special?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dynamic Analysis in Python: So Easy it Hurts
#
# Dynamic analysis is the ability to track what is happening during program execution. The Python `settrace()` mechanism allows you to track all code lines, all variables, all values, as the program executes – and all this in a handful of lines of code. Our `Coverage` class from [the chapter on coverage](Coverage.ipynb) shows how to capture a trace of all lines executed in five lines of code; such a trace easily converts into sets of lines or branches executed. With two more lines, you can easily track all functions, arguments, variable values, too – see for instance our [chapter on dynamic invariants](DynamicInvariants). And you can even access the source code of individual functions (and print it out, too!) All this takes 10, maybe 20 minutes to implement.
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is a piece of Python that does it all. We track lines executed, and for every line, we print its source codes and the current values of all local variables:
# + slideshow={"slide_type": "skip"}
import sys
import inspect
# + slideshow={"slide_type": "subslide"}
def traceit(frame, event, arg):
    """Trace function: print the current source line and all local variables.

    Returns itself so tracing continues into nested scopes (settrace protocol).
    """
    code = frame.f_code
    source_lines, first_lineno = inspect.getsourcelines(code)
    current = source_lines[frame.f_lineno - first_lineno].rstrip()
    location = f"{code.co_name}:{frame.f_lineno} {current}"
    local_reprs = ", ".join(f"{name} = {value}" for name, value in frame.f_locals.items())
    print(f"{location:50} ({local_reprs})")
    return traceit
# + [markdown] slideshow={"slide_type": "fragment"}
# The function `sys.settrace()` registers `traceit()` as a trace function; it will then trace the given invocation of `triangle()`:
# + slideshow={"slide_type": "subslide"}
def triangle_traced():
    """Run triangle(2, 2, 1) with traceit() installed as the trace function."""
    sys.settrace(traceit)
    triangle(2, 2, 1)
    sys.settrace(None)  # always uninstall the tracer again
# + slideshow={"slide_type": "fragment"}
triangle_traced()
# + [markdown] slideshow={"slide_type": "subslide"}
# In comparison, try to build such a dynamic analysis for, say, C. You can either _instrument_ the code to track all lines executed and record variable values, storing the resulting info in some database. This will take you _weeks,_ if not _months_ to implement. You can also run your code through a debugger (step-print-step-print-step-print); but again, programming the interaction can take days. And once you have the first results, you'll probably realize you need something else or better, so you go back to the drawing board. Not fun.
# + [markdown] slideshow={"slide_type": "fragment"}
# Together with a dynamic analysis such as the one above, you can make fuzzing much smarter. Search-based testing, for instance, evolves a population of inputs towards a particular goal, such as coverage. With a good dynamic analysis, you can quickly implement search-based strategies for arbitrary goals.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Static Analysis in Python: Still Easy
#
# Static analysis refers to the ability to analyze _program code_ without actually executing it. Statically analyzing Python code to deduce any property can be a nightmare, because the language is so highly dynamic. (More on that below.)
#
# If your static analysis does not have to be _sound_, – for instance, because you only use it to _support_ and _guide_ another technique such as testing – then a static analysis in Python can be very simple. The `ast` module allows you to turn any Python function into an abstract syntax tree (AST), which you then can traverse as you like. Here's the AST for our `triangle()` function:
# + slideshow={"slide_type": "skip"}
from bookutils import rich_output
# + slideshow={"slide_type": "skip"}
import ast
# + slideshow={"slide_type": "subslide"}
if rich_output():
    # Rich (notebook) output: use the showast package for a graphical rendering
    from showast import show_ast
else:
    def show_ast(tree):
        """Fallback for plain environments: print a textual dump of the AST."""
        # BUG FIX: ast.dump() returns a string; without print() the fallback
        # silently discarded it and displayed nothing.
        print(ast.dump(tree, indent=4))
# + slideshow={"slide_type": "fragment"}
triangle_source = inspect.getsource(triangle)  # source text of triangle()
triangle_ast = ast.parse(triangle_source)      # ...parsed into an abstract syntax tree
show_ast(triangle_ast)
# + [markdown] slideshow={"slide_type": "fragment"}
# Now suppose one wants to identify all `triangle` branches and their conditions using static analysis. You would traverse the AST, searching for `If` nodes, and take their first child (the condition). This is easy as well:
# + slideshow={"slide_type": "subslide"}
def collect_conditions(tree):
    """Return the source text of every `if` condition in *tree*, in pre-order."""
    conditions = []

    def visit(node):
        if isinstance(node, ast.If):
            conditions.append(ast.unparse(node.test).strip())
        for child in ast.iter_child_nodes(node):
            visit(child)

    visit(tree)
    return conditions
# + [markdown] slideshow={"slide_type": "fragment"}
# Here are the four `if` conditions occurring in the `triangle()` code:
# + slideshow={"slide_type": "subslide"}
collect_conditions(triangle_ast)
# + [markdown] slideshow={"slide_type": "fragment"}
# Not only can we extract individual program elements, we can also change them at will and convert the tree back into source code. Program transformations (say, for instrumentation or mutation analysis) are a breeze. The above code took five minutes to write. Again, try that in Java or C.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Symbolic Reasoning in Python: There's a Package for That
#
# Let's get back to testing. We have shown how to extract conditions from code. To reach a particular location in the `triangle()` function, one needs to find a solution for the _path conditions_ leading to that branch. To reach the last line in `triangle()` (the `'scalene'` branch), we have to find a solution for
# $$a \ne b \land b \ne c \land a \ne c$$
#
#
# We can make use of a _constraint_ solver for this, such as Microsoft's [_Z3_ solver](https://github.com/Z3Prover/z3):
# + slideshow={"slide_type": "skip"}
import z3 # type: ignore
# + [markdown] slideshow={"slide_type": "subslide"}
# Let us use Z3 to find a solution for the `'scalene'` branch condition:
# + slideshow={"slide_type": "fragment"}
# Symbolic integer variables for the three triangle sides
a = z3.Int('a')
b = z3.Int('b')
c = z3.Int('c')
# + slideshow={"slide_type": "fragment"}
s = z3.Solver()
s.add(z3.And(a > 0, b > 0, c > 0))  # Triangle edges are positive
s.add(z3.And(a != b, b != c, a != c))  # Our condition
s.check()
# + [markdown] slideshow={"slide_type": "fragment"}
# Z3 has shown us that there is a solution ("sat" = "satisfiable"). Let us get one:
# + slideshow={"slide_type": "fragment"}
m = s.model()  # a concrete assignment for a, b, c satisfying the constraints
m
# + [markdown] slideshow={"slide_type": "subslide"}
# We can use this solution right away for testing the `triangle()` function and find that it indeed covers the `'scalene'` branch. The method `as_long()` converts the Z3 results into numerical values.
# + slideshow={"slide_type": "fragment"}
triangle(m[a].as_long(), m[b].as_long(), m[c].as_long())
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Symbolic Test Generator
#
# With what we have seen, we can now build a _symbolic test generator_ – a tool that attempts to systematically create test inputs that cover all paths. Let us find all conditions we need to solve, by exploring all paths in the tree. We turn these paths to Z3 format right away:
# + slideshow={"slide_type": "subslide"}
def collect_path_conditions(tree):
    """Return one Z3 conjunction (as source text) per execution path through *tree*.

    Each path is the list of branch conditions (negated for `else` branches)
    taken to reach a leaf of the `if` structure.
    """
    paths = []

    def walk_branch(children, context, cond):
        # Descend into one branch; if no nested path was recorded,
        # this branch itself terminates a path.
        count_before = len(paths)
        for child in children:
            walk(child, context + [cond])
        if len(paths) == count_before:
            paths.append(context + [cond])

    def walk(node, context):
        if isinstance(node, ast.If):
            cond = ast.unparse(node.test).strip()
            walk_branch(node.body, context, cond)
            walk_branch(node.orelse, context, "z3.Not(" + cond + ")")
        else:
            for child in ast.iter_child_nodes(node):
                walk(child, context)

    walk(tree, [])
    return ["z3.And(" + ", ".join(path) + ")" for path in paths]
# + slideshow={"slide_type": "subslide"}
# One Z3 conjunction per execution path through triangle()
path_conditions = collect_path_conditions(triangle_ast)
path_conditions
# + [markdown] slideshow={"slide_type": "fragment"}
# Now all we need to do is to feed these constraints into Z3. We see that we easily cover all branches:
# + slideshow={"slide_type": "subslide"}
# Solve each path condition and run triangle() on the resulting model values
for path_condition in path_conditions:
    s = z3.Solver()
    s.add(a > 0, b > 0, c > 0)
    # eval() is acceptable here: the string was generated by collect_path_conditions()
    # above, not taken from external input
    eval(f"s.check({path_condition})")
    m = s.model()
    print(m, triangle(m[a].as_long(), m[b].as_long(), m[c].as_long()))
# + [markdown] slideshow={"slide_type": "fragment"}
# Success! We have covered all branches of the triangle program!
# + [markdown] slideshow={"slide_type": "subslide"}
# Now, the above is still very limited – and tailored to the capabilities of the `triangle()` code. A full implementation would actually
#
# * translate entire Python conditions into Z3 syntax (if possible),
# * handle more control flow constructs such as returns, assertions, exceptions
# * and half a million things more (loops, calls, you name it)
#
# Some of these may not be supported by the Z3 theories.
# + [markdown] slideshow={"slide_type": "subslide"}
# To make it easier for a constraint solver to find solutions, you could also provide _concrete values_ observed from earlier executions that already are known to reach specific paths in the program. Such concrete values would be gathered from the tracing mechanisms above, and boom: you would have a pretty powerful and scalable concolic (concrete-symbolic) test generator.
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, the above might take you a day or two, and as you expand your test generator beyond `triangle()`, you will add more and more features. The nice part is that every of these features you will invent might actually be a research contribution – something nobody has thought of before. Whatever idea you might have: you can quickly implement it and try it out in a prototype. And again, this will be orders of magnitude faster than for conventional languages.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Things that will not work
#
# Python has a reputation for being hard to analyze statically, and this is true; its dynamic nature makes it hard for traditional static analysis to exclude specific behaviors.
#
# We see Python as a great language for prototyping automated testing and dynamic analysis techniques, and as a good language to illustrate _lightweight_ static and symbolic analysis techniques that would be used to _guide_ and _support_ other techniques (say, generating software tests).
#
# But if you want to _prove_ specific properties (or the absence thereof) by static analysis of code only, Python is a challenge, to say the least; and there are areas for which we would definitely _warn_ against using it.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### (No) Type Checking
#
# Using Python to demonstrate _static type checking_ will be suboptimal (to say the least) because, well, Python programs typically do not come with type annotations. You _can_, of course, annotate variables with types, as we assume in the [chapter on Symbolic Fuzzing](SymbolicFuzzer.ipynb):
# + slideshow={"slide_type": "fragment"}
def typed_triangle(a: int, b: int, c: int) -> str:
    """Fully type-annotated wrapper that simply delegates to ``triangle``."""
    classification = triangle(a, b, c)
    return classification
# + [markdown] slideshow={"slide_type": "fragment"}
# Most real-world Python code will not be annotated with types, though. While you can also _retrofit them_, as discussed in [our chapter on dynamic invariants](DynamicInvariants.ipynb), Python simply is not a good domain to illustrate type checking. If you want to show the beauty and usefulness of type checking, use a strongly typed language like Java, ML, or Haskell.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### (No) Program Proofs
#
# Python is a highly dynamic language in which you can change _anything_ at runtime. It is no problem assigning a variable different types, as in
# + slideshow={"slide_type": "fragment"}
# Demonstration: rebinding one name to values of different types is legal in
# Python; the trailing annotation only silences static checkers at runtime
# nothing is enforced.
x = 42
x = "a string" # type: ignore
# + [markdown] slideshow={"slide_type": "fragment"}
# or change the existence (and scope) of a variable depending on some runtime condition:
# + slideshow={"slide_type": "subslide"}
# Demonstration: whether a name exists can depend on runtime conditions,
# which defeats purely static reasoning about variable scope.
p1, p2 = True, False
if p1:
    x = 42
if p2:
    del x
# Does x exist at this point?
# + [markdown] slideshow={"slide_type": "subslide"}
# Such properties make symbolic reasoning on code (including static analysis and type checking) much harder, if not outright impossible. If you need lightweight static and symbolic analysis techniques to _guide_ other techniques (say, test generation), then imprecision may not hurt much. But if you want to derive _guarantees_ from your code, do not use Python as test subject; again, strongly statically typed languages like Java/ML/Haskell (or some very restricted toy language) are much better grounds for experimentation.
# + [markdown] slideshow={"slide_type": "fragment"}
# This does not mean that languages like Python should _not_ be statically checked. On the contrary, the widespread usage of Python calls loudly for better static checking tools. But if you want to teach or research static and symbolic techniques, we definitely would not use Python as our language of choice.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Virtues of Prototyping
#
# One neat thing about prototyping (with Python or whatever) is that it allows you to fully focus on your _approach_, rather than on the infrastructure. Very obviously, this is useful for _teaching_ – you can use examples as the ones above in a lecture to very quickly communicate essential techniques of program analysis and test generation.
#
# But prototyping has more advantages. A Jupyter Notebook (like this one) documents how you developed your approach, together with examples, experiments, and rationales – and still focusing on the essentials. If you write a tool the "classical" way, you will eventually deliver thousands of lines of code that do everything under the sun, but only once you have implemented everything will you know whether things actually work. This is a huge risk, and if you still have to change things, you will have to refactor things again and again. Furthermore, for anyone who will work on that code later, it will take days, if not weeks, to re-extract the basic idea of the approach, as it will be buried under loads and loads of infrastructure and refactorings.
#
# Our consequence at this point is that we now implement new ideas _twice_:
#
# * First, we implement things as a notebook (as this one), experimenting with various approaches and parameters until we get them right.
#
# * Only once we have the approach right, and if we have confidence that it works, we reimplement it in a tool that works on large scale programs. This can still take weeks to months, but at least we know we are on a good path.
#
# Incidentally, it may well be that the original notebooks will have a longer life, as they are simpler, better documented, and capture the gist of our novel idea. And this is how several of the notebooks in this book came to be.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Try it out!
#
# All the code examples above can be run by you – and changed as you like! From the Web page, the easiest way is to go to "Resources $\rightarrow$ Edit as Notebook", and you can experiment with the original Jupyter Notebook right within your browser. (Use `Shift` + `Return` to execute code.)
#
# From the "Resources" menu, you can also download the Python code (`.py`) to run it within a Python environment, or download the notebook (`.ipynb`) to run it within Jupyter – and again, change them as you like. If you want to run this code on your own machine, you will need the following packages:
#
# ```
# pip install showast
# pip install z3-solver
# ```
#
# Enjoy!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Lessons Learned
#
# **Python** is a great language for prototyping testing and debugging tools:
#
# * In Python, dynamic analysis and static analysis are extremely easy to implement.
# * Python provides an enormous infrastructure for parsing, handling programs as trees, and constraint solving.
# * These can make you develop new techniques within _hours_ instead of weeks.
#
# Python is _not_ recommended as a domain for pure symbolic code analysis, though.
#
# * There is little to no static typing
# * The language is highly dynamic with little to no static guarantees
#
# However, even a potentially _unsound_ symbolic analysis can still guide test generation – and this again is very easy to build.
#
# **Jupyter Notebooks** (using Python or other languages) are great for _prototyping_:
#
# * Notebooks document the gist of your approach, including examples and experiments.
# * This is great for teaching, communication, and even documentation.
# * Doing experiments on prototypes early reduces risks for later large-scale implementations.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Next Steps
#
# If you want to see more examples of us using Python for prototyping – have a look at [this book](index.ipynb)! Specifically,
#
# * see how we develop [fuzzers](Fuzzer.ipynb) step by step;
# * see how we use [dynamic analysis to check coverage](Coverage.ipynb); or
# * see how we analyze Python code for [concolic](ConcolicFuzzer.ipynb) and [symbolic](SymbolicFuzzer.ipynb) fuzzing.
#
# There's lots to learn – enjoy the read!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Background
#
# The _triangle problem_ is adapted from "The Art of Software Testing" by <NAME> Sandler \cite{Myers2004}. It is an allegedly simple problem which reveals a surprising depth when you think about all the things that might go wrong.
#
# The _Z3 solver_ we use in this chapter was developed at Microsoft Research under the lead of <NAME> and <NAME> \cite{z3}. It is one of the most powerful and most popular solvers.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercises
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 1: Features! Features!
#
# Our path collector is still very limited. Things that do not work include
#
# * Complex conditions, such as boolean operators. Python operators `a and b` need to be translated to Z3 syntax `z3.And(a, b)`.
# * Early returns. After `if A: return`, the condition `not A` must hold for the following statements.
# * Assignments.
# * Loops.
# * Function calls.
#
# The more of these you implement, the closer you will get to a full-fledged symbolic test generator for Python. But at some point, _your prototype may not be a prototype anymore_, and then, Python may no longer be the best language to use. Find a good moment when it is time to switch from a prototypical to a production tool.
|
docs/notebooks/PrototypingWithPython.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''mlenv'': conda)'
# language: python
# name: python3
# ---
# + [markdown] id="RYjkKmJm4pfd"
# # ECIP Models for Software Defect Prediction Dataset
# + [markdown] id="8f9NBQgF4lPl"
# ## Import
# + id="Bjl35YxY0kaq"
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,f1_score, confusion_matrix
import pandas as pd
import numpy as np
from collections import Counter
import random
from sklearn import tree
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,AdaBoostClassifier
from imblearn.ensemble import EasyEnsembleClassifier
import math
# + colab={"base_uri": "https://localhost:8080/"} id="0Qjjfwxv0kax" outputId="2d94201c-299c-488e-a0cc-a28be0c3b460"
# Load the water-potability dataset from the working directory.
df = pd.read_csv("./water_potability.csv")
df.head()
# + [markdown] id="n_r5dgcm5C-9"
# ## Data Preprocessing
# + colab={"base_uri": "https://localhost:8080/", "height": 602} id="VAog2nDl0ka0" outputId="992eb868-c8e7-4e33-d3a1-c1964189c2d3"
# Pairwise feature correlations, used to eyeball redundant predictors.
df.corr()
# + id="jj8vFPWR0ka1"
# Drop every row that uses the string '?' as a missing-value placeholder.
# NOTE(review): the columns look numeric after read_csv, in which case the
# comparison matches nothing — confirm against the raw CSV.
for col in df:
    df.drop(df.loc[df[col] == '?'].index, inplace=True)
print(df.isna().sum())
# -
# Mean-impute the remaining NaNs. Plain column assignment is used instead of
# the chained `df[col].fillna(..., inplace=True)` form, which is deprecated
# and may silently act on a temporary copy under pandas copy-on-write.
for col in ("Sulfate", "ph", "Trihalomethanes"):
    df[col] = df[col].fillna(df[col].mean())
print(df.isna().sum())
# + colab={"base_uri": "https://localhost:8080/"} id="nHn6TGEs0ka2" outputId="447ec93a-422c-45fb-f6d7-16d06226297a"
# Randomly discard roughly half of the positive (Potability truthy) rows to
# create class imbalance for the resampling experiments below.
# NOTE(review): dropping rows while iterating with iterrows() is slow; the
# result also depends on random state, which is not seeded here.
for i, row in df.iterrows():
    if df.at[i,'Potability'] == True:
        a = random.choice([0,1])
        if a==0:
            df.drop(i, inplace=True)
print(Counter(df["Potability"]))
# NOTE(review): if Potability is already stored as 0/1 integers this replace
# maps 0->0 and 1->1 (False == 0, True == 1), i.e. it is a no-op — confirm.
df["Potability"].replace({False: 0, True: 1}, inplace=True)
# -
# Minority-to-majority class ratio, expressed as a percentage.
(Counter(df['Potability'])[1]/Counter(df['Potability'])[0])*100
# + colab={"base_uri": "https://localhost:8080/"} id="ChOmumuF0ka4" outputId="d2ba702c-2ffc-4b1f-d7d3-4f14b7ddc62c"
# Separate predictors from the binary target.
X = df.drop(columns=["Potability"])
y = df.Potability
print(type(y[0]))
print(y[0])
# + id="CFR1dkIX0ka4"
# Hold out 20% of the data for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="eRw695zU0ka5" outputId="764fdb1e-e70c-43fc-9c34-27034a274ec3"
# Class distribution in each split, to see how imbalance carried over.
print(y_test.value_counts())
print(y_train.value_counts())
# + [markdown] id="P78XMq2M5eW3"
# ## Data Resampling Techniques
# + [markdown] id="fdTYMvf16y-Z"
# Random Oversampling
# + colab={"base_uri": "https://localhost:8080/"} id="su7S5YrX0ka5" outputId="0c9bd533-8361-4df4-94da-9672ea09419d"
# Random oversampling of the minority class up to a 1:2 minority:majority
# ratio. Only the training split is resampled, so the test set stays untouched.
oversampler = RandomOverSampler(sampling_strategy=.5)
X_over, y_over = oversampler.fit_resample(X_train, y_train)
print(Counter(y_over))
# + [markdown] id="taG6JCAm61Sa"
# Random Undersampling
# + colab={"base_uri": "https://localhost:8080/"} id="lX-lLmHi0ka6" outputId="b57b69d1-c825-45f6-9683-3521889e3b7c"
# Random undersampling of the majority class to the same 1:2 ratio.
undersampler = RandomUnderSampler(sampling_strategy=.5)
X_under, y_under = undersampler.fit_resample(X_train, y_train)
print(Counter(y_under))
# + [markdown] id="bpRTavtx63ov"
# Synthetic Minority Over Sampling
# + id="PRi1SWgC0ka6"
# SMOTE works on plain arrays; convert the pandas objects first.
X_train_smote = np.asarray(X_train)
y_train_smote = np.asarray(y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="gD4yiSTs0ka7" outputId="402046e6-5cbc-4a90-e90d-c3023f0e7a1a"
# Default SMOTE fully balances the two classes with synthetic minority points.
smotesampler = SMOTE()
X_smote, y_smote = smotesampler.fit_resample(X_train_smote, y_train_smote)
print(Counter(y_smote))
# + [markdown] id="Z-oJ8HH_0ka7"
# ## Decision Tree
# + id="VYT7kzlY0ka9"
# Baseline models trained on the raw (imbalanced) training split. Each section
# follows the same pattern: fit, predict on the held-out test set, then derive
# g-mean = sqrt(TPR * TNR) from the confusion matrix plus the F1 score.
# sklearn's confusion_matrix lays out [[TN, FP], [FN, TP]], so the index
# arithmetic below is consistent with that layout.
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
# + id="6vspvdmr0ka-"
pred = clf.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="_9Szyz9ZSlt4" outputId="287cc3e8-ae00-4e87-9650-dcb539553a32"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="rWY5zNX-Slt9"
# TNR = TN / (TN + FP); TPR = TP / (TP + FN).
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_dec = math.sqrt(true_positive_rate*true_negative_rate)
# + id="5j7kQaZlSlt9"
f1_dec = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="SsNufMDhSlt9" outputId="63e04ed6-c231-490f-c7bb-426ae4477f61"
print("g-mean = ",g_mean_dec)
print("F1-score = ",f1_dec)
# + [markdown] id="ijl-_xYg0ka_"
# ## Bagging
# + colab={"base_uri": "https://localhost:8080/"} id="gvOG5NkP0kbA" outputId="39f655b4-2087-4206-aba4-1cecb590bb37"
# Bagging with default base estimators, same evaluation recipe as above.
bag = BaggingClassifier()
bag.fit(X_train, y_train)
# + id="0fOlADBD0kbA"
pred = bag.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="4mvVep3r0kbB" outputId="3a30eaad-a6b5-4497-8289-4123f3f1674d"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="QjNFMvry7RbE"
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_bag = math.sqrt(true_positive_rate*true_negative_rate)
# + id="3hKXe72NGKVj"
f1_bag = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="z2n5iwg-F50R" outputId="79315e44-311a-453e-cd85-ea42ffefae07"
print("g-mean = ",g_mean_bag)
print("F1-score = ",f1_bag)
# + [markdown] id="eSvTV4Vj0kbB"
# ## AdaBoost
# + colab={"base_uri": "https://localhost:8080/"} id="MHe9q6W20kbC" outputId="51c27ee8-d7d2-4b2e-ebb6-b1ac3ce7a107"
# AdaBoost on the raw training split; evaluated identically.
ada = AdaBoostClassifier()
ada.fit(X_train, y_train)
# + id="7VLi0d_U0kbC"
pred = ada.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="GwGNUcldSnvm" outputId="fc4171e1-e76d-4829-c452-b45bcd9683df"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="OY9EMJJ0Snvm"
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_ada = math.sqrt(true_positive_rate*true_negative_rate)
# + id="-SHpz1EtSnvm"
f1_ada = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="HDA0Bo0FSnvm" outputId="8e1c909f-683b-4736-a3e1-74bf80ff2635"
print("g-mean = ",g_mean_ada)
print("F1-score = ",f1_ada)
# + [markdown] id="cCSZXA940kbD"
# ## Easy Ensemble
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="UDwArmgn0kbD" outputId="1020c4d7-1877-4998-f04a-869e5311db87"
# EasyEnsemble (internally undersamples per AdaBoost member), here fitted on
# the SMOTE-resampled training data.
# NOTE(review): combining SMOTE with EasyEnsemble's own undersampling may be
# unintentional — the other sections pair one resampler with one learner.
easy_ensemble = EasyEnsembleClassifier(random_state=42)
easy_ensemble.fit(X_smote, y_smote)
# + id="5RZucfES0kbD"
pred = easy_ensemble.predict(X_test)
# + id="TUqQneq6Spl1"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="3l03g8YXSpl1"
# Same metric recipe as the earlier sections: g-mean = sqrt(TPR * TNR).
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_ez = math.sqrt(true_positive_rate*true_negative_rate)
# + id="7E75GTvASpl2"
f1_ez = f1_score(y_test, pred)
# + id="WEQp6TPuSpl2"
print("g-mean = ",g_mean_ez)
print("F1-score = ",f1_ez)
# + [markdown] id="6nCzrk100kbE"
# ## RusBoost
# + colab={"base_uri": "https://localhost:8080/"} id="V3e4RhLA0kbE" outputId="81436630-678d-41f3-d5a8-d8be655c7b91"
# "RusBoost" is approximated here as AdaBoost trained on the randomly
# undersampled split rather than imblearn's dedicated RUSBoostClassifier.
rus = AdaBoostClassifier()
rus.fit(X_under, y_under)
# + id="qJPlON-H0kbE"
pred = rus.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="H1EGACQeSr02" outputId="1d00719e-d8e8-4639-b635-4c690017ef38"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="opvBK4GJSr02"
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_rus = math.sqrt(true_positive_rate*true_negative_rate)
# + id="bwVNE5VgSr02"
f1_rus = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="741bPbPXSr02" outputId="4ab065d1-cc48-4ee0-e781-7c88991920d0"
print("g-mean = ",g_mean_rus)
print("F1-score = ",f1_rus)
# + [markdown] id="CdVEsya90kbE"
# ## Smote Boost
# + colab={"base_uri": "https://localhost:8080/"} id="FixYD3KP0kbF" outputId="91aeb735-9fa5-47fc-9879-3c1610cdac83"
# "SmoteBoost" is approximated as AdaBoost trained on the SMOTE-resampled split.
smote_boost = AdaBoostClassifier()
smote_boost.fit(X_smote,y_smote)
# + colab={"base_uri": "https://localhost:8080/"} id="UvVdWIK00kbF" outputId="09473906-953b-4e45-b9dd-6328dba54e57"
pred = smote_boost.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="1mI23hgiStfg" outputId="ae910797-0f47-4f33-dd8f-bc286d8f03d4"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="DLK7fNDKStfg"
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_smb = math.sqrt(true_positive_rate*true_negative_rate)
# + id="NgAoiLkgStfg"
f1_smb = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="7gsMh2XjStfh" outputId="42f3e4fe-e647-4d6f-8fd8-16b8ec32800b"
print("g-mean = ",g_mean_smb)
print("F1-score = ",f1_smb)
# + [markdown] id="ggu7x4Zx0kbF"
# ## Under Bagging
# + colab={"base_uri": "https://localhost:8080/"} id="xZL64Bkk0kbG" outputId="e62cebdb-f1e9-4ae3-82e3-ee595a7dcbe5"
# "Under Bagging": plain bagging trained on the randomly undersampled split.
under_bagging = BaggingClassifier()
under_bagging.fit(X_under, y_under)
# + id="OIOt0kxV0kbG"
pred = under_bagging.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="BT8oEC9US23q" outputId="26a0751b-531d-4b21-acbf-cc25be4d0ed9"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="CqKpMf2yS23q"
# g-mean = sqrt(TPR * TNR), derived from the [[TN, FP], [FN, TP]] matrix.
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_ubag = math.sqrt(true_positive_rate*true_negative_rate)
# + id="lJc7qmNSS23q"
f1_ubag = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="0udmE8NrS23q" outputId="2e1f12a6-87e7-4a84-a416-83491513c3a4"
print("g-mean = ",g_mean_ubag)
print("F1-score = ",f1_ubag)
# + [markdown] id="OpUcQzcuV4XK"
# ## Over Bagging
# + colab={"base_uri": "https://localhost:8080/"} id="PThRYWGUWGB6" outputId="c0501e0c-7018-46bc-8b1a-6bb4c5160adf"
# "Over Bagging": plain bagging trained on the randomly oversampled split.
over_bagging = BaggingClassifier()
over_bagging.fit(X_over, y_over)
# + id="t9Yd0jWcWGB6"
pred = over_bagging.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="Eji2n2pGWGB6" outputId="0edd81a4-d2cf-4306-d742-6ba0c7c72904"
conf = confusion_matrix(y_test, pred)
print(conf)
# + id="hCusvbU8WGB6"
true_negative_rate = conf[0][0]/(conf[0][0]+conf[0][1])
true_positive_rate = conf[1][1]/(conf[1][1]+conf[1][0])
g_mean_obag = math.sqrt(true_positive_rate*true_negative_rate)
# + id="tUzb-M45WGB7"
f1_obag = f1_score(y_test, pred)
# + colab={"base_uri": "https://localhost:8080/"} id="Cx3-NiNXWGB7" outputId="9ebe3ed2-301a-44fc-82bf-eee75ae870a1"
print("g-mean = ",g_mean_obag)
print("F1-score = ",f1_obag)
# + [markdown] id="pkRaXA7DU8oe"
# ## Results
# + id="kKedNOEnVBAJ"
# Collect the per-model scores; list order must match across the three lists.
models = ["Decision Tree","Bagging", "AdaBoost", "Easy Ensemble","RusBoost","Smote Boost","Under Bagging", "Over Bagging"]
g_mean_score = [g_mean_dec,g_mean_bag,g_mean_ada,g_mean_ez,g_mean_rus,g_mean_smb,g_mean_ubag,g_mean_obag]
F1_score = [f1_dec,f1_bag,f1_ada,f1_ez,f1_rus,f1_smb,f1_ubag,f1_obag]
# + id="X0_Ad7WbWbJQ"
# Summary table: one row per model with its g-mean and F1.
result_data = {
    'Models': models,
    'g-mean': g_mean_score,
    'F1-score': F1_score
}
result_df = pd.DataFrame(result_data)
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="JYYgNAKzYCQc" outputId="12a3fbb6-1480-424d-85aa-3ef2d1ff596e"
result_df
|
Models/water_potability.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## **Следующий раздел посвещен векторизации рабочего датасета. Что бы в дальнейшем подавать его на обучение различным моделям.**
# +
import warnings
warnings.filterwarnings('ignore')
import json
import pandas as pd
import numpy as np
from pathlib import Path
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# -
# Load the tab-separated questions dataset and drop the stray index column
# that to_csv wrote out earlier.
# NOTE(review): the absolute, machine-specific path makes this cell
# non-portable — consider a relative path or an environment variable.
path_root = Path('/home/owl/PycharmProjects/Questions_search_task')
work_dataset = pd.read_csv(path_root / 'dataset_of_questions' /'work_dataset.csv', sep='\t')
work_dataset = work_dataset.drop('Unnamed: 0', axis=1)
work_dataset.head(10)
# +
# work_dataset.to_csv('/home/owl/PycharmProjects/Questions_search_task/dataset_of_questions/work_dataset.csv', sep='\t')
# +
def class_return(sign):
    """Binary label for a sentence: 1 if its final punctuation is '?', else 0."""
    if sign == '?':
        return 1
    return 0
# Derive the binary classification target from the sentence-final punctuation.
work_dataset['label'] = work_dataset['Sign'].apply(class_return)
# -
work_dataset.head(10)
# **pipline** для векторизации **TfidfVectorizer**.
#
#
#
# Для того что бы получить из предложения вектор.
# - Важно для начала понять как работает **TfidfVectorizer**. Из чего он состоит, как рассчитывается.
# - После чего мы должны обучить его. через методо **.fit()** при этом на **fit** мы подаем столбец строк из датасета.
# - После того как vectorizer обучился. Мы можем получить из него вектора предложений. Но при этом важно понимать что результаты выполненной работы нигде не сохранятся. Плюс ко всему нужно понимать что обрабатывать строки он будет по бачам.
# - Исходя из вышестоящего пункта появляется необходимость в познании того, как сохранить vectorizer - как записать его в бинарном формате, тоесть *сериализовать* его.
# - Соответственно после того как мы сохранили vectorizer, нужно знать, как получить информацию зашитую в неём и хранящую результат векторизации строк датасета - *десирилизовать* файл.
#
#
# После чего уже можно будет прописать обучения моделей.
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
# +
# Fit the TF-IDF vectorizer on the whole corpus and serialize it to disk.
# NOTE(review): fitting before train_test_split means IDF statistics include
# the future test documents — mild leakage; confirm this is acceptable here.
fit_object = TfidfVectorizer()
fit_object.fit(work_dataset['line_information'])
with open('fit_vectorizer.pkl', 'wb') as fit_file:
    pickle.dump(fit_object, fit_file)
# -
print(type(fit_object))
# print(type(fit_object))
from sklearn.model_selection import train_test_split
# 70/30 split with a fixed seed for reproducibility.
train_data, test_data, train_label, test_label = train_test_split(work_dataset, work_dataset['label'], test_size=0.3, random_state=0)
# Round-trip the pickled vectorizer to verify serialization works.
# (Reminder: unpickle only files you produced yourself — pickle is unsafe on
# untrusted input.)
with open('fit_vectorizer.pkl', 'rb') as file:
    work_vectorizer_model = pickle.load(file)
# NOTE(review): this transforms with the in-memory `fit_object`, not the
# reloaded `work_vectorizer_model`, so the round-trip is never exercised.
fit_work_matrix = fit_object.transform(train_data['line_information'])
#
# ## пример для понимания ---------------------------------------------------------------------------------
#
# посчитать tf-ifd для слова **Интересно**
# +
# print(work_dataset['Line information'][2])
# print(work_dataset['Line information'][1])
# print(work_dataset['Line information'][4])
# -
# ## -------------------------------------------------------------------------------------------------------------------
#
# ### Обучение модели **ЛОГИСТИЧЕСКОЙ РЕГРЕСИИ**
# ### Теория
# После осознания того, какую именно задачу требуется решить на этих данных, следующим шагом при реальном анализе был бы подбор подходящего метода. В данном задании выбор метода было произведён за вас, это логистическая регрессия. Кратко напомним вам используемую модель.
#
# Логистическая регрессия предсказывает вероятности принадлежности объекта к каждому классу. Сумма ответов логистической регрессии на одном объекте для всех классов равна единице.
#
# $$ \sum_{k=1}^K \pi_{ik} = 1, \quad \pi_k \equiv P\,(y_i = k \mid x_i, \theta), $$
#
# где:
# - $\pi_{ik}$ - вероятность принадлежности объекта $x_i$ из выборки $X$ к классу $k$
# - $\theta$ - внутренние параметры алгоритма, которые настраиваются в процессе обучения, в случае логистической регрессии - $w, b$
#
# Из этого свойства модели в случае бинарной классификации требуется вычислить лишь вероятность принадлежности объекта к одному из классов (вторая вычисляется из условия нормировки вероятностей). Эта вероятность вычисляется, используя логистическую функцию:
#
# $$ P\,(y_i = 1 \mid x_i, \theta) = \frac{1}{1 + \exp(-w^T x_i-b)} $$
#
# Параметры $w$ и $b$ находятся, как решения следующей задачи оптимизации (указаны функционалы с L1 и L2 регуляризацией, с которыми вы познакомились в предыдущих заданиях):
#
# L2-regularization:
#
# $$ Q(X, y, \theta) = \frac{1}{2} w^T w + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$
#
# L1-regularization:
#
# $$ Q(X, y, \theta) = \sum_{d=1}^D |w_d| + C \sum_{i=1}^l \log ( 1 + \exp(-y_i (w^T x_i + b ) ) ) \longrightarrow \min\limits_{w,b} $$
#
# $C$ - это стандартный гиперпараметр модели, который регулирует то, насколько сильно мы позволяем модели подстраиваться под данные.
# Sanity check on the training-split dimensions.
train_data.shape
# +
# Загрузка сохраненного векторайзера
# with open('fit_vectorizer.pkl', 'rb') as file:
# work_vectorizer_model = pickle.load(file)
#
# -
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Tune the inverse-regularization strength C of logistic regression over a
# small grid with 3-fold cross-validation on the TF-IDF training matrix.
param_grid = {'C': [0.01, 0.05, 0.1, 0.5, 1, 5, 10]}
gscv_obj_3 = GridSearchCV(LogisticRegression(), param_grid, cv=3)
gscv_obj_3.fit(fit_work_matrix, train_label)
# +
# with open('gscv_obj_3.')
# -
# ## **----------------------------------------------------------------------------
# +
# gscv_obj_5 = GridSearchCV(LogisticRegression(), param_grid, cv=5)
# gscv_obj_5.fit(fit_work_matrix, train_label)
# +
# def plot_scores(optimizer):
# scores=[]
# for i in range(len(optimizer.cv_results_['params'])):
# scores.append([optimizer.cv_results_['params'][i]['C'],
# optimizer.cv_results_['mean_test_score'][i],
# optimizer.cv_results_['std_test_score'][i]])
# scores = np.array(scores)
# plt.semilogx(scores[:,0], scores[:,1])
# plt.fill_between(scores[:,0], scores[:,1]-scores[:,2],
# scores[:,1]+scores[:,2], alpha=0.3)
# plt.show()
# +
# plot_scores(gscv_obj_3)
# -
# ## **----------------------------------------------------------------------------
# ## **Расчет метрик**
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
# Vectorize the held-out test texts with the already-fitted TF-IDF model,
# predict with the best cross-validated estimator, and persist that model.
test_work_matrix = fit_object.transform(test_data['line_information'])
best_model = gscv_obj_3.best_estimator_
predict_answer = best_model.predict(test_work_matrix)
predict_answer
with open('best_model_logreg.pkl', 'wb') as file:
    pickle.dump(best_model, file)
# sklearn metric signatures are (y_true, y_pred): the ground-truth labels must
# come first. The original call passed the predictions first, which silently
# swaps precision and recall and reports the wrong precision.
precision_count = precision_score(test_label, predict_answer)
# F1 is the harmonic mean of precision and recall, so it is numerically
# unaffected by the swap, but we keep (y_true, y_pred) for consistency.
f1_count = f1_score(test_label, predict_answer)
classification_assessment = classification_report(test_label, predict_answer, output_dict=True)
# +
# disp = plot_precision_recall_curve(gscv_obj_3, predict_answer, test_label)
# disp.ax_.set_title('2-class Precision-Recall curve: '
# 'AP={0:0.2f}'.format(average_precision))
# +
# print(f'Метрика precision: {precision_count}')
# print(f'Метрика f1: {f1_count}')
# print(f'Все метрики: \n {classification_assessment}')
# -
# Dump the per-class metrics from the classification report.
for key, value in classification_assessment.items():
    print(key, value)
# +
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
# NOTE(review): plot_precision_recall_curve was deprecated in sklearn 1.0 and
# removed in 1.2 — newer code should use PrecisionRecallDisplay.from_estimator.
disp = plot_precision_recall_curve(gscv_obj_3, test_work_matrix, test_label)
# NOTE(review): the AP value in the title is hard-coded, not computed.
disp.ax_.set_title(f'2-class Precision-Recall curve: AP={0.92}')
# +
from sklearn.metrics import confusion_matrix
# NOTE(review): `matrix_erorrs` (sic) is created but never used.
matrix_erorrs = pd.DataFrame()
print(confusion_matrix(test_label, predict_answer))
# -
# NOTE(review): duplicate of the report loop above.
for key, value in classification_assessment.items():
    print(key, value)
|
jupyter_notebooks/fit_randomizer.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ## Errores estándar
#
# Hemos mostrado cómo encontrar las estimaciones de mínimos cuadrados con álgebra matricial. Estas estimaciones son variables aleatorias ya que son combinaciones lineales de los datos. Para que estas estimaciones sean útiles, también necesitamos calcular sus errores estándar. El álgebra lineal proporciona un enfoque poderoso para esta tarea. Damos varios ejemplos.
#
#
# #### Objeto cayendo
#
# Es útil pensar de dónde viene la aleatoriedad. En nuestro ejemplo de objetos que caen, la aleatoriedad se introdujo a través de errores de medición. Cada vez que volvamos a ejecutar el experimento, se cometerá un nuevo conjunto de errores de medición. Esto implica que nuestros datos cambiarán aleatoriamente, lo que a su vez sugiere que nuestras estimaciones cambiarán aleatoriamente. Por ejemplo, nuestra estimación de la constante gravitatoria cambiará cada vez que realicemos el experimento. La constante es fija, pero nuestras estimaciones no lo son. Para ver esto, podemos ejecutar una simulación de Monte Carlo. Específicamente, generaremos los datos repetidamente y cada vez calcularemos la estimación del término cuadrático.
## Monte Carlo simulation of the falling-object experiment: B repetitions of
## generating noisy height measurements and re-estimating the quadratic
## coefficient (-0.5 g) by least squares.
set.seed(1)
B <- 10000
h0 <- 56.67
v0 <- 0
g <- 9.8 ## metros por segundo
n <- 25
tt <- seq(0,3.4,len=n) ## tiempo en segundos, t es una función base
## Design matrix with intercept, linear, and quadratic terms in time.
X <-cbind(1,tt,tt^2)
## crear X'X^-1 X'
## A is the hat-producing operator (X'X)^{-1} X', precomputed once since the
## design is fixed across replications.
A <- solve(crossprod(X)) %*% t(X)
betahat<-replicate(B,{
  y <- h0 + v0*tt - 0.5*g*tt^2 + rnorm(n,sd=1)
  betahats <- A%*%y
  return(betahats[3])
})
head(betahat)
# Como era de esperar, la estimación es diferente cada vez. Esto se debe a que $\hat{\beta}$ es una variable aleatoria. Tiene por tanto una distribución:
library(rafalib)
## Visualize the sampling distribution of the quadratic-term estimate:
## histogram plus a normal qq-plot side by side.
mypar(1,2)
hist(betahat)
qqnorm(betahat)
qqline(betahat)
# Dado que $\hat{\beta}$ es una combinación lineal de los datos que hicimos normales en nuestra simulación, también es normal como se ve en el qq-plot anterior. Además, la media de la distribución es el parámetro verdadero $-0.5g$, como lo confirma la simulación de Monte Carlo realizada anteriormente.
## Mean of the Monte Carlo estimates; should be close to -0.5 * g = -4.9.
round(mean(betahat),1)
# Pero no observaremos este valor exacto cuando estimemos porque el error estándar de nuestra estimación es aproximadamente:
## Monte Carlo standard error of the estimate.
sd(betahat)
# Aquí mostraremos cómo podemos calcular el error estándar sin una simulación de Monte Carlo. Dado que en la práctica no sabemos exactamente cómo se generan los errores, no podemos utilizar el enfoque de Monte Carlo.
#
# #### Alturas de padre e hijo
#
# En los ejemplos de altura de padre e hijo, tenemos aleatoriedad porque tenemos una muestra aleatoria de pares de padre e hijo. Por el bien de la ilustración, supongamos que esta es toda la población:
## Load the Pearson father-son height data (treated as the full population).
father.son = read.csv('https://raw.githubusercontent.com/jabernalv/Father-Son-height/master/Pearson.csv')
x <- father.son$fheight
y <- father.son$sheight
n <- length(y)
# Ahora hagamos una simulación Monte Carlo en la que tomamos una muestra de 50 una y otra vez.
## Monte Carlo: repeatedly draw samples of size N from the population and
## refit the simple regression, collecting both coefficients each time.
N <- 50
B <-1000
betahat <- replicate(B,{
    index <- sample(n,N)
    sampledat <- father.son[index,]
    x <- sampledat$fheight
    y <- sampledat$sheight
    lm(y~x)$coef
})
## replicate() returns coefficients as columns; transpose so each row is one
## simulated (intercept, slope) pair.
betahat <- t(betahat) # tenemos las estimaciones en dos columnas
head(betahat)
# Al hacer qq-plots, vemos que nuestras estimaciones son variables aleatorias aproximadamente normales:
## Normal qq-plots for both simulated coefficients, side by side.
mypar(1,2)
qqnorm(betahat[,1])
qqline(betahat[,1])
qqnorm(betahat[,2])
qqline(betahat[,2])
# Distribución de los coeficientes de regresión estimados obtenidos a partir de los datos de estatura padre-hijo simulados de Monte Carlo. La izquierda es un histograma y a la derecha tenemos un diagrama qq contra la altura teórica normal cuantiles.
#
# También vemos que la correlación de nuestras estimaciones es negativa:
## Correlation between the simulated intercepts and slopes (negative here).
cor(betahat[,1],betahat[,2])
# Cuando calculamos combinaciones lineales de nuestras estimaciones, necesitaremos conocer esta información para calcular correctamente el error estándar de estas combinaciones lineales.
#
# En la siguiente sección, describiremos la matriz de varianza-covarianza. La covarianza de dos variables aleatorias se define de la siguiente manera:
## Sample covariance of the two coefficients, computed from its definition.
mean( (betahat[,1]-mean(betahat[,1] ))* (betahat[,2]-mean(betahat[,2])))
# La covarianza es la correlación multiplicada por las desviaciones estándar de cada variable aleatoria:
#
# $$\mbox{Corr}(X,Y) = \frac{\mbox{Cov}(X,Y)}{\sigma_X \sigma_Y}$$
#
# Aparte de eso, esta cantidad no tiene una interpretación útil en la práctica. Sin embargo, como veremos, es una cantidad muy útil para las derivaciones matemáticas. En las siguientes secciones, mostramos cálculos útiles de álgebra matricial que se pueden usar para estimar los errores estándar de las estimaciones del modelo lineal.
#
# <a name="varcov"></a>
#
# #### Matriz de varianza-covarianza (Avanzado)
#
# Como primer paso, necesitamos definir la *matriz de varianza-covarianza*, $\boldsymbol{\Sigma}$. Para un vector de variables aleatorias, $\mathbf{Y}$, definimos $\boldsymbol{\Sigma}$ como la matriz con la entrada $i,j$:
#
# $$ \Sigma_{i,j} \equiv \mbox{Cov}(Y_i, Y_j) $$
#
# La covarianza es igual a la varianza si $i = j$ e igual a 0 si las variables son independientes. En los tipos de vectores considerados hasta ahora, por ejemplo, un vector $\mathbf{Y}$ de observaciones individuales $Y_i$ muestreadas de una población, hemos supuesto independencia de cada observación y supuesto que $Y_i$ tienen todas las mismas varianza $\sigma^2$, por lo que la matriz de varianza-covarianza ha tenido solo dos tipos de elementos:
#
# $$ \mbox{Cov}(Y_i, Y_i) = \mbox{var}(Y_i) = \sigma^2$$
#
# $$ \mbox{Cov}(Y_i, Y_j) = 0, \mbox{ para } i \neq j$$
#
# lo que implica que $\boldsymbol{\Sigma} = \sigma^2 \mathbf{I}$ con $\mathbf{I}$, la matriz identidad.
#
# Más adelante, veremos un caso, específicamente los coeficientes estimados de un modelo lineal, $\hat{\boldsymbol{\beta}}$, que tiene entradas distintas de cero en los elementos fuera de la diagonal de $\boldsymbol{\Sigma}$ . Además, los elementos de la diagonal no serán iguales a un solo valor $\sigma^2$.
#
#
# #### Varianza de una combinación lineal
#
# Un resultado útil proporcionado por el álgebra lineal es que la matriz de covarianza de varianza de una combinación lineal $\mathbf{AY}$ de $\mathbf{Y}$ se puede calcular de la siguiente manera:
#
# $$
# \mbox{var}(\mathbf{AY}) = \mathbf{A}\mbox{var}(\mathbf{Y}) \mathbf{A}^\top
# $$
#
# Por ejemplo, si $Y_1$ y $Y_2$ son independientes ambos con varianza $\sigma^2$ entonces:
#
# $$\mbox{var}\{Y_1+Y_2\} =
# \mbox{var}\left\{ \begin{pmatrix}1&1\end{pmatrix}\begin{pmatrix} Y_1\\Y_2\\ \end{pmatrix}\right\}$$
#
# $$ =\begin{pmatrix}1&1\end{pmatrix} \sigma^2 \mathbf{I}\begin{pmatrix} 1\\1\\ \end{pmatrix}=2\sigma^2$$
#
# como esperamos. Usamos este resultado para obtener los errores estándar de la LSE (estimación de mínimos cuadrados).
#
# #### Errores estándar de LSE (avanzado)
#
# Tenga en cuenta que $\boldsymbol{\hat{\beta}}$ es una combinación lineal de $\mathbf{Y}$: $\mathbf{AY}$ con $\mathbf{A}=\mathbf{(X^\top X)^{-1}X}^\top$, por lo que podemos usar la ecuación anterior para derivar la varianza de nuestras estimaciones:
#
# $$\mbox{var}(\boldsymbol{\hat{\beta}}) = \mbox{var}( \mathbf{(X^\top X)^{-1}X^\top Y} ) = $$
#
# $$\mathbf{(X^\top X)^{-1} X^\top} \mbox{var}(Y) (\mathbf{(X^\top X)^{-1} X^\top})^\top = $$
#
# $$\mathbf{(X^\top X)^{-1} X^\top} \sigma^2 \mathbf{I} (\mathbf{(X^\top X)^{-1} X^\top})^\top = $$
#
# $$\sigma^2 \mathbf{(X^\top X)^{-1} X^\top}\mathbf{X} \mathbf{(X^\top X)^{-1}} = $$
#
# $$\sigma^2\mathbf{(X^\top X)^{-1}}$$
#
# La diagonal de la raíz cuadrada de esta matriz contiene el error estándar de nuestras estimaciones.
#
# #### Estimación de $\sigma^2$
#
# Para obtener una estimación real en la práctica a partir de las fórmulas anteriores, necesitamos estimar $\sigma^2$. Previamente estimamos los errores estándar de la muestra. Sin embargo, la desviación estándar de la muestra de $Y$ no es $\sigma$ porque $Y$ también incluye la variabilidad introducida por la parte determinista del modelo: $\mathbf{X}\boldsymbol{\beta}$. El enfoque que tomamos es usar los residuos.
#
# Formamos los residuos así:
#
# $$
# \mathbf{r}\equiv\boldsymbol{\hat{\varepsilon}} = \mathbf{Y}-\mathbf{X}\boldsymbol{\hat{\beta}}$$
#
# Las notaciones $\mathbf{r}$ y $\boldsymbol{\hat{\varepsilon}}$ se utilizan para indicar residuos.
#
# Luego los usamos para estimar, de manera similar a lo que hacemos en el caso univariante:
#
# $$ s^2 \equiv \hat{\sigma}^2 = \frac{1}{N-p}\mathbf{r}^\top\mathbf{r} = \frac{1}{N-p}\sum_{i=1}^N r_i^2$$
#
# Aquí $N$ es el tamaño de la muestra y $p$ es el número de columnas en $\mathbf{X}$ o el número de parámetros (incluido el término de intercepción $\beta_0$). La razón por la que dividimos entre $N-p$ es porque la teoría matemática nos dice que esto nos dará una mejor estimación (imparcial).
#
# Intentemos esto en R y veamos si obtenemos los mismos valores que obtuvimos con la simulación de Monte Carlo anterior:
## Estimate the LSE standard errors analytically from a single sample of size
## 50 and compare them with lm()'s output and the Monte Carlo values.
n <- nrow(father.son)
N <- 50
index <- sample(n,N) # Este debe quedar
sampledat <- father.son[index,]
x <- sampledat$fheight
y <- sampledat$sheight
X <- model.matrix(~x) # En este caso agrega una columna con 1s
## N is rebound to nrow(X) (same 50); p counts the fitted parameters.
N <- nrow(X)
p <- ncol(X)
XtXinv <- solve(crossprod(X))
## Residuals r = y - X betahat, with betahat = (X'X)^{-1} X'y folded inline.
resid <- y - X %*% XtXinv %*% crossprod(X,y)
# print(resid)
## Unbiased residual standard deviation: divide the residual sum of squares
## by N - p degrees of freedom.
s <- sqrt( sum(resid^2)/(N-p))
#print(s)
## Standard errors: sqrt of the diagonal of (X'X)^{-1}, scaled by s.
ses <- sqrt(diag(XtXinv))*s
print(ses)
# Comparemos con lo que proporciona `lm`:
summary(lm(y~x))$coef[,2]
ses
# Son idénticos porque están haciendo lo mismo. Además, tenga en cuenta que aproximamos los resultados de Monte Carlo:
apply(betahat,2,sd)
# #### Combinación lineal de estimaciones
#
# Con frecuencia, queremos calcular la desviación estándar de una combinación lineal de estimaciones como $\hat{\beta}_2 - \hat{\beta}_1$. Esta es una combinación lineal de $\hat{\boldsymbol{\beta}}$:
#
# $$\hat{\beta}_2 - \hat{\beta}_1 =
# \begin{pmatrix}0&-1&1&0&\dots&0\end{pmatrix} \begin{pmatrix}
# \hat{\beta}_0\\
# \hat{\beta}_1 \\
# \hat{\beta}_2 \\
# \vdots\\
# \hat{\beta}_p
# \end{pmatrix}$$
#
# Usando lo anterior, sabemos cómo calcular la matriz de covarianza de varianza de $\hat{\boldsymbol{\beta}}$.
#
#
# #### CLT y distribución t
#
# Hemos mostrado cómo podemos obtener errores estándar para nuestras estimaciones. Sin embargo, como aprendimos en el primer capítulo, para realizar inferencias necesitamos conocer la distribución de estas variables aleatorias. La razón por la que hicimos el esfuerzo de calcular los errores estándar es porque el CLT se aplica en modelos lineales. Si $N$ es lo suficientemente grande, entonces la LSE se distribuirá normalmente con una media de $\boldsymbol{\beta}$ y errores estándar como se describe. Para muestras pequeñas, si $\varepsilon$ se distribuyen normalmente, entonces $\hat{\beta}-\beta$ siguen una distribución t. No derivamos este resultado aquí, pero los resultados son extremadamente útiles ya que es la forma en que construimos los valores p y los intervalos de confianza en el contexto de los modelos lineales.
#
# #### Código versus matemáticas
#
# El enfoque estándar para escribir modelos lineales asume que los valores en $\mathbf{X}$ son fijos o que los estamos condicionando. Por lo tanto, $\mathbf{X} \boldsymbol{\beta}$ no tiene varianza ya que $\mathbf{X}$ se considera fijo. Por eso escribimos $\mbox{var}(Y_i) = \mbox{var}(\varepsilon_i)=\sigma^2$. Esto puede causar confusión en la práctica porque si, por ejemplo, calcula lo siguiente:
x = father.son$fheight
beta = c(34,0.5)
var(beta[1]+beta[2]*x)
# no está cerca de 0. Este es un ejemplo en el que debemos tener cuidado al distinguir el código de las matemáticas. La función `var` simplemente calcula la varianza de la lista que le damos, mientras que la definición matemática de varianza considera solo cantidades que son variables aleatorias. En el código R anterior, `x` no está fijo en absoluto: lo dejamos variar, pero cuando escribimos $\mbox{var}(Y_i) = \sigma^2$ estamos imponiendo, matemáticamente, que `x` sea fijo. De manera similar, si usamos R para calcular la varianza de $Y$ en nuestro ejemplo de caída de objetos, obtenemos algo muy diferente a $\sigma^2=1$ (la varianza conocida):
n <- length(tt)
y <- h0 + v0*tt - 0.5*g*tt^2 + rnorm(n,sd=1)
var(y)
# Nuevamente, esto se debe a que no estamos arreglando `tt`.
|
linear/standard_errors_R.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''tmclass-3'': conda)'
# name: python3
# ---
# +
import os, sys, glob
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# +
csv_file = pd.read_csv("koreahouse.csv", encoding="euc-kr")
data = glob.glob("*.csv")
for csv_data in data:
csv_read = pd.read_csv(csv_data, encoding="euc-kr")
def delete_data(data):
print(data.isnull().sum())
del data["대지권면적"]
del data["Unnamed: 7"]
print(data)
return data
def pre_processing(first_data):
csv_file = delete_data(first_data)
csv_save = pd.DataFrame(csv_file).to_csv("pre_seoul_aprtment.csv",
index=False,
index_label=False)
return csv_save
first_data_presing = delete_data(csv_file)
# +
from random import shuffle
seoul_index = first_data_presing["자치구명"][:10000]
home_index = first_data_presing["건물주용도"][:10000]
arch_place = first_data_presing["건물면적"][:10000] # x
arch_price = first_data_presing["물건금액"][:10000] # y
L = [seoul_index, home_index, arch_place, arch_price]
for i in L:
shuffle(i)
print(arch_place[0:5])
# +
plt.rc('font', family='Malgun Gothic')
# 자치구당 면적
plt.barh(seoul_index, arch_place)
plt.xlabel("면적")
plt.ylabel("자치구")
plt.title("자치구당 면적")
plt.show()
# 주용도 별 면적
plt.barh(home_index, arch_place)
plt.xlabel("면적")
plt.ylabel("주용도")
plt.title("주용도 별 면적")
plt.show()
# +
# 자치구 대비 가격
plt.barh(seoul_index, arch_price, color='black')
plt.xlabel("가격")
plt.ylabel("자치구")
plt.show()
plt.title("자치구 대비 가격")
# 주용도 별 가격
plt.barh(home_index, arch_price, color='blue')
plt.xlabel("가격")
plt.ylabel("주용도")
plt.title("주용도 별 가격")
plt.show()
# -
# 면적 대비 가격
plt.plot(arch_place, arch_price, "r.", label="면적 대비 가격 ")
plt.xlabel("면적")
plt.ylabel("가격")
plt.title("면적 대비 가격")
plt.show()
# +
architecture_mapping = {"아파트": 0,
"연립주택": 1,
"단독주택": 2,
"오피스텔": 3}
buliding = home_index.map(architecture_mapping)
# -
# 주용도 별 가격
plt.plot(buliding, arch_price, "b.", label="주용도별 가격")
plt.xlabel("주용도")
plt.ylabel("가격")
plt.xticks([0,1,2,3], ["아파트","연립주택","단독주택","오피스텔"])
plt.title("주용도 대비 가격 ")
plt.show()
# +
"""
bulid = []
price = []
data = glob.glob("*.csv")
for csv_data in data:
csv_build = pd.read_csv(csv_data, encoding="euc-kr")["건물면적"]
csv_price = pd.read_csv(csv_data, encoding="euc-kr")["물건금액"]
bulid.append(csv_build)
price.append(csv_price)
print(csv_build.shape)
print(csv_price.shape)
y = pd.concat(bulid, ignore_index=True)
X = pd.concat(price, ignore_index=True)
"""
data = pd.read_csv('tests.csv', encoding="euc-kr")
X = data['건물면적']
y = data['물건금액']
# 데이터셋 나누기
def data_split():
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2021)
return X_train, X_test, y_train, y_test
# 데이터 스케일링
def data_scaliing(X_train, X_test):
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train.values.reshape(-1, 1))
X_test_std = sc.fit_transform(X_test.values.reshape(-1, 1))
print(X_train_std.shape)
print(X_test_std.shape)
return X_train_std, X_test_std
# 선형회귀 모델
def linear_model(X_train_std, y_train, X_test_std, y_test):
linear = LinearRegression()
linear.fit(X_train_std, y_train)
print(f"linear train score --> {linear.score(X_train_std, y_train)*100}")
print(f"linear test score --> {linear.score(X_test_std, y_test)*100}")
plt.plot(X_train_std, y_train, 'o')
plt.plot(X_train_std, linear.predict(X_train_std.reshape(-1, 1)))
plt.show()
X_train_std, X_test_std = data_scaliing(data_split()[0], data_split()[1])
linear_model(X_train_std, data_split()[2], X_test_std, data_split()[3])
# -
|
test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import math
import torch
from torch import nn
from pathlib import Path
import pandas as pd
import ipywidgets as widgets
from ipywidgets import GridspecLayout, Dropdown, BoundedFloatText, Button
from ipywidgets import Output, Layout
import ageclock
from ageclock import NN
from pickle import load
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
DEBUG = False
# -
#Handling errors through callbacks
out_errors = widgets.Output(layout={'border': '1px solid black'})
@out_errors.capture()
def example_change(change):
def change_values(age):
sex_box.index = None
sex_box.value = 'Male' if samples[age][0] == 1 else 'Female'
for i in range(len(samples[age])-1):
grid[i+2, 2].value = samples[age][i+1]
if change['type'] == 'change' and change['name'] == 'value':
if change['new'] == '-':
sex_box.index = None
sex_box.value = '-'
for i in range(len(list(samples.values())[0])-1):
grid[i+2, 2].value = '0'
elif float(change['new'].split()[1]) in list(samples.keys()):
change_values(int(change['new'].split()[1]))
@out_errors.capture()
def create_button(description, button_style, height='auto'):
button = Button(description=description,
tooltip=description,
button_style=button_style,
disabled=True,
layout=Layout(height=height, width='auto'))
button.style.font_weight = 'bold'
return button
@out_errors.capture()
def submit_clicked(b):
def predict(inputs):
infile = open('data/transformer.pkl','rb')
transf = load(infile)
infile.close()
inputs.append(0)
inputs = transf.transform([inputs])
inputs = inputs[0][:-1]
inputs = inputs.astype(float)
inputs = torch.Tensor(inputs)
inputs = inputs.reshape(1, 20)
if DEBUG: age_button.tooltip = str(inputs)
path = Path('models/adam_1113_data_state_dict')
lm = NN()
lm.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
lm.eval()
return(lm(inputs))
age_button.button_style='primary'
markers = [grid[i+2, 2].value for i in range(len(biomarkers))]
if sex_box.value=='-':
age_button.description='Please enter your Sex'
age_button.style.button_color = 'orange'
elif 0 in markers:
age_button.description='Please enter missing biomarkers'
age_button.style.button_color = 'orange'
else:
markers.insert(0, sex_box.value[0])
if DEBUG: age_button.tooltip = str(markers)
age_button.description='Your biological age is {} '.format(
round(predict(markers).data.item()))
age_button.style.button_color = '#2471a3'
warn_text='''Age predictor needs all blood marker values and your Sex to make
a prediction. If you are missing values and filling them in, this might have
a dramatic impact on the results. However your entered Height, Weight, and
Smoking Status are not required for estimating your biological age and only
used for record keeping.'''
title_text = ['Deep Biomarkers Of Human Aging: How Old By a Basic Blood Test',
'''This is a deep-learned predictor of your age made with a DNN
trained on tens of thousands anonymized human blood tests. <br>
Enter your data below and the model will guess your age.''']
asterics_text = [
'* This should be in your clinical biochemistry blood test results.',
'''** Required parameter for minimal prediction accuracy of 70% within
10 year frame.''',
'''*** We can not show you reference values before knowing your age
apriori, so this is only a reference metric.''']
wiki_links = [
'https://en.wikipedia.org/wiki/Serum_albumin',
'https://en.wikipedia.org/wiki/Glucose',
'https://en.wikipedia.org/wiki/Urea',
'https://en.wikipedia.org/wiki/Cholesterol',
'https://en.wikipedia.org/wiki/Blood_proteins',
'https://en.wikipedia.org/wiki/Sodium_in_biology',
'https://en.wikipedia.org/wiki/Creatinine',
'https://en.wikipedia.org/wiki/Hemoglobin',
'https://en.wikipedia.org/wiki/Bilirubin',
'https://en.wikipedia.org/wiki/Triglycerides',
'https://en.wikipedia.org/wiki/High-density_lipoprotein',
'https://en.wikipedia.org/wiki/Low-density_lipoprotein',
'https://en.wikipedia.org/wiki/Calcium_metabolism',
'https://en.wikipedia.org/wiki/Potassium_in_biology',
'https://en.wikipedia.org/wiki/Hematocrit',
'https://en.wikipedia.org/wiki/Mean_corpuscular_hemoglobin_concentration',
'https://en.wikipedia.org/wiki/Mean_corpuscular_volume',
'https://en.wikipedia.org/wiki/Platelet',
'https://en.wikipedia.org/wiki/Red_blood_cell']
# +
biomarkers=pd.read_csv('data/non_NHANES/markers_units.csv').values.tolist()
samples = [list(pd.read_csv('data/non_NHANES/samples_from_aging.csv').iloc[i])
for i in range(len(pd.read_csv(
'data/non_NHANES/samples_from_aging.csv')))]
samples = {sample[-1]:sample[:-1] for sample in samples}
# +
header_grid = widgets.GridspecLayout(4, 5, width='auto')
title_html = widgets.HTML(
value='''<center><h1 style="font-family:verdana"><font color=
"paleturquoise">{}</h1><hr><h2 style="font-family:verdana">
{}</h2></center>'''.format(*title_text))
height_box = widgets.Text(
placeholder='Enter your height (Optional)',
description='Height:')
weight_box = widgets.Text(
placeholder='Enter your weight (Optional)',
description='Weight:')
load_html = widgets.HTML(value = '<center><h3>Load an Example</h3></center>',
layout=Layout(top='45px', left='5px'))
sex_box = widgets.Dropdown(options=['-','Male','Female'], description='Sex:')
smoke_box = widgets.Dropdown(options=['-','No','Yes'], description='Smoke:')
load_box = widgets.Dropdown(
value='-',
options=['-']+[{1.0:"Male", 0.0:"Female"}[value[0]]+' '+str(int(key))+
' y/o' for key, value in samples.items()],
description=' ',
layout=Layout(bottom='45px', right='5px'))
load_box.observe(example_change)
header_grid[0:1,1:-1] = title_html
header_grid[2,1] = height_box
header_grid[2,2] = weight_box
header_grid[2,3] = load_html
header_grid[3,1] = sex_box
header_grid[3,2] = smoke_box
header_grid[3,3] = load_box
header_grid
# +
grid=widgets.GridspecLayout(len(biomarkers)+3,5,height='950px',width='auto')
HEIGHT = '28px'
grid[0,1:-1] = widgets.HTML(
value='<center><h4><font color="orange">{}</h4></center>'.format(
warn_text))
grid[1,1] = create_button('Blood Marker*', 'primary', '50px')
grid[1,2] = create_button('Your Value', 'primary', '50px')
grid[1,3] = create_button('Units and Sample Metric***', 'primary', '50px')
for i, marker in enumerate(biomarkers):
grid[i+2,1] = create_button(marker[0], 'info', HEIGHT)
grid[i+2,3] = create_button(marker[1], 'info', HEIGHT)
grid[i+2,2] = BoundedFloatText(min=0,
max=1000,
layout=Layout(height='auto',
width='auto'),
value=None,
step=0.01,
disabled=False)
grid[-1,1:] = widgets.HTML(
value = '''<font color="orange">{}<br><fontcolor=
"orange">{}<br><font color="orange">{}'''.format(*asterics_text))
grid
# +
footer_grid = widgets.GridspecLayout(3,5)
submit_button = Button(description='Submit',
tooltip='Predict your age with entered biomarkers',
button_style='primary',
layout=Layout(height='50px', width='auto', bottom='0px'))
age_button = Button(layout=Layout(height='50px', width='auto', top='15px'))
age_button.style.button_color = '#111111'
age_button.style.font_weight = 'bold'
submit_button.style.font_weight = 'bold'
submit_button.on_click(submit_clicked)
footer_grid[0,2] = submit_button
footer_grid[1,2] = age_button
footer_grid
# -
if DEBUG: display(out_errors)
# +
footer_html = {
'gap':'<br><br><br><br><br><br><hr>',
'sjsu':'''Made by <NAME> as a project for the
<a href='https://pytorch2021.devpost.com/'><u>PyTorch Hackathon
</u></a> with the help of a <a href='https://mlatsjsu.club'>
<u>Machine Learning Club</u></a> at San Jose State University.''',
'github':'''This project is open-source and is covered by the MIT License,
for more details or if you want to contribure here is the
<a href='https://github.com/mdmittriy/AgingClock'><u>
GitHub repository</u></a>.''',
'demo':'''You can take a look at the demo of this project <a
href='https://www.youtube.com/watch?v=SCxRiXIG13Q'><u>here</u></a>. And
notice that this work is heavily inspired by
<a href='http://www.aging.ai'><u>aging.ai</u></a>'''}
widgets.HTML(value='<center>{}<br>{}<br>{}<br>{}</center>'.format(
footer_html['gap'],footer_html['sjsu'],
footer_html['github'],footer_html['demo']))
|
AgingClock.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext watermark
# %watermark -v -p numpy,scipy,matplotlib,tensorflow
# **10장 – 인공 신경망 소개**
# _이 노트북은 10장에 있는 모든 샘플 코드와 연습문제 해답을 가지고 있습니다._
# # 설정
# 파이썬 2와 3을 모두 지원합니다. 공통 모듈을 임포트하고 맷플롯립 그림이 노트북 안에 포함되도록 설정하고 생성한 그림을 저장하기 위한 함수를 준비합니다:
# +
# 파이썬 2와 파이썬 3 지원
from __future__ import division, print_function, unicode_literals
# 공통
import numpy as np
import os
# 일관된 출력을 위해 유사난수 초기화
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# 맷플롯립 설정
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# 한글출력
plt.rcParams['font.family'] = 'NanumBarunGothic'
plt.rcParams['axes.unicode_minus'] = False
# 그림을 저장할 폴더
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
# -
# # 퍼셉트론
# +
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
X = iris.data[:, (2, 3)] # 꽃잎 길이, 꽃잎 너비
y = (iris.target == 0).astype(np.int)
per_clf = Perceptron(max_iter=100, random_state=42)
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
# -
y_pred
# +
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]
axes = [0, 5, 0, 2]
x0, x1 = np.meshgrid(
np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Iris-Setosa 아님")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")
plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
plt.xlabel("꽃잎 길이", fontsize=14)
plt.ylabel("꽃잎 너비", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)
save_fig("perceptron_iris_plot")
plt.show()
# -
# # 활성화 함수
# +
def logit(z):
return 1 / (1 + np.exp(-z))
def relu(z):
return np.maximum(0, z)
def derivative(f, z, eps=0.000001):
return (f(z + eps) - f(z - eps))/(2 * eps)
# +
z = np.linspace(-5, 5, 200)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=2, label="스텝")
plt.plot(z, logit(z), "g--", linewidth=2, label="로지스틱")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("활성화 함수", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.title("도함수", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("activation_functions_plot")
plt.show()
# +
def heaviside(z):
return (z >= 0).astype(z.dtype)
def sigmoid(z):
return 1/(1+np.exp(-z))
def mlp_xor(x1, x2, activation=heaviside):
return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)
# +
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)
z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)
plt.figure(figsize=(10,4))
plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("활성화 함수: 헤비사이드", fontsize=14)
plt.grid(True)
plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("활성화 함수: 시그모이드", fontsize=14)
plt.grid(True)
# -
# # MNIST를 위한 FNN
import tensorflow as tf
# 주의: `tf.examples.tutorials.mnist`은 삭제될 예정이므로 대신 `tf.keras.datasets.mnist`를 사용하겠습니다. `tf.contrib.learn` API는 `tf.estimator`와 `tf.feature_column`로 옮겨졌고 상당히 많이 바뀌었습니다. 특히 `infer_real_valued_columns_from_input()` 함수와 `SKCompat` 클래스가 없습니다.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
# ## Estimator API를 사용
# +
feature_cols = [tf.feature_column.numeric_column("X", shape=[28 * 28])]
dnn_clf = tf.estimator.DNNClassifier(hidden_units=[300,100], n_classes=10,
feature_columns=feature_cols)
input_fn = tf.estimator.inputs.numpy_input_fn(
x={"X": X_train}, y=y_train, num_epochs=40, batch_size=50, shuffle=True)
dnn_clf.train(input_fn=input_fn)
# -
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"X": X_test}, y=y_test, shuffle=False)
eval_results = dnn_clf.evaluate(input_fn=test_input_fn)
eval_results
y_pred_iter = dnn_clf.predict(input_fn=test_input_fn)
y_pred = list(y_pred_iter)
y_pred[0]
# ## `tf.contrib.learn`을 사용
# +
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/")
# +
# X_train = mnist.train.images
# X_test = mnist.test.images
# y_train = mnist.train.labels.astype("int")
# y_test = mnist.test.labels.astype("int")
# +
config = tf.contrib.learn.RunConfig(tf_random_seed=42) # 책에는 없음
feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300,100], n_classes=10,
feature_columns=feature_cols, config=config)
dnn_clf = tf.contrib.learn.SKCompat(dnn_clf) # if TensorFlow >= 1.1
tf.logging.set_verbosity(tf.logging.INFO)
dnn_clf.fit(X_train, y_train, batch_size=50, steps=40000)
# +
from sklearn.metrics import accuracy_score
y_pred = dnn_clf.predict(X_test)
accuracy_score(y_test, y_pred['classes'])
# +
from sklearn.metrics import log_loss
y_pred_proba = y_pred['probabilities']
log_loss(y_test, y_pred_proba)
# -
# ## 저수준의 TensorFlow API 사용
# +
import tensorflow as tf
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# -
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.get_shape()[1])
stddev = 2 / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
W = tf.Variable(init, name="kernel")
b = tf.Variable(tf.zeros([n_neurons]), name="bias")
Z = tf.matmul(X, W) + b
if activation is not None:
return activation(Z)
else:
return Z
with tf.name_scope("dnn"):
hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
def shuffle_batch(X, y, batch_size):
rnd_idx = np.random.permutation(len(X))
n_batches = len(X) // batch_size
for batch_idx in np.array_split(rnd_idx, n_batches):
X_batch, y_batch = X[batch_idx], y[batch_idx]
yield X_batch, y_batch
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print(epoch, "배치 데이터 정확도:", acc_batch, "검증 세트 정확도:", acc_valid)
save_path = saver.save(sess, "./my_model_final.ckpt")
with tf.Session() as sess:
saver.restore(sess, "./my_model_final.ckpt") # 또는 save_path를 사용합니다
X_new_scaled = X_test[:20]
Z = logits.eval(feed_dict={X: X_new_scaled})
y_pred = np.argmax(Z, axis=1)
print("예측 클래스:", y_pred)
print("진짜 클래스:", y_test[:20])
from tensorflow_graph_in_jupyter import show_graph
show_graph(tf.get_default_graph())
# ## `neuron_layer()` 대신 `dense()` 사용
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# -
with tf.name_scope("dnn"):
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
y_proba = tf.nn.softmax(logits)
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
n_epochs = 20
n_batches = 50
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
print(epoch, "배치 데이터 정확도:", acc_batch, "검증 세트 정확도:", acc_valid)
save_path = saver.save(sess, "./my_model_final.ckpt")
# -
show_graph(tf.get_default_graph())
# # 연습문제 정답
# ## 1. to 8.
# 부록 A 참조.
# ## 9.
# _깊은 다층 퍼셉트론을 MNIST 데이터셋에 훈련시키고 98% 정확도를 얻을 수 있는지 확인해보세요. 9장의 마지막 연습문제에서와 같이 모든 부가 기능을 추가해보세요(즉, 체크포인트를 저장하고, 중지되었을 때 마지막 체크포인트를 복원하고, 서머리를 추가하고, 텐서보드를 사용해 학습 곡선을 그려보세요)._
# 먼저 심층 신경망을 만듭니다. 한가지 추가된 것 외에는 앞서 했던 것과 동일합니다. 텐서보드에서 학습 곡선을 볼 수 있도록 훈련하는 동안 손실과 정확도를 기록하는 `tf.summary.scalar()` 추가합니다.
n_inputs = 28*28 # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# +
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# -
with tf.name_scope("dnn"):
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
loss_summary = tf.summary.scalar('log_loss', loss)
# +
learning_rate = 0.01
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# -
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
accuracy_summary = tf.summary.scalar('accuracy', accuracy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# 텐서보드 로그를 기록할 디렉토리를 정의합니다:
# +
from datetime import datetime
def log_dir(prefix=""):
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
if prefix:
prefix += "-"
name = prefix + "run-" + now
return "{}/{}/".format(root_logdir, name)
# -
logdir = log_dir("mnist_dnn")
# 텐서보드 로그를 작성하는 데 필요한 `FileWriter` 객체를 만듭니다:
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
# 잠시만요! 조기 종료를 구현하는 것이 좋겠죠? 이렇게 하려면 검증 세트가 필요합니다.
# +
# X_valid = mnist.validation.images
# y_valid = mnist.validation.labels
# -
m, n = X_train.shape
# +
n_epochs = 10001
batch_size = 50
n_batches = int(np.ceil(m / batch_size))
checkpoint_path = "/tmp/my_deep_mnist_model.ckpt"
checkpoint_epoch_path = checkpoint_path + ".epoch"
final_model_path = "./my_deep_mnist_model"
best_loss = np.infty
epochs_without_progress = 0
max_epochs_without_progress = 50
with tf.Session() as sess:
if os.path.isfile(checkpoint_epoch_path):
# 체크포인트 파일이 있으면 모델을 복원하고 에포크 숫자를 로드합니다
with open(checkpoint_epoch_path, "rb") as f:
start_epoch = int(f.read())
print("이전 훈련이 중지되었습니다. 에포크 {}에서 시작합니다".format(start_epoch))
saver.restore(sess, checkpoint_path)
else:
start_epoch = 0
sess.run(init)
for epoch in range(start_epoch, n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run([accuracy, loss, accuracy_summary, loss_summary], feed_dict={X: X_valid, y: y_valid})
file_writer.add_summary(accuracy_summary_str, epoch)
file_writer.add_summary(loss_summary_str, epoch)
if epoch % 5 == 0:
print("에포크:", epoch,
"\t검증 세트 정확도: {:.3f}%".format(accuracy_val * 100),
"\t손실: {:.5f}".format(loss_val))
saver.save(sess, checkpoint_path)
with open(checkpoint_epoch_path, "wb") as f:
f.write(b"%d" % (epoch + 1))
if loss_val < best_loss:
saver.save(sess, final_model_path)
best_loss = loss_val
else:
epochs_without_progress += 5
if epochs_without_progress > max_epochs_without_progress:
print("조기 종료")
break
# -
os.remove(checkpoint_epoch_path)
with tf.Session() as sess:
saver.restore(sess, final_model_path)
accuracy_val = accuracy.eval(feed_dict={X: X_test, y: y_test})
accuracy_val
|
10_introduction_to_artificial_neural_networks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: scvi-tools-paper
# language: python
# name: scvi-tools-paper
# ---
# ## Runtime
# + colab={"base_uri": "https://localhost:8080/"} id="m9LNaUOTmVOn" outputId="e55f69b8-111f-4617-ceeb-cd531f2ec44d"
import sys
#if True, will install via pypi, else will install from source
stable = True
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
# !pip install --quiet scvi-tools[tutorials]==0.9.1
# + colab={"base_uri": "https://localhost:8080/", "height": 103} id="-P-Iw9lHmj6U" outputId="03a1607b-e9a2-4a8a-9b47-db8ca67777e2"
if IN_COLAB:
# import gdown
# url = 'https://drive.google.com/uc?id=1tJSOI9ve0i78WmszMLx2ul8F8tGycBTd'
# output = 'FL_celltype.csv'
# gdown.download(url, output, quiet=False)
# !wget https://ndownloader.figshare.com/files/27458831 -O FL_celltype.csv
# + id="TqUagzYDmqfY"
import scvi
import scanpy as sc
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import time
# +
# Reset seaborn's styling and configure global matplotlib defaults for
# publication-quality, vector-friendly figures.
sns.reset_orig()
sc.settings._vector_friendly = True
# p9.theme_set(p9.theme_classic)

# Apply every rcParams override in a single batch update.
plt.rcParams.update({
    "svg.fonttype": "none",       # keep text as editable text in SVG output
    "pdf.fonttype": 42,           # embed TrueType fonts in PDF output
    "savefig.transparent": True,
    "figure.figsize": (4, 4),
    "axes.titlesize": 15,
    "axes.titleweight": 500,
    "axes.titlepad": 8.0,
    "axes.labelsize": 14,
    "axes.labelweight": 500,
    "axes.linewidth": 1.2,
    "axes.labelpad": 6.0,
    "axes.spines.top": False,     # hide the top spine
    "axes.spines.right": False,   # hide the right spine
    "font.size": 11,
    # plt.rcParams['font.family'] = 'sans-serif'
    "font.sans-serif": ["Helvetica", "Computer Modern Sans Serif", "DejaVU Sans"],
    "font.weight": 500,
    "xtick.labelsize": 12,
    "xtick.minor.size": 1.375,
    "xtick.major.size": 2.75,
    "xtick.major.pad": 2,
    "xtick.minor.pad": 2,
    "ytick.labelsize": 12,
    "ytick.minor.size": 1.375,
    "ytick.major.size": 2.75,
    "ytick.major.pad": 2,
    "ytick.minor.pad": 2,
    "legend.fontsize": 12,
    "legend.handlelength": 1.4,
    "legend.numpoints": 1,
    "legend.scatterpoints": 3,
    "legend.frameon": False,
    "lines.linewidth": 1.7,
})

# Resolution used when saving raster figures.
DPI = 300
# + colab={"base_uri": "https://localhost:8080/"} id="Wz5Xirboms90" outputId="0ea6caad-abe7-4aa8-cf32-05b84a87e40b"
# Download the "fresh 68k PBMC (donor A)" 10x dataset and de-duplicate
# gene and cell names so AnnData indexing is unambiguous.
adata = scvi.data.dataset_10x("fresh_68k_pbmc_donor_a")
adata.var_names_make_unique()
adata.obs_names_make_unique()
# + id="iSYXFs7_mvGi"
# Marker-gene x cell-type matrix consumed by CellAssign.
marker_gene_mat = pd.read_csv('data/FL_celltype.csv', index_col=0)
# + id="Xn3D7B3cpIEr"
# Drop TRAC — mirrors the row removed in the R reference script
# (marker_mat[-c(24),]); TODO confirm it is the same row.
marker_gene_mat = marker_gene_mat.drop(index='TRAC')
# -
marker_gene_mat.shape
# + colab={"base_uri": "https://localhost:8080/"} id="FwM3lWf5mxQe" outputId="a0208c12-22a0-4310-ad3f-0e131790a6aa"
# Restrict the expression matrix to the marker genes and register the
# resulting AnnData with scvi-tools.
bdata = adata[:, marker_gene_mat.index].copy()
scvi.data.setup_anndata(bdata)
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="5630VCrencB8" outputId="a6862025-48c4-42d1-8e7f-6ca2016d6074"
# Benchmark CellAssign training time on successively halved prefixes of the
# data (n, n/2, n/4, ... while n > 1000 cells), recording wall-clock seconds.
num_samples = []
times = []
n = bdata.n_obs
while n > 1000:
    # Take the first n cells and register them fresh with scvi-tools.
    cdata = bdata[:n].copy()
    scvi.data.setup_anndata(cdata)
    start = time.time()
    model = scvi.external.CellAssign(cdata, marker_gene_mat, "n_counts")
    model.train()
    end = time.time()
    # Record elapsed time and the actual subset size for this run.
    times += [end - start]
    num_samples += [cdata.n_obs]
    print(end - start)
    n = n // 2
    # bdata = sc.pp.subsample(bdata, fraction=0.5, copy=True)
# + colab={"base_uri": "https://localhost:8080/"} id="WTKaICt5wzGv" outputId="cb47e9d9-d24a-4bc1-e738-bd77402b59fb"
# Combine the scvi-tools timings with the pre-computed R (original
# implementation) timings into a single long-format frame for plotting.
df = pd.DataFrame(data={'n_obs':num_samples,'time':times})
df["Implementation"] = "scvi-tools"
# -
r_time_df = pd.read_csv("data/cell_assign_r_runtime.csv", index_col=0)
r_time_df["Implementation"] = "Original"
r_time_df
total_df = pd.concat([df, r_time_df], axis=0)
# Runtime vs. number of cells, log-scaled y axis, one color per implementation.
fig, ax = plt.subplots(1, 1)
sns.scatterplot(data = total_df, x="n_obs", y="time", hue="Implementation", ax=ax)
# ax.set(xscale="log", yscale="log")
ax.set_xlabel("Number of cells")
ax.set_ylabel("Time (s)")
ax.set(yscale="log")
fig.savefig("figs/cellassign_runtime.pdf", dpi=300, bbox_inches="tight")
# ### R code
# ```R
# library(SingleCellExperiment)
# library(cellassign)
# library(readr)
#
# pbmc <- readRDS("data/pbmc.rds")
# marker_mat <- read_csv("data/FL_celltype.csv")
# marker_mat <- marker_mat[-c(24),]
# col <- marker_mat[,1]
# rownames <- col[['Gene']]
# counts(pbmc) <- assay(pbmc, "X")
# mm <- data.matrix(marker_mat)
# mm <- mm[,-1]
# rownames(mm) <- rownames
# times <- c()
# n_obs <- c()
# n <- 68579
# i <- 1
# s <- colSums(counts(pbmc))
# while (n > 1000){
# start <- Sys.time()
# fit <- cellassign(exprs_obj = pbmc[rownames,1:n],
# marker_gene_info = mm,
# s=s[1:n],
# learning_rate = 1e-2,
# shrinkage = TRUE,
# verbose = TRUE)
# times[i] <- difftime(Sys.time(), start, units='secs')
# n_obs[i] <- n
# i <- i + 1
# n <- n %/% 2
# }
# df <- data.frame("n_obs" = n_obs, "time" = times)
# write.csv(df, "data/cell_assign_r_runtime.csv")
# ```
|
docs/cell_assign_runtime.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the loan-application training data and take a first look.
df=pd.read_csv("D:/train.csv")
df.head()
df.describe()
# +
# Frequency counts for the categorical variables.
"""Now i have to find skewness for Non-Numerical data e.g:(Credit_history,Property_area etc)"""
df['Property_Area'].value_counts()
# -
df['Credit_History'].value_counts()
# Histograms of the numeric variables to study their distributions.
"""Now i have to study Distribution of Numerical Variables"""
df['ApplicantIncome'].hist(bins=50)
df['LoanAmount'].hist(bins=50)
# Box plots to spot outliers, overall and split by education level.
df.boxplot(column='ApplicantIncome')
"""Box Plot shows that here is some outliers in the data """
df.boxplot(column='ApplicantIncome',by='Education')
"""Now we can see that there is just a little difference between graduate and Non_graduate Income
But there are high number of Graduates with very high Income,which might be outliers """
temp1=df['Credit_History'].value_counts(ascending=False)
# Mean approval rate ('Y' -> 1, 'N' -> 0) per credit-history value.
"""Now i will change 'Y' and 'N ' to 1 And 0 through Pivot table """
temp2=df.pivot_table(values='Loan_Status',index='Credit_History',aggfunc=lambda x:x.map({'Y':1,'N':0}).mean())
temp2
"""Now we observe that we get a similar Pivot table Like Ms excel."""
# Side-by-side bar charts: applicant counts and approval probability
# by credit history.
"""Now i will plot this data through bar chart using matplotlib.pyplot"""
fig=plt.figure(figsize=(8,4))
ax1=fig.add_subplot(121)
temp1.plot(kind='bar')
ax1.set_xlabel('Credit_History')
ax1.set_ylabel('Count of Apllicant')
ax1.set_title('Applicants by Credit History')
ax2 = fig.add_subplot(122)
temp2.plot(kind = 'bar')
ax2.set_xlabel('Credit_History')
ax2.set_ylabel('Probability of getting loan')
ax2.set_title("Probability of getting loan by credit history")
"""This shows that probability of getting a loan are eight-fold if the applicant has a valid credit History"""
temp3=df['Married'].value_counts(ascending=False)
temp3
# NOTE(review): add_subplot(122) reuses ax2's grid position, replacing the
# previous plot — presumably a new figure was intended; confirm.
ax3=fig.add_subplot(122)
temp3.plot(kind='bar')
ax3.set_xlabel('Married')
ax3.set_ylabel('Count Of Applicant')
ax3.set_title('Married Applicants')
# Cross-tab views combining credit history with loan status / marriage.
"""Alternatively we can show these plots by combining them in a stacked"""
temp4=pd.crosstab(df['Credit_History'],df['Loan_Status'])
temp4.plot(kind='bar',stacked=True,color=['red','blue'],grid=False)
temp5=pd.crosstab(df['Credit_History'],df['Married'])
temp5.plot(kind='line',stacked=True,color=['Yellow','green'],grid=True)
"""Now i have to check null values in the dataset"""
df.apply(lambda x:sum(x.isnull()),axis=0)
df['LoanAmount'].fillna(df['LoanAmount'].mean(),inplace=True)
df.head()
"""Now removing null values from fields"""
df['ApplicantIncome'].fillna(df['ApplicantIncome'].mean(),inplace=True)
df['CoapplicantIncome'].fillna(df['CoapplicantIncome'].mean(),inplace=True)
"""Now, we will create a Pivot table, which provides us median values for all the groups of unique values
of Self_Employed and Education features. Next, we define a function,
which returns the values of these cells and apply it to fill the missing values of loan amount:"""
df['Self_Employed'].fillna('No',inplace=True)
df['Gender'].fillna(df['Gender'].mode()[0],inplace=True)
df['Married'].fillna(df['Married'].mode()[0],inplace=True)
df['Loan_Amount_Term'].fillna(df['Loan_Amount_Term'].mode()[0],inplace=True)
df['Credit_History'].fillna(df['Credit_History'].mode()[0],inplace=True)
df['Dependents'].fillna(df['Dependents'].mode()[0],inplace=True)
df.apply(lambda x:sum(x.isnull()),axis=0)
"""Now all missing values has been replaced ,Now it's time to detect outliers and remove them"""
"""Let’s analyze LoanAmount first. Since the extreme values are practically possible,
i.e. some people might apply for high value loans due to specific needs. So instead of treating them as outliers,
let’s try a log transformation to nullify their effect:"""
df['LoanAmount_log']=np.log(df['LoanAmount'])
df['LoanAmount_log'].hist(bins=50)
"""Now it's look like Normal distributed"""
"""Coming to ApplicantIncome. One intuition can be that some applicants have lower income but strong support
Co-applicants. So it might be a good idea to combine both incomes as total income and take a
log transformation of the same."""
df['Total_income']=df['ApplicantIncome']+df['CoapplicantIncome']
df['Total_income_log']=np.log(df['Total_income'])
df['Total_income_log'].hist(bins=50)
df['LoanApproval']=df['LoanAmount']/df['Total_income']
df.head()
"""Since, sklearn requires all inputs to be numeric, we should convert all our categorical variables into
numeric by encoding the categories. """
# +
from sklearn.preprocessing import LabelEncoder
# -
var_mod=['Gender','Married','Dependents','Education','Self_Employed','Property_Area','Loan_Status']
le=LabelEncoder()
for i in var_mod:
df[i] = le.fit_transform(df[i])
df.dtypes
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier,export_graphviz
from sklearn import metrics
# +
"""Generic function for making a classification """
# -
#Generic function for making a classification model and accessing performance:
def classification_model(model, data, predictors, outcome):
    """Fit *model*, report training accuracy and a 5-fold CV score.

    Parameters
    ----------
    model : sklearn-style estimator implementing fit/predict/score.
    data : pandas.DataFrame holding both the predictor and outcome columns.
    predictors : list of column names used as features.
    outcome : name of the target column.

    The model is re-fit on the full data at the end so the caller can keep
    using the fitted estimator (it is modified in place; nothing is returned).
    """
    # The original code used sklearn.cross_validation.KFold(n, n_folds=5),
    # an API deprecated in 0.18 and removed in scikit-learn 0.20; use the
    # model_selection equivalent instead.
    from sklearn.model_selection import KFold

    # Fit on all rows and report apparent (training-set) accuracy.
    model.fit(data[predictors], data[outcome])
    predictions = model.predict(data[predictors])
    accuracy = metrics.accuracy_score(predictions, data[outcome])
    print("Accuracy : %s" % "{0:.3%}".format(accuracy))

    # 5-fold cross-validation gives a less optimistic performance estimate.
    kf = KFold(n_splits=5)
    error = []
    for train, test in kf.split(data):
        # Filter training data for this fold.
        train_predictors = data[predictors].iloc[train, :]
        train_target = data[outcome].iloc[train]
        model.fit(train_predictors, train_target)
        # Record held-out accuracy for this fold.
        error.append(model.score(data[predictors].iloc[test, :], data[outcome].iloc[test]))
    print("Cross-Validation Score : %s" % "{0:.3%}".format(np.mean(error)))

    # Fit the model again so that it can be referred to outside the function.
    model.fit(data[predictors], data[outcome])
# Baseline: logistic regression using credit history alone.
outcome_var='Loan_Status'
model=LogisticRegression()
predictor_var=['Credit_History']
classification_model(model,df,predictor_var,outcome_var)
# Same model with additional categorical features.
predictors_var=['Credit_History','Education','Married','Self_Employed','Property_Area']
classification_model(model,df,predictors_var,outcome_var)
"""Now Decision Tree is another Algorithm Which is better than LogisticRegression"""
model=DecisionTreeClassifier()
predictors_var1=['Credit_History','Gender','Education','Self_Employed']
classification_model(model,df,predictors_var1,outcome_var)
"""With more Feature Accuracy is increasing but but a slight decrease in Cross-Validation"""
"""Now try Random Forest which another algorithm for Classification"""
# Random forest with the full feature set (overfits the training data).
model = RandomForestClassifier(n_estimators=100)
predictor_var2 = ['Gender', 'Married', 'Dependents', 'Education',
       'Self_Employed', 'Loan_Amount_Term', 'Credit_History', 'Property_Area',
        'LoanAmount_log','Total_income_log']
classification_model(model, df,predictor_var2,outcome_var)
"""Here we see that the accuracy is 100% for the training set.
This is the ultimate case of overfitting and can be resolved in two ways:
Reducing the number of predictors
Tuning the model parameters """
# Rank features by importance from the fitted random forest.
featimpo=pd.Series(model.feature_importances_,index=predictor_var2).sort_values(ascending=False)
print(featimpo)
"""Lets use a top five variable to build A model"""
# A constrained forest on the top features to reduce overfitting.
model=RandomForestClassifier(n_estimators=25,min_samples_split=25,max_depth=7,max_features=1)
predictor_var=['Credit_History','Total_income_log','LoanAmount_log','Dependents','Property_Area']
classification_model(model,df,predictor_var,outcome_var)
"""Notice that although accuracy reduced, but the cross-validation score is improving showing that
the model is generalizing well"""
"""At the end it is concluded that those applicant will get loan who has Credit History,PropertY and Dependents on someone.Also having Some Income"""
|
LoanPrediction1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pickle
from pandas.tools.plotting import *
import matplotlib
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["font.size"] = 20
import os
import requests
import pandas as pd
#from cStringIO import StringIO
import numpy as np
import matplotlib.pyplot as pl
import scipy as spy
from scipy.stats import gamma
from scipy.optimize import minimize
import emcee
import pystan
from pystan import StanModel
import stan_utility_copy
import corner
import gc
import seaborn as sns
import h5py
import credible_interval
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
# -
periodInsolationSwitch = 'P' #raw_input("Period or Insolation: P or I? -- ")
# +
# %%time
# !date
'''
with open("", "rb") as fx:
data_dictx = pickle.load(fx)
Out_x_x_strlx_x_x_50_x_fit = data_dictx['fit']
Out_x_x_strlx_x_x_50_x_data = data_dictx['data']
alpha_x = x['alpha']
beta_x = x['beta']
lnf0_x = x['lnf0']
PyStan_flatchain_x = np.hstack((lnf0_x.reshape(-1,1), beta_x.reshape(-1,1), alpha_x.reshape(-1,1)))
koi_rps_obs_x = Out_x_data['koi_rps_obs']
koi_rps_err_ave_x = Out_x_data['koi_rps_err_ave']
la_x = Out_x_x_strlx_x_x_50_x_fit.extract(permuted=True)
#print(la5["koi_rps_true"][:,2])
rad_x = pd.DataFrame(la_x["koi_rps_true"])
#rad_x[2].plot()
def plot_x(var):
fig, axes = pl.subplots(1, 1, figsize=(10, 5))
fig.suptitle(var, y=0.95, fontsize='xx-large')
rad_x[var].plot();
pl.legend(fontsize=11)
pl.plot([1,6000],[koi_rps_obs_x[var],koi_rps_obs_x[var]] )
pl.plot([1,6000],[koi_rps_obs_x[var]+koi_rps_err_ave_x[var],koi_rps_obs_x[var]+koi_rps_err_ave_x[var]] )
pl.plot([1,6000],[koi_rps_obs_x[var]-koi_rps_err_ave_x[var],koi_rps_obs_x[var]-koi_rps_err_ave_x[var]] )
pl.tight_layout(pad=3)
#fig_x.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/plots/Koi_rps_true_traceplots_x'+'{}.png'.format(var))
for i in np.arange(0,len(koi_rps_obs_x),1):
plot_x(i);
'''
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/Q1_16_9.1_occ_rate_3000_no_Runcs.pkl", "rb") as f2:
#with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/Q1_16_9.1_occ_rate_3000_real_Runcs_NB_cuts_50_300_0p75_2p5_RpTruncated_NO_uncs.pkl", "rb") as f2:
data_dict2 = pickle.load(f2)
Out_Q1Q16_NO_strl1_0p75_2p5_50_300_fit = data_dict2['fit']
#Out_Q1Q16_NO_strl1_0p75_2p5_50_300_data = data_dict2['data']
alpha_Q1Q16_NO_strl1_0p75_2p5_50_300 = Out_Q1Q16_NO_strl1_0p75_2p5_50_300_fit['alpha']
beta_Q1Q16_NO_strl1_0p75_2p5_50_300 = Out_Q1Q16_NO_strl1_0p75_2p5_50_300_fit['beta']
lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300 = Out_Q1Q16_NO_strl1_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_Q1Q16_NO_strl1_0p75_2p5_50_300 = np.hstack((lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300.reshape(-1,1), beta_Q1Q16_NO_strl1_0p75_2p5_50_300.reshape(-1,1), alpha_Q1Q16_NO_strl1_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 0.75 to 2.5 and 50 to 300 days with truncation REAL uncs and stlr rad cut at 1.15 for stlr case 1. Selected 88807 targets after cuts, Selected 118 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_REAL_Runcs_origStlr1_wtruncation_0p75_2p5_50_300.pkl", "rb") as f5:
data_dict5 = pickle.load(f5)
Out_DR25_REAL_strl1_0p75_2p5_50_300_fit = data_dict5['fit']
Out_DR25_REAL_strl1_0p75_2p5_50_300_data = data_dict5['data']
alpha_DR25_REAL_strl1_0p75_2p5_50_300 = Out_DR25_REAL_strl1_0p75_2p5_50_300_fit['alpha']
beta_DR25_REAL_strl1_0p75_2p5_50_300 = Out_DR25_REAL_strl1_0p75_2p5_50_300_fit['beta']
lnf0_DR25_REAL_strl1_0p75_2p5_50_300 = Out_DR25_REAL_strl1_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_REAL_strl1_0p75_2p5_50_300 = np.hstack((lnf0_DR25_REAL_strl1_0p75_2p5_50_300.reshape(-1,1), beta_DR25_REAL_strl1_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_REAL_strl1_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 0.75 to 2.5 and 50 to 300 days with truncation NO uncs and stlr rad cut at 1.15 for stlr case 1. Selected 88807 targets after cuts, Selected 118 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_origStlr1_wtruncation_0p75_2p5_50_300.pkl", "rb") as f6:
data_dict6 = pickle.load(f6)
Out_DR25_NO_strl1_0p75_2p5_50_300_fit = data_dict6['fit']
Out_DR25_NO_strl1_0p75_2p5_50_300_data = data_dict6['data']
alpha_DR25_NO_strl1_0p75_2p5_50_300 = Out_DR25_NO_strl1_0p75_2p5_50_300_fit['alpha']
beta_DR25_NO_strl1_0p75_2p5_50_300 = Out_DR25_NO_strl1_0p75_2p5_50_300_fit['beta']
lnf0_DR25_NO_strl1_0p75_2p5_50_300 = Out_DR25_NO_strl1_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_NO_strl1_0p75_2p5_50_300 = np.hstack((lnf0_DR25_NO_strl1_0p75_2p5_50_300.reshape(-1,1), beta_DR25_NO_strl1_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_NO_strl1_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 1.0 to 2.0 and 50 to 200 days with truncation REAL uncs and stlr rad cut at 1.15 for stlr case 1. Selected 88807 targets after cuts, Selected 54 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_REAL_Runcs_origStlr1_wtruncation_1_2_50_200.pkl", "rb") as f7:
data_dict7 = pickle.load(f7)
Out_DR25_REAL_strl1_1_2_50_200_fit = data_dict7['fit']
Out_DR25_REAL_strl1_1_2_50_200_data = data_dict7['data']
alpha_DR25_REAL_strl1_1_2_50_200 = Out_DR25_REAL_strl1_1_2_50_200_fit['alpha']
beta_DR25_REAL_strl1_1_2_50_200 = Out_DR25_REAL_strl1_1_2_50_200_fit['beta']
lnf0_DR25_REAL_strl1_1_2_50_200 = Out_DR25_REAL_strl1_1_2_50_200_fit['lnf0']
PyStan_flatchain_DR25_REAL_strl1_1_2_50_200 = np.hstack((lnf0_DR25_REAL_strl1_1_2_50_200.reshape(-1,1), beta_DR25_REAL_strl1_1_2_50_200.reshape(-1,1), alpha_DR25_REAL_strl1_1_2_50_200.reshape(-1,1)))
#DR25 9.3 1.0 to 2.0 and 50 to 200 days with truncation NO uncs and stlr rad cut at 1.15 for stlr case 1. Selected 88807 targets after cuts, Selected 54 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_origStlr1_wtruncation_1_2_50_200.pkl", "rb") as f8:
data_dict8 = pickle.load(f8)
Out_DR25_NO_strl1_1_2_50_300_fit = data_dict8['fit']
Out_DR25_NO_strl1_1_2_50_300_data = data_dict8['data']
alpha_DR25_NO_strl1_1_2_50_300 = Out_DR25_NO_strl1_1_2_50_300_fit['alpha']
beta_DR25_NO_strl1_1_2_50_300 = Out_DR25_NO_strl1_1_2_50_300_fit['beta']
lnf0_DR25_NO_strl1_1_2_50_300 = Out_DR25_NO_strl1_1_2_50_300_fit['lnf0']
PyStan_flatchain_DR25_NO_strl1_1_2_50_300 = np.hstack((lnf0_DR25_NO_strl1_1_2_50_300.reshape(-1,1), beta_DR25_NO_strl1_1_2_50_300.reshape(-1,1), alpha_DR25_NO_strl1_1_2_50_300.reshape(-1,1)))
#DR25 9.3 0.75 to 2.5 and 50 to 300 days with truncation REAL uncs and for stlr case 2. Selected 81882 targets after cuts, Selected 124 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_REAL_Runcs_Stlr2_wtruncation_0p75_2p5_50_300.pkl", "rb") as f9:
data_dict9 = pickle.load(f9)
Out_DR25_REAL_strl2_0p75_2p5_50_300_fit = data_dict9['fit']
Out_DR25_REAL_strl2_0p75_2p5_50_300_data = data_dict9['data']
alpha_DR25_REAL_strl2_0p75_2p5_50_300 = Out_DR25_REAL_strl2_0p75_2p5_50_300_fit['alpha']
beta_DR25_REAL_strl2_0p75_2p5_50_300 = Out_DR25_REAL_strl2_0p75_2p5_50_300_fit['beta']
lnf0_DR25_REAL_strl2_0p75_2p5_50_300 = Out_DR25_REAL_strl2_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_REAL_strl2_0p75_2p5_50_300 = np.hstack((lnf0_DR25_REAL_strl2_0p75_2p5_50_300.reshape(-1,1), beta_DR25_REAL_strl2_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_REAL_strl2_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 0.75 to 2.5 and 50 to 300 days with truncation NO uncs and for stlr case 2. Selected 81882 targets after cuts, Selected 124 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_Stlr2_wtruncation_0p75_2p5_50_300.pkl", "rb") as f10:
data_dict10 = pickle.load(f10)
Out_DR25_NO_strl2_0p75_2p5_50_300_fit = data_dict10['fit']
Out_DR25_NO_strl2_0p75_2p5_50_300_data = data_dict10['data']
alpha_DR25_NO_strl2_0p75_2p5_50_300 = Out_DR25_NO_strl2_0p75_2p5_50_300_fit['alpha']
beta_DR25_NO_strl2_0p75_2p5_50_300 = Out_DR25_NO_strl2_0p75_2p5_50_300_fit['beta']
lnf0_DR25_NO_strl2_0p75_2p5_50_300 = Out_DR25_NO_strl2_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_NO_strl2_0p75_2p5_50_300 = np.hstack((lnf0_DR25_NO_strl2_0p75_2p5_50_300.reshape(-1,1), beta_DR25_NO_strl2_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_NO_strl2_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 1.0 to 2.0 and 50 to 200 days with truncation REAL uncs and for stlr case 2. Selected 81882 targets after cuts, Selected 58 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_REAL_Runcs_Stlr2_wtruncation_1_2_50_200.pkl", "rb") as f11:
data_dict11 = pickle.load(f11)
Out_DR25_REAL_strl2_1_2_50_200_fit = data_dict11['fit']
Out_DR25_REAL_strl2_1_2_50_200_data = data_dict11['data']
alpha_DR25_REAL_strl2_1_2_50_200 = Out_DR25_REAL_strl2_1_2_50_200_fit['alpha']
beta_DR25_REAL_strl2_1_2_50_200 = Out_DR25_REAL_strl2_1_2_50_200_fit['beta']
lnf0_DR25_REAL_strl2_1_2_50_200 = Out_DR25_REAL_strl2_1_2_50_200_fit['lnf0']
PyStan_flatchain_DR25_REAL_strl2_1_2_50_200 = np.hstack((lnf0_DR25_REAL_strl2_1_2_50_200.reshape(-1,1), beta_DR25_REAL_strl2_1_2_50_200.reshape(-1,1), alpha_DR25_REAL_strl2_1_2_50_200.reshape(-1,1)))
#DR25 9.3 1.0 to 2.0 and 50 to 200 days with truncation NO uncs and for stlr case 2. #Selected 81882 targets after cuts, Selected 58 KOIs after cuts
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_Stlr2_wtruncation_1_2_50_200.pkl", "rb") as f12:
data_dict12 = pickle.load(f12)
Out_DR25_NO_strl2_1_2_50_200_fit = data_dict12['fit']
Out_DR25_NO_strl2_1_2_50_200_data = data_dict12['data']
alpha_DR25_NO_strl2_1_2_50_200 = Out_DR25_NO_strl2_1_2_50_200_fit['alpha']
beta_DR25_NO_strl2_1_2_50_200 = Out_DR25_NO_strl2_1_2_50_200_fit['beta']
lnf0_DR25_NO_strl2_1_2_50_200 = Out_DR25_NO_strl2_1_2_50_200_fit['lnf0']
PyStan_flatchain_DR25_NO_strl2_1_2_50_200 = np.hstack((lnf0_DR25_NO_strl2_1_2_50_200.reshape(-1,1), beta_DR25_NO_strl2_1_2_50_200.reshape(-1,1), alpha_DR25_NO_strl2_1_2_50_200.reshape(-1,1)))
#DR25 9.3 + Gaia 0.75 to 2.5 and 50 to 300 days with truncation REAL uncs and for stlr case 2.
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_Gaia_rorUnc_Rp_0p75_2p5_50_300_w_truncation.pkl", "rb") as f13:
data_dict13 = pickle.load(f13)
Out_DR25_Gaia_REAL_strl2_0p75_2p5_50_300_fit = data_dict13['fit']
Out_DR25_Gaia_REAL_strl2_0p75_2p5_50_300_data = data_dict13['data']
alpha_DR25_Gaia_REAL_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_strl2_0p75_2p5_50_300_fit['alpha']
beta_DR25_Gaia_REAL_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_strl2_0p75_2p5_50_300_fit['beta']
lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_strl2_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_Gaia_REAL_strl2_0p75_2p5_50_300 = np.hstack((lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300.reshape(-1,1), beta_DR25_Gaia_REAL_strl2_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_Gaia_REAL_strl2_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 + Gaia 0.75 to 2.5 and 50 to 300 days with truncation REAL RP uncs and for stlr case 2.
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_real_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation.pkl", "rb") as f14:
data_dict14 = pickle.load(f14)
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_fit = data_dict14['fit']
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_data = data_dict14['data']
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_fit['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_fit['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300 = np.hstack((lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300.reshape(-1,1), beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300.reshape(-1,1)))
#DR25 9.3 + Gaia 0.75 to 2.5 and 50 to 300 days with truncation NO uncs and for stlr case 2.
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_NO_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation.pkl", "rb") as f15:
data_dict15 = pickle.load(f15)
Out_DR25_Gaia_NO_strl2_0p75_2p5_50_300_fit = data_dict15['fit']
Out_DR25_Gaia_NO_strl2_0p75_2p5_50_300_data = data_dict15['data']
alpha_DR25_Gaia_NO_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_NO_strl2_0p75_2p5_50_300_fit['alpha']
beta_DR25_Gaia_NO_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_NO_strl2_0p75_2p5_50_300_fit['beta']
lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300 = Out_DR25_Gaia_NO_strl2_0p75_2p5_50_300_fit['lnf0']
PyStan_flatchain_DR25_Gaia_NO_strl2_0p75_2p5_50_300 = np.hstack((lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300.reshape(-1,1), beta_DR25_Gaia_NO_strl2_0p75_2p5_50_300.reshape(-1,1), alpha_DR25_Gaia_NO_strl2_0p75_2p5_50_300.reshape(-1,1)))
###DR25 9.3 0.75 to 2.5 and 50 to 300 days with truncation NO uncs and for stlr case 2 higher res
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_Stlr2_wtruncation_0p75_2p5_50_300_highres.pkl", "rb") as f16:
data_dict16 = pickle.load(f16)
Out_DR25_NO_strl2_0p75_2p5_50_300_hires_fit = data_dict16['fit']
Out_DR25_NO_strl2_0p75_2p5_50_300_hires_data = data_dict16['data']
alpha_DR25_NO_strl2_0p75_2p5_50_300_hires = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_fit['alpha']
beta_DR25_NO_strl2_0p75_2p5_50_300_hires = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_fit['beta']
lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_fit['lnf0']
PyStan_flatchain_DR25_NO_strl2_0p75_2p5_50_300_hires = np.hstack((lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires.reshape(-1,1), beta_DR25_NO_strl2_0p75_2p5_50_300_hires.reshape(-1,1), alpha_DR25_NO_strl2_0p75_2p5_50_300_hires.reshape(-1,1)))
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_NO_Runcs_Stlr2_wtruncation_0p75_2p5_50_300_highres_PorbandRp.pkl", "rb") as f17:
data_dict17 = pickle.load(f17)
Out_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp_fit = data_dict17['fit']
Out_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp_data = data_dict17['data']
alpha_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['alpha']
beta_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['beta']
lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['lnf0']
PyStan_flatchain_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp = np.hstack((lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1), beta_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1), alpha_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1)))
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Stan_Kepler_Populations/outputs/outputs_used_6_6_18/DR25_9.3_occ_rate_3000_REAL_Runcs_Stlr2_wtruncation_0p75_2p5_50_300_hires_Porb_andRp.pkl", "rb") as f18:
data_dict18 = pickle.load(f18)
Out_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp_fit = data_dict18['fit']
Out_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp_data = data_dict18['data']
alpha_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['alpha']
beta_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['beta']
lnf0_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp = Out_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp_fit['lnf0']
PyStan_flatchain_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp = np.hstack((lnf0_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1), beta_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1), alpha_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp.reshape(-1,1)))
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation_Berger_Huber.pkl", "rb") as f19:
data_dict19 = pickle.load(f19)
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_fit = data_dict19['fit']
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_data = data_dict19['data']
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_fit['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_fit['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_fit['lnf0']
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber = np.hstack((lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber.reshape(-1,1), beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber.reshape(-1,1), alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber.reshape(-1,1)))
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation_Berger_Huber_rematched.pkl", "rb") as f20:
data_dict20 = pickle.load(f20)
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fit = data_dict20['fit']
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_data = data_dict20['data']
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fit['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fit['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fit['lnf0']
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched = np.hstack((lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched.reshape(-1,1), beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched.reshape(-1,1), alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched.reshape(-1,1)))
# Load the PyStan results for the combined Berger & Huber (2018) + custom Gaia
# stellar-property run, then flatten the hyperparameter chains.
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation_Berger_Huber_and_Custom.pkl", "rb") as f21:
    data_dict21 = pickle.load(f21)

Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom_fit = data_dict21['fit']
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom_data = data_dict21['data']

# Short alias keeps the chain-extraction lines readable.
_fit21 = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom_fit
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom = _fit21['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom = _fit21['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom = _fit21['lnf0']

# Flat chain with one posterior sample per row, columns [lnf0, beta, alpha].
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom = np.hstack([
    chain.reshape(-1, 1)
    for chain in (
        lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom,
        beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom,
        alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom,
    )
])
# Load the PyStan results for the Berger & Huber (2018) rematched run with the
# fixed truncation.
# Fix: the original cell reused the handle names f21/data_dict21 from the
# previous cell (a copy-paste slip that silently clobbered that cell's dict,
# breaking the one-unique-index-per-cell pattern f20/f21/f22 used elsewhere).
# This dataset now gets its own names.
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation_Berger_Huber_rematched_fixed_truncation.pkl", "rb") as f23:
    data_dict23 = pickle.load(f23)
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_fit = data_dict23['fit']
# NOTE(review): the name below appears mistyped ("__fixed_truncationdata" —
# doubled underscore, misplaced "data"); kept verbatim in case later cells
# reference it under exactly this spelling.
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched__fixed_truncationdata = data_dict23['data']
# Individual hyperparameter chains from the fit object.
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_fit['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_fit['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_fit['lnf0']
# Flat chain with one posterior sample per row, columns [lnf0, beta, alpha].
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation = np.hstack((lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation.reshape(-1,1), beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation.reshape(-1,1), alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation.reshape(-1,1)))
# Load the PyStan results for the rematched, fixed-truncation run with Rp_True
# constrained, and flatten the hyperparameter chains (columns [lnf0, beta,
# alpha], one posterior sample per row).
with open("/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Output/DR25_9.3_occ_rate_3000_REAL_Runcs_Gaia_stlr_yes_Prad_update_0p75_2p5_50_300_w_truncation_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained.pkl", "rb") as f22:
    data_dict22 = pickle.load(f22)
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained_fit = data_dict22['fit']
# NOTE(review): the name below appears mistyped ("rematched__fixed_truncation"
# with a doubled underscore); kept verbatim in case later cells reference it.
Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched__fixed_truncation_Rp_True_is_constrained_data = data_dict22['data']
alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained_fit['alpha']
beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained_fit['beta']
lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained = Out_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained_fit['lnf0']
PyStan_flatchain_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained = np.hstack((lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained.reshape(-1,1), beta_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained.reshape(-1,1), alpha_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained.reshape(-1,1)))
# !date
# -
# lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300
# lnf0_DR25_Gaia_REAL_strl1_0p75_2p5_50_300
# lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300
# lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300
# +
fileIn = "/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/Data/occur_alt_50P300_0p75R2p5_Burke_2018.hd5"
f = h5py.File(fileIn,'r')
vals = np.array(f['values'])
Burke_2018_DR25_0p75_2p5_50_300 = vals
print(len(Burke_2018_DR25_0p75_2p5_50_300))
# -
# Preview the current seaborn color palette (sanity check of the plot colors
# used in the figures below).
current_palette = sns.color_palette()
sns.palplot(current_palette)
# +
#FIGURE 1a
#Reorder key and produce results slides for figure.
# KDEs of the marginal posterior of F0 = exp(lnf0) for the 1-2 R_earth runs,
# comparing low vs. high radius uncertainty (NO vs. REAL) and large vs. small
# stellar samples (strl1 vs. strl2).  Bracket annotations group the curves by
# radius-uncertainty treatment; the figure is saved as panel 1A.
fig_15 = pl.figure()
pl.rcParams["figure.figsize"] = (10,3)
#pl.hist(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), normed=True, bins=50, alpha=0.1, color='darkgreen')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), normed=True, bins=50, alpha=0.1, color='black')
#pl.hist(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), normed=True, bins=50, alpha=0.1, color='blue')
#pl.hist(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), normed=True, bins=50, alpha=0.1, color='purple')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow}$', color='darkgreen',ls=':')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow}$', color='black')
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow}$', color='blue',ls=':', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow}$', color='purple', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='darkorange',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='magenta')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow \;|\; planets\uparrow}$', color='red',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='brown')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='mediumseagreen', linewidth=3)
#sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
#pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=18)
pl.ylabel(r"Probability Density", fontsize=18)
pl.legend(fontsize=10.5)
pl.tick_params(labelsize=18)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
pl.ylim([0, 15])
pl.xlim([0.0, 1.5])
ax = pl.subplot(111)
# Bracket annotations pointing at the sigma-down and sigma-up curve groups.
ann1 = ax.annotate(r"\begin{center} $\boldsymbol{DR25}$ \\ $\boldsymbol{\sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(0.175, 13.7), xycoords='data',
                  xytext=(0.31, 13.), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="-[",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 1.),
                                  fc="w"),
                  )
ann2 = ax.annotate(r"\begin{center} $\boldsymbol{DR25}$ \\ $\boldsymbol{\sigma_{R_{p}}\uparrow}$ \end{center}",
                  xy=(0.205, 9.9), xycoords='data',
                  xytext=(0.36, 9.9), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="-[",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 1.),
                                  fc="w"),
                  )
propsb = dict(boxstyle='square,pad=0.4', facecolor='none')
# NOTE(review): this box says 0.75-2.5 R_earth / 50-300 days, but the chains
# plotted above are the 1-2 R_earth, 50-200/300 day runs — confirm the
# annotation text matches the intended planet sample for panel 1A.
ann3 = ax.annotate(r"\begin{center}$\boldsymbol{planets\downarrow}:$ \, 0.75-2.5\, $R_{\oplus}$; \, 50-300\, Days \end{center}",
                  xy=(0.7, 14), xycoords='data',
                  xytext=(0.7, 13.5), textcoords='data',
                  size=12, va="center", ha="center",
                  bbox=propsb
                  )
#ann10 = ax.annotate('',
#                  xy=(0.2, .65), xycoords='data',
#                  xytext=(1.25, .65), textcoords='data',
#                  size=10, va="center", ha="center",
#                  arrowprops=dict(arrowstyle="|-|,widthA=0.25,widthB=.25",
#                                  fc="w"),
#                  )
#ann11 = ax.annotate('',
#                  xy=(0.06, 8.), xycoords='data',
#                  xytext=(.33, 8.), textcoords='data',
#                  size=10, va="center", ha="center",
#                  arrowprops=dict(arrowstyle="|-|,widthA=0.25,widthB=.25",
#                                  fc="w"),
#                  )
#props2 = dict(boxstyle='square', facecolor='white')
#textstr1 = r"\begin{center} $\boldsymbol{planets\downarrow}$ \end{center}"
#ann8 = ax.text(0.07, 0.545, textstr1, transform=ax.transAxes, fontsize=9,
#        verticalalignment='top', bbox=props2)
#textstr2 = r"\begin{center} $\boldsymbol{planets\uparrow}$ \end{center}"
#ann9 = ax.text(0.227, 0.055, textstr2, transform=ax.transAxes, fontsize=9,
#        verticalalignment='top', bbox=props2)
fig_15.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/plots/plots_june_6_2018/Occ_Rate_kdeplot_DR25_strl2_and_stlr2BurkeCustom_0p75_2p5_50_300_ALL_re_ordered_key_final_no_Burke_re_colored_1A.pdf',bbox_inches='tight')
# +
# FIGURE 1b
#Reorder key and produce results slides for figure.
# Same comparison as Figure 1a but for the larger planet sample
# (0.75-2.5 R_earth, 50-300 days), with the Q1-Q16 run added for reference.
# Saved as panel 1B.
fig_15 = pl.figure()
pl.rcParams["figure.figsize"] = (10,3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\;planets\downarrow}$', color='darkgreen',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow\;|\; planets\downarrow}$', color='blue',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='purple', linewidth=3)
#pl.hist(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='darkorange')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='magenta')
#pl.hist(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='red')
#pl.hist(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='cyan')
#pl.hist(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='dimgrey')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow}$', color='brown',ls=':')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow}$', color='magenta')
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow}$', color='red',ls=':', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow}$', color='cyan', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='brown')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='mediumseagreen', linewidth=3)
sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
#pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=18)
pl.ylabel(r"Probability Density", fontsize=18)
pl.legend(fontsize=10.5)
pl.tick_params(labelsize=18)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
pl.ylim([0, 15])
pl.xlim([0.0, 1.5])
ax = pl.subplot(111)
# Bracket/arrow annotations grouping the curves by radius-uncertainty
# treatment, plus a pointer to the Q1-Q16 reference curve.
ann3 = ax.annotate(r"\begin{center} $\boldsymbol{DR25}$ \\ $\boldsymbol{\sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(0.37, 7.4), xycoords='data',
                  xytext=(0.41, 9.7), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="-[",
                                  connectionstyle="arc3,rad=-0.2",
                                  relpos=(0., 1.),
                                  fc="w"),
                  )
ann4 = ax.annotate(r"\begin{center} $\boldsymbol{DR25}$ $\boldsymbol{|}$ $\boldsymbol{\sigma_{R_{p}}\uparrow}$ \end{center}",
                  xy=(0.46, 5.3), xycoords='data',
                  xytext=(0.58, 7.1), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="-[",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 0.),
                                  fc="w"),
                  )
props1 = dict(boxstyle='round,pad=0.4', facecolor='cornsilk')
ann7 = ax.annotate(r"\begin{center} $\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(.95, 2.), xycoords='data',
                  xytext=(1.2, 3), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=0.2",
                                  fc="w"),
                  )
propsb = dict(boxstyle='square,pad=0.4', facecolor='none')
ann3 = ax.annotate(r"\begin{center}$\boldsymbol{planets\uparrow}:$ \, 0.75-2.5\, $R_{\oplus}$; \, 50-300\, Days \end{center}",
                  xy=(0.4, 14), xycoords='data',
                  xytext=(0.4, 13.5), textcoords='data',
                  size=12, va="center", ha="center",
                  bbox=propsb
                  )
fig_15.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/plots/plots_june_6_2018/Occ_Rate_kdeplot_DR25_strl2_and_stlr2BurkeCustom_0p75_2p5_50_300_ALL_re_ordered_key_final_no_Burke_re_colored_1B.pdf',bbox_inches='tight')
# +
#FIGURE 2
#Reorder key and produce results slides for figure.
# KDEs of the F0 posterior comparing the DR25-only run against the two
# DR25+Gaia runs (Gaia stellar sample, with and without stellar-dependent
# radius uncertainties).  Saved as panel 1C.
fig_15 = pl.figure()
pl.rcParams["figure.figsize"] = (10,3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\;planets\downarrow}$', color='darkgreen',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow\;|\; planets\downarrow}$', color='blue',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='purple', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='darkorange',ls=':')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='magenta')
#pl.hist(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='brown')
#pl.hist(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='mediumseagreen')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow}$', color='magenta')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow \;|\; planets\uparrow}$', color='red',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow}$', color='brown')
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow}$', color='mediumseagreen', linewidth=3)
#sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=18)
pl.ylabel(r"Probability Density", fontsize=18)
pl.legend(fontsize=10.5)
pl.tick_params(labelsize=18)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
pl.ylim([0, 15])
pl.xlim([0,1.5])
ax = pl.subplot(111)
# Arrow annotations identifying each curve on the plot itself.
ann4 = ax.annotate(r"\begin{center} $\boldsymbol{DR25}$ $\boldsymbol{|}$ $\boldsymbol{\sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(0.4, 6.65), xycoords='data',
                  xytext=(0.4, 10.), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=-0.2",
                                  fc="w"),
                  )
ann5 = ax.annotate(r"\begin{center} $\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(0.545, 3.6), xycoords='data',
                  xytext=(0.78, 6), textcoords='data',
                  size=10, va="center", ha="center",
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=0.2",
                                  fc="w"),
                  )
props1 = dict(boxstyle='round,pad=0.4', facecolor='cornsilk')
ann6 = ax.annotate(r"\begin{center} $\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow}$ \end{center}",
                  xy=(0.72, 2.), xycoords='data',
                  xytext=(1.15, 3.6), textcoords='data',
                  size=10, va="top", ha="center",
                  bbox=props1,
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 1.),
                                  fc="w"),
                  )
propsb = dict(boxstyle='square,pad=0.4', facecolor='none')
ann3 = ax.annotate(r"\begin{center}$\boldsymbol{planets\uparrow}:$ \, 0.75-2.5\, $R_{\oplus}$; \, 50-300\, Days \end{center}",
                  xy=(0.4, 14), xycoords='data',
                  xytext=(0.4, 13.5), textcoords='data',
                  size=12, va="center", ha="center",
                  bbox=propsb
                  )
fig_15.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/plots/plots_june_6_2018/Occ_Rate_kdeplot_DR25_strl2_and_stlr2BurkeCustom_0p75_2p5_50_300_ALL_re_ordered_key_final_no_Burke_re_colored_1C.pdf',bbox_inches='tight')
# +
# Convergence check: compare the standard-resolution NO/strl2 run against its
# high-resolution reruns (finer grids; "_hires" and "_hires_PorbandRp").
# Exploratory figure — not saved to disk.
fig_hires = pl.figure()
pl.rcParams["figure.figsize"] = (10,3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\;planets\downarrow}$', color='darkgreen',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow\;|\; planets\downarrow}$', color='blue',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='purple', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='darkorange',ls=':')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='magenta')
#pl.hist(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='brown')
#pl.hist(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='mediumseagreen')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='magenta')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow \;|\; planets\uparrow}$', color='red',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='brown')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='mediumseagreen', linewidth=3)
#sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
# NOTE(review): the two hires curves below carry identical legend label text
# and are distinguishable only by color (black vs. blue) — consider renaming.
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$ \,hires', color='black')
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300_hires_PorbandRp), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$ \,hires', color='blue')
pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=18)
pl.ylabel(r"Probability Density", fontsize=18)
pl.legend(fontsize=10.5)
pl.tick_params(labelsize=18)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#pl.ylim([0, 15])
#pl.xlim([0,1.5])
# +
# Convergence check: compare the standard-resolution REAL/strl2 run against its
# high-resolution rerun (finer Porb and Rp grids, "_hires_PorbandRp"); the
# low-radius-uncertainty run is plotted alongside for reference.
# Exploratory figure — not saved to disk.
fig_hires = pl.figure()
pl.rcParams["figure.figsize"] = (10,3)
sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='magenta')
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
# Bug fix: this curve is the REAL (high radius uncertainty) run, so its legend
# arrow must be \uparrow as on the cyan curve above; the original label said
# \sigma_{R_{p}}\downarrow, mislabeling the hires rerun.
sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$ \,hires', color='blue')
pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=18)
pl.ylabel(r"Probability Density", fontsize=18)
pl.legend(fontsize=10.5)
pl.tick_params(labelsize=18)
# Axis limits left on autoscale for this diagnostic view.
#pl.ylim([0, 15])
#pl.xlim([0,1.5])
# +
#GAIA with Berger and Huber catalog as well and Hsu and Ford.
#Reorder key and produce results slides for figure.
# Compare the Gaia-based runs: baseline DR25+Gaia vs. the Berger & Huber (2018)
# catalog variants (rematched; rematched + fixed truncation; + Rp_True
# constrained).  The legend is hidden (see set_visible(False) below); curves
# are identified by the numbered arrow annotations instead.  Saved as PNG.
fig_GaiaBerger = pl.figure()
pl.rcParams["figure.figsize"] = (9,7)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\;planets\downarrow}$', color='darkgreen',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow\;|\; planets\downarrow}$', color='blue',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='purple', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='darkorange',ls=':')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='magenta')
#pl.hist(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='brown')
#pl.hist(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='mediumseagreen')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='magenta')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$ \,hires', color='lightblue', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow \;|\; planets\uparrow}$', color='red',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='brown')
# NOTE(review): the three Berger 2018 variants below (cyan = rematched + fixed
# truncation, magenta = + Rp_True constrained, darkorange = rematched) share
# identical label text; the legend is hidden below so this is cosmetic, but
# the labels would be indistinguishable if the legend were re-enabled.
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='magenta', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='darkorange', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$\,Berger+2018 rematched', color='blue', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom), label=r'$\boldsymbol{Berger+Custom\;Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \; stars\downarrow\;|\; planets\uparrow}$', color='red', linewidth=3)
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='mediumseagreen', linewidth=3)
#sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=22)
pl.ylabel(r"Probability Density", fontsize=22)
pl.title(r"$\boldsymbol{Gaia\;stars\downarrow\;|\; planets\uparrow}$", fontsize=20)
pl.legend(fontsize=16)
pl.tick_params(labelsize=22)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
pl.ylim([0, 6])
#pl.xlim([0,1.5])
ax = pl.subplot(111)
# Hide the legend; the numbered annotations below label the curves directly.
ax.legend().set_visible(False)
ann4 = ax.annotate(r"\begin{center} $\boldsymbol{1.\;DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow}$ \end{center}",
                  xy=(0.49, 4.4), xycoords='data',
                  xytext=(0.48, 5.5), textcoords='data',
                  size=16, va="center", ha="center",
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=-0.2",
                                  fc="w"),
                  )
ann5 = ax.annotate(r"\begin{center} $\boldsymbol{3.\;Berger\;2018}$ $\boldsymbol{|}$ $\boldsymbol{\sigma_{R_{p}} w/\star\uparrow}$ \end{center}",
                  xy=(0.567, 3.6), xycoords='data',
                  xytext=(0.89,4.), textcoords='data',
                  size=16, va="center", ha="center",
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=0.2",
                                  fc="w"),
                  )
props1 = dict(boxstyle='round,pad=0.4', facecolor='cornsilk')
ann6 = ax.annotate(r"\begin{center} $\boldsymbol{2.\;DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow}$ \end{center}",
                  xy=(0.72, 2.), xycoords='data',
                  xytext=(1.15, 2.), textcoords='data',
                  size=16, va="top", ha="center",
                  bbox=props1,
                  arrowprops=dict(arrowstyle="->",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 1.),
                                  fc="w"),
                  )
fig_GaiaBerger.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/plots/plots_june_6_2018/Occ_Rate_kdeplot_DR25_strl2_and_stlr2BurkeCustom_0p75_2p5_50_300_ALL_Gaia_compBerger_Huber_fixed_truncation.png',bbox_inches='tight')
# +
#GAIA with Berger and Huber catalog as well and Hsu and Ford.
#Reorder key and produce results slides for figure.
# Overlay the marginal F0 (occurrence-rate) posteriors for three catalog /
# radius-uncertainty treatments in the Rp = 0.75-2.5 R_Earth,
# Porb = 50-300 day box.  The commented-out kdeplot/hist calls below are
# alternate catalog combinations kept for reference.
fig_GaiaBerger = pl.figure()
pl.rcParams["figure.figsize"] = (9,7)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_1_2_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\;planets\downarrow}$', color='darkgreen',ls=':')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\uparrow\;|\; planets\downarrow}$', color='blue',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_1_2_50_200), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\downarrow}$', color='purple', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='darkorange',ls=':')
#pl.hist(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='magenta')
#pl.hist(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='brown')
#pl.hist(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), normed=True, bins=50, alpha=0.1, color='mediumseagreen')
#sns.kdeplot(np.exp(lnf0_DR25_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='magenta')
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300_hires_PorbandRp), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$ \,hires', color='lightblue', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{DR25 \;|\; \sigma_{R_{p}}\uparrow \;|\; stars\uparrow \;|\; planets\uparrow}$', color='red',ls=':', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
# Curve 1: DR25+Gaia stars, radius uncertainties NOT inflated.
sns.kdeplot(np.exp(lnf0_DR25_Gaia_NO_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='brown')
##sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='cyan', linewidth=3)
##sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched_fixed_truncation_Rp_True_is_constrained), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='magenta', linewidth=3)
# Curve 3: Berger 2018 rematched catalog, radius uncertainties with stellar term.
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched), label=r'$\boldsymbol{Berger\;2018\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='darkorange', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_rematched), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$\,Berger+2018 rematched', color='blue', linewidth=3)
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300_Berger_Huber_and_Custom), label=r'$\boldsymbol{Berger+Custom\;Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \; stars\downarrow\;|\; planets\uparrow}$', color='red', linewidth=3)
# Curve 2: DR25+Gaia stars, radius uncertainties with stellar term.
sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_RP_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow\;|\; Gaia \;stars\downarrow\;|\; planets\uparrow}$', color='mediumseagreen', linewidth=3)
#sns.kdeplot(np.exp(lnf0_Q1Q16_NO_strl1_0p75_2p5_50_300), label=r'$\boldsymbol{Q1-Q16\;|\; \sigma_{R_{p}}\downarrow\;|\; stars\uparrow\;|\; planets\uparrow}$', color='dimgrey',ls=':')
#sns.kdeplot(Burke_2018_DR25_0p75_2p5_50_300, label=r'$\boldsymbol{Burke \, 2018\, DR25\;|\; stars\downarrow\;|\; planets\uparrow}$', color='black')
#sns.kdeplot(np.exp(lnf0_DR25_Gaia_REAL_strl2_0p75_2p5_50_300), label=r'$\boldsymbol{DR25\;|\; \sigma_{R_{p}}\uparrow\;|\; Gaia stars\downarrow\;|\; planets\uparrow}$', color='darkkhaki')
# Axis labels / title (LaTeX rendered; \boldsymbol requires amsmath preamble).
pl.xlabel(r"\begin{center} Marginal Posterior of $F_{0}$ \\ (i.e., occurrence rate or number of planets per GK star) \end{center}", fontsize=22)
pl.ylabel(r"Probability Density", fontsize=22)
pl.title(r"$\boldsymbol{Gaia\;stars\downarrow\;|\; planets\uparrow}$", fontsize=20)
pl.legend(fontsize=16)
pl.tick_params(labelsize=22)
#pl.tick_params(axis='y',labelleft='off')
#pl.legend(loc='center left', bbox_to_anchor=(1, 0.5))
pl.ylim([0, 6])
#pl.xlim([0,1.5])
# The legend is hidden; curves are identified by the numbered annotations below.
ax = pl.subplot(111)
ax.legend().set_visible(False)
ann4 = ax.annotate(r"\begin{center} $\boldsymbol{1.\;DR25+Gaia\;|\; \sigma_{R_{p}}\downarrow}$ \end{center}",
xy=(0.49, 4.4), xycoords='data',
xytext=(0.48, 5.5), textcoords='data',
size=16, va="center", ha="center",
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=-0.2",
fc="w"),
)
ann5 = ax.annotate(r"\begin{center} $\boldsymbol{3.\;Berger\;2018}$ $\boldsymbol{|}$ $\boldsymbol{\sigma_{R_{p}} w/\star\uparrow}$ \end{center}",
xy=(0.567, 3.6), xycoords='data',
xytext=(0.89,4.), textcoords='data',
size=16, va="center", ha="center",
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.2",
fc="w"),
)
props1 = dict(boxstyle='round,pad=0.4', facecolor='cornsilk')
ann6 = ax.annotate(r"\begin{center} $\boldsymbol{2.\;DR25+Gaia\;|\; \sigma_{R_{p}} w/\star\uparrow}$ \end{center}",
xy=(0.72, 2.), xycoords='data',
xytext=(1.15, 2.), textcoords='data',
size=16, va="top", ha="center",
bbox=props1,
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.2",
relpos=(0., 1.),
fc="w"),
)
# Text box stating the radius/period box the posteriors refer to.
style = dict(size=16, color='black')
ax.text(1.25, 5.5, r"\begin{center} $\boldsymbol{R_{p}: 0.75-2.5 R_{\oplus}}$ \end{center}", ha='center', **style)
ax.text(1.25, 5.2, r"\begin{center} $\boldsymbol{P_{orb}: 50-300 Days}$ \end{center}", ha='center', **style)
# NOTE(review): absolute, user-specific output path — consider a configurable
# output directory.
fig_GaiaBerger.savefig('/Users/meganshabram/Dropbox/NASA_Postdoctoral_Program_Fellowship/Gaia_Kepler_Occ_Rates/plots/plots_june_6_2018/Occ_Rate_kdeplot_DR25_strl2_and_stlr2BurkeCustom_0p75_2p5_50_300_ALL_Gaia_compBerger_Huber_fixed_truncation_2.pdf',bbox_inches='tight')
# -
|
Sensitivity-Analyses-of-Exoplanet-Occurrence-Rates-from-Kepler-and-Gaia/occurrence_rate_plotting_Figure_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Exploratory analysis of Divvy bike-share trips (summer 2015 export).
divvy_data = pd.read_csv('data/divvy_zip_summer_15.csv')
divvy_data.head()
# Parse the start/stop timestamp columns into proper datetimes.
divvy_data['start_time'] = pd.to_datetime(divvy_data['start_time'], format='%Y-%m-%d %H:%M:%S')
divvy_data.start_time
divvy_data['stop_time'] = pd.to_datetime(divvy_data['stop_time'], format='%Y-%m-%d %H:%M:%S')
divvy_data.stop_time
# Trip duration as a timedelta.
divvy_data['trip_time'] = divvy_data['stop_time'] - divvy_data['start_time']
divvy_data.head()
divvy_data.trip_time.describe()
divvy_data.hist(column='trip_time')
# Trip duration in whole minutes (int).
# BUG FIX: the original divided by np.timedelta64(1, 'M') — 'M' means *months*,
# an ambiguous calendar unit that modern numpy/pandas reject; minutes is the
# lowercase 'm'.
(divvy_data['trip_time'] / np.timedelta64(1, 'm')).astype(int)
# Trip duration floored to (float) minutes.
# .astype('timedelta64[m]') is rejected by pandas >= 2.0; total_seconds() // 60
# gives the same floored-minute values.
divvy_data['trip_time'].dt.total_seconds() // 60
divvy_data['trip_time_mins'] = divvy_data['trip_time'].dt.total_seconds() // 60
divvy_data.head()
# Distribution of trips shorter than an hour.
divvy_data[divvy_data['trip_time_mins'] < 60].trip_time_mins.hist()
# Very short trips (< 2 min) — likely immediate re-docks.
divvy_data[divvy_data['trip_time_mins'] < 2]['trip_time_mins'].count()
divvy_data[divvy_data['trip_time_mins'] < 10]['trip_time_mins'].hist()
# Short round trips (same station latitude) vs. short one-way trips.
divvy_data[(divvy_data['from_lat'] == divvy_data['to_lat']) & (divvy_data['trip_time_mins'] < 4)].trip_time_mins.count()
divvy_data[(divvy_data['from_lat'] != divvy_data['to_lat']) & (divvy_data['trip_time_mins'] < 4)].trip_time_mins.count()
# Trips exceeding the 30-minute window.
divvy_data[divvy_data['trip_time_mins'] > 30].trip_time_mins.count()
divvy_data.head()
divvy_data.user_type.unique()
# Trip counts broken down by user type.
divvy_data.groupby("user_type").count()
|
transportation/mob/Divvy Pandas Mob Programming.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: sandsbot
# language: python
# name: sandsbot
# ---
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append('..')
# +
from state import State
import os
import json
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
from utils.order_parameters import (
calculate_avg_dist,
)
from utils.state_utils import convert_states_to_local
import colorsys
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Input/output locations for the swarm experiments and simulations.
RESULTS_DIR = 'results/drones/'
LIPSCHITZ_RESULTS_DIR = 'results/log/swarm/final'
SYNC_RESULTS_DIR = 'results/log/swarm/async'
# NOTE(review): absolute, user-specific path into the paper repository.
PLOTS_DIR = '/home/agniewek/repos/papers/2021-ABarcis-MRS-Swarming/data/'
# +
# Global matplotlib styling for all figures in this notebook.
plt.rcParams['grid.alpha'] = 1
plt.rcParams['grid.linewidth'] = 0.8
plt.rcParams['axes.grid'] = True
plt.rcParams['legend.fancybox'] = True
#print(plt.rcParams.find_all('axes'))
plt.rcParams['xtick.bottom'] = False
plt.rcParams['ytick.left'] = False
plt.rcParams['figure.frameon'] = True
plt.rcParams['axes.edgecolor'] = '#b3b3b3'
#plt.rcParams['figure.framealpha'] = 0.5
#plt.tick_params(left=False, bottom=False)
#matplotlib.style.use('default')
#matplotlib.style.use('seaborn-talk')
#matplotlib.style.use('seaborn-whitegrid')
# +
def make_zoom_in_plot(ax, x1, x2, y1, y2, zoom_level=2):
    """Attach an inset axes to *ax* showing the region [x1, x2] x [y1, y2].

    zoom_level is either a numeric magnification for zoomed_inset_axes, or
    the string 'fixed' for a fixed-size (1x1 inch) inset with no zoom,
    anchored near the upper-right of the figure.  Returns the inset axes.
    """
    if zoom_level == 'fixed':
        # Fixed-size inset pinned in figure coordinates (no magnification).
        inset = inset_axes(ax, 1, 1, loc=1,
                           bbox_to_anchor=(0.97, 0.95),
                           bbox_transform=ax.figure.transFigure)
    else:
        inset = zoomed_inset_axes(ax, zoom_level, loc=1)
    inset.set_xlim(x1, x2)
    inset.set_ylim(y1, y2)
    # The inset shows the zoomed data only; hide its tick labels/axes.
    inset.get_xaxis().set_visible(False)
    inset.get_yaxis().set_visible(False)
    # Connector lines from the marked region on *ax* to inset corners 3 and 4.
    mark_inset(ax, inset, loc1=3, loc2=4, fc="none", ec="0.5")
    return inset
def plot_experiments(plot_data, zoom=None, xlim=None, instants=None, save_name=None, drop_samples_factor=1, ylim=None):
# Render one experiment as two figures:
#   fig  — per-agent potential vs. time, with optional zoom inset (args in
#          *zoom* are forwarded to make_zoom_in_plot) and dashed vertical
#          markers at three chosen *instants* (indices into the snapshot list);
#   fig2 — three centroid-centred position snapshots, one per instant.
# drop_samples_factor plots every k-th potential sample; a truthy *ylim*
# clamps the potential axis bottom to -0.01 (the actual value is ignored).
# When save_name is given, writes <save_name>-pot.eps and <save_name>-snap.pdf
# under PLOTS_DIR.
times = plot_data['ts']
fig, ax = plt.subplots(
1, 1, sharex=True, constrained_layout=True, figsize=(5.5, 2.5)
)
#fig.suptitle(f"{experiment_name_prefix} {data['params']}")
#fig.suptitle(filename)
fig2, s = plt.subplots(1, 3, constrained_layout=True, figsize=(5.5, 2), sharex=True, sharey=True)
#print(len(times[0]), len(data['centroids']), len(data['var_rs']), len(data['Ss']))
# One fixed color per agent, reused across both figures.
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']
ax.set_axisbelow(False)
#ax.scatter(times, plot_data['S'], s=5, label="$S$")
#ax.scatter(times, plot_data['potentials'][0])
# zoom = False
if zoom:
axins = make_zoom_in_plot(ax, *zoom)
# One potential trace per agent; the inset (if any) gets the full-rate data.
for i, p in enumerate(plot_data['potentials']):
ax.plot(times[::drop_samples_factor], p[::drop_samples_factor], 'o', c=colors[i], rasterized=True, markersize=2)
if zoom:
axins.scatter(times, p, c=colors[i], s=5)
#ax.spines['left'].set_visible(False)
#ax.set_ylim(ymin=-0.01)
snapshots = plot_data['snapshots']
#print(len(snapshots))
#print(snapshots)
# NOTE(review): the default lower xlim is -times[0]*0.01, which is 0 whenever
# times[0] == 0 — possibly -times[-1]*0.01 was intended; confirm.
if xlim is None:
xlim = (-times[0]*0.01, times[-1]*1.01)
if instants is None:
instants = [0, int(len(snapshots)/2), -1]
# Per-instant leftward text offsets for the t=... labels, scaled to the x range.
offsets = [xlim[1]*0.01, xlim[1]*0.03, xlim[1]*0.12]
# instants = []
for i, idx in enumerate(instants):
t = times[idx]
ax.axvline(t, color='black', ls='--')
ax.text(t-offsets[i], 1.01, f'$t={t:.0f}\\:$s', transform=ax.get_xaxis_transform())
#s[i].xaxis.set_major_locator(MultipleLocator(2))
#s[i].yaxis.set_major_locator(MultipleLocator(2))
#s[i].axis('square', anchor='C')
s[i].set_aspect('equal', adjustable='box', anchor='C')
# print(data['snapshots'])
#print(snapshots[i])
# Centre each snapshot on the swarm centroid.
# (The comprehension variable `s` shadows the axes array `s` only inside the
# comprehension scope — legal in Python 3, but confusing; consider renaming.)
xs = [s.position[0] for s in snapshots[idx]]
avg_x = np.average(xs)
ys = [s.position[1] for s in snapshots[idx]]
avg_y = np.average(ys)
cs = [colorsys.hsv_to_rgb(s.phase, 1, 1) for s in snapshots[idx]]
#s[i].set_axisbelow(True)
#s[i].set_xlim((-30, 30))
#s[i].set_ylim((-30, 30))
s[i].set_xlim(s[i].get_ylim())
for j in range(len(xs)):
s[i].scatter(xs[j]-avg_x, ys[j]-avg_y, c=colors[j], s=30)
#s[i].scatter(xs, ys, s=30)
s[i].set_title(f'$t={t:.0f}\\:$s')
s[i].set_xlabel('[m]')
s[0].set_ylabel('[m]')
# ax.legend(loc='upper right')
ax.set_xlim(xlim)
# NOTE(review): `ymin=` was removed from set_ylim in matplotlib 3.x (use
# `bottom=`), and the value of *ylim* itself is never used — confirm intent.
if ylim:
ax.set_ylim(ymin=-0.01)
ax.set_xlabel('time [s]')
ax.set_ylabel('potential')
# ax.set_yscale('log')
if save_name:
path_prefix = os.path.join(PLOTS_DIR, save_name)
fig.savefig(f'{path_prefix}-pot.eps', bbox_inches="tight", dpi=400)
fig2.savefig(f'{path_prefix}-snap.pdf', bbox_inches="tight", dpi=400)
#print(data)
# +
# Robot IDs as they appear in the experiment logs; '01' is treated as the root.
robot_names = ['01', '02', '03', '04', '00']
# NOTE(review): os.listdir order is filesystem-dependent, so indexing
# experiments[1] / experiments[-1] selects arbitrary files unless the
# directory contents are known — consider sorted(...) and selecting by name.
experiments = os.listdir(os.path.join(RESULTS_DIR, 'b01'))
experiments = [experiments[1], experiments[-1]]
sim_lipschitz = os.listdir(LIPSCHITZ_RESULTS_DIR)
def state_from_json(s, params):
    """Rebuild a State object from its serialized JSON dict *s*.

    Falsy *s* (e.g. None or {}) is returned unchanged.  NOTE: *params* is
    mutated in place — phase_levels_number and orientation_mode are copied
    into it from the serialized state before construction.
    """
    #print(s)
    if not s:
        return s
    params['phase_levels_number'] = s['phase_levels_number']
    params['orientation_mode'] = s['orientation_mode']
    restored = State(
        phase=s['phase'],
        position=np.array(s['position']),
        velocity=np.array(s['velocity']),
        params=params,
    )
    # phase_level is not a constructor argument; restore it separately.
    restored.phase_level = s['phase_level']
    return restored
def parse_params_from_filename(filename):
    """Parse 'name=value' run parameters out of a result-file name.

    File names look like '<timestamp>:J=0,K=0,M=1,T=0.01.json'; after
    stripping the '.json' suffix, everything past the last ':' is a
    comma-separated list of assignments.  'M' is parsed as int, every
    other parameter as float.  Returns a dict.
    """
    param_section = filename[:-5].split(':')[-1]
    parsed = {}
    for assignment in param_section.split(','):
        name, raw_value = assignment.split('=')
        parsed[name] = int(raw_value) if name == 'M' else float(raw_value)
    return parsed
def get_experiment_raw_data(experiment_name):
    """Load a drone-experiment JSON log from RESULTS_DIR/b01/<experiment_name>.

    Returns (raw_result_dict, params_dict), where the parameters are parsed
    from the file name by parse_params_from_filename().
    """
    with open(os.path.join(RESULTS_DIR, 'b01', experiment_name)) as handle:
        raw = json.load(handle)
    return raw, parse_params_from_filename(experiment_name)
def calculate_exp_potential(states, a=0.1, r=30, d=5):
    """Per-agent average pairwise potential for the real-drone experiments.

    For each agent, sums over all other agents the attractive quadratic term
    a*|x2-x1|**2/2 plus the repulsive log barrier -r*log(|x2-x1| - d)
    (undefined for separations <= d), then divides by the number of agents N.

    Parameters
    ----------
    states : sequence of objects exposing a ``.position`` array attribute.
    a, r, d : attraction gain, repulsion gain and minimum separation.
        Defaults are the experiment values previously hard-coded here;
        they are now parameters so the same routine can serve other setups.

    Returns
    -------
    list of N per-agent potentials, in the order of *states*.
    """
    n_agents = len(states)
    potentials = []
    for i, s1 in enumerate(states):
        x1 = s1.position
        pot = 0.0
        # All agents except s1 itself (requires *states* to support slicing).
        for s2 in states[:i] + states[i + 1:]:
            sep = np.linalg.norm(s2.position - x1)
            pot += a * sep ** 2 / 2 - r * np.log(sep - d)
        potentials.append(pot / n_agents)
    return potentials
def calculate_sim_potential(states, a=0.75, r=2, d=0.2):
    """Per-agent average pairwise potential for the simulation runs.

    Same functional form as calculate_exp_potential — attractive quadratic
    a*|x2-x1|**2/2 plus repulsive log barrier -r*log(|x2-x1| - d), averaged
    over the N agents — but with the simulation gains as defaults.

    Parameters
    ----------
    states : sequence of objects exposing a ``.position`` array attribute.
    a, r, d : attraction gain, repulsion gain and minimum separation.
        Defaults are the simulation values previously hard-coded here.

    Returns
    -------
    list of N per-agent potentials, in the order of *states*.
    """
    n_agents = len(states)
    potentials = []
    for i, s1 in enumerate(states):
        x1 = s1.position
        pot = 0.0
        # All agents except s1 itself (requires *states* to support slicing).
        for s2 in states[:i] + states[i + 1:]:
            sep = np.linalg.norm(s2.position - x1)
            pot += a * sep ** 2 / 2 - r * np.log(sep - d)
        potentials.append(pot / n_agents)
    return potentials
def get_plot_data(experiments_data, params):
# Convert one raw drone-experiment log into the dict plot_experiments expects:
#   'ts'         — timestamps where every agent state is known,
#   'potentials' — per-agent potential time series (tuple per agent),
#   'snapshots'  — per-timestep lists of states converted to the root
#                  agent ('01')'s local frame,
#   'S'          — average inter-agent distance per timestep,
#   'params'     — the run parameters passed in.
# Agent '01' is read from experiments_data['states']; the remaining agents
# from '01''s knowledge entries ('knowledge'), skipped entirely when never
# observed.
states = {}
result = {
'S': [],
'potentials': [],
'snapshots': [],
'ts': [],
'params': params
}
states['01'] = [state_from_json(s[1]['01'], params) for s in experiments_data['states']]
#print(states)
for drone_name in robot_names[1:]:
#print(experiments_data['knowledge'])
tmp = [state_from_json(s[1]['01'].get(drone_name, None), params) for s in experiments_data['knowledge']]
if any(tmp):
states[drone_name] = tmp
# history[i] = [(agent_name, state_at_step_i), ...]; states_matrix is the
# transpose (one tuple of all agents' states per timestep).
history = [[(k, states[k][i]) for k in states.keys()] for i in range(len(states['01']))]
states_matrix = list(zip(*list(states.values())))
relative_states = []
for current_states in history:
# Keep only timesteps where every agent's state is known.
if not all([s[1] for s in current_states]):
continue
root = current_states[0]
# Re-express all states in the root agent's local frame.
own, other = convert_states_to_local(root[1], current_states)
#root.position = np.zeros(3)
relative_states.append([s[1] for s in other])
result['S'] = [calculate_avg_dist(current_states) for current_states in relative_states]
# zip(*...) transposes per-timestep potentials into per-agent time series.
result['potentials'] = list(zip(*[calculate_exp_potential(current_states) for current_states in relative_states]))
print(len(result['potentials']))
result['snapshots'] = relative_states
result['ts'] = [d[0] for i, d in enumerate(experiments_data['states']) if all(states_matrix[i])]
return result
def get_raw_data(path, file):
    """Load the JSON result file *file* found under *path*.

    Returns (raw_result_dict, params_dict), the parameters being parsed
    from the file name by parse_params_from_filename().
    """
    with open(os.path.join(path, file)) as handle:
        raw = json.load(handle)
    return raw, parse_params_from_filename(file)
def get_sim_plot_data(sim_data, params):
# Convert one raw simulation log into the dict plot_experiments expects
# ('ts', 'potentials', 'snapshots', 'params').  Unlike get_plot_data, every
# agent's state comes directly from sim_data['states'] (global frame; no
# local-frame conversion) and the simulation potential gains are used.
result = {
'potentials': [],
'snapshots': [],
'ts': [],
'params': params,
}
# states[agent] = list of State objects over time, keyed by the agent names
# found in the first logged timestep.
states = {k: [state_from_json(s[1][k], params) for s in sim_data['states']] for k in sim_data['states'][0][1].keys()}
# history[i] = [(agent_name, state_at_step_i), ...]
history = [[(k, states[k][i]) for k in states.keys()] for i in range(len(states[next(iter(states))]))]
states_list = []
for current_states in history:
# Keep only timesteps where every agent's state is known.
if not all([s[1] for s in current_states]):
continue
#root.position = np.zeros(3)
states_list.append([s[1] for s in current_states])
# zip(*...) transposes per-timestep potentials into per-agent time series.
result['potentials'] = list(zip(*[calculate_sim_potential(current_states) for current_states in states_list]))
print(len(result['potentials']))
result['snapshots'] = states_list
result['ts'] = [d[0] for i, d in enumerate(sim_data['states']) if all(sim_data['states'][i][1].values())]
# The commented block below is an older phase-gated extraction path, kept
# for reference.
#history = sim_data['states']
#print(len(history))
#for t, states in history:
#for k, s in states.items():
#states[k] = state_from_json(s, params)
#if t > maxt[filename]:
# break
#if list(states.values())[0].small_phase == 0:
#result['ts'].append(t)
#result['potentials'].append(calculate_potential(list(states.values())))
#result['snapshots'].append(states)
#times = ts[filename]
#instants[filename] = [times[-3], times[-2], times[-1]]
#result['potentials'] = list(zip(*result['potentials']))
return result
# -
# Load and preprocess every Lipschitz-simulation log, skipping runs with
# T == 1 (presumably a degenerate/baseline period — TODO confirm).
sim_lipschitz_plot_data = {}
for experiment in sim_lipschitz:
print(experiment)
data, params = get_raw_data(LIPSCHITZ_RESULTS_DIR, experiment)
if params['T'] != 1:
sim_lipschitz_plot_data[experiment] = get_sim_plot_data(data, params)
#
# +
# Per-run plot settings (x range, snapshot instants, zoom window, output
# name) for the four Lipschitz simulation figures, applied positionally via
# zip below.
xlims = [
(-0.2, 40),
(-5, 600),
(-5, 600),
(-0.2, 40),
]
instantss = [
(0, 380, 1000),
(0, 333, 1000),
(0, 333, 1000),
(0, 380, 1000),
]
zooms = [
(35, 36, 0.5, 0.8, 20),
(535, 550, 0.5, 0.8, 20),
(535, 550, 0.5, 0.8, 20),
(35, 36, 0.5, 0.8, 20),
]
save_names = [
"sim-lipschitz-no-short",
"sim-lipschitz-no-long",
"sim-lipschitz-yes-long",
"sim-lipschitz-yes-short",
]
factors = [
7,
2,
6,
7,
]
# NOTE(review): `factor` is zipped in but never used — drop_samples_factor is
# hard-coded to 1 in the call below; presumably `drop_samples_factor=factor`
# was intended.  Also note dict iteration order pairs runs with settings.
for item, xlim, instants, zoom, save_name, factor in zip(sim_lipschitz_plot_data.items(), xlims, instantss, zooms, save_names, factors):
experiment_name, plot_data = item
print(experiment_name)
plot_experiments(plot_data, xlim=xlim, instants=instants, zoom=zoom, save_name=save_name, drop_samples_factor=1, ylim=True)
# first two runs: without the Lipschitz condition
# next two runs: with the Lipschitz condition
# +
# Plot the two selected real-drone experiments, pairing each with its zoom
# window, output name and x range positionally via zip.
zooms_exp = [
#(160, 178, -49, -42, 1.5),
(70, 80, -49, -44),
#(10, 20, -50, -40),
#(10, 20, -50, -40),
(50, 55, -47, -42),
]
save_names = [
"exp-5",
"exp-4",
]
xlims = [
(0, 87.1),
(0, 57.1),
]
for experiment, zoom, save_name, xlim in zip(experiments, zooms_exp, save_names, xlims):
print(experiment)
data, params = get_experiment_raw_data(experiment)
plot_data = get_plot_data(data, params)
plot_experiments(plot_data, zoom=zoom, save_name=save_name, xlim=xlim)
# +
# Plot the four async/sync comparison simulations; all four share the same
# x range, snapshot instants and fixed-size zoom inset.
t_end = 400
xlims = [
(-0, t_end),
]*4
instantss = [
(0, int(t_end/2), t_end),
]*4
zooms = [
(340, 350, 0.5, 0.7, 'fixed'),
]*4
# Explicit file names (os.listdir order would be unreliable here).
async_files = [
'2021-04-28 17:28:06.347950:J=0,K=0,M=1,T=0.01.json',
'2021-04-28 17:28:28.745012:J=0,K=0,M=1,T=0.01.json',
'2021-04-28 17:28:53.440962:J=0,K=0,M=1,T=0.01.json',
'2021-04-28 17:29:18.848159:J=0,K=0,M=1,T=0.01.json',
]
save_names = [
"sim-sync-nothing",
"sim-sync-sync",
"sim-sync-lipschitz",
"sim-sync-both",
]
factors = [
1,
1,
2,
2,
]
# NOTE(review): `factor` is zipped in but never used — drop_samples_factor is
# hard-coded to 1 in the call below; confirm whether that was intended.
for experiment, save_name, xlim, instants, zoom, factor in zip(async_files, save_names, xlims, instantss, zooms, factors):
print(experiment)
data, params = get_raw_data(SYNC_RESULTS_DIR, experiment)
plot_data = get_sim_plot_data(data, params)
plot_experiments(plot_data, save_name=save_name, xlim=xlim, instants=instants, zoom=zoom, drop_samples_factor=1, ylim=True)
# -
|
robot_framework/swarming_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, shutil
from pathlib import Path
from os.path import join as joinpath
from os import listdir
import sys, copy
import itertools, math
from functools import partial
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# from tqdm import tqdm_notebook as tqdm
# from tqdm.autonotebook import tqdm
# from IPython.display import display
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import sklearn.metrics
import PIL
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.models as models
import torch.autograd as autograd
import captum.attr
import scipy
# +
# Silence pandas SettingWithCopy warnings globally.
# NOTE(review): this hides genuine chained-assignment bugs too (plot_bar
# below mutates a passed-in DataFrame) — consider leaving the warning on.
pd.options.mode.chained_assignment = None
# Prefer GPU when available for all torch work in this notebook.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# Larger fonts and high-DPI savefig defaults for the result figures.
pltparams = {
'legend.fontsize': 'x-large',
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large',
'figure.titlesize': 'x-large',
'savefig.dpi': 600,
}
plt.rcParams.update(pltparams)
sns.set(font_scale = 1.2)
# + id="british-science" papermill={"duration": 0.028034, "end_time": "2021-05-19T16:05:23.350039", "exception": false, "start_time": "2021-05-19T16:05:23.322005", "status": "completed"} tags=[]
# Dataset/experiment locations (cluster-specific absolute base path).
basedir = "/mnt/beegfs/home/vu/Codalab-MetaDL"
datadir = joinpath(basedir, "data/")
random_seed = 2021
labelcolumn = "label"
filecolumn = "filename"
# +
# Alternative dataset selection (remote sensing), kept for reference.
# dataname = 'resisc45-label_embed'
# datadomain = "remotesensing"
# collectiondir = joinpath(datadir, "resisc45")
# imagedir = joinpath(collectiondir, "images")
# +
# Active dataset selection: curated skin-disease, 20-shot split.
dataname = 'sd_v2_curated_20shots'
datadomain = "sd_v2"
collectiondir = joinpath(datadir, "sd-skindisease")
imagedir = joinpath(datadir, "sd-skindisease/sd-v2-curated")
# +
# Output locations for figures, checkpoints and metric JSON files.
figdir = joinpath(basedir, 'fig', datadomain, dataname)
modeldir = joinpath(basedir, 'model', datadomain, dataname)
metricdir = joinpath(basedir, 'metric', datadomain, dataname)
# NOTE(review): the commented loop references `resultdir`, which is not
# defined anywhere in this cell — presumably it predates the rename to
# `metricdir`.
# for outputdir in (figdir, modeldir, resultdir):
# os.makedirs(outputdir, exist_ok=True)
# -
# ## Regularize
# +
def filter_resultfile(resultfile):
    """Return True for .json metric files from the wanted regularization runs.

    A file name is kept when it ends in '.json', contains none of the
    excluded run tags, and contains at least one of the included run tags.
    """
    wanted_tags = ("nodropout", "dropout0101", "dropout0202", "weightdecay10")
    unwanted_tags = (
        "weightdecay5", "weightdecay30", "weightdecay40",
        "nodropout_sgd-imagesize", "dropout0201", "dropblock",
    )
    if not resultfile.endswith(".json"):
        return False
    for tag in unwanted_tags:
        if tag in resultfile:
            return False
    return any(tag in resultfile for tag in wanted_tags)
def filter_resultdir(resultdir):
    """List the files in *resultdir* that pass filter_resultfile.

    The original body also built an ``includes`` list that was never used
    (the include/exclude logic lives entirely in filter_resultfile); the
    dead local has been removed.
    """
    return [name for name in listdir(resultdir) if filter_resultfile(name)]
filter_resultdir(metricdir)
# +
def extract_settings(resultfilename):
    """Return the settings token between 'sgd_' and the following '-'.

    E.g. 'metric_sgd_nodropout-lr0.1.json' -> 'nodropout'.  Assumes the
    name contains 'sgd_' followed later by a '-'; behaviour is undefined
    (garbage slice) when either marker is missing, matching the original.
    """
    token_start = resultfilename.find("sgd_") + 4
    token_length = resultfilename[token_start:].find('-')
    return resultfilename[token_start:token_start + token_length]
list(map(extract_settings, filter_resultdir(metricdir)))
# +
def plot_ax(ax, metric, dictkey, title, label):
    """Plot the series metric[dictkey] on *ax*, set its title, refresh legend."""
    series = metric[dictkey]
    ax.plot(series, label=label)
    ax.set_title(title)
    ax.legend()
def plot_metrics(resultdir, figsize=(24, 8)):
# Draw a 1x4 panel (train loss, valid loss, train accuracy, valid accuracy)
# overlaying one curve per metric JSON file in *resultdir* that passes
# filter_resultdir; each curve is labelled with the run's settings token
# (extract_settings).  Returns the figure.
fig, axes = plt.subplots(1, 4, figsize=figsize)
axes = axes.flatten()
filtered_files = filter_resultdir(resultdir)
for file in filtered_files:
label = extract_settings(file)
with open(joinpath(resultdir, file)) as resultjson:
metric = json.load(resultjson)
plot_ax(axes[0], metric, "train_loss", "train_crossentropy", label)
plot_ax(axes[1], metric, "valid_loss", "valid_crossentropy", label)
plot_ax(axes[2], metric, "train_score", "train_accuracy", label)
plot_ax(axes[3], metric, "valid_score", "valid_accuracy", label)
# axes[0].plot(metric['valid_loss'], label=label)
# axes[0].set_title("valid_crossentropyloss")
# axes[0].legend()
# axes[1].plot(metric['valid_score'], label=label)
# axes[1].set_title("valid_accuracy")
# axes[1].legend()
return fig
fig = plot_metrics(metricdir)
# -
fig.savefig(joinpath(basedir, "fig", datadomain, "regularization.png"), bbox_inches='tight')
# ## Result
# Benchmark dataset names, one per line; the trailing backslashes keep the
# literal free of leading/trailing blank entries after split('\n').
datanames = """\
Multiderma
Mini_Plant Village
Medleaf
Mini_RESISC
Mini_RSICB
Mini_RSD
Insects
Plankton
Flowers
Textures
Texture_DTD
Texture_ALOT
OmniPrint_MD-mix
OmniPrint_MD6
OmniPrint_MD5bis\
""".split('\n')
# .replace(' ', '_')
datanames
allclasses20shots = \
"""
0.4486666667
0.8373333333
0.9240333333
0.7033333333
0.9077333333
0.4995666667
0.3993666667
0.6016483516
0.8583333333
0.9426666667
0.5417666667
0.9795333333
0.6328
0.6184666667
0.6807"""
allclasses20shots = np.fromstring(allclasses20shots, dtype=float, sep='\n')
allclasses20shots
# +
way5shot5 = \
"""
0.364
0.5259863946
0.5721904762
0.4639153439
0.5324867725
0.435978836
0.341038961
0.3918518519
0.6617142857
0.6017460317
0.5392592593
0.776
0.3088551165
0.3205170068
0.2883485309
"""
way5shot5 = np.fromstring(way5shot5, dtype=float, sep='\n')
way5shot5
# -
numberclasses= \
"""
51
37
27
45
45
46
114
91
102
64
47
250
706
703
706
"""
numberclasses = np.fromstring(numberclasses, dtype=int, sep='\n')
numberclasses
ci = \
"""
0.03052284514
0.02659124585
0.02234675902
0.02984350259
0.01890759861
0.03230963106
0.0201038516
0.02249184346
0.01513222585
0.01273602075
0.03185236061
0.003924679864
0.007951028213
0.008029487072
0.007689817936
"""
ci = np.fromstring(ci, dtype=float, sep='\n')
ci
# Assemble the baseline-results table: one row per dataset with its two
# accuracy settings, class count and confidence interval half-width.
# (All column arrays were parsed above and share the datanames ordering.)
resultdf = pd.DataFrame(
{
'dataname': datanames,
'all classes 20 shots': allclasses20shots,
'5 ways 5 shots': way5shot5,
'numberclasses': numberclasses,
'ci': ci
})
# index = datanames)
resultdf
# +
def plot_result(resultdf, figsize=(20, 3)):
# Scatter the two accuracy columns of *resultdf* against dataset index,
# labelling ticks with "name (#classes)".  Returns the figure.
fig = plt.figure(figsize=figsize)
for column in "all classes 20 shots;5 ways 5 shots".split(';'):
ax = sns.scatterplot(data=resultdf, y=column, x=range(len(resultdf)), label=column, s=100, alpha=0.9)
# ax = sns.barplot(data=resultdf, y=column, x=resultdf.index, label=column)
ax.set_xticks(range(len(resultdf)))
# NOTE(review): replacing every ' ' with '\n' also wraps spaces inside
# dataset names (e.g. "Mini_Plant Village") — confirm that is intended.
ax.set_xticklabels(resultdf.dataname.str.cat('(' + resultdf['numberclasses'].astype(str) + ')', sep=' ')
.str.replace(' ', '\n'))
# ax2 = ax.twinx()
# sns.scatterplot(data=resultdf, y='numberclasses', x=range(len(resultdf)), label='# of classes')
ax.set(ylabel="accuracy", xlabel="dataset and (# of classes)")
# ax.figure.legend()
return fig
# fig = plot_result(resultdf)
# fig.savefig(joinpath(basedir, "fig", "baseline-result.png"), bbox_inches='tight')
# +
#https://stackoverflow.com/questions/52028043/side-by-side-barplot
#https://stackoverflow.com/questions/52028043/side-by-side-barplot
def plot_bar(resultdf, figsize=(20, 3)):
# Side-by-side bar plot of the two accuracy columns per dataset; melts the
# frame to long format so seaborn can hue by setting.  Returns the figure.
fig = plt.figure(figsize=figsize)
# NOTE(review): this rewrites resultdf['dataname'] on the caller's
# DataFrame in place (the chained-assignment warning was silenced above) —
# repeated calls keep inserting newlines; consider operating on a copy.
resultdf['dataname'] = resultdf['dataname'].str.replace('_', '\n')
resultmelt = pd.melt(resultdf["dataname;all classes 20 shots;5 ways 5 shots".split(';')], id_vars='dataname')
ax = sns.barplot(data=resultmelt, x='dataname', y='value', hue='variable')
ax.set_xticks(range(len(resultdf)))
ax.set_xticklabels(resultdf.dataname.str.cat('(' + resultdf['numberclasses'].astype(str) + ')', sep='_')
.str.replace('_', '\n'))
ax.set(ylabel="Accuracy", xlabel="Dataset and (# of classes)")
ax.legend().set_title(None)
return fig
fig = plot_bar(resultdf)
fig.savefig(joinpath(basedir, "fig", "baseline-result-bar.png"), bbox_inches='tight')
# -
|
src/plot/plot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Analyzing OpenFOAM Data in Python
# ## Saving model data as binary files
# The MATLAB scripts that I've written for loading the OpenFOAM output text files and saving out as binary .mat files...
# * Paper1_saveModelFreeSurf_MATfiles.m
# * Paper1_saveModelFields_MATfiles.m
#
# basically loop through the model folder, iteratively load the postProcessing text files, and then output the data into a data structure.
# To do this same process in Python, my workflow must change slightly. Now, because the model files are so large for Paper2, I have to store them as ZIP files on an external hard drive. Hence, the overall workflow for my new Python script should be something like this:
#
# 0. Resample the T_w = 5 and 10 s models using Bash:
# * Zip model files on DISCO2 in sequence and save to NORRIS10TB
# * Consolidate model files on NORRIS10TB and NORRIS12TB so each directory is identical
# * Copy zipped files to TROPICS for backup
# * Bash script to unpack each file from external drive, run OpenFOAM postProcessing routine on modeling PC, then rezip file and move to new folder on external drive
# * Once reprocessing step is complete, delete "old" version of model files (will still be backed up on TROPICS).
# 1. In Python, use os module to copy ZIP file from external drive and unpack on modeling PC
# 2. In the postProcessing folder, read each text file into a numpy array. Then create a dictionary from each file.
# 3. Save the dictionary to disk using the **pickle** module to write to binary.
#
|
Paper2_OptimizingRestoration/Notebooks/Analyzing_OpenFOAM_with_Python.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Install required packages if not available
import sys
# !{sys.executable} -m pip install matplotlib #
# !{sys.executable} -m pip install scikit-learn # (imported in code as "sklearn")
# !{sys.executable} -m pip install pandas #
# !{sys.executable} -m pip install plotly #
# !{sys.executable} -m pip install seaborn #
# NOTE: the original list also ran "pip install python", which is not a real PyPI package and was removed.
# -
# # Lesson 2: Regression
# ## Review
# **Question** What is regression?
#
# **Question** What kind of data do we usually use for regression?
# Let's review the example we saw last time. The code below reads our data points from a file and then creates a scatterplot.
# +
# import necessary libraries
import pandas
import numpy
import matplotlib.pyplot as plt
# read data from the synthetic data file
# (assumes columns 'apple' [lb] and 'prices' [$] — see the plot labels below)
data = pandas.read_csv("../assets/regression-synthetic.csv")
# plot data and label the x-axis and y-axis
plt.scatter(data['apple'], data['prices'], color='green')
plt.title("Apples and Prices")
plt.xlabel("Apples (lb)")
plt.ylabel("Price ($)")
# We were using this data as an example to see whether, given a few data points about apple quantities and their prices, we could learn an equation (like the linear equation of the form $y = mx + c$) from the data.
# In the last class, we saw how one black line that we learned was the "best fit" for the data. But this poses many questions.
# 1. How do we know that that line is the best fit?
# 2. Is it possible that there are other lines that are best for the data?
# We will answer all of these questions and more in detail as part of this lecture. First, let us see how to decide if any given line is the "best fit" for the data. In order to do so, we need to see what are the other candidate lines for our analysis and how to calculate error.
# Plot the data together with three candidate regression lines in the same
# figure so we can visually compare how well each one fits.
x = numpy.linspace(0, 12, 1000)  # evenly spaced x values used to draw the lines
plt.scatter(data['apple'], data['prices'], color='green')
plt.plot(x, 2*x+1, color='red')       # candidate line y = 2x + 1
plt.plot(x, 1*x+2, color='blue')      # candidate line y = x + 2
plt.plot(x, 1.5*x+3, color='purple')  # candidate line y = 1.5x + 3
plt.title("Apples and Prices")
plt.xlabel("Apples (lb)")
plt.ylabel("Price ($)")
# **Question** Can you guess which line would be the best fitting line?
# +
# Visualize the residuals of the blue candidate line y = x + 2: draw a
# vertical black segment from every observation to the line's prediction.
x = numpy.linspace(0, 12, 1000)
plt.scatter(data['apple'], data['prices'], color='green')
plt.plot(x, 1*x+2, color='blue')
xData = numpy.array(data['apple'])
yData = numpy.array(data['prices'])
modelPredictions = 1 * xData + 2  # prediction of y = x + 2 at every observed x
# Connect each observed point to its prediction (same x, differing y).
for obsX, obsY, predY in zip(xData, yData, modelPredictions):
    plt.plot((obsX, obsX), (obsY, predY), color='black')
plt.title("Apples and Prices")
plt.xlabel("Apples (lb)")
plt.ylabel("Price ($)")
# -
# Calculate the root-mean-square error (RMSE) of the blue line y = x + 2.
# FIX: the residuals must be squared before averaging; the previous code
# printed sqrt(mean(|error|)), which is not a standard error metric.
error1 = abs(yData - modelPredictions)
print("Residual errors are: ", numpy.sqrt(numpy.mean(error1**2)))
# +
# Visualize the residuals of the purple candidate line y = 1.5x + 3: draw a
# vertical black segment from every observation to the line's prediction.
x = numpy.linspace(0, 12, 1000)
plt.scatter(data['apple'], data['prices'], color='green')
plt.plot(x, 1.5*x+3, color='purple')
xData = numpy.array(data['apple'])
yData = numpy.array(data['prices'])
modelPredictions = 1.5 * xData + 3  # prediction of y = 1.5x + 3 at every observed x
# Connect each observed point to its prediction (same x, differing y).
for obsX, obsY, predY in zip(xData, yData, modelPredictions):
    plt.plot((obsX, obsX), (obsY, predY), color='black')
plt.title("Apples and Prices")
plt.xlabel("Apples (lb)")
plt.ylabel("Price ($)")
# -
# Calculate the root-mean-square error (RMSE) of the purple line y = 1.5x + 3.
# FIX: the residuals must be squared before averaging; the previous code
# printed sqrt(mean(|error|)), which is not a standard error metric.
error2 = abs(yData - modelPredictions)
print("Residual errors are: ", numpy.sqrt(numpy.mean(error2**2)))
# +
# Visualize the residuals of the red candidate line y = 2x + 1: draw a
# vertical black segment from every observation to the line's prediction.
x = numpy.linspace(0, 12, 1000)
plt.scatter(data['apple'], data['prices'], color='green')
plt.plot(x, 2*x+1, color='red')
xData = numpy.array(data['apple'])
yData = numpy.array(data['prices'])
modelPredictions = 2 * xData + 1  # prediction of y = 2x + 1 at every observed x
# Connect each observed point to its prediction (same x, differing y).
for obsX, obsY, predY in zip(xData, yData, modelPredictions):
    plt.plot((obsX, obsX), (obsY, predY), color='black')
plt.title("Apples and Prices")
plt.xlabel("Apples (lb)")
plt.ylabel("Price ($)")
# -
# Calculate the root-mean-square error (RMSE) of the red line y = 2x + 1.
# FIX: the residuals must be squared before averaging; the previous code
# printed sqrt(mean(|error|)), which is not a standard error metric.
error3 = abs(yData - modelPredictions)
print("Residual errors are: ", numpy.sqrt(numpy.mean(error3**2)))
# That is how we determine that the red line is the "best fitting" line! It is the line with the least amount of residual error!
# While here, we have determined that linear regression is the best choice for prediction, keep in mind that for more complex data sets, a curved line could also fit the data very well. Whenever visualization is possible, we should plot the data first to see what kind of model is likely to fit it well.
# # Regression for the World Happiness Dataset
# We're going to take the World Happiness Dataset we examined last time and apply regression to it.
#
# +
# Load the 2015 World Happiness dataset into a DataFrame.
import pandas as pd
df = pd.read_csv("../assets/happinessDataset/2015.csv")
# View first five rows of the dataset
df.head()
# Keep only the target column (Happiness Score) and the candidate feature columns.
from sklearn.preprocessing import StandardScaler
df = df[["Happiness Score", 'Economy (GDP per Capita)', 'Family',
        'Health (Life Expectancy)', 'Freedom', 'Trust (Government Corruption)',
        'Generosity', 'Dystopia Residual']]
# Separate training and testing datasets: 90% random sample for training,
# the remaining rows for testing.
regression_data=df.sample(frac=0.9,random_state=200) #random_state is a seed value, so the split is reproducible
regression_test_data = df.drop(regression_data.index)
#regression_data = regression_data.iloc[:150]
#regression_test_data = regression_data.iloc[-8:]
# NOTE(review): this scaler is instantiated but never fitted or applied below,
# so the data is NOT actually standardized — confirm whether scaling was intended.
ss = StandardScaler()
# -
# We've seen how to do classification and clustering on this dataset now. We will now see if we can predict happiness score for a new country by learning a linear regression model using just one feature.
# First, we visualize how the Happiness score changes with the economy. We can see how a linear fit would help us predict the happiness, given any value for the Economy variable for a new country.
# Scatter plot of Happiness Score against Economy (GDP per Capita).
# NOTE(review): the x-axis label says "standardized", but no scaling has been
# applied to the data above — presumably the label or the pipeline is stale.
plt.scatter(regression_data['Economy (GDP per Capita)'],regression_data['Happiness Score'])
plt.title('World Happiness Dataset')
plt.ylabel('Happiness Score')
plt.xlabel('Economy (GDP per capita) standardized')
# +
# Fit an ordinary least-squares line that predicts Happiness Score from the
# Economy feature, then overlay the fitted line on the training scatter plot.
from sklearn import linear_model
reg = linear_model.LinearRegression()
# sklearn expects a 2-D feature matrix, hence the reshape to one column.
economyFeature = numpy.array(regression_data['Economy (GDP per Capita)']).reshape(-1,1)
happinessTarget = numpy.array(regression_data['Happiness Score'])
reg.fit(economyFeature, happinessTarget)
# Draw the learned line y = coef * x + intercept on top of the data.
plt.scatter(regression_data['Economy (GDP per Capita)'],regression_data['Happiness Score'])
plt.plot(regression_data['Economy (GDP per Capita)'], reg.coef_*regression_data['Economy (GDP per Capita)']+reg.intercept_, color='red')
plt.title('World Happiness Dataset')
plt.ylabel('Happiness Score')
plt.xlabel('Economy (GDP per capita) standardized')
# -
# The slope and intercept of the best fitting line is given by:
# Report the learned parameters of the fitted line (slope and intercept).
print("Slope: ", reg.coef_)
print("Intercept: ", reg.intercept_)
# Now, if you are given the economy data for a new country, you will be able to use the model to predict what the happiness score would be! Let's take an example.
# +
# Evaluate the fitted model on the held-out test set.
predictions = reg.predict(numpy.array(regression_test_data['Economy (GDP per Capita)']).reshape(-1,1))
true_data = numpy.array(regression_test_data['Happiness Score'])
# Root-mean-square error between the true values and the predictions.
# FIX: a mean SQUARED error requires squaring the residuals; the previous
# code computed sqrt(mean(|error|)), which is not a standard error metric.
mse = (true_data - predictions) ** 2
print("Residual errors are: ", numpy.sqrt(numpy.mean(mse)))
# -
# **Activity** Perform linear regression using the feature Health (Life Expectancy) to predict the happiness score.
|
lessons/regression-lesson.ipynb
|